[gentoo-zh.git] sys-kernel/xanmod-hybird/files/patch-5.12.8-xanmod1-cacule
1 diff --git a/.config b/.config
2 new file mode 100644
3 index 000000000000..43ad2064e2c5
4 --- /dev/null
5 +++ b/.config
6 @@ -0,0 +1,11069 @@
7 +#
8 +# Automatically generated file; DO NOT EDIT.
9 +# Linux/x86 5.12.8 Kernel Configuration
11 +CONFIG_CC_VERSION_TEXT="gcc-11 (Debian 11.1.0-2) 11.1.0"
12 +CONFIG_CC_IS_GCC=y
13 +CONFIG_GCC_VERSION=110100
14 +CONFIG_CLANG_VERSION=0
15 +CONFIG_LD_IS_BFD=y
16 +CONFIG_LD_VERSION=23502
17 +CONFIG_LLD_VERSION=0
18 +CONFIG_CC_CAN_LINK=y
19 +CONFIG_CC_CAN_LINK_STATIC=y
20 +CONFIG_CC_HAS_ASM_GOTO=y
21 +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
22 +CONFIG_CC_HAS_ASM_INLINE=y
23 +CONFIG_IRQ_WORK=y
24 +CONFIG_BUILDTIME_TABLE_SORT=y
25 +CONFIG_THREAD_INFO_IN_TASK=y
28 +# General setup
30 +CONFIG_INIT_ENV_ARG_LIMIT=32
31 +# CONFIG_COMPILE_TEST is not set
32 +CONFIG_LOCALVERSION=""
33 +# CONFIG_LOCALVERSION_AUTO is not set
34 +CONFIG_BUILD_SALT=""
35 +CONFIG_HAVE_KERNEL_GZIP=y
36 +CONFIG_HAVE_KERNEL_BZIP2=y
37 +CONFIG_HAVE_KERNEL_LZMA=y
38 +CONFIG_HAVE_KERNEL_XZ=y
39 +CONFIG_HAVE_KERNEL_LZO=y
40 +CONFIG_HAVE_KERNEL_LZ4=y
41 +CONFIG_HAVE_KERNEL_ZSTD=y
42 +# CONFIG_KERNEL_GZIP is not set
43 +# CONFIG_KERNEL_BZIP2 is not set
44 +# CONFIG_KERNEL_LZMA is not set
45 +# CONFIG_KERNEL_XZ is not set
46 +# CONFIG_KERNEL_LZO is not set
47 +# CONFIG_KERNEL_LZ4 is not set
48 +CONFIG_KERNEL_ZSTD=y
49 +CONFIG_DEFAULT_INIT=""
50 +CONFIG_DEFAULT_HOSTNAME="(none)"
51 +CONFIG_SWAP=y
52 +CONFIG_SYSVIPC=y
53 +CONFIG_SYSVIPC_SYSCTL=y
54 +CONFIG_POSIX_MQUEUE=y
55 +CONFIG_POSIX_MQUEUE_SYSCTL=y
56 +CONFIG_WATCH_QUEUE=y
57 +CONFIG_CROSS_MEMORY_ATTACH=y
58 +CONFIG_USELIB=y
59 +CONFIG_AUDIT=y
60 +CONFIG_HAVE_ARCH_AUDITSYSCALL=y
61 +CONFIG_AUDITSYSCALL=y
64 +# IRQ subsystem
66 +CONFIG_GENERIC_IRQ_PROBE=y
67 +CONFIG_GENERIC_IRQ_SHOW=y
68 +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
69 +CONFIG_GENERIC_PENDING_IRQ=y
70 +CONFIG_GENERIC_IRQ_MIGRATION=y
71 +CONFIG_HARDIRQS_SW_RESEND=y
72 +CONFIG_GENERIC_IRQ_CHIP=y
73 +CONFIG_IRQ_DOMAIN=y
74 +CONFIG_IRQ_DOMAIN_HIERARCHY=y
75 +CONFIG_GENERIC_MSI_IRQ=y
76 +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
77 +CONFIG_IRQ_MSI_IOMMU=y
78 +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y
79 +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y
80 +CONFIG_IRQ_FORCED_THREADING=y
81 +CONFIG_SPARSE_IRQ=y
82 +# CONFIG_GENERIC_IRQ_DEBUGFS is not set
83 +# end of IRQ subsystem
85 +CONFIG_CLOCKSOURCE_WATCHDOG=y
86 +CONFIG_ARCH_CLOCKSOURCE_INIT=y
87 +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
88 +CONFIG_GENERIC_TIME_VSYSCALL=y
89 +CONFIG_GENERIC_CLOCKEVENTS=y
90 +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
91 +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
92 +CONFIG_GENERIC_CMOS_UPDATE=y
93 +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y
94 +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
97 +# Timers subsystem
99 +CONFIG_TICK_ONESHOT=y
100 +CONFIG_NO_HZ_COMMON=y
101 +# CONFIG_HZ_PERIODIC is not set
102 +# CONFIG_NO_HZ_IDLE is not set
103 +CONFIG_NO_HZ_FULL=y
104 +CONFIG_CONTEXT_TRACKING=y
105 +# CONFIG_CONTEXT_TRACKING_FORCE is not set
106 +# CONFIG_NO_HZ is not set
107 +CONFIG_HIGH_RES_TIMERS=y
108 +# end of Timers subsystem
110 +# CONFIG_PREEMPT_NONE is not set
111 +# CONFIG_PREEMPT_VOLUNTARY is not set
112 +CONFIG_PREEMPT=y
113 +CONFIG_PREEMPT_COUNT=y
114 +CONFIG_PREEMPTION=y
115 +CONFIG_PREEMPT_DYNAMIC=y
118 +# CPU/Task time and stats accounting
120 +CONFIG_VIRT_CPU_ACCOUNTING=y
121 +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
122 +# CONFIG_IRQ_TIME_ACCOUNTING is not set
123 +CONFIG_BSD_PROCESS_ACCT=y
124 +CONFIG_BSD_PROCESS_ACCT_V3=y
125 +CONFIG_TASKSTATS=y
126 +CONFIG_TASK_DELAY_ACCT=y
127 +CONFIG_TASK_XACCT=y
128 +CONFIG_TASK_IO_ACCOUNTING=y
129 +CONFIG_PSI=y
130 +CONFIG_PSI_DEFAULT_DISABLED=y
131 +# end of CPU/Task time and stats accounting
133 +CONFIG_CPU_ISOLATION=y
136 +# RCU Subsystem
138 +CONFIG_TREE_RCU=y
139 +CONFIG_PREEMPT_RCU=y
140 +CONFIG_RCU_EXPERT=y
141 +CONFIG_SRCU=y
142 +CONFIG_TREE_SRCU=y
143 +CONFIG_TASKS_RCU_GENERIC=y
144 +CONFIG_TASKS_RCU=y
145 +CONFIG_TASKS_TRACE_RCU=y
146 +CONFIG_RCU_STALL_COMMON=y
147 +CONFIG_RCU_NEED_SEGCBLIST=y
148 +CONFIG_RCU_FANOUT=64
149 +CONFIG_RCU_FANOUT_LEAF=16
150 +# CONFIG_RCU_FAST_NO_HZ is not set
151 +CONFIG_RCU_BOOST=y
152 +CONFIG_RCU_BOOST_DELAY=0
153 +CONFIG_RCU_NOCB_CPU=y
154 +# CONFIG_TASKS_TRACE_RCU_READ_MB is not set
155 +# end of RCU Subsystem
157 +CONFIG_BUILD_BIN2C=y
158 +# CONFIG_IKCONFIG is not set
159 +CONFIG_IKHEADERS=m
160 +CONFIG_LOG_BUF_SHIFT=18
161 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
162 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
163 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
166 +# Scheduler features
168 +CONFIG_UCLAMP_TASK=y
169 +CONFIG_UCLAMP_BUCKETS_COUNT=5
170 +# end of Scheduler features
172 +CONFIG_CACULE_SCHED=y
173 +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
174 +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
175 +CONFIG_CC_HAS_INT128=y
176 +CONFIG_ARCH_SUPPORTS_INT128=y
177 +CONFIG_NUMA_BALANCING=y
178 +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
179 +CONFIG_CGROUPS=y
180 +CONFIG_PAGE_COUNTER=y
181 +CONFIG_MEMCG=y
182 +CONFIG_MEMCG_SWAP=y
183 +CONFIG_MEMCG_KMEM=y
184 +CONFIG_BLK_CGROUP=y
185 +CONFIG_CGROUP_WRITEBACK=y
186 +CONFIG_CGROUP_SCHED=y
187 +CONFIG_FAIR_GROUP_SCHED=y
188 +CONFIG_CFS_BANDWIDTH=y
189 +# CONFIG_RT_GROUP_SCHED is not set
190 +CONFIG_UCLAMP_TASK_GROUP=y
191 +CONFIG_CGROUP_PIDS=y
192 +CONFIG_CGROUP_RDMA=y
193 +CONFIG_CGROUP_FREEZER=y
194 +CONFIG_CGROUP_HUGETLB=y
195 +CONFIG_CPUSETS=y
196 +CONFIG_PROC_PID_CPUSET=y
197 +CONFIG_CGROUP_DEVICE=y
198 +CONFIG_CGROUP_CPUACCT=y
199 +CONFIG_CGROUP_PERF=y
200 +CONFIG_CGROUP_BPF=y
201 +# CONFIG_CGROUP_DEBUG is not set
202 +CONFIG_SOCK_CGROUP_DATA=y
203 +CONFIG_NAMESPACES=y
204 +CONFIG_UTS_NS=y
205 +CONFIG_TIME_NS=y
206 +CONFIG_IPC_NS=y
207 +CONFIG_USER_NS=y
208 +CONFIG_PID_NS=y
209 +CONFIG_NET_NS=y
210 +CONFIG_CHECKPOINT_RESTORE=y
211 +CONFIG_SCHED_AUTOGROUP=y
212 +CONFIG_SCHED_AUTOGROUP_DEFAULT_ENABLED=y
213 +# CONFIG_SYSFS_DEPRECATED is not set
214 +CONFIG_RELAY=y
215 +CONFIG_BLK_DEV_INITRD=y
216 +CONFIG_INITRAMFS_SOURCE=""
217 +CONFIG_RD_GZIP=y
218 +CONFIG_RD_BZIP2=y
219 +CONFIG_RD_LZMA=y
220 +CONFIG_RD_XZ=y
221 +CONFIG_RD_LZO=y
222 +CONFIG_RD_LZ4=y
223 +CONFIG_RD_ZSTD=y
224 +CONFIG_BOOT_CONFIG=y
225 +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
226 +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
227 +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
228 +CONFIG_LD_ORPHAN_WARN=y
229 +CONFIG_SYSCTL=y
230 +CONFIG_HAVE_UID16=y
231 +CONFIG_SYSCTL_EXCEPTION_TRACE=y
232 +CONFIG_HAVE_PCSPKR_PLATFORM=y
233 +CONFIG_BPF=y
234 +CONFIG_EXPERT=y
235 +CONFIG_UID16=y
236 +CONFIG_MULTIUSER=y
237 +CONFIG_SGETMASK_SYSCALL=y
238 +CONFIG_SYSFS_SYSCALL=y
239 +CONFIG_FHANDLE=y
240 +CONFIG_POSIX_TIMERS=y
241 +CONFIG_PRINTK=y
242 +CONFIG_PRINTK_NMI=y
243 +CONFIG_BUG=y
244 +CONFIG_ELF_CORE=y
245 +CONFIG_PCSPKR_PLATFORM=y
246 +CONFIG_BASE_FULL=y
247 +CONFIG_FUTEX=y
248 +CONFIG_FUTEX2=y
249 +CONFIG_FUTEX_PI=y
250 +CONFIG_EPOLL=y
251 +CONFIG_SIGNALFD=y
252 +CONFIG_TIMERFD=y
253 +CONFIG_EVENTFD=y
254 +CONFIG_SHMEM=y
255 +CONFIG_AIO=y
256 +CONFIG_IO_URING=y
257 +CONFIG_ADVISE_SYSCALLS=y
258 +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y
259 +CONFIG_MEMBARRIER=y
260 +CONFIG_KALLSYMS=y
261 +CONFIG_KALLSYMS_ALL=y
262 +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
263 +CONFIG_KALLSYMS_BASE_RELATIVE=y
264 +CONFIG_BPF_SYSCALL=y
265 +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
266 +CONFIG_BPF_JIT_ALWAYS_ON=y
267 +CONFIG_BPF_JIT_DEFAULT_ON=y
268 +CONFIG_USERMODE_DRIVER=y
269 +# CONFIG_BPF_PRELOAD is not set
270 +CONFIG_USERFAULTFD=y
271 +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
272 +CONFIG_KCMP=y
273 +CONFIG_RSEQ=y
274 +# CONFIG_DEBUG_RSEQ is not set
275 +# CONFIG_EMBEDDED is not set
276 +CONFIG_HAVE_PERF_EVENTS=y
277 +CONFIG_PC104=y
280 +# Kernel Performance Events And Counters
282 +CONFIG_PERF_EVENTS=y
283 +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
284 +# end of Kernel Performance Events And Counters
286 +CONFIG_VM_EVENT_COUNTERS=y
287 +CONFIG_SLUB_DEBUG=y
288 +# CONFIG_COMPAT_BRK is not set
289 +# CONFIG_SLAB is not set
290 +CONFIG_SLUB=y
291 +# CONFIG_SLOB is not set
292 +CONFIG_SLAB_MERGE_DEFAULT=y
293 +CONFIG_SLAB_FREELIST_RANDOM=y
294 +CONFIG_SLAB_FREELIST_HARDENED=y
295 +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
296 +CONFIG_SLUB_CPU_PARTIAL=y
297 +CONFIG_SYSTEM_DATA_VERIFICATION=y
298 +CONFIG_PROFILING=y
299 +# end of General setup
301 +CONFIG_64BIT=y
302 +CONFIG_X86_64=y
303 +CONFIG_X86=y
304 +CONFIG_INSTRUCTION_DECODER=y
305 +CONFIG_OUTPUT_FORMAT="elf64-x86-64"
306 +CONFIG_LOCKDEP_SUPPORT=y
307 +CONFIG_STACKTRACE_SUPPORT=y
308 +CONFIG_MMU=y
309 +CONFIG_ARCH_MMAP_RND_BITS_MIN=28
310 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32
311 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
312 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
313 +CONFIG_GENERIC_ISA_DMA=y
314 +CONFIG_GENERIC_BUG=y
315 +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
316 +CONFIG_ARCH_MAY_HAVE_PC_FDC=y
317 +CONFIG_GENERIC_CALIBRATE_DELAY=y
318 +CONFIG_ARCH_HAS_CPU_RELAX=y
319 +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
320 +CONFIG_ARCH_HAS_FILTER_PGPROT=y
321 +CONFIG_HAVE_SETUP_PER_CPU_AREA=y
322 +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
323 +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
324 +CONFIG_ARCH_HIBERNATION_POSSIBLE=y
325 +CONFIG_ARCH_SUSPEND_POSSIBLE=y
326 +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
327 +CONFIG_ZONE_DMA32=y
328 +CONFIG_AUDIT_ARCH=y
329 +CONFIG_HAVE_INTEL_TXT=y
330 +CONFIG_X86_64_SMP=y
331 +CONFIG_ARCH_SUPPORTS_UPROBES=y
332 +CONFIG_FIX_EARLYCON_MEM=y
333 +CONFIG_DYNAMIC_PHYSICAL_MASK=y
334 +CONFIG_PGTABLE_LEVELS=5
335 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
338 +# Processor type and features
340 +CONFIG_ZONE_DMA=y
341 +CONFIG_SMP=y
342 +CONFIG_X86_FEATURE_NAMES=y
343 +CONFIG_X86_X2APIC=y
344 +CONFIG_X86_MPPARSE=y
345 +# CONFIG_GOLDFISH is not set
346 +CONFIG_RETPOLINE=y
347 +CONFIG_X86_CPU_RESCTRL=y
348 +CONFIG_X86_EXTENDED_PLATFORM=y
349 +CONFIG_X86_NUMACHIP=y
350 +# CONFIG_X86_VSMP is not set
351 +CONFIG_X86_UV=y
352 +# CONFIG_X86_GOLDFISH is not set
353 +# CONFIG_X86_INTEL_MID is not set
354 +CONFIG_X86_INTEL_LPSS=y
355 +CONFIG_X86_AMD_PLATFORM_DEVICE=y
356 +CONFIG_IOSF_MBI=y
357 +CONFIG_IOSF_MBI_DEBUG=y
358 +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
359 +CONFIG_SCHED_OMIT_FRAME_POINTER=y
360 +CONFIG_HYPERVISOR_GUEST=y
361 +CONFIG_PARAVIRT=y
362 +CONFIG_PARAVIRT_XXL=y
363 +# CONFIG_PARAVIRT_DEBUG is not set
364 +CONFIG_PARAVIRT_SPINLOCKS=y
365 +CONFIG_X86_HV_CALLBACK_VECTOR=y
366 +CONFIG_XEN=y
367 +CONFIG_XEN_PV=y
368 +CONFIG_XEN_512GB=y
369 +CONFIG_XEN_PV_SMP=y
370 +CONFIG_XEN_DOM0=y
371 +CONFIG_XEN_PVHVM=y
372 +CONFIG_XEN_PVHVM_SMP=y
373 +CONFIG_XEN_PVHVM_GUEST=y
374 +CONFIG_XEN_SAVE_RESTORE=y
375 +# CONFIG_XEN_DEBUG_FS is not set
376 +CONFIG_XEN_PVH=y
377 +CONFIG_KVM_GUEST=y
378 +CONFIG_ARCH_CPUIDLE_HALTPOLL=y
379 +CONFIG_PVH=y
380 +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
381 +CONFIG_PARAVIRT_CLOCK=y
382 +CONFIG_JAILHOUSE_GUEST=y
383 +CONFIG_ACRN_GUEST=y
384 +# CONFIG_MK8 is not set
385 +# CONFIG_MK8SSE3 is not set
386 +# CONFIG_MK10 is not set
387 +# CONFIG_MBARCELONA is not set
388 +# CONFIG_MBOBCAT is not set
389 +# CONFIG_MJAGUAR is not set
390 +# CONFIG_MBULLDOZER is not set
391 +# CONFIG_MPILEDRIVER is not set
392 +# CONFIG_MSTEAMROLLER is not set
393 +# CONFIG_MEXCAVATOR is not set
394 +# CONFIG_MZEN is not set
395 +# CONFIG_MZEN2 is not set
396 +# CONFIG_MZEN3 is not set
397 +# CONFIG_MPSC is not set
398 +# CONFIG_MCORE2 is not set
399 +# CONFIG_MATOM is not set
400 +# CONFIG_MNEHALEM is not set
401 +# CONFIG_MWESTMERE is not set
402 +# CONFIG_MSILVERMONT is not set
403 +# CONFIG_MGOLDMONT is not set
404 +# CONFIG_MGOLDMONTPLUS is not set
405 +# CONFIG_MSANDYBRIDGE is not set
406 +# CONFIG_MIVYBRIDGE is not set
407 +# CONFIG_MHASWELL is not set
408 +# CONFIG_MBROADWELL is not set
409 +# CONFIG_MSKYLAKE is not set
410 +# CONFIG_MSKYLAKEX is not set
411 +# CONFIG_MCANNONLAKE is not set
412 +# CONFIG_MICELAKE is not set
413 +# CONFIG_MCASCADELAKE is not set
414 +# CONFIG_MCOOPERLAKE is not set
415 +# CONFIG_MTIGERLAKE is not set
416 +# CONFIG_MSAPPHIRERAPIDS is not set
417 +# CONFIG_MROCKETLAKE is not set
418 +# CONFIG_MALDERLAKE is not set
419 +CONFIG_GENERIC_CPU=y
420 +# CONFIG_GENERIC_CPU2 is not set
421 +# CONFIG_GENERIC_CPU3 is not set
422 +# CONFIG_GENERIC_CPU4 is not set
423 +# CONFIG_MNATIVE_INTEL is not set
424 +# CONFIG_MNATIVE_AMD is not set
425 +CONFIG_X86_INTERNODE_CACHE_SHIFT=6
426 +CONFIG_X86_L1_CACHE_SHIFT=6
427 +CONFIG_X86_TSC=y
428 +CONFIG_X86_CMPXCHG64=y
429 +CONFIG_X86_CMOV=y
430 +CONFIG_X86_MINIMUM_CPU_FAMILY=64
431 +CONFIG_X86_DEBUGCTLMSR=y
432 +CONFIG_IA32_FEAT_CTL=y
433 +CONFIG_X86_VMX_FEATURE_NAMES=y
434 +CONFIG_PROCESSOR_SELECT=y
435 +CONFIG_CPU_SUP_INTEL=y
436 +CONFIG_CPU_SUP_AMD=y
437 +CONFIG_CPU_SUP_HYGON=y
438 +CONFIG_CPU_SUP_CENTAUR=y
439 +CONFIG_CPU_SUP_ZHAOXIN=y
440 +CONFIG_HPET_TIMER=y
441 +CONFIG_HPET_EMULATE_RTC=y
442 +CONFIG_DMI=y
443 +CONFIG_GART_IOMMU=y
444 +# CONFIG_MAXSMP is not set
445 +CONFIG_NR_CPUS_RANGE_BEGIN=2
446 +CONFIG_NR_CPUS_RANGE_END=512
447 +CONFIG_NR_CPUS_DEFAULT=64
448 +CONFIG_NR_CPUS=512
449 +CONFIG_SCHED_SMT=y
450 +CONFIG_SCHED_MC=y
451 +CONFIG_SCHED_MC_PRIO=y
452 +CONFIG_X86_LOCAL_APIC=y
453 +CONFIG_X86_IO_APIC=y
454 +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
455 +CONFIG_X86_MCE=y
456 +CONFIG_X86_MCELOG_LEGACY=y
457 +CONFIG_X86_MCE_INTEL=y
458 +CONFIG_X86_MCE_AMD=y
459 +CONFIG_X86_MCE_THRESHOLD=y
460 +CONFIG_X86_MCE_INJECT=m
463 +# Performance monitoring
465 +CONFIG_PERF_EVENTS_INTEL_UNCORE=y
466 +CONFIG_PERF_EVENTS_INTEL_RAPL=m
467 +CONFIG_PERF_EVENTS_INTEL_CSTATE=m
468 +# CONFIG_PERF_EVENTS_AMD_POWER is not set
469 +# end of Performance monitoring
471 +CONFIG_X86_16BIT=y
472 +CONFIG_X86_ESPFIX64=y
473 +CONFIG_X86_VSYSCALL_EMULATION=y
474 +CONFIG_X86_IOPL_IOPERM=y
475 +CONFIG_I8K=m
476 +CONFIG_MICROCODE=y
477 +CONFIG_MICROCODE_INTEL=y
478 +CONFIG_MICROCODE_AMD=y
479 +CONFIG_MICROCODE_OLD_INTERFACE=y
480 +CONFIG_X86_MSR=m
481 +CONFIG_X86_CPUID=m
482 +CONFIG_X86_5LEVEL=y
483 +CONFIG_X86_DIRECT_GBPAGES=y
484 +# CONFIG_X86_CPA_STATISTICS is not set
485 +CONFIG_AMD_MEM_ENCRYPT=y
486 +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set
487 +CONFIG_NUMA=y
488 +CONFIG_AMD_NUMA=y
489 +CONFIG_X86_64_ACPI_NUMA=y
490 +# CONFIG_NUMA_EMU is not set
491 +CONFIG_NODES_SHIFT=10
492 +CONFIG_ARCH_SPARSEMEM_ENABLE=y
493 +CONFIG_ARCH_SPARSEMEM_DEFAULT=y
494 +CONFIG_ARCH_SELECT_MEMORY_MODEL=y
495 +CONFIG_ARCH_MEMORY_PROBE=y
496 +CONFIG_ARCH_PROC_KCORE_TEXT=y
497 +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
498 +CONFIG_X86_PMEM_LEGACY_DEVICE=y
499 +CONFIG_X86_PMEM_LEGACY=y
500 +CONFIG_X86_CHECK_BIOS_CORRUPTION=y
501 +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
502 +CONFIG_X86_RESERVE_LOW=64
503 +CONFIG_MTRR=y
504 +CONFIG_MTRR_SANITIZER=y
505 +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
506 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
507 +CONFIG_X86_PAT=y
508 +CONFIG_ARCH_USES_PG_UNCACHED=y
509 +CONFIG_ARCH_RANDOM=y
510 +CONFIG_X86_SMAP=y
511 +CONFIG_X86_UMIP=y
512 +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
513 +# CONFIG_X86_INTEL_TSX_MODE_OFF is not set
514 +# CONFIG_X86_INTEL_TSX_MODE_ON is not set
515 +CONFIG_X86_INTEL_TSX_MODE_AUTO=y
516 +CONFIG_X86_SGX=y
517 +CONFIG_EFI=y
518 +CONFIG_EFI_STUB=y
519 +CONFIG_EFI_MIXED=y
520 +# CONFIG_HZ_100 is not set
521 +# CONFIG_HZ_250 is not set
522 +# CONFIG_HZ_300 is not set
523 +CONFIG_HZ_500=y
524 +# CONFIG_HZ_1000 is not set
525 +CONFIG_HZ=500
526 +CONFIG_SCHED_HRTICK=y
527 +CONFIG_KEXEC=y
528 +CONFIG_KEXEC_FILE=y
529 +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y
530 +CONFIG_KEXEC_SIG=y
531 +# CONFIG_KEXEC_SIG_FORCE is not set
532 +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y
533 +CONFIG_CRASH_DUMP=y
534 +CONFIG_KEXEC_JUMP=y
535 +CONFIG_PHYSICAL_START=0x1000000
536 +CONFIG_RELOCATABLE=y
537 +CONFIG_RANDOMIZE_BASE=y
538 +CONFIG_X86_NEED_RELOCS=y
539 +CONFIG_PHYSICAL_ALIGN=0x200000
540 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y
541 +CONFIG_RANDOMIZE_MEMORY=y
542 +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa
543 +CONFIG_HOTPLUG_CPU=y
544 +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
545 +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
546 +# CONFIG_COMPAT_VDSO is not set
547 +# CONFIG_LEGACY_VSYSCALL_EMULATE is not set
548 +CONFIG_LEGACY_VSYSCALL_XONLY=y
549 +# CONFIG_LEGACY_VSYSCALL_NONE is not set
550 +# CONFIG_CMDLINE_BOOL is not set
551 +CONFIG_MODIFY_LDT_SYSCALL=y
552 +CONFIG_HAVE_LIVEPATCH=y
553 +# end of Processor type and features
555 +CONFIG_ARCH_HAS_ADD_PAGES=y
556 +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
557 +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
558 +CONFIG_USE_PERCPU_NUMA_NODE_ID=y
559 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
560 +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
561 +CONFIG_ARCH_ENABLE_THP_MIGRATION=y
564 +# Power management and ACPI options
566 +CONFIG_ARCH_HIBERNATION_HEADER=y
567 +CONFIG_SUSPEND=y
568 +CONFIG_SUSPEND_FREEZER=y
569 +# CONFIG_SUSPEND_SKIP_SYNC is not set
570 +CONFIG_HIBERNATE_CALLBACKS=y
571 +CONFIG_HIBERNATION=y
572 +CONFIG_HIBERNATION_SNAPSHOT_DEV=y
573 +CONFIG_PM_STD_PARTITION=""
574 +CONFIG_PM_SLEEP=y
575 +CONFIG_PM_SLEEP_SMP=y
576 +# CONFIG_PM_AUTOSLEEP is not set
577 +CONFIG_PM_WAKELOCKS=y
578 +CONFIG_PM_WAKELOCKS_LIMIT=100
579 +CONFIG_PM_WAKELOCKS_GC=y
580 +CONFIG_PM=y
581 +CONFIG_PM_DEBUG=y
582 +CONFIG_PM_ADVANCED_DEBUG=y
583 +# CONFIG_PM_TEST_SUSPEND is not set
584 +CONFIG_PM_SLEEP_DEBUG=y
585 +# CONFIG_DPM_WATCHDOG is not set
586 +CONFIG_PM_TRACE=y
587 +CONFIG_PM_TRACE_RTC=y
588 +CONFIG_PM_CLK=y
589 +CONFIG_PM_GENERIC_DOMAINS=y
590 +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
591 +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y
592 +CONFIG_ENERGY_MODEL=y
593 +CONFIG_ARCH_SUPPORTS_ACPI=y
594 +CONFIG_ACPI=y
595 +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
596 +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
597 +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
598 +CONFIG_ACPI_DEBUGGER=y
599 +CONFIG_ACPI_DEBUGGER_USER=y
600 +CONFIG_ACPI_SPCR_TABLE=y
601 +CONFIG_ACPI_FPDT=y
602 +CONFIG_ACPI_LPIT=y
603 +CONFIG_ACPI_SLEEP=y
604 +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
605 +CONFIG_ACPI_EC_DEBUGFS=m
606 +CONFIG_ACPI_AC=y
607 +CONFIG_ACPI_BATTERY=y
608 +CONFIG_ACPI_BUTTON=y
609 +CONFIG_ACPI_VIDEO=m
610 +CONFIG_ACPI_FAN=y
611 +CONFIG_ACPI_TAD=m
612 +CONFIG_ACPI_DOCK=y
613 +CONFIG_ACPI_CPU_FREQ_PSS=y
614 +CONFIG_ACPI_PROCESSOR_CSTATE=y
615 +CONFIG_ACPI_PROCESSOR_IDLE=y
616 +CONFIG_ACPI_CPPC_LIB=y
617 +CONFIG_ACPI_PROCESSOR=y
618 +CONFIG_ACPI_IPMI=m
619 +CONFIG_ACPI_HOTPLUG_CPU=y
620 +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
621 +CONFIG_ACPI_THERMAL=y
622 +CONFIG_ACPI_PLATFORM_PROFILE=m
623 +CONFIG_ACPI_CUSTOM_DSDT_FILE=""
624 +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
625 +CONFIG_ACPI_TABLE_UPGRADE=y
626 +CONFIG_ACPI_DEBUG=y
627 +CONFIG_ACPI_PCI_SLOT=y
628 +CONFIG_ACPI_CONTAINER=y
629 +CONFIG_ACPI_HOTPLUG_MEMORY=y
630 +CONFIG_ACPI_HOTPLUG_IOAPIC=y
631 +CONFIG_ACPI_SBS=m
632 +CONFIG_ACPI_HED=y
633 +# CONFIG_ACPI_CUSTOM_METHOD is not set
634 +CONFIG_ACPI_BGRT=y
635 +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
636 +CONFIG_ACPI_NFIT=m
637 +# CONFIG_NFIT_SECURITY_DEBUG is not set
638 +CONFIG_ACPI_NUMA=y
639 +CONFIG_ACPI_HMAT=y
640 +CONFIG_HAVE_ACPI_APEI=y
641 +CONFIG_HAVE_ACPI_APEI_NMI=y
642 +CONFIG_ACPI_APEI=y
643 +CONFIG_ACPI_APEI_GHES=y
644 +CONFIG_ACPI_APEI_PCIEAER=y
645 +CONFIG_ACPI_APEI_MEMORY_FAILURE=y
646 +CONFIG_ACPI_APEI_EINJ=m
647 +# CONFIG_ACPI_APEI_ERST_DEBUG is not set
648 +CONFIG_ACPI_DPTF=y
649 +CONFIG_DPTF_POWER=m
650 +CONFIG_DPTF_PCH_FIVR=m
651 +CONFIG_ACPI_WATCHDOG=y
652 +CONFIG_ACPI_EXTLOG=m
653 +CONFIG_ACPI_ADXL=y
654 +CONFIG_ACPI_CONFIGFS=m
655 +CONFIG_PMIC_OPREGION=y
656 +CONFIG_BYTCRC_PMIC_OPREGION=y
657 +CONFIG_CHTCRC_PMIC_OPREGION=y
658 +CONFIG_XPOWER_PMIC_OPREGION=y
659 +CONFIG_BXT_WC_PMIC_OPREGION=y
660 +CONFIG_CHT_WC_PMIC_OPREGION=y
661 +CONFIG_CHT_DC_TI_PMIC_OPREGION=y
662 +CONFIG_TPS68470_PMIC_OPREGION=y
663 +CONFIG_X86_PM_TIMER=y
666 +# CPU Frequency scaling
668 +CONFIG_CPU_FREQ=y
669 +CONFIG_CPU_FREQ_GOV_ATTR_SET=y
670 +CONFIG_CPU_FREQ_GOV_COMMON=y
671 +CONFIG_CPU_FREQ_STAT=y
672 +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
673 +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
674 +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
675 +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
676 +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
677 +CONFIG_CPU_FREQ_GOV_POWERSAVE=y
678 +CONFIG_CPU_FREQ_GOV_USERSPACE=y
679 +CONFIG_CPU_FREQ_GOV_ONDEMAND=y
680 +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
681 +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
684 +# CPU frequency scaling drivers
686 +CONFIG_X86_INTEL_PSTATE=y
687 +CONFIG_X86_PCC_CPUFREQ=y
688 +CONFIG_X86_ACPI_CPUFREQ=y
689 +CONFIG_X86_ACPI_CPUFREQ_CPB=y
690 +CONFIG_X86_POWERNOW_K8=y
691 +CONFIG_X86_AMD_FREQ_SENSITIVITY=m
692 +CONFIG_X86_SPEEDSTEP_CENTRINO=y
693 +CONFIG_X86_P4_CLOCKMOD=m
696 +# shared options
698 +CONFIG_X86_SPEEDSTEP_LIB=m
699 +# end of CPU Frequency scaling
702 +# CPU Idle
704 +CONFIG_CPU_IDLE=y
705 +CONFIG_CPU_IDLE_GOV_LADDER=y
706 +CONFIG_CPU_IDLE_GOV_MENU=y
707 +CONFIG_CPU_IDLE_GOV_TEO=y
708 +CONFIG_CPU_IDLE_GOV_HALTPOLL=y
709 +CONFIG_HALTPOLL_CPUIDLE=m
710 +# end of CPU Idle
712 +CONFIG_INTEL_IDLE=y
713 +# end of Power management and ACPI options
716 +# Bus options (PCI etc.)
718 +CONFIG_PCI_DIRECT=y
719 +CONFIG_PCI_MMCONFIG=y
720 +CONFIG_PCI_XEN=y
721 +CONFIG_MMCONF_FAM10H=y
722 +# CONFIG_PCI_CNB20LE_QUIRK is not set
723 +CONFIG_ISA_BUS=y
724 +CONFIG_ISA_DMA_API=y
725 +CONFIG_AMD_NB=y
726 +# CONFIG_X86_SYSFB is not set
727 +# end of Bus options (PCI etc.)
730 +# Binary Emulations
732 +CONFIG_IA32_EMULATION=y
733 +CONFIG_X86_X32=y
734 +CONFIG_COMPAT_32=y
735 +CONFIG_COMPAT=y
736 +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
737 +CONFIG_SYSVIPC_COMPAT=y
738 +# end of Binary Emulations
741 +# Firmware Drivers
743 +CONFIG_EDD=y
744 +CONFIG_EDD_OFF=y
745 +CONFIG_FIRMWARE_MEMMAP=y
746 +CONFIG_DMIID=y
747 +CONFIG_DMI_SYSFS=m
748 +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
749 +CONFIG_ISCSI_IBFT_FIND=y
750 +CONFIG_ISCSI_IBFT=m
751 +CONFIG_FW_CFG_SYSFS=m
752 +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set
753 +# CONFIG_GOOGLE_FIRMWARE is not set
756 +# EFI (Extensible Firmware Interface) Support
758 +CONFIG_EFI_VARS=y
759 +CONFIG_EFI_ESRT=y
760 +CONFIG_EFI_VARS_PSTORE=m
761 +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
762 +CONFIG_EFI_RUNTIME_MAP=y
763 +# CONFIG_EFI_FAKE_MEMMAP is not set
764 +CONFIG_EFI_SOFT_RESERVE=y
765 +CONFIG_EFI_RUNTIME_WRAPPERS=y
766 +CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
767 +CONFIG_EFI_BOOTLOADER_CONTROL=m
768 +CONFIG_EFI_CAPSULE_LOADER=m
769 +CONFIG_EFI_TEST=m
770 +CONFIG_APPLE_PROPERTIES=y
771 +CONFIG_RESET_ATTACK_MITIGATION=y
772 +CONFIG_EFI_RCI2_TABLE=y
773 +# CONFIG_EFI_DISABLE_PCI_DMA is not set
774 +# end of EFI (Extensible Firmware Interface) Support
776 +CONFIG_EFI_EMBEDDED_FIRMWARE=y
777 +CONFIG_UEFI_CPER=y
778 +CONFIG_UEFI_CPER_X86=y
779 +CONFIG_EFI_DEV_PATH_PARSER=y
780 +CONFIG_EFI_EARLYCON=y
781 +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
784 +# Tegra firmware driver
786 +# end of Tegra firmware driver
787 +# end of Firmware Drivers
789 +CONFIG_HAVE_KVM=y
790 +CONFIG_HAVE_KVM_IRQCHIP=y
791 +CONFIG_HAVE_KVM_IRQFD=y
792 +CONFIG_HAVE_KVM_IRQ_ROUTING=y
793 +CONFIG_HAVE_KVM_EVENTFD=y
794 +CONFIG_KVM_MMIO=y
795 +CONFIG_KVM_ASYNC_PF=y
796 +CONFIG_HAVE_KVM_MSI=y
797 +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
798 +CONFIG_KVM_VFIO=y
799 +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
800 +CONFIG_KVM_COMPAT=y
801 +CONFIG_HAVE_KVM_IRQ_BYPASS=y
802 +CONFIG_HAVE_KVM_NO_POLL=y
803 +CONFIG_KVM_XFER_TO_GUEST_WORK=y
804 +CONFIG_VIRTUALIZATION=y
805 +CONFIG_KVM=m
806 +CONFIG_KVM_WERROR=y
807 +CONFIG_KVM_INTEL=m
808 +CONFIG_KVM_AMD=m
809 +CONFIG_KVM_AMD_SEV=y
810 +CONFIG_KVM_XEN=y
811 +CONFIG_AS_AVX512=y
812 +CONFIG_AS_SHA1_NI=y
813 +CONFIG_AS_SHA256_NI=y
814 +CONFIG_AS_TPAUSE=y
817 +# General architecture-dependent options
819 +CONFIG_CRASH_CORE=y
820 +CONFIG_KEXEC_CORE=y
821 +CONFIG_HOTPLUG_SMT=y
822 +CONFIG_GENERIC_ENTRY=y
823 +CONFIG_KPROBES=y
824 +CONFIG_JUMP_LABEL=y
825 +# CONFIG_STATIC_KEYS_SELFTEST is not set
826 +# CONFIG_STATIC_CALL_SELFTEST is not set
827 +CONFIG_OPTPROBES=y
828 +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
829 +CONFIG_ARCH_USE_BUILTIN_BSWAP=y
830 +CONFIG_KRETPROBES=y
831 +CONFIG_USER_RETURN_NOTIFIER=y
832 +CONFIG_HAVE_IOREMAP_PROT=y
833 +CONFIG_HAVE_KPROBES=y
834 +CONFIG_HAVE_KRETPROBES=y
835 +CONFIG_HAVE_OPTPROBES=y
836 +CONFIG_HAVE_KPROBES_ON_FTRACE=y
837 +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
838 +CONFIG_HAVE_NMI=y
839 +CONFIG_HAVE_ARCH_TRACEHOOK=y
840 +CONFIG_HAVE_DMA_CONTIGUOUS=y
841 +CONFIG_GENERIC_SMP_IDLE_THREAD=y
842 +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
843 +CONFIG_ARCH_HAS_SET_MEMORY=y
844 +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
845 +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
846 +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
847 +CONFIG_HAVE_ASM_MODVERSIONS=y
848 +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
849 +CONFIG_HAVE_RSEQ=y
850 +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
851 +CONFIG_HAVE_HW_BREAKPOINT=y
852 +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
853 +CONFIG_HAVE_USER_RETURN_NOTIFIER=y
854 +CONFIG_HAVE_PERF_EVENTS_NMI=y
855 +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
856 +CONFIG_HAVE_PERF_REGS=y
857 +CONFIG_HAVE_PERF_USER_STACK_DUMP=y
858 +CONFIG_HAVE_ARCH_JUMP_LABEL=y
859 +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
860 +CONFIG_MMU_GATHER_TABLE_FREE=y
861 +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y
862 +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
863 +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
864 +CONFIG_HAVE_CMPXCHG_LOCAL=y
865 +CONFIG_HAVE_CMPXCHG_DOUBLE=y
866 +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
867 +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
868 +CONFIG_HAVE_ARCH_SECCOMP=y
869 +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
870 +CONFIG_SECCOMP=y
871 +CONFIG_SECCOMP_FILTER=y
872 +# CONFIG_SECCOMP_CACHE_DEBUG is not set
873 +CONFIG_HAVE_ARCH_STACKLEAK=y
874 +CONFIG_HAVE_STACKPROTECTOR=y
875 +CONFIG_STACKPROTECTOR=y
876 +CONFIG_STACKPROTECTOR_STRONG=y
877 +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y
878 +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y
879 +CONFIG_LTO_NONE=y
880 +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
881 +CONFIG_HAVE_CONTEXT_TRACKING=y
882 +CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK=y
883 +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
884 +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
885 +CONFIG_HAVE_MOVE_PUD=y
886 +CONFIG_HAVE_MOVE_PMD=y
887 +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
888 +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
889 +CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG=y
890 +CONFIG_HAVE_ARCH_HUGE_VMAP=y
891 +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
892 +CONFIG_HAVE_ARCH_SOFT_DIRTY=y
893 +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
894 +CONFIG_MODULES_USE_ELF_RELA=y
895 +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
896 +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y
897 +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
898 +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
899 +CONFIG_HAVE_EXIT_THREAD=y
900 +CONFIG_ARCH_MMAP_RND_BITS=28
901 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
902 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8
903 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
904 +CONFIG_HAVE_STACK_VALIDATION=y
905 +CONFIG_HAVE_RELIABLE_STACKTRACE=y
906 +CONFIG_ISA_BUS_API=y
907 +CONFIG_OLD_SIGSUSPEND3=y
908 +CONFIG_COMPAT_OLD_SIGACTION=y
909 +CONFIG_COMPAT_32BIT_TIME=y
910 +CONFIG_HAVE_ARCH_VMAP_STACK=y
911 +CONFIG_VMAP_STACK=y
912 +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
913 +CONFIG_STRICT_KERNEL_RWX=y
914 +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
915 +CONFIG_STRICT_MODULE_RWX=y
916 +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
917 +CONFIG_ARCH_USE_MEMREMAP_PROT=y
918 +# CONFIG_LOCK_EVENT_COUNTS is not set
919 +CONFIG_ARCH_HAS_MEM_ENCRYPT=y
920 +CONFIG_HAVE_STATIC_CALL=y
921 +CONFIG_HAVE_STATIC_CALL_INLINE=y
922 +CONFIG_HAVE_PREEMPT_DYNAMIC=y
923 +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
924 +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
925 +CONFIG_ARCH_HAS_ELFCORE_COMPAT=y
928 +# GCOV-based kernel profiling
930 +# CONFIG_GCOV_KERNEL is not set
931 +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
932 +# end of GCOV-based kernel profiling
934 +CONFIG_HAVE_GCC_PLUGINS=y
935 +# end of General architecture-dependent options
937 +CONFIG_RT_MUTEXES=y
938 +CONFIG_BASE_SMALL=0
939 +CONFIG_MODULE_SIG_FORMAT=y
940 +CONFIG_MODULES=y
941 +# CONFIG_MODULE_FORCE_LOAD is not set
942 +CONFIG_MODULE_UNLOAD=y
943 +# CONFIG_MODULE_FORCE_UNLOAD is not set
944 +CONFIG_MODVERSIONS=y
945 +CONFIG_ASM_MODVERSIONS=y
946 +CONFIG_MODULE_SRCVERSION_ALL=y
947 +CONFIG_MODULE_SIG=y
948 +# CONFIG_MODULE_SIG_FORCE is not set
949 +CONFIG_MODULE_SIG_ALL=y
950 +# CONFIG_MODULE_SIG_SHA1 is not set
951 +# CONFIG_MODULE_SIG_SHA224 is not set
952 +# CONFIG_MODULE_SIG_SHA256 is not set
953 +# CONFIG_MODULE_SIG_SHA384 is not set
954 +CONFIG_MODULE_SIG_SHA512=y
955 +CONFIG_MODULE_SIG_HASH="sha512"
956 +# CONFIG_MODULE_COMPRESS is not set
957 +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
958 +# CONFIG_TRIM_UNUSED_KSYMS is not set
959 +CONFIG_MODULES_TREE_LOOKUP=y
960 +CONFIG_BLOCK=y
961 +CONFIG_BLK_SCSI_REQUEST=y
962 +CONFIG_BLK_CGROUP_RWSTAT=y
963 +CONFIG_BLK_DEV_BSG=y
964 +CONFIG_BLK_DEV_BSGLIB=y
965 +CONFIG_BLK_DEV_INTEGRITY=y
966 +CONFIG_BLK_DEV_INTEGRITY_T10=y
967 +CONFIG_BLK_DEV_ZONED=y
968 +CONFIG_BLK_DEV_THROTTLING=y
969 +# CONFIG_BLK_DEV_THROTTLING_LOW is not set
970 +CONFIG_BLK_CMDLINE_PARSER=y
971 +CONFIG_BLK_WBT=y
972 +CONFIG_BLK_CGROUP_IOLATENCY=y
973 +# CONFIG_BLK_CGROUP_IOCOST is not set
974 +CONFIG_BLK_WBT_MQ=y
975 +CONFIG_BLK_DEBUG_FS=y
976 +CONFIG_BLK_DEBUG_FS_ZONED=y
977 +CONFIG_BLK_SED_OPAL=y
978 +CONFIG_BLK_INLINE_ENCRYPTION=y
979 +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
982 +# Partition Types
984 +CONFIG_PARTITION_ADVANCED=y
985 +# CONFIG_ACORN_PARTITION is not set
986 +CONFIG_AIX_PARTITION=y
987 +CONFIG_OSF_PARTITION=y
988 +CONFIG_AMIGA_PARTITION=y
989 +CONFIG_ATARI_PARTITION=y
990 +CONFIG_MAC_PARTITION=y
991 +CONFIG_MSDOS_PARTITION=y
992 +CONFIG_BSD_DISKLABEL=y
993 +CONFIG_MINIX_SUBPARTITION=y
994 +CONFIG_SOLARIS_X86_PARTITION=y
995 +CONFIG_UNIXWARE_DISKLABEL=y
996 +CONFIG_LDM_PARTITION=y
997 +# CONFIG_LDM_DEBUG is not set
998 +CONFIG_SGI_PARTITION=y
999 +CONFIG_ULTRIX_PARTITION=y
1000 +CONFIG_SUN_PARTITION=y
1001 +CONFIG_KARMA_PARTITION=y
1002 +CONFIG_EFI_PARTITION=y
1003 +CONFIG_SYSV68_PARTITION=y
1004 +CONFIG_CMDLINE_PARTITION=y
1005 +# end of Partition Types
1007 +CONFIG_BLOCK_COMPAT=y
1008 +CONFIG_BLK_MQ_PCI=y
1009 +CONFIG_BLK_MQ_VIRTIO=y
1010 +CONFIG_BLK_MQ_RDMA=y
1011 +CONFIG_BLK_PM=y
1014 +# IO Schedulers
1016 +CONFIG_MQ_IOSCHED_DEADLINE=m
1017 +CONFIG_MQ_IOSCHED_KYBER=m
1018 +CONFIG_IOSCHED_BFQ=y
1019 +CONFIG_BFQ_GROUP_IOSCHED=y
1020 +# CONFIG_BFQ_CGROUP_DEBUG is not set
1021 +# end of IO Schedulers
1023 +CONFIG_PREEMPT_NOTIFIERS=y
1024 +CONFIG_PADATA=y
1025 +CONFIG_ASN1=y
1026 +CONFIG_UNINLINE_SPIN_UNLOCK=y
1027 +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
1028 +CONFIG_MUTEX_SPIN_ON_OWNER=y
1029 +CONFIG_RWSEM_SPIN_ON_OWNER=y
1030 +CONFIG_LOCK_SPIN_ON_OWNER=y
1031 +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
1032 +CONFIG_QUEUED_SPINLOCKS=y
1033 +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
1034 +CONFIG_QUEUED_RWLOCKS=y
1035 +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y
1036 +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y
1037 +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y
1038 +CONFIG_FREEZER=y
1041 +# Executable file formats
1043 +CONFIG_BINFMT_ELF=y
1044 +CONFIG_COMPAT_BINFMT_ELF=y
1045 +CONFIG_ELFCORE=y
1046 +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
1047 +CONFIG_BINFMT_SCRIPT=y
1048 +CONFIG_BINFMT_MISC=m
1049 +CONFIG_COREDUMP=y
1050 +# end of Executable file formats
1053 +# Memory Management options
1055 +CONFIG_SELECT_MEMORY_MODEL=y
1056 +CONFIG_SPARSEMEM_MANUAL=y
1057 +CONFIG_SPARSEMEM=y
1058 +CONFIG_NEED_MULTIPLE_NODES=y
1059 +CONFIG_SPARSEMEM_EXTREME=y
1060 +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
1061 +CONFIG_SPARSEMEM_VMEMMAP=y
1062 +CONFIG_CLEAN_LOW_KBYTES=524288
1063 +CONFIG_CLEAN_MIN_KBYTES=0
1064 +CONFIG_HAVE_FAST_GUP=y
1065 +CONFIG_NUMA_KEEP_MEMINFO=y
1066 +CONFIG_MEMORY_ISOLATION=y
1067 +CONFIG_HAVE_BOOTMEM_INFO_NODE=y
1068 +CONFIG_MEMORY_HOTPLUG=y
1069 +CONFIG_MEMORY_HOTPLUG_SPARSE=y
1070 +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
1071 +CONFIG_MEMORY_HOTREMOVE=y
1072 +CONFIG_SPLIT_PTLOCK_CPUS=4
1073 +CONFIG_MEMORY_BALLOON=y
1074 +CONFIG_BALLOON_COMPACTION=y
1075 +CONFIG_COMPACTION=y
1076 +CONFIG_PAGE_REPORTING=y
1077 +CONFIG_MIGRATION=y
1078 +CONFIG_CONTIG_ALLOC=y
1079 +CONFIG_PHYS_ADDR_T_64BIT=y
1080 +CONFIG_BOUNCE=y
1081 +CONFIG_VIRT_TO_BUS=y
1082 +CONFIG_MMU_NOTIFIER=y
1083 +CONFIG_KSM=y
1084 +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
1085 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
1086 +CONFIG_MEMORY_FAILURE=y
1087 +CONFIG_HWPOISON_INJECT=m
1088 +CONFIG_TRANSPARENT_HUGEPAGE=y
1089 +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set
1090 +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
1091 +CONFIG_ARCH_WANTS_THP_SWAP=y
1092 +CONFIG_THP_SWAP=y
1093 +CONFIG_CLEANCACHE=y
1094 +CONFIG_FRONTSWAP=y
1095 +# CONFIG_CMA is not set
1096 +CONFIG_MEM_SOFT_DIRTY=y
1097 +CONFIG_ZSWAP=y
1098 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
1099 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set
1100 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set
1101 +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4=y
1102 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set
1103 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
1104 +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lz4"
1105 +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD is not set
1106 +CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD=y
1107 +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
1108 +CONFIG_ZSWAP_ZPOOL_DEFAULT="z3fold"
1109 +# CONFIG_ZSWAP_DEFAULT_ON is not set
1110 +CONFIG_ZPOOL=y
1111 +CONFIG_ZBUD=m
1112 +CONFIG_Z3FOLD=y
1113 +CONFIG_ZSMALLOC=m
1114 +# CONFIG_ZSMALLOC_STAT is not set
1115 +CONFIG_GENERIC_EARLY_IOREMAP=y
1116 +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
1117 +CONFIG_IDLE_PAGE_TRACKING=y
1118 +CONFIG_ARCH_HAS_PTE_DEVMAP=y
1119 +CONFIG_ZONE_DEVICE=y
1120 +CONFIG_DEV_PAGEMAP_OPS=y
1121 +CONFIG_HMM_MIRROR=y
1122 +CONFIG_DEVICE_PRIVATE=y
1123 +CONFIG_VMAP_PFN=y
1124 +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
1125 +CONFIG_ARCH_HAS_PKEYS=y
1126 +# CONFIG_PERCPU_STATS is not set
1127 +# CONFIG_GUP_TEST is not set
1128 +# CONFIG_READ_ONLY_THP_FOR_FS is not set
1129 +CONFIG_ARCH_HAS_PTE_SPECIAL=y
1130 +CONFIG_MAPPING_DIRTY_HELPERS=y
1131 +CONFIG_LRU_GEN=y
1132 +CONFIG_NR_LRU_GENS=4
1133 +CONFIG_TIERS_PER_GEN=2
1134 +# CONFIG_LRU_GEN_ENABLED is not set
1135 +# CONFIG_LRU_GEN_STATS is not set
1136 +# end of Memory Management options
1138 +CONFIG_NET=y
1139 +CONFIG_WANT_COMPAT_NETLINK_MESSAGES=y
1140 +CONFIG_COMPAT_NETLINK_MESSAGES=y
1141 +CONFIG_NET_INGRESS=y
1142 +CONFIG_NET_EGRESS=y
1143 +CONFIG_NET_REDIRECT=y
1144 +CONFIG_SKB_EXTENSIONS=y
1147 +# Networking options
1149 +CONFIG_PACKET=y
1150 +CONFIG_PACKET_DIAG=m
1151 +CONFIG_UNIX=y
1152 +CONFIG_UNIX_SCM=y
1153 +CONFIG_UNIX_DIAG=m
1154 +CONFIG_TLS=m
1155 +CONFIG_TLS_DEVICE=y
1156 +# CONFIG_TLS_TOE is not set
1157 +CONFIG_XFRM=y
1158 +CONFIG_XFRM_OFFLOAD=y
1159 +CONFIG_XFRM_ALGO=m
1160 +CONFIG_XFRM_USER=m
1161 +CONFIG_XFRM_USER_COMPAT=m
1162 +CONFIG_XFRM_INTERFACE=m
1163 +# CONFIG_XFRM_SUB_POLICY is not set
1164 +# CONFIG_XFRM_MIGRATE is not set
1165 +CONFIG_XFRM_STATISTICS=y
1166 +CONFIG_XFRM_AH=m
1167 +CONFIG_XFRM_ESP=m
1168 +CONFIG_XFRM_IPCOMP=m
1169 +CONFIG_NET_KEY=m
1170 +# CONFIG_NET_KEY_MIGRATE is not set
1171 +CONFIG_XFRM_ESPINTCP=y
1172 +CONFIG_SMC=m
1173 +CONFIG_SMC_DIAG=m
1174 +CONFIG_XDP_SOCKETS=y
1175 +CONFIG_XDP_SOCKETS_DIAG=m
1176 +CONFIG_INET=y
1177 +CONFIG_IP_MULTICAST=y
1178 +CONFIG_IP_ADVANCED_ROUTER=y
1179 +CONFIG_IP_FIB_TRIE_STATS=y
1180 +CONFIG_IP_MULTIPLE_TABLES=y
1181 +CONFIG_IP_ROUTE_MULTIPATH=y
1182 +CONFIG_IP_ROUTE_VERBOSE=y
1183 +CONFIG_IP_ROUTE_CLASSID=y
1184 +# CONFIG_IP_PNP is not set
1185 +CONFIG_NET_IPIP=m
1186 +CONFIG_NET_IPGRE_DEMUX=m
1187 +CONFIG_NET_IP_TUNNEL=m
1188 +CONFIG_NET_IPGRE=m
1189 +CONFIG_NET_IPGRE_BROADCAST=y
1190 +CONFIG_IP_MROUTE_COMMON=y
1191 +CONFIG_IP_MROUTE=y
1192 +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
1193 +CONFIG_IP_PIMSM_V1=y
1194 +CONFIG_IP_PIMSM_V2=y
1195 +CONFIG_SYN_COOKIES=y
1196 +CONFIG_NET_IPVTI=m
1197 +CONFIG_NET_UDP_TUNNEL=m
1198 +CONFIG_NET_FOU=m
1199 +CONFIG_NET_FOU_IP_TUNNELS=y
1200 +CONFIG_INET_AH=m
1201 +CONFIG_INET_ESP=m
1202 +CONFIG_INET_ESP_OFFLOAD=m
1203 +CONFIG_INET_ESPINTCP=y
1204 +CONFIG_INET_IPCOMP=m
1205 +CONFIG_INET_XFRM_TUNNEL=m
1206 +CONFIG_INET_TUNNEL=m
1207 +CONFIG_INET_DIAG=m
1208 +CONFIG_INET_TCP_DIAG=m
1209 +CONFIG_INET_UDP_DIAG=m
1210 +CONFIG_INET_RAW_DIAG=m
1211 +CONFIG_INET_DIAG_DESTROY=y
1212 +CONFIG_TCP_CONG_ADVANCED=y
1213 +CONFIG_TCP_CONG_BIC=m
1214 +CONFIG_TCP_CONG_CUBIC=m
1215 +CONFIG_TCP_CONG_WESTWOOD=m
1216 +CONFIG_TCP_CONG_HTCP=m
1217 +CONFIG_TCP_CONG_HSTCP=m
1218 +CONFIG_TCP_CONG_HYBLA=m
1219 +CONFIG_TCP_CONG_VEGAS=m
1220 +CONFIG_TCP_CONG_NV=m
1221 +CONFIG_TCP_CONG_SCALABLE=m
1222 +CONFIG_TCP_CONG_LP=m
1223 +CONFIG_TCP_CONG_VENO=m
1224 +CONFIG_TCP_CONG_YEAH=m
1225 +CONFIG_TCP_CONG_ILLINOIS=m
1226 +CONFIG_TCP_CONG_DCTCP=m
1227 +CONFIG_TCP_CONG_CDG=m
1228 +CONFIG_TCP_CONG_BBR=m
1229 +CONFIG_TCP_CONG_BBR2=y
1230 +CONFIG_DEFAULT_BBR2=y
1231 +# CONFIG_DEFAULT_RENO is not set
1232 +CONFIG_DEFAULT_TCP_CONG="bbr2"
1233 +CONFIG_TCP_MD5SIG=y
1234 +CONFIG_IPV6=y
1235 +CONFIG_IPV6_ROUTER_PREF=y
1236 +CONFIG_IPV6_ROUTE_INFO=y
1237 +# CONFIG_IPV6_OPTIMISTIC_DAD is not set
1238 +CONFIG_INET6_AH=m
1239 +CONFIG_INET6_ESP=m
1240 +CONFIG_INET6_ESP_OFFLOAD=m
1241 +CONFIG_INET6_ESPINTCP=y
1242 +CONFIG_INET6_IPCOMP=m
1243 +CONFIG_IPV6_MIP6=m
1244 +CONFIG_IPV6_ILA=m
1245 +CONFIG_INET6_XFRM_TUNNEL=m
1246 +CONFIG_INET6_TUNNEL=m
1247 +CONFIG_IPV6_VTI=m
1248 +CONFIG_IPV6_SIT=m
1249 +CONFIG_IPV6_SIT_6RD=y
1250 +CONFIG_IPV6_NDISC_NODETYPE=y
1251 +CONFIG_IPV6_TUNNEL=m
1252 +CONFIG_IPV6_GRE=m
1253 +CONFIG_IPV6_FOU=m
1254 +CONFIG_IPV6_FOU_TUNNEL=m
1255 +CONFIG_IPV6_MULTIPLE_TABLES=y
1256 +CONFIG_IPV6_SUBTREES=y
1257 +CONFIG_IPV6_MROUTE=y
1258 +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
1259 +CONFIG_IPV6_PIMSM_V2=y
1260 +CONFIG_IPV6_SEG6_LWTUNNEL=y
1261 +CONFIG_IPV6_SEG6_HMAC=y
1262 +CONFIG_IPV6_SEG6_BPF=y
1263 +# CONFIG_IPV6_RPL_LWTUNNEL is not set
1264 +CONFIG_NETLABEL=y
1265 +CONFIG_MPTCP=y
1266 +CONFIG_INET_MPTCP_DIAG=m
1267 +CONFIG_MPTCP_IPV6=y
1268 +CONFIG_NETWORK_SECMARK=y
1269 +CONFIG_NET_PTP_CLASSIFY=y
1270 +CONFIG_NETWORK_PHY_TIMESTAMPING=y
1271 +CONFIG_NETFILTER=y
1272 +CONFIG_NETFILTER_ADVANCED=y
1273 +CONFIG_BRIDGE_NETFILTER=m
1276 +# Core Netfilter Configuration
1278 +CONFIG_NETFILTER_INGRESS=y
1279 +CONFIG_NETFILTER_NETLINK=m
1280 +CONFIG_NETFILTER_FAMILY_BRIDGE=y
1281 +CONFIG_NETFILTER_FAMILY_ARP=y
1282 +CONFIG_NETFILTER_NETLINK_ACCT=m
1283 +CONFIG_NETFILTER_NETLINK_QUEUE=m
1284 +CONFIG_NETFILTER_NETLINK_LOG=m
1285 +CONFIG_NETFILTER_NETLINK_OSF=m
1286 +CONFIG_NF_CONNTRACK=m
1287 +CONFIG_NF_LOG_COMMON=m
1288 +CONFIG_NF_LOG_NETDEV=m
1289 +CONFIG_NETFILTER_CONNCOUNT=m
1290 +CONFIG_NF_CONNTRACK_MARK=y
1291 +CONFIG_NF_CONNTRACK_SECMARK=y
1292 +CONFIG_NF_CONNTRACK_ZONES=y
1293 +# CONFIG_NF_CONNTRACK_PROCFS is not set
1294 +CONFIG_NF_CONNTRACK_EVENTS=y
1295 +CONFIG_NF_CONNTRACK_TIMEOUT=y
1296 +CONFIG_NF_CONNTRACK_TIMESTAMP=y
1297 +CONFIG_NF_CONNTRACK_LABELS=y
1298 +CONFIG_NF_CT_PROTO_DCCP=y
1299 +CONFIG_NF_CT_PROTO_GRE=y
1300 +CONFIG_NF_CT_PROTO_SCTP=y
1301 +CONFIG_NF_CT_PROTO_UDPLITE=y
1302 +CONFIG_NF_CONNTRACK_AMANDA=m
1303 +CONFIG_NF_CONNTRACK_FTP=m
1304 +CONFIG_NF_CONNTRACK_H323=m
1305 +CONFIG_NF_CONNTRACK_IRC=m
1306 +CONFIG_NF_CONNTRACK_BROADCAST=m
1307 +CONFIG_NF_CONNTRACK_NETBIOS_NS=m
1308 +CONFIG_NF_CONNTRACK_SNMP=m
1309 +CONFIG_NF_CONNTRACK_PPTP=m
1310 +CONFIG_NF_CONNTRACK_SANE=m
1311 +CONFIG_NF_CONNTRACK_SIP=m
1312 +CONFIG_NF_CONNTRACK_TFTP=m
1313 +CONFIG_NF_CT_NETLINK=m
1314 +CONFIG_NF_CT_NETLINK_TIMEOUT=m
1315 +CONFIG_NF_CT_NETLINK_HELPER=m
1316 +CONFIG_NETFILTER_NETLINK_GLUE_CT=y
1317 +CONFIG_NF_NAT=m
1318 +CONFIG_NF_NAT_AMANDA=m
1319 +CONFIG_NF_NAT_FTP=m
1320 +CONFIG_NF_NAT_IRC=m
1321 +CONFIG_NF_NAT_SIP=m
1322 +CONFIG_NF_NAT_TFTP=m
1323 +CONFIG_NF_NAT_REDIRECT=y
1324 +CONFIG_NF_NAT_MASQUERADE=y
1325 +CONFIG_NETFILTER_SYNPROXY=m
1326 +CONFIG_NF_TABLES=m
1327 +CONFIG_NF_TABLES_INET=y
1328 +CONFIG_NF_TABLES_NETDEV=y
1329 +CONFIG_NFT_NUMGEN=m
1330 +CONFIG_NFT_CT=m
1331 +CONFIG_NFT_FLOW_OFFLOAD=m
1332 +CONFIG_NFT_COUNTER=m
1333 +CONFIG_NFT_CONNLIMIT=m
1334 +CONFIG_NFT_LOG=m
1335 +CONFIG_NFT_LIMIT=m
1336 +CONFIG_NFT_MASQ=m
1337 +CONFIG_NFT_REDIR=m
1338 +CONFIG_NFT_NAT=m
1339 +CONFIG_NFT_TUNNEL=m
1340 +CONFIG_NFT_OBJREF=m
1341 +CONFIG_NFT_QUEUE=m
1342 +CONFIG_NFT_QUOTA=m
1343 +CONFIG_NFT_REJECT=m
1344 +CONFIG_NFT_REJECT_INET=m
1345 +CONFIG_NFT_COMPAT=m
1346 +CONFIG_NFT_HASH=m
1347 +CONFIG_NFT_FIB=m
1348 +CONFIG_NFT_FIB_INET=m
1349 +CONFIG_NFT_XFRM=m
1350 +CONFIG_NFT_SOCKET=m
1351 +CONFIG_NFT_OSF=m
1352 +CONFIG_NFT_TPROXY=m
1353 +CONFIG_NFT_SYNPROXY=m
1354 +CONFIG_NF_DUP_NETDEV=m
1355 +CONFIG_NFT_DUP_NETDEV=m
1356 +CONFIG_NFT_FWD_NETDEV=m
1357 +CONFIG_NFT_FIB_NETDEV=m
1358 +CONFIG_NFT_REJECT_NETDEV=m
1359 +CONFIG_NF_FLOW_TABLE_INET=m
1360 +CONFIG_NF_FLOW_TABLE=m
1361 +CONFIG_NETFILTER_XTABLES=m
1364 +# Xtables combined modules
1366 +CONFIG_NETFILTER_XT_MARK=m
1367 +CONFIG_NETFILTER_XT_CONNMARK=m
1368 +CONFIG_NETFILTER_XT_SET=m
1371 +# Xtables targets
1373 +CONFIG_NETFILTER_XT_TARGET_AUDIT=m
1374 +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
1375 +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
1376 +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
1377 +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
1378 +CONFIG_NETFILTER_XT_TARGET_CT=m
1379 +CONFIG_NETFILTER_XT_TARGET_DSCP=m
1380 +CONFIG_NETFILTER_XT_TARGET_HL=m
1381 +CONFIG_NETFILTER_XT_TARGET_HMARK=m
1382 +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
1383 +CONFIG_NETFILTER_XT_TARGET_LED=m
1384 +CONFIG_NETFILTER_XT_TARGET_LOG=m
1385 +CONFIG_NETFILTER_XT_TARGET_MARK=m
1386 +CONFIG_NETFILTER_XT_NAT=m
1387 +CONFIG_NETFILTER_XT_TARGET_NETMAP=m
1388 +CONFIG_NETFILTER_XT_TARGET_NFLOG=m
1389 +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
1390 +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
1391 +CONFIG_NETFILTER_XT_TARGET_RATEEST=m
1392 +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
1393 +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
1394 +CONFIG_NETFILTER_XT_TARGET_TEE=m
1395 +CONFIG_NETFILTER_XT_TARGET_TPROXY=m
1396 +CONFIG_NETFILTER_XT_TARGET_TRACE=m
1397 +CONFIG_NETFILTER_XT_TARGET_SECMARK=m
1398 +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
1399 +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
1402 +# Xtables matches
1404 +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
1405 +CONFIG_NETFILTER_XT_MATCH_BPF=m
1406 +CONFIG_NETFILTER_XT_MATCH_CGROUP=m
1407 +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
1408 +CONFIG_NETFILTER_XT_MATCH_COMMENT=m
1409 +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
1410 +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
1411 +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
1412 +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
1413 +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
1414 +CONFIG_NETFILTER_XT_MATCH_CPU=m
1415 +CONFIG_NETFILTER_XT_MATCH_DCCP=m
1416 +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
1417 +CONFIG_NETFILTER_XT_MATCH_DSCP=m
1418 +CONFIG_NETFILTER_XT_MATCH_ECN=m
1419 +CONFIG_NETFILTER_XT_MATCH_ESP=m
1420 +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
1421 +CONFIG_NETFILTER_XT_MATCH_HELPER=m
1422 +CONFIG_NETFILTER_XT_MATCH_HL=m
1423 +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
1424 +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
1425 +CONFIG_NETFILTER_XT_MATCH_IPVS=m
1426 +CONFIG_NETFILTER_XT_MATCH_L2TP=m
1427 +CONFIG_NETFILTER_XT_MATCH_LENGTH=m
1428 +CONFIG_NETFILTER_XT_MATCH_LIMIT=m
1429 +CONFIG_NETFILTER_XT_MATCH_MAC=m
1430 +CONFIG_NETFILTER_XT_MATCH_MARK=m
1431 +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
1432 +CONFIG_NETFILTER_XT_MATCH_NFACCT=m
1433 +CONFIG_NETFILTER_XT_MATCH_OSF=m
1434 +CONFIG_NETFILTER_XT_MATCH_OWNER=m
1435 +CONFIG_NETFILTER_XT_MATCH_POLICY=m
1436 +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
1437 +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
1438 +CONFIG_NETFILTER_XT_MATCH_QUOTA=m
1439 +CONFIG_NETFILTER_XT_MATCH_RATEEST=m
1440 +CONFIG_NETFILTER_XT_MATCH_REALM=m
1441 +CONFIG_NETFILTER_XT_MATCH_RECENT=m
1442 +CONFIG_NETFILTER_XT_MATCH_SCTP=m
1443 +CONFIG_NETFILTER_XT_MATCH_SOCKET=m
1444 +CONFIG_NETFILTER_XT_MATCH_STATE=m
1445 +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
1446 +CONFIG_NETFILTER_XT_MATCH_STRING=m
1447 +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
1448 +CONFIG_NETFILTER_XT_MATCH_TIME=m
1449 +CONFIG_NETFILTER_XT_MATCH_U32=m
1450 +# end of Core Netfilter Configuration
1452 +CONFIG_IP_SET=m
1453 +CONFIG_IP_SET_MAX=256
1454 +CONFIG_IP_SET_BITMAP_IP=m
1455 +CONFIG_IP_SET_BITMAP_IPMAC=m
1456 +CONFIG_IP_SET_BITMAP_PORT=m
1457 +CONFIG_IP_SET_HASH_IP=m
1458 +CONFIG_IP_SET_HASH_IPMARK=m
1459 +CONFIG_IP_SET_HASH_IPPORT=m
1460 +CONFIG_IP_SET_HASH_IPPORTIP=m
1461 +CONFIG_IP_SET_HASH_IPPORTNET=m
1462 +CONFIG_IP_SET_HASH_IPMAC=m
1463 +CONFIG_IP_SET_HASH_MAC=m
1464 +CONFIG_IP_SET_HASH_NETPORTNET=m
1465 +CONFIG_IP_SET_HASH_NET=m
1466 +CONFIG_IP_SET_HASH_NETNET=m
1467 +CONFIG_IP_SET_HASH_NETPORT=m
1468 +CONFIG_IP_SET_HASH_NETIFACE=m
1469 +CONFIG_IP_SET_LIST_SET=m
1470 +CONFIG_IP_VS=m
1471 +CONFIG_IP_VS_IPV6=y
1472 +# CONFIG_IP_VS_DEBUG is not set
1473 +CONFIG_IP_VS_TAB_BITS=12
1476 +# IPVS transport protocol load balancing support
1478 +CONFIG_IP_VS_PROTO_TCP=y
1479 +CONFIG_IP_VS_PROTO_UDP=y
1480 +CONFIG_IP_VS_PROTO_AH_ESP=y
1481 +CONFIG_IP_VS_PROTO_ESP=y
1482 +CONFIG_IP_VS_PROTO_AH=y
1483 +CONFIG_IP_VS_PROTO_SCTP=y
1486 +# IPVS scheduler
1488 +CONFIG_IP_VS_RR=m
1489 +CONFIG_IP_VS_WRR=m
1490 +CONFIG_IP_VS_LC=m
1491 +CONFIG_IP_VS_WLC=m
1492 +CONFIG_IP_VS_FO=m
1493 +CONFIG_IP_VS_OVF=m
1494 +CONFIG_IP_VS_LBLC=m
1495 +CONFIG_IP_VS_LBLCR=m
1496 +CONFIG_IP_VS_DH=m
1497 +CONFIG_IP_VS_SH=m
1498 +CONFIG_IP_VS_MH=m
1499 +CONFIG_IP_VS_SED=m
1500 +CONFIG_IP_VS_NQ=m
1501 +CONFIG_IP_VS_TWOS=m
1504 +# IPVS SH scheduler
1506 +CONFIG_IP_VS_SH_TAB_BITS=8
1509 +# IPVS MH scheduler
1511 +CONFIG_IP_VS_MH_TAB_INDEX=12
1514 +# IPVS application helper
1516 +CONFIG_IP_VS_FTP=m
1517 +CONFIG_IP_VS_NFCT=y
1518 +CONFIG_IP_VS_PE_SIP=m
1521 +# IP: Netfilter Configuration
1523 +CONFIG_NF_DEFRAG_IPV4=m
1524 +CONFIG_NF_SOCKET_IPV4=m
1525 +CONFIG_NF_TPROXY_IPV4=m
1526 +CONFIG_NF_TABLES_IPV4=y
1527 +CONFIG_NFT_REJECT_IPV4=m
1528 +CONFIG_NFT_DUP_IPV4=m
1529 +CONFIG_NFT_FIB_IPV4=m
1530 +CONFIG_NF_TABLES_ARP=y
1531 +CONFIG_NF_FLOW_TABLE_IPV4=m
1532 +CONFIG_NF_DUP_IPV4=m
1533 +CONFIG_NF_LOG_ARP=m
1534 +CONFIG_NF_LOG_IPV4=m
1535 +CONFIG_NF_REJECT_IPV4=m
1536 +CONFIG_NF_NAT_SNMP_BASIC=m
1537 +CONFIG_NF_NAT_PPTP=m
1538 +CONFIG_NF_NAT_H323=m
1539 +CONFIG_IP_NF_IPTABLES=m
1540 +CONFIG_IP_NF_MATCH_AH=m
1541 +CONFIG_IP_NF_MATCH_ECN=m
1542 +CONFIG_IP_NF_MATCH_RPFILTER=m
1543 +CONFIG_IP_NF_MATCH_TTL=m
1544 +CONFIG_IP_NF_FILTER=m
1545 +CONFIG_IP_NF_TARGET_REJECT=m
1546 +CONFIG_IP_NF_TARGET_SYNPROXY=m
1547 +CONFIG_IP_NF_NAT=m
1548 +CONFIG_IP_NF_TARGET_MASQUERADE=m
1549 +CONFIG_IP_NF_TARGET_NETMAP=m
1550 +CONFIG_IP_NF_TARGET_REDIRECT=m
1551 +CONFIG_IP_NF_MANGLE=m
1552 +CONFIG_IP_NF_TARGET_CLUSTERIP=m
1553 +CONFIG_IP_NF_TARGET_ECN=m
1554 +CONFIG_IP_NF_TARGET_TTL=m
1555 +CONFIG_IP_NF_RAW=m
1556 +CONFIG_IP_NF_SECURITY=m
1557 +CONFIG_IP_NF_ARPTABLES=m
1558 +CONFIG_IP_NF_ARPFILTER=m
1559 +CONFIG_IP_NF_ARP_MANGLE=m
1560 +# end of IP: Netfilter Configuration
1563 +# IPv6: Netfilter Configuration
1565 +CONFIG_NF_SOCKET_IPV6=m
1566 +CONFIG_NF_TPROXY_IPV6=m
1567 +CONFIG_NF_TABLES_IPV6=y
1568 +CONFIG_NFT_REJECT_IPV6=m
1569 +CONFIG_NFT_DUP_IPV6=m
1570 +CONFIG_NFT_FIB_IPV6=m
1571 +CONFIG_NF_FLOW_TABLE_IPV6=m
1572 +CONFIG_NF_DUP_IPV6=m
1573 +CONFIG_NF_REJECT_IPV6=m
1574 +CONFIG_NF_LOG_IPV6=m
1575 +CONFIG_IP6_NF_IPTABLES=m
1576 +CONFIG_IP6_NF_MATCH_AH=m
1577 +CONFIG_IP6_NF_MATCH_EUI64=m
1578 +CONFIG_IP6_NF_MATCH_FRAG=m
1579 +CONFIG_IP6_NF_MATCH_OPTS=m
1580 +CONFIG_IP6_NF_MATCH_HL=m
1581 +CONFIG_IP6_NF_MATCH_IPV6HEADER=m
1582 +CONFIG_IP6_NF_MATCH_MH=m
1583 +CONFIG_IP6_NF_MATCH_RPFILTER=m
1584 +CONFIG_IP6_NF_MATCH_RT=m
1585 +CONFIG_IP6_NF_MATCH_SRH=m
1586 +CONFIG_IP6_NF_TARGET_HL=m
1587 +CONFIG_IP6_NF_FILTER=m
1588 +CONFIG_IP6_NF_TARGET_REJECT=m
1589 +CONFIG_IP6_NF_TARGET_SYNPROXY=m
1590 +CONFIG_IP6_NF_MANGLE=m
1591 +CONFIG_IP6_NF_RAW=m
1592 +CONFIG_IP6_NF_SECURITY=m
1593 +CONFIG_IP6_NF_NAT=m
1594 +CONFIG_IP6_NF_TARGET_MASQUERADE=m
1595 +CONFIG_IP6_NF_TARGET_NPT=m
1596 +# end of IPv6: Netfilter Configuration
1598 +CONFIG_NF_DEFRAG_IPV6=m
1601 +# DECnet: Netfilter Configuration
1603 +CONFIG_DECNET_NF_GRABULATOR=m
1604 +# end of DECnet: Netfilter Configuration
1606 +CONFIG_NF_TABLES_BRIDGE=m
1607 +CONFIG_NFT_BRIDGE_META=m
1608 +CONFIG_NFT_BRIDGE_REJECT=m
1609 +CONFIG_NF_LOG_BRIDGE=m
1610 +CONFIG_NF_CONNTRACK_BRIDGE=m
1611 +CONFIG_BRIDGE_NF_EBTABLES=m
1612 +CONFIG_BRIDGE_EBT_BROUTE=m
1613 +CONFIG_BRIDGE_EBT_T_FILTER=m
1614 +CONFIG_BRIDGE_EBT_T_NAT=m
1615 +CONFIG_BRIDGE_EBT_802_3=m
1616 +CONFIG_BRIDGE_EBT_AMONG=m
1617 +CONFIG_BRIDGE_EBT_ARP=m
1618 +CONFIG_BRIDGE_EBT_IP=m
1619 +CONFIG_BRIDGE_EBT_IP6=m
1620 +CONFIG_BRIDGE_EBT_LIMIT=m
1621 +CONFIG_BRIDGE_EBT_MARK=m
1622 +CONFIG_BRIDGE_EBT_PKTTYPE=m
1623 +CONFIG_BRIDGE_EBT_STP=m
1624 +CONFIG_BRIDGE_EBT_VLAN=m
1625 +CONFIG_BRIDGE_EBT_ARPREPLY=m
1626 +CONFIG_BRIDGE_EBT_DNAT=m
1627 +CONFIG_BRIDGE_EBT_MARK_T=m
1628 +CONFIG_BRIDGE_EBT_REDIRECT=m
1629 +CONFIG_BRIDGE_EBT_SNAT=m
1630 +CONFIG_BRIDGE_EBT_LOG=m
1631 +CONFIG_BRIDGE_EBT_NFLOG=m
1632 +CONFIG_BPFILTER=y
1633 +CONFIG_BPFILTER_UMH=m
1634 +CONFIG_IP_DCCP=m
1635 +CONFIG_INET_DCCP_DIAG=m
1638 +# DCCP CCIDs Configuration
1640 +# CONFIG_IP_DCCP_CCID2_DEBUG is not set
1641 +# CONFIG_IP_DCCP_CCID3 is not set
1642 +# end of DCCP CCIDs Configuration
1645 +# DCCP Kernel Hacking
1647 +# CONFIG_IP_DCCP_DEBUG is not set
1648 +# end of DCCP Kernel Hacking
1650 +CONFIG_IP_SCTP=m
1651 +# CONFIG_SCTP_DBG_OBJCNT is not set
1652 +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
1653 +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
1654 +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
1655 +CONFIG_SCTP_COOKIE_HMAC_MD5=y
1656 +CONFIG_SCTP_COOKIE_HMAC_SHA1=y
1657 +CONFIG_INET_SCTP_DIAG=m
1658 +CONFIG_RDS=m
1659 +CONFIG_RDS_RDMA=m
1660 +CONFIG_RDS_TCP=m
1661 +# CONFIG_RDS_DEBUG is not set
1662 +CONFIG_TIPC=m
1663 +CONFIG_TIPC_MEDIA_IB=y
1664 +CONFIG_TIPC_MEDIA_UDP=y
1665 +CONFIG_TIPC_CRYPTO=y
1666 +CONFIG_TIPC_DIAG=m
1667 +CONFIG_ATM=m
1668 +CONFIG_ATM_CLIP=m
1669 +# CONFIG_ATM_CLIP_NO_ICMP is not set
1670 +CONFIG_ATM_LANE=m
1671 +CONFIG_ATM_MPOA=m
1672 +CONFIG_ATM_BR2684=m
1673 +# CONFIG_ATM_BR2684_IPFILTER is not set
1674 +CONFIG_L2TP=m
1675 +CONFIG_L2TP_DEBUGFS=m
1676 +CONFIG_L2TP_V3=y
1677 +CONFIG_L2TP_IP=m
1678 +CONFIG_L2TP_ETH=m
1679 +CONFIG_STP=m
1680 +CONFIG_GARP=m
1681 +CONFIG_MRP=m
1682 +CONFIG_BRIDGE=m
1683 +CONFIG_BRIDGE_IGMP_SNOOPING=y
1684 +CONFIG_BRIDGE_VLAN_FILTERING=y
1685 +CONFIG_BRIDGE_MRP=y
1686 +CONFIG_BRIDGE_CFM=y
1687 +CONFIG_HAVE_NET_DSA=y
1688 +CONFIG_NET_DSA=m
1689 +CONFIG_NET_DSA_TAG_8021Q=m
1690 +CONFIG_NET_DSA_TAG_AR9331=m
1691 +CONFIG_NET_DSA_TAG_BRCM_COMMON=m
1692 +CONFIG_NET_DSA_TAG_BRCM=m
1693 +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m
1694 +CONFIG_NET_DSA_TAG_HELLCREEK=m
1695 +CONFIG_NET_DSA_TAG_GSWIP=m
1696 +CONFIG_NET_DSA_TAG_DSA_COMMON=m
1697 +CONFIG_NET_DSA_TAG_DSA=m
1698 +CONFIG_NET_DSA_TAG_EDSA=m
1699 +CONFIG_NET_DSA_TAG_MTK=m
1700 +CONFIG_NET_DSA_TAG_KSZ=m
1701 +CONFIG_NET_DSA_TAG_RTL4_A=m
1702 +CONFIG_NET_DSA_TAG_OCELOT=m
1703 +CONFIG_NET_DSA_TAG_OCELOT_8021Q=m
1704 +CONFIG_NET_DSA_TAG_QCA=m
1705 +CONFIG_NET_DSA_TAG_LAN9303=m
1706 +CONFIG_NET_DSA_TAG_SJA1105=m
1707 +CONFIG_NET_DSA_TAG_TRAILER=m
1708 +CONFIG_NET_DSA_TAG_XRS700X=m
1709 +CONFIG_VLAN_8021Q=m
1710 +CONFIG_VLAN_8021Q_GVRP=y
1711 +CONFIG_VLAN_8021Q_MVRP=y
1712 +CONFIG_DECNET=m
1713 +# CONFIG_DECNET_ROUTER is not set
1714 +CONFIG_LLC=m
1715 +CONFIG_LLC2=m
1716 +CONFIG_ATALK=m
1717 +CONFIG_DEV_APPLETALK=m
1718 +# CONFIG_IPDDP is not set
1719 +CONFIG_X25=m
1720 +CONFIG_LAPB=m
1721 +CONFIG_PHONET=m
1722 +CONFIG_6LOWPAN=m
1723 +# CONFIG_6LOWPAN_DEBUGFS is not set
1724 +CONFIG_6LOWPAN_NHC=m
1725 +CONFIG_6LOWPAN_NHC_DEST=m
1726 +CONFIG_6LOWPAN_NHC_FRAGMENT=m
1727 +CONFIG_6LOWPAN_NHC_HOP=m
1728 +CONFIG_6LOWPAN_NHC_IPV6=m
1729 +CONFIG_6LOWPAN_NHC_MOBILITY=m
1730 +CONFIG_6LOWPAN_NHC_ROUTING=m
1731 +CONFIG_6LOWPAN_NHC_UDP=m
1732 +# CONFIG_6LOWPAN_GHC_EXT_HDR_HOP is not set
1733 +# CONFIG_6LOWPAN_GHC_UDP is not set
1734 +# CONFIG_6LOWPAN_GHC_ICMPV6 is not set
1735 +# CONFIG_6LOWPAN_GHC_EXT_HDR_DEST is not set
1736 +# CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG is not set
1737 +# CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE is not set
1738 +CONFIG_IEEE802154=m
1739 +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set
1740 +CONFIG_IEEE802154_SOCKET=m
1741 +CONFIG_IEEE802154_6LOWPAN=m
1742 +CONFIG_MAC802154=m
1743 +CONFIG_NET_SCHED=y
1746 +# Queueing/Scheduling
1748 +CONFIG_NET_SCH_CBQ=m
1749 +CONFIG_NET_SCH_HTB=m
1750 +CONFIG_NET_SCH_HFSC=m
1751 +CONFIG_NET_SCH_ATM=m
1752 +CONFIG_NET_SCH_PRIO=m
1753 +CONFIG_NET_SCH_MULTIQ=m
1754 +CONFIG_NET_SCH_RED=m
1755 +CONFIG_NET_SCH_SFB=m
1756 +CONFIG_NET_SCH_SFQ=m
1757 +CONFIG_NET_SCH_TEQL=m
1758 +CONFIG_NET_SCH_TBF=m
1759 +CONFIG_NET_SCH_CBS=m
1760 +CONFIG_NET_SCH_ETF=m
1761 +CONFIG_NET_SCH_TAPRIO=m
1762 +CONFIG_NET_SCH_GRED=m
1763 +CONFIG_NET_SCH_DSMARK=m
1764 +CONFIG_NET_SCH_NETEM=m
1765 +CONFIG_NET_SCH_DRR=m
1766 +CONFIG_NET_SCH_MQPRIO=m
1767 +CONFIG_NET_SCH_SKBPRIO=m
1768 +CONFIG_NET_SCH_CHOKE=m
1769 +CONFIG_NET_SCH_QFQ=m
1770 +CONFIG_NET_SCH_CODEL=m
1771 +CONFIG_NET_SCH_FQ_CODEL=m
1772 +CONFIG_NET_SCH_CAKE=m
1773 +CONFIG_NET_SCH_FQ=m
1774 +CONFIG_NET_SCH_HHF=m
1775 +CONFIG_NET_SCH_PIE=y
1776 +CONFIG_NET_SCH_FQ_PIE=y
1777 +CONFIG_NET_SCH_INGRESS=m
1778 +CONFIG_NET_SCH_PLUG=m
1779 +CONFIG_NET_SCH_ETS=m
1780 +CONFIG_NET_SCH_DEFAULT=y
1781 +# CONFIG_DEFAULT_FQ is not set
1782 +# CONFIG_DEFAULT_CODEL is not set
1783 +# CONFIG_DEFAULT_FQ_CODEL is not set
1784 +CONFIG_DEFAULT_FQ_PIE=y
1785 +# CONFIG_DEFAULT_SFQ is not set
1786 +# CONFIG_DEFAULT_PFIFO_FAST is not set
1787 +CONFIG_DEFAULT_NET_SCH="fq_pie"
1790 +# Classification
1792 +CONFIG_NET_CLS=y
1793 +CONFIG_NET_CLS_BASIC=m
1794 +CONFIG_NET_CLS_TCINDEX=m
1795 +CONFIG_NET_CLS_ROUTE4=m
1796 +CONFIG_NET_CLS_FW=m
1797 +CONFIG_NET_CLS_U32=m
1798 +# CONFIG_CLS_U32_PERF is not set
1799 +CONFIG_CLS_U32_MARK=y
1800 +CONFIG_NET_CLS_RSVP=m
1801 +CONFIG_NET_CLS_RSVP6=m
1802 +CONFIG_NET_CLS_FLOW=m
1803 +CONFIG_NET_CLS_CGROUP=m
1804 +CONFIG_NET_CLS_BPF=m
1805 +CONFIG_NET_CLS_FLOWER=m
1806 +CONFIG_NET_CLS_MATCHALL=m
1807 +CONFIG_NET_EMATCH=y
1808 +CONFIG_NET_EMATCH_STACK=32
1809 +CONFIG_NET_EMATCH_CMP=m
1810 +CONFIG_NET_EMATCH_NBYTE=m
1811 +CONFIG_NET_EMATCH_U32=m
1812 +CONFIG_NET_EMATCH_META=m
1813 +CONFIG_NET_EMATCH_TEXT=m
1814 +CONFIG_NET_EMATCH_CANID=m
1815 +CONFIG_NET_EMATCH_IPSET=m
1816 +CONFIG_NET_EMATCH_IPT=m
1817 +CONFIG_NET_CLS_ACT=y
1818 +CONFIG_NET_ACT_POLICE=m
1819 +CONFIG_NET_ACT_GACT=m
1820 +CONFIG_GACT_PROB=y
1821 +CONFIG_NET_ACT_MIRRED=m
1822 +CONFIG_NET_ACT_SAMPLE=m
1823 +CONFIG_NET_ACT_IPT=m
1824 +CONFIG_NET_ACT_NAT=m
1825 +CONFIG_NET_ACT_PEDIT=m
1826 +CONFIG_NET_ACT_SIMP=m
1827 +CONFIG_NET_ACT_SKBEDIT=m
1828 +CONFIG_NET_ACT_CSUM=m
1829 +CONFIG_NET_ACT_MPLS=m
1830 +CONFIG_NET_ACT_VLAN=m
1831 +CONFIG_NET_ACT_BPF=m
1832 +CONFIG_NET_ACT_CONNMARK=m
1833 +CONFIG_NET_ACT_CTINFO=m
1834 +CONFIG_NET_ACT_SKBMOD=m
1835 +# CONFIG_NET_ACT_IFE is not set
1836 +CONFIG_NET_ACT_TUNNEL_KEY=m
1837 +CONFIG_NET_ACT_CT=m
1838 +CONFIG_NET_ACT_GATE=m
1839 +CONFIG_NET_TC_SKB_EXT=y
1840 +CONFIG_NET_SCH_FIFO=y
1841 +CONFIG_DCB=y
1842 +CONFIG_DNS_RESOLVER=y
1843 +CONFIG_BATMAN_ADV=m
1844 +# CONFIG_BATMAN_ADV_BATMAN_V is not set
1845 +CONFIG_BATMAN_ADV_BLA=y
1846 +CONFIG_BATMAN_ADV_DAT=y
1847 +CONFIG_BATMAN_ADV_NC=y
1848 +CONFIG_BATMAN_ADV_MCAST=y
1849 +# CONFIG_BATMAN_ADV_DEBUG is not set
1850 +CONFIG_OPENVSWITCH=m
1851 +CONFIG_OPENVSWITCH_GRE=m
1852 +CONFIG_OPENVSWITCH_VXLAN=m
1853 +CONFIG_OPENVSWITCH_GENEVE=m
1854 +CONFIG_VSOCKETS=m
1855 +CONFIG_VSOCKETS_DIAG=m
1856 +CONFIG_VSOCKETS_LOOPBACK=m
1857 +CONFIG_VMWARE_VMCI_VSOCKETS=m
1858 +CONFIG_VIRTIO_VSOCKETS=m
1859 +CONFIG_VIRTIO_VSOCKETS_COMMON=m
1860 +CONFIG_HYPERV_VSOCKETS=m
1861 +CONFIG_NETLINK_DIAG=m
1862 +CONFIG_MPLS=y
1863 +CONFIG_NET_MPLS_GSO=m
1864 +CONFIG_MPLS_ROUTING=m
1865 +CONFIG_MPLS_IPTUNNEL=m
1866 +CONFIG_NET_NSH=m
1867 +CONFIG_HSR=m
1868 +CONFIG_NET_SWITCHDEV=y
1869 +CONFIG_NET_L3_MASTER_DEV=y
1870 +CONFIG_QRTR=m
1871 +CONFIG_QRTR_SMD=m
1872 +CONFIG_QRTR_TUN=m
1873 +CONFIG_QRTR_MHI=m
1874 +CONFIG_NET_NCSI=y
1875 +CONFIG_NCSI_OEM_CMD_GET_MAC=y
1876 +CONFIG_RPS=y
1877 +CONFIG_RFS_ACCEL=y
1878 +CONFIG_SOCK_RX_QUEUE_MAPPING=y
1879 +CONFIG_XPS=y
1880 +CONFIG_CGROUP_NET_PRIO=y
1881 +CONFIG_CGROUP_NET_CLASSID=y
1882 +CONFIG_NET_RX_BUSY_POLL=y
1883 +CONFIG_BQL=y
1884 +CONFIG_BPF_JIT=y
1885 +CONFIG_BPF_STREAM_PARSER=y
1886 +CONFIG_NET_FLOW_LIMIT=y
1889 +# Network testing
1891 +CONFIG_NET_PKTGEN=m
1892 +# end of Network testing
1893 +# end of Networking options
1895 +CONFIG_HAMRADIO=y
1898 +# Packet Radio protocols
1900 +CONFIG_AX25=m
1901 +CONFIG_AX25_DAMA_SLAVE=y
1902 +CONFIG_NETROM=m
1903 +CONFIG_ROSE=m
1906 +# AX.25 network device drivers
1908 +CONFIG_MKISS=m
1909 +CONFIG_6PACK=m
1910 +CONFIG_BPQETHER=m
1911 +CONFIG_BAYCOM_SER_FDX=m
1912 +CONFIG_BAYCOM_SER_HDX=m
1913 +CONFIG_BAYCOM_PAR=m
1914 +CONFIG_YAM=m
1915 +# end of AX.25 network device drivers
1917 +CONFIG_CAN=m
1918 +CONFIG_CAN_RAW=m
1919 +CONFIG_CAN_BCM=m
1920 +CONFIG_CAN_GW=m
1921 +CONFIG_CAN_J1939=m
1922 +CONFIG_CAN_ISOTP=m
1925 +# CAN Device Drivers
1927 +CONFIG_CAN_VCAN=m
1928 +CONFIG_CAN_VXCAN=m
1929 +CONFIG_CAN_SLCAN=m
1930 +CONFIG_CAN_DEV=m
1931 +CONFIG_CAN_CALC_BITTIMING=y
1932 +CONFIG_CAN_JANZ_ICAN3=m
1933 +CONFIG_CAN_KVASER_PCIEFD=m
1934 +CONFIG_CAN_C_CAN=m
1935 +CONFIG_CAN_C_CAN_PLATFORM=m
1936 +CONFIG_CAN_C_CAN_PCI=m
1937 +CONFIG_CAN_CC770=m
1938 +CONFIG_CAN_CC770_ISA=m
1939 +CONFIG_CAN_CC770_PLATFORM=m
1940 +CONFIG_CAN_IFI_CANFD=m
1941 +CONFIG_CAN_M_CAN=m
1942 +CONFIG_CAN_M_CAN_PCI=m
1943 +CONFIG_CAN_M_CAN_PLATFORM=m
1944 +CONFIG_CAN_M_CAN_TCAN4X5X=m
1945 +CONFIG_CAN_PEAK_PCIEFD=m
1946 +CONFIG_CAN_SJA1000=m
1947 +CONFIG_CAN_EMS_PCI=m
1948 +CONFIG_CAN_EMS_PCMCIA=m
1949 +CONFIG_CAN_F81601=m
1950 +CONFIG_CAN_KVASER_PCI=m
1951 +CONFIG_CAN_PEAK_PCI=m
1952 +CONFIG_CAN_PEAK_PCIEC=y
1953 +CONFIG_CAN_PEAK_PCMCIA=m
1954 +CONFIG_CAN_PLX_PCI=m
1955 +CONFIG_CAN_SJA1000_ISA=m
1956 +CONFIG_CAN_SJA1000_PLATFORM=m
1957 +CONFIG_CAN_SOFTING=m
1958 +CONFIG_CAN_SOFTING_CS=m
1961 +# CAN SPI interfaces
1963 +CONFIG_CAN_HI311X=m
1964 +CONFIG_CAN_MCP251X=m
1965 +CONFIG_CAN_MCP251XFD=m
1966 +# CONFIG_CAN_MCP251XFD_SANITY is not set
1967 +# end of CAN SPI interfaces
1970 +# CAN USB interfaces
1972 +CONFIG_CAN_8DEV_USB=m
1973 +CONFIG_CAN_EMS_USB=m
1974 +CONFIG_CAN_ESD_USB2=m
1975 +CONFIG_CAN_GS_USB=m
1976 +CONFIG_CAN_KVASER_USB=m
1977 +CONFIG_CAN_MCBA_USB=m
1978 +CONFIG_CAN_PEAK_USB=m
1979 +CONFIG_CAN_UCAN=m
1980 +# end of CAN USB interfaces
1982 +# CONFIG_CAN_DEBUG_DEVICES is not set
1983 +# end of CAN Device Drivers
1985 +CONFIG_BT=m
1986 +CONFIG_BT_BREDR=y
1987 +CONFIG_BT_RFCOMM=m
1988 +CONFIG_BT_RFCOMM_TTY=y
1989 +CONFIG_BT_BNEP=m
1990 +CONFIG_BT_BNEP_MC_FILTER=y
1991 +CONFIG_BT_BNEP_PROTO_FILTER=y
1992 +CONFIG_BT_CMTP=m
1993 +CONFIG_BT_HIDP=m
1994 +CONFIG_BT_HS=y
1995 +CONFIG_BT_LE=y
1996 +CONFIG_BT_6LOWPAN=m
1997 +CONFIG_BT_LEDS=y
1998 +CONFIG_BT_MSFTEXT=y
1999 +CONFIG_BT_DEBUGFS=y
2000 +# CONFIG_BT_SELFTEST is not set
2003 +# Bluetooth device drivers
2005 +CONFIG_BT_INTEL=m
2006 +CONFIG_BT_BCM=m
2007 +CONFIG_BT_RTL=m
2008 +CONFIG_BT_QCA=m
2009 +CONFIG_BT_HCIBTUSB=m
2010 +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
2011 +CONFIG_BT_HCIBTUSB_BCM=y
2012 +CONFIG_BT_HCIBTUSB_MTK=y
2013 +CONFIG_BT_HCIBTUSB_RTL=y
2014 +CONFIG_BT_HCIBTSDIO=m
2015 +CONFIG_BT_HCIUART=m
2016 +CONFIG_BT_HCIUART_SERDEV=y
2017 +CONFIG_BT_HCIUART_H4=y
2018 +CONFIG_BT_HCIUART_NOKIA=m
2019 +CONFIG_BT_HCIUART_BCSP=y
2020 +CONFIG_BT_HCIUART_ATH3K=y
2021 +CONFIG_BT_HCIUART_LL=y
2022 +CONFIG_BT_HCIUART_3WIRE=y
2023 +CONFIG_BT_HCIUART_INTEL=y
2024 +CONFIG_BT_HCIUART_BCM=y
2025 +CONFIG_BT_HCIUART_RTL=y
2026 +CONFIG_BT_HCIUART_QCA=y
2027 +CONFIG_BT_HCIUART_AG6XX=y
2028 +CONFIG_BT_HCIUART_MRVL=y
2029 +CONFIG_BT_HCIBCM203X=m
2030 +CONFIG_BT_HCIBPA10X=m
2031 +CONFIG_BT_HCIBFUSB=m
2032 +CONFIG_BT_HCIDTL1=m
2033 +CONFIG_BT_HCIBT3C=m
2034 +CONFIG_BT_HCIBLUECARD=m
2035 +CONFIG_BT_HCIVHCI=m
2036 +CONFIG_BT_MRVL=m
2037 +CONFIG_BT_MRVL_SDIO=m
2038 +CONFIG_BT_ATH3K=m
2039 +CONFIG_BT_MTKSDIO=m
2040 +CONFIG_BT_MTKUART=m
2041 +CONFIG_BT_HCIRSI=m
2042 +# end of Bluetooth device drivers
2044 +CONFIG_AF_RXRPC=m
2045 +CONFIG_AF_RXRPC_IPV6=y
2046 +# CONFIG_AF_RXRPC_INJECT_LOSS is not set
2047 +# CONFIG_AF_RXRPC_DEBUG is not set
2048 +CONFIG_RXKAD=y
2049 +CONFIG_AF_KCM=m
2050 +CONFIG_STREAM_PARSER=y
2051 +CONFIG_FIB_RULES=y
2052 +CONFIG_WIRELESS=y
2053 +CONFIG_WIRELESS_EXT=y
2054 +CONFIG_WEXT_CORE=y
2055 +CONFIG_WEXT_PROC=y
2056 +CONFIG_WEXT_SPY=y
2057 +CONFIG_WEXT_PRIV=y
2058 +CONFIG_CFG80211=m
2059 +# CONFIG_NL80211_TESTMODE is not set
2060 +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
2061 +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
2062 +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y
2063 +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y
2064 +CONFIG_CFG80211_DEFAULT_PS=y
2065 +CONFIG_CFG80211_DEBUGFS=y
2066 +CONFIG_CFG80211_CRDA_SUPPORT=y
2067 +CONFIG_CFG80211_WEXT=y
2068 +CONFIG_CFG80211_WEXT_EXPORT=y
2069 +CONFIG_LIB80211=m
2070 +CONFIG_LIB80211_CRYPT_WEP=m
2071 +CONFIG_LIB80211_CRYPT_CCMP=m
2072 +CONFIG_LIB80211_CRYPT_TKIP=m
2073 +# CONFIG_LIB80211_DEBUG is not set
2074 +CONFIG_MAC80211=m
2075 +CONFIG_MAC80211_HAS_RC=y
2076 +CONFIG_MAC80211_RC_MINSTREL=y
2077 +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
2078 +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
2079 +CONFIG_MAC80211_MESH=y
2080 +CONFIG_MAC80211_LEDS=y
2081 +CONFIG_MAC80211_DEBUGFS=y
2082 +CONFIG_MAC80211_MESSAGE_TRACING=y
2083 +# CONFIG_MAC80211_DEBUG_MENU is not set
2084 +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
2085 +CONFIG_RFKILL=y
2086 +CONFIG_RFKILL_LEDS=y
2087 +CONFIG_RFKILL_INPUT=y
2088 +CONFIG_RFKILL_GPIO=m
2089 +CONFIG_NET_9P=m
2090 +CONFIG_NET_9P_VIRTIO=m
2091 +CONFIG_NET_9P_XEN=m
2092 +CONFIG_NET_9P_RDMA=m
2093 +# CONFIG_NET_9P_DEBUG is not set
2094 +CONFIG_CAIF=m
2095 +# CONFIG_CAIF_DEBUG is not set
2096 +CONFIG_CAIF_NETDEV=m
2097 +CONFIG_CAIF_USB=m
2098 +CONFIG_CEPH_LIB=m
2099 +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
2100 +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
2101 +CONFIG_NFC=m
2102 +CONFIG_NFC_DIGITAL=m
2103 +CONFIG_NFC_NCI=m
2104 +CONFIG_NFC_NCI_SPI=m
2105 +CONFIG_NFC_NCI_UART=m
2106 +CONFIG_NFC_HCI=m
2107 +CONFIG_NFC_SHDLC=y
2110 +# Near Field Communication (NFC) devices
2112 +CONFIG_NFC_TRF7970A=m
2113 +CONFIG_NFC_MEI_PHY=m
2114 +CONFIG_NFC_SIM=m
2115 +CONFIG_NFC_PORT100=m
2116 +CONFIG_NFC_VIRTUAL_NCI=m
2117 +CONFIG_NFC_FDP=m
2118 +CONFIG_NFC_FDP_I2C=m
2119 +CONFIG_NFC_PN544=m
2120 +CONFIG_NFC_PN544_I2C=m
2121 +CONFIG_NFC_PN544_MEI=m
2122 +CONFIG_NFC_PN533=m
2123 +CONFIG_NFC_PN533_USB=m
2124 +CONFIG_NFC_PN533_I2C=m
2125 +CONFIG_NFC_PN532_UART=m
2126 +CONFIG_NFC_MICROREAD=m
2127 +CONFIG_NFC_MICROREAD_I2C=m
2128 +CONFIG_NFC_MICROREAD_MEI=m
2129 +CONFIG_NFC_MRVL=m
2130 +CONFIG_NFC_MRVL_USB=m
2131 +CONFIG_NFC_MRVL_UART=m
2132 +CONFIG_NFC_MRVL_I2C=m
2133 +CONFIG_NFC_MRVL_SPI=m
2134 +CONFIG_NFC_ST21NFCA=m
2135 +CONFIG_NFC_ST21NFCA_I2C=m
2136 +CONFIG_NFC_ST_NCI=m
2137 +CONFIG_NFC_ST_NCI_I2C=m
2138 +CONFIG_NFC_ST_NCI_SPI=m
2139 +CONFIG_NFC_NXP_NCI=m
2140 +CONFIG_NFC_NXP_NCI_I2C=m
2141 +CONFIG_NFC_S3FWRN5=m
2142 +CONFIG_NFC_S3FWRN5_I2C=m
2143 +CONFIG_NFC_S3FWRN82_UART=m
2144 +CONFIG_NFC_ST95HF=m
2145 +# end of Near Field Communication (NFC) devices
2147 +CONFIG_PSAMPLE=m
2148 +CONFIG_NET_IFE=m
2149 +CONFIG_LWTUNNEL=y
2150 +CONFIG_LWTUNNEL_BPF=y
2151 +CONFIG_DST_CACHE=y
2152 +CONFIG_GRO_CELLS=y
2153 +CONFIG_SOCK_VALIDATE_XMIT=y
2154 +CONFIG_NET_SOCK_MSG=y
2155 +CONFIG_NET_DEVLINK=y
2156 +CONFIG_PAGE_POOL=y
2157 +CONFIG_FAILOVER=m
2158 +CONFIG_ETHTOOL_NETLINK=y
2159 +CONFIG_HAVE_EBPF_JIT=y
2162 +# Device Drivers
2164 +CONFIG_HAVE_EISA=y
2165 +# CONFIG_EISA is not set
2166 +CONFIG_HAVE_PCI=y
2167 +CONFIG_PCI=y
2168 +CONFIG_PCI_DOMAINS=y
2169 +CONFIG_PCIEPORTBUS=y
2170 +CONFIG_HOTPLUG_PCI_PCIE=y
2171 +CONFIG_PCIEAER=y
2172 +# CONFIG_PCIEAER_INJECT is not set
2173 +# CONFIG_PCIE_ECRC is not set
2174 +CONFIG_PCIEASPM=y
2175 +CONFIG_PCIEASPM_DEFAULT=y
2176 +# CONFIG_PCIEASPM_POWERSAVE is not set
2177 +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
2178 +# CONFIG_PCIEASPM_PERFORMANCE is not set
2179 +CONFIG_PCIE_PME=y
2180 +CONFIG_PCIE_DPC=y
2181 +CONFIG_PCIE_PTM=y
2182 +# CONFIG_PCIE_EDR is not set
2183 +CONFIG_PCI_MSI=y
2184 +CONFIG_PCI_MSI_IRQ_DOMAIN=y
2185 +CONFIG_PCI_QUIRKS=y
2186 +# CONFIG_PCI_DEBUG is not set
2187 +CONFIG_PCI_REALLOC_ENABLE_AUTO=y
2188 +CONFIG_PCI_STUB=m
2189 +CONFIG_PCI_PF_STUB=m
2190 +CONFIG_XEN_PCIDEV_FRONTEND=m
2191 +CONFIG_PCI_ATS=y
2192 +CONFIG_PCI_LOCKLESS_CONFIG=y
2193 +CONFIG_PCI_IOV=y
2194 +CONFIG_PCI_PRI=y
2195 +CONFIG_PCI_PASID=y
2196 +# CONFIG_PCI_P2PDMA is not set
2197 +CONFIG_PCI_LABEL=y
2198 +CONFIG_PCI_HYPERV=m
2199 +# CONFIG_PCIE_BUS_TUNE_OFF is not set
2200 +CONFIG_PCIE_BUS_DEFAULT=y
2201 +# CONFIG_PCIE_BUS_SAFE is not set
2202 +# CONFIG_PCIE_BUS_PERFORMANCE is not set
2203 +# CONFIG_PCIE_BUS_PEER2PEER is not set
2204 +CONFIG_HOTPLUG_PCI=y
2205 +CONFIG_HOTPLUG_PCI_ACPI=y
2206 +CONFIG_HOTPLUG_PCI_ACPI_IBM=m
2207 +CONFIG_HOTPLUG_PCI_CPCI=y
2208 +CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
2209 +CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
2210 +CONFIG_HOTPLUG_PCI_SHPC=y
2213 +# PCI controller drivers
2215 +CONFIG_VMD=m
2216 +CONFIG_PCI_HYPERV_INTERFACE=m
2219 +# DesignWare PCI Core Support
2221 +CONFIG_PCIE_DW=y
2222 +CONFIG_PCIE_DW_HOST=y
2223 +CONFIG_PCIE_DW_EP=y
2224 +CONFIG_PCIE_DW_PLAT=y
2225 +CONFIG_PCIE_DW_PLAT_HOST=y
2226 +CONFIG_PCIE_DW_PLAT_EP=y
2227 +# CONFIG_PCI_MESON is not set
2228 +# end of DesignWare PCI Core Support
2231 +# Mobiveil PCIe Core Support
2233 +# end of Mobiveil PCIe Core Support
2236 +# Cadence PCIe controllers support
2238 +# end of Cadence PCIe controllers support
2239 +# end of PCI controller drivers
2242 +# PCI Endpoint
2244 +CONFIG_PCI_ENDPOINT=y
2245 +CONFIG_PCI_ENDPOINT_CONFIGFS=y
2246 +# CONFIG_PCI_EPF_TEST is not set
2247 +CONFIG_PCI_EPF_NTB=m
2248 +# end of PCI Endpoint
2251 +# PCI switch controller drivers
2253 +CONFIG_PCI_SW_SWITCHTEC=m
2254 +# end of PCI switch controller drivers
2256 +CONFIG_CXL_BUS=m
2257 +CONFIG_CXL_MEM=m
2258 +# CONFIG_CXL_MEM_RAW_COMMANDS is not set
2259 +CONFIG_PCCARD=m
2260 +CONFIG_PCMCIA=m
2261 +CONFIG_PCMCIA_LOAD_CIS=y
2262 +CONFIG_CARDBUS=y
2265 +# PC-card bridges
2267 +CONFIG_YENTA=m
2268 +CONFIG_YENTA_O2=y
2269 +CONFIG_YENTA_RICOH=y
2270 +CONFIG_YENTA_TI=y
2271 +CONFIG_YENTA_ENE_TUNE=y
2272 +CONFIG_YENTA_TOSHIBA=y
2273 +CONFIG_PD6729=m
2274 +CONFIG_I82092=m
2275 +CONFIG_PCCARD_NONSTATIC=y
2276 +CONFIG_RAPIDIO=y
2277 +CONFIG_RAPIDIO_TSI721=m
2278 +CONFIG_RAPIDIO_DISC_TIMEOUT=30
2279 +# CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS is not set
2280 +CONFIG_RAPIDIO_DMA_ENGINE=y
2281 +# CONFIG_RAPIDIO_DEBUG is not set
2282 +CONFIG_RAPIDIO_ENUM_BASIC=m
2283 +CONFIG_RAPIDIO_CHMAN=m
2284 +CONFIG_RAPIDIO_MPORT_CDEV=m
2287 +# RapidIO Switch drivers
2289 +CONFIG_RAPIDIO_TSI57X=m
2290 +CONFIG_RAPIDIO_CPS_XX=m
2291 +CONFIG_RAPIDIO_TSI568=m
2292 +CONFIG_RAPIDIO_CPS_GEN2=m
2293 +CONFIG_RAPIDIO_RXS_GEN3=m
2294 +# end of RapidIO Switch drivers
2297 +# Generic Driver Options
2299 +CONFIG_AUXILIARY_BUS=y
2300 +CONFIG_UEVENT_HELPER=y
2301 +CONFIG_UEVENT_HELPER_PATH=""
2302 +CONFIG_DEVTMPFS=y
2303 +CONFIG_DEVTMPFS_MOUNT=y
2304 +# CONFIG_STANDALONE is not set
2305 +CONFIG_PREVENT_FIRMWARE_BUILD=y
2308 +# Firmware loader
2310 +CONFIG_FW_LOADER=y
2311 +CONFIG_FW_LOADER_PAGED_BUF=y
2312 +CONFIG_EXTRA_FIRMWARE=""
2313 +CONFIG_FW_LOADER_USER_HELPER=y
2314 +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
2315 +CONFIG_FW_LOADER_COMPRESS=y
2316 +CONFIG_FW_CACHE=y
2317 +# end of Firmware loader
2319 +CONFIG_WANT_DEV_COREDUMP=y
2320 +CONFIG_ALLOW_DEV_COREDUMP=y
2321 +CONFIG_DEV_COREDUMP=y
2322 +# CONFIG_DEBUG_DRIVER is not set
2323 +# CONFIG_DEBUG_DEVRES is not set
2324 +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
2325 +CONFIG_HMEM_REPORTING=y
2326 +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
2327 +CONFIG_SYS_HYPERVISOR=y
2328 +CONFIG_GENERIC_CPU_AUTOPROBE=y
2329 +CONFIG_GENERIC_CPU_VULNERABILITIES=y
2330 +CONFIG_REGMAP=y
2331 +CONFIG_REGMAP_I2C=y
2332 +CONFIG_REGMAP_SLIMBUS=m
2333 +CONFIG_REGMAP_SPI=y
2334 +CONFIG_REGMAP_SPMI=m
2335 +CONFIG_REGMAP_W1=m
2336 +CONFIG_REGMAP_MMIO=y
2337 +CONFIG_REGMAP_IRQ=y
2338 +CONFIG_REGMAP_SOUNDWIRE=m
2339 +CONFIG_REGMAP_SCCB=m
2340 +CONFIG_REGMAP_I3C=m
2341 +CONFIG_REGMAP_SPI_AVMM=m
2342 +CONFIG_DMA_SHARED_BUFFER=y
2343 +# CONFIG_DMA_FENCE_TRACE is not set
2344 +# end of Generic Driver Options
2347 +# Bus devices
2349 +CONFIG_MHI_BUS=m
2350 +# CONFIG_MHI_BUS_DEBUG is not set
2351 +CONFIG_MHI_BUS_PCI_GENERIC=m
2352 +# end of Bus devices
2354 +CONFIG_CONNECTOR=y
2355 +CONFIG_PROC_EVENTS=y
2356 +CONFIG_GNSS=m
2357 +CONFIG_GNSS_SERIAL=m
2358 +CONFIG_GNSS_MTK_SERIAL=m
2359 +CONFIG_GNSS_SIRF_SERIAL=m
2360 +CONFIG_GNSS_UBX_SERIAL=m
2361 +CONFIG_MTD=m
2362 +# CONFIG_MTD_TESTS is not set
2365 +# Partition parsers
2367 +CONFIG_MTD_AR7_PARTS=m
2368 +CONFIG_MTD_CMDLINE_PARTS=m
2369 +CONFIG_MTD_REDBOOT_PARTS=m
2370 +CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
2371 +# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
2372 +# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
2373 +# end of Partition parsers
2376 +# User Modules And Translation Layers
2378 +CONFIG_MTD_BLKDEVS=m
2379 +CONFIG_MTD_BLOCK=m
2380 +CONFIG_MTD_BLOCK_RO=m
2381 +CONFIG_FTL=m
2382 +CONFIG_NFTL=m
2383 +CONFIG_NFTL_RW=y
2384 +CONFIG_INFTL=m
2385 +CONFIG_RFD_FTL=m
2386 +CONFIG_SSFDC=m
2387 +CONFIG_SM_FTL=m
2388 +CONFIG_MTD_OOPS=m
2389 +CONFIG_MTD_PSTORE=m
2390 +CONFIG_MTD_SWAP=m
2391 +# CONFIG_MTD_PARTITIONED_MASTER is not set
2394 +# RAM/ROM/Flash chip drivers
2396 +CONFIG_MTD_CFI=m
2397 +CONFIG_MTD_JEDECPROBE=m
2398 +CONFIG_MTD_GEN_PROBE=m
2399 +# CONFIG_MTD_CFI_ADV_OPTIONS is not set
2400 +CONFIG_MTD_MAP_BANK_WIDTH_1=y
2401 +CONFIG_MTD_MAP_BANK_WIDTH_2=y
2402 +CONFIG_MTD_MAP_BANK_WIDTH_4=y
2403 +CONFIG_MTD_CFI_I1=y
2404 +CONFIG_MTD_CFI_I2=y
2405 +CONFIG_MTD_CFI_INTELEXT=m
2406 +CONFIG_MTD_CFI_AMDSTD=m
2407 +CONFIG_MTD_CFI_STAA=m
2408 +CONFIG_MTD_CFI_UTIL=m
2409 +CONFIG_MTD_RAM=m
2410 +CONFIG_MTD_ROM=m
2411 +CONFIG_MTD_ABSENT=m
2412 +# end of RAM/ROM/Flash chip drivers
2415 +# Mapping drivers for chip access
2417 +CONFIG_MTD_COMPLEX_MAPPINGS=y
2418 +CONFIG_MTD_PHYSMAP=m
2419 +# CONFIG_MTD_PHYSMAP_COMPAT is not set
2420 +CONFIG_MTD_PHYSMAP_GPIO_ADDR=y
2421 +CONFIG_MTD_SBC_GXX=m
2422 +CONFIG_MTD_AMD76XROM=m
2423 +CONFIG_MTD_ICHXROM=m
2424 +CONFIG_MTD_ESB2ROM=m
2425 +CONFIG_MTD_CK804XROM=m
2426 +CONFIG_MTD_SCB2_FLASH=m
2427 +CONFIG_MTD_NETtel=m
2428 +CONFIG_MTD_L440GX=m
2429 +CONFIG_MTD_PCI=m
2430 +CONFIG_MTD_PCMCIA=m
2431 +# CONFIG_MTD_PCMCIA_ANONYMOUS is not set
2432 +CONFIG_MTD_INTEL_VR_NOR=m
2433 +CONFIG_MTD_PLATRAM=m
2434 +# end of Mapping drivers for chip access
2437 +# Self-contained MTD device drivers
2439 +CONFIG_MTD_PMC551=m
2440 +# CONFIG_MTD_PMC551_BUGFIX is not set
2441 +# CONFIG_MTD_PMC551_DEBUG is not set
2442 +CONFIG_MTD_DATAFLASH=m
2443 +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
2444 +CONFIG_MTD_DATAFLASH_OTP=y
2445 +CONFIG_MTD_MCHP23K256=m
2446 +CONFIG_MTD_SST25L=m
2447 +CONFIG_MTD_SLRAM=m
2448 +CONFIG_MTD_PHRAM=m
2449 +CONFIG_MTD_MTDRAM=m
2450 +CONFIG_MTDRAM_TOTAL_SIZE=4096
2451 +CONFIG_MTDRAM_ERASE_SIZE=128
2452 +CONFIG_MTD_BLOCK2MTD=m
2455 +# Disk-On-Chip Device Drivers
2457 +# CONFIG_MTD_DOCG3 is not set
2458 +# end of Self-contained MTD device drivers
2461 +# NAND
2463 +CONFIG_MTD_NAND_CORE=m
2464 +CONFIG_MTD_ONENAND=m
2465 +CONFIG_MTD_ONENAND_VERIFY_WRITE=y
2466 +CONFIG_MTD_ONENAND_GENERIC=m
2467 +# CONFIG_MTD_ONENAND_OTP is not set
2468 +CONFIG_MTD_ONENAND_2X_PROGRAM=y
2469 +CONFIG_MTD_RAW_NAND=m
2472 +# Raw/parallel NAND flash controllers
2474 +CONFIG_MTD_NAND_DENALI=m
2475 +CONFIG_MTD_NAND_DENALI_PCI=m
2476 +CONFIG_MTD_NAND_CAFE=m
2477 +CONFIG_MTD_NAND_MXIC=m
2478 +CONFIG_MTD_NAND_GPIO=m
2479 +CONFIG_MTD_NAND_PLATFORM=m
2480 +CONFIG_MTD_NAND_ARASAN=m
2483 +# Misc
2485 +CONFIG_MTD_SM_COMMON=m
2486 +CONFIG_MTD_NAND_NANDSIM=m
2487 +CONFIG_MTD_NAND_RICOH=m
2488 +CONFIG_MTD_NAND_DISKONCHIP=m
2489 +# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
2490 +CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
2491 +# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
2492 +CONFIG_MTD_SPI_NAND=m
2495 +# ECC engine support
2497 +CONFIG_MTD_NAND_ECC=y
2498 +CONFIG_MTD_NAND_ECC_SW_HAMMING=y
2499 +# CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC is not set
2500 +CONFIG_MTD_NAND_ECC_SW_BCH=y
2501 +# end of ECC engine support
2502 +# end of NAND
2505 +# LPDDR & LPDDR2 PCM memory drivers
2507 +CONFIG_MTD_LPDDR=m
2508 +CONFIG_MTD_QINFO_PROBE=m
2509 +# end of LPDDR & LPDDR2 PCM memory drivers
2511 +CONFIG_MTD_SPI_NOR=m
2512 +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
2513 +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set
2514 +CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y
2515 +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set
2516 +# CONFIG_SPI_INTEL_SPI_PCI is not set
2517 +# CONFIG_SPI_INTEL_SPI_PLATFORM is not set
2518 +CONFIG_MTD_UBI=m
2519 +CONFIG_MTD_UBI_WL_THRESHOLD=4096
2520 +CONFIG_MTD_UBI_BEB_LIMIT=20
2521 +CONFIG_MTD_UBI_FASTMAP=y
2522 +CONFIG_MTD_UBI_GLUEBI=m
2523 +CONFIG_MTD_UBI_BLOCK=y
2524 +CONFIG_MTD_HYPERBUS=m
2525 +# CONFIG_OF is not set
2526 +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
2527 +CONFIG_PARPORT=m
2528 +CONFIG_PARPORT_PC=m
2529 +CONFIG_PARPORT_SERIAL=m
2530 +CONFIG_PARPORT_PC_FIFO=y
2531 +# CONFIG_PARPORT_PC_SUPERIO is not set
2532 +CONFIG_PARPORT_PC_PCMCIA=m
2533 +CONFIG_PARPORT_AX88796=m
2534 +CONFIG_PARPORT_1284=y
2535 +CONFIG_PARPORT_NOT_PC=y
2536 +CONFIG_PNP=y
2537 +# CONFIG_PNP_DEBUG_MESSAGES is not set
2540 +# Protocols
2542 +CONFIG_PNPACPI=y
2543 +CONFIG_BLK_DEV=y
2544 +CONFIG_BLK_DEV_NULL_BLK=m
2545 +CONFIG_BLK_DEV_FD=m
2546 +CONFIG_CDROM=y
2547 +CONFIG_PARIDE=m
2550 +# Parallel IDE high-level drivers
2552 +CONFIG_PARIDE_PD=m
2553 +CONFIG_PARIDE_PCD=m
2554 +CONFIG_PARIDE_PF=m
2555 +CONFIG_PARIDE_PT=m
2556 +CONFIG_PARIDE_PG=m
2559 +# Parallel IDE protocol modules
2561 +CONFIG_PARIDE_ATEN=m
2562 +CONFIG_PARIDE_BPCK=m
2563 +CONFIG_PARIDE_COMM=m
2564 +CONFIG_PARIDE_DSTR=m
2565 +CONFIG_PARIDE_FIT2=m
2566 +CONFIG_PARIDE_FIT3=m
2567 +CONFIG_PARIDE_EPAT=m
2568 +CONFIG_PARIDE_EPATC8=y
2569 +CONFIG_PARIDE_EPIA=m
2570 +CONFIG_PARIDE_FRIQ=m
2571 +CONFIG_PARIDE_FRPW=m
2572 +CONFIG_PARIDE_KBIC=m
2573 +CONFIG_PARIDE_KTTI=m
2574 +CONFIG_PARIDE_ON20=m
2575 +CONFIG_PARIDE_ON26=m
2576 +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
2577 +CONFIG_ZRAM=m
2578 +CONFIG_ZRAM_DEF_COMP_LZORLE=y
2579 +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set
2580 +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set
2581 +# CONFIG_ZRAM_DEF_COMP_LZO is not set
2582 +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set
2583 +# CONFIG_ZRAM_DEF_COMP_842 is not set
2584 +CONFIG_ZRAM_DEF_COMP="lzo-rle"
2585 +CONFIG_ZRAM_WRITEBACK=y
2586 +CONFIG_ZRAM_MEMORY_TRACKING=y
2587 +CONFIG_BLK_DEV_UMEM=m
2588 +CONFIG_BLK_DEV_LOOP=y
2589 +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
2590 +CONFIG_BLK_DEV_CRYPTOLOOP=m
2591 +CONFIG_BLK_DEV_DRBD=m
2592 +# CONFIG_DRBD_FAULT_INJECTION is not set
2593 +CONFIG_BLK_DEV_NBD=m
2594 +CONFIG_BLK_DEV_SX8=m
2595 +CONFIG_BLK_DEV_RAM=m
2596 +CONFIG_BLK_DEV_RAM_COUNT=16
2597 +CONFIG_BLK_DEV_RAM_SIZE=65536
2598 +CONFIG_CDROM_PKTCDVD=m
2599 +CONFIG_CDROM_PKTCDVD_BUFFERS=8
2600 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set
2601 +CONFIG_ATA_OVER_ETH=m
2602 +CONFIG_XEN_BLKDEV_FRONTEND=y
2603 +CONFIG_XEN_BLKDEV_BACKEND=m
2604 +CONFIG_VIRTIO_BLK=m
2605 +CONFIG_BLK_DEV_RBD=m
2606 +CONFIG_BLK_DEV_RSXX=m
2607 +CONFIG_BLK_DEV_RNBD=y
2608 +CONFIG_BLK_DEV_RNBD_CLIENT=m
2609 +CONFIG_BLK_DEV_RNBD_SERVER=m
2612 +# NVME Support
2614 +CONFIG_NVME_CORE=m
2615 +CONFIG_BLK_DEV_NVME=m
2616 +CONFIG_NVME_MULTIPATH=y
2617 +CONFIG_NVME_HWMON=y
2618 +CONFIG_NVME_FABRICS=m
2619 +CONFIG_NVME_RDMA=m
2620 +CONFIG_NVME_FC=m
2621 +CONFIG_NVME_TCP=m
2622 +CONFIG_NVME_TARGET=m
2623 +CONFIG_NVME_TARGET_PASSTHRU=y
2624 +CONFIG_NVME_TARGET_LOOP=m
2625 +CONFIG_NVME_TARGET_RDMA=m
2626 +CONFIG_NVME_TARGET_FC=m
2627 +# CONFIG_NVME_TARGET_FCLOOP is not set
2628 +CONFIG_NVME_TARGET_TCP=m
2629 +# end of NVME Support
2632 +# Misc devices
2634 +CONFIG_SENSORS_LIS3LV02D=m
2635 +CONFIG_AD525X_DPOT=m
2636 +CONFIG_AD525X_DPOT_I2C=m
2637 +CONFIG_AD525X_DPOT_SPI=m
2638 +CONFIG_DUMMY_IRQ=m
2639 +CONFIG_IBM_ASM=m
2640 +CONFIG_PHANTOM=m
2641 +CONFIG_TIFM_CORE=m
2642 +CONFIG_TIFM_7XX1=m
2643 +CONFIG_ICS932S401=m
2644 +CONFIG_ENCLOSURE_SERVICES=m
2645 +CONFIG_SGI_XP=m
2646 +CONFIG_HP_ILO=m
2647 +CONFIG_SGI_GRU=m
2648 +# CONFIG_SGI_GRU_DEBUG is not set
2649 +CONFIG_APDS9802ALS=m
2650 +CONFIG_ISL29003=m
2651 +CONFIG_ISL29020=m
2652 +CONFIG_SENSORS_TSL2550=m
2653 +CONFIG_SENSORS_BH1770=m
2654 +CONFIG_SENSORS_APDS990X=m
2655 +CONFIG_HMC6352=m
2656 +CONFIG_DS1682=m
2657 +CONFIG_VMWARE_BALLOON=m
2658 +CONFIG_LATTICE_ECP3_CONFIG=m
2659 +CONFIG_SRAM=y
2660 +# CONFIG_PCI_ENDPOINT_TEST is not set
2661 +CONFIG_XILINX_SDFEC=m
2662 +CONFIG_MISC_RTSX=m
2663 +CONFIG_PVPANIC=m
2664 +CONFIG_C2PORT=m
2665 +CONFIG_C2PORT_DURAMAR_2150=m
2668 +# EEPROM support
2670 +CONFIG_EEPROM_AT24=m
2671 +CONFIG_EEPROM_AT25=m
2672 +CONFIG_EEPROM_LEGACY=m
2673 +CONFIG_EEPROM_MAX6875=m
2674 +CONFIG_EEPROM_93CX6=m
2675 +CONFIG_EEPROM_93XX46=m
2676 +CONFIG_EEPROM_IDT_89HPESX=m
2677 +CONFIG_EEPROM_EE1004=m
2678 +# end of EEPROM support
2680 +CONFIG_CB710_CORE=m
2681 +# CONFIG_CB710_DEBUG is not set
2682 +CONFIG_CB710_DEBUG_ASSUMPTIONS=y
2685 +# Texas Instruments shared transport line discipline
2687 +CONFIG_TI_ST=m
2688 +# end of Texas Instruments shared transport line discipline
2690 +CONFIG_SENSORS_LIS3_I2C=m
2691 +CONFIG_ALTERA_STAPL=m
2692 +CONFIG_INTEL_MEI=m
2693 +CONFIG_INTEL_MEI_ME=m
2694 +CONFIG_INTEL_MEI_TXE=m
2695 +CONFIG_INTEL_MEI_HDCP=m
2696 +CONFIG_VMWARE_VMCI=m
2697 +CONFIG_GENWQE=m
2698 +CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
2699 +CONFIG_ECHO=m
2700 +CONFIG_BCM_VK=m
2701 +CONFIG_BCM_VK_TTY=y
2702 +CONFIG_MISC_ALCOR_PCI=m
2703 +CONFIG_MISC_RTSX_PCI=m
2704 +CONFIG_MISC_RTSX_USB=m
2705 +CONFIG_HABANA_AI=m
2706 +CONFIG_UACCE=m
2707 +# end of Misc devices
2709 +CONFIG_HAVE_IDE=y
2710 +# CONFIG_IDE is not set
2713 +# SCSI device support
2715 +CONFIG_SCSI_MOD=y
2716 +CONFIG_RAID_ATTRS=m
2717 +CONFIG_SCSI=y
2718 +CONFIG_SCSI_DMA=y
2719 +CONFIG_SCSI_NETLINK=y
2720 +CONFIG_SCSI_PROC_FS=y
2723 +# SCSI support type (disk, tape, CD-ROM)
2725 +CONFIG_BLK_DEV_SD=y
2726 +CONFIG_CHR_DEV_ST=m
2727 +CONFIG_BLK_DEV_SR=y
2728 +CONFIG_CHR_DEV_SG=y
2729 +CONFIG_CHR_DEV_SCH=m
2730 +CONFIG_SCSI_ENCLOSURE=m
2731 +CONFIG_SCSI_CONSTANTS=y
2732 +CONFIG_SCSI_LOGGING=y
2733 +CONFIG_SCSI_SCAN_ASYNC=y
2736 +# SCSI Transports
2738 +CONFIG_SCSI_SPI_ATTRS=m
2739 +CONFIG_SCSI_FC_ATTRS=m
2740 +CONFIG_SCSI_ISCSI_ATTRS=m
2741 +CONFIG_SCSI_SAS_ATTRS=m
2742 +CONFIG_SCSI_SAS_LIBSAS=m
2743 +CONFIG_SCSI_SAS_ATA=y
2744 +CONFIG_SCSI_SAS_HOST_SMP=y
2745 +CONFIG_SCSI_SRP_ATTRS=m
2746 +# end of SCSI Transports
2748 +CONFIG_SCSI_LOWLEVEL=y
2749 +CONFIG_ISCSI_TCP=m
2750 +CONFIG_ISCSI_BOOT_SYSFS=m
2751 +CONFIG_SCSI_CXGB3_ISCSI=m
2752 +CONFIG_SCSI_CXGB4_ISCSI=m
2753 +CONFIG_SCSI_BNX2_ISCSI=m
2754 +CONFIG_SCSI_BNX2X_FCOE=m
2755 +CONFIG_BE2ISCSI=m
2756 +CONFIG_BLK_DEV_3W_XXXX_RAID=m
2757 +CONFIG_SCSI_HPSA=m
2758 +CONFIG_SCSI_3W_9XXX=m
2759 +CONFIG_SCSI_3W_SAS=m
2760 +CONFIG_SCSI_ACARD=m
2761 +CONFIG_SCSI_AACRAID=m
2762 +CONFIG_SCSI_AIC7XXX=m
2763 +CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
2764 +CONFIG_AIC7XXX_RESET_DELAY_MS=5000
2765 +# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
2766 +CONFIG_AIC7XXX_DEBUG_MASK=0
2767 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
2768 +CONFIG_SCSI_AIC79XX=m
2769 +CONFIG_AIC79XX_CMDS_PER_DEVICE=32
2770 +CONFIG_AIC79XX_RESET_DELAY_MS=5000
2771 +# CONFIG_AIC79XX_DEBUG_ENABLE is not set
2772 +CONFIG_AIC79XX_DEBUG_MASK=0
2773 +CONFIG_AIC79XX_REG_PRETTY_PRINT=y
2774 +CONFIG_SCSI_AIC94XX=m
2775 +# CONFIG_AIC94XX_DEBUG is not set
2776 +CONFIG_SCSI_MVSAS=m
2777 +# CONFIG_SCSI_MVSAS_DEBUG is not set
2778 +# CONFIG_SCSI_MVSAS_TASKLET is not set
2779 +CONFIG_SCSI_MVUMI=m
2780 +CONFIG_SCSI_DPT_I2O=m
2781 +CONFIG_SCSI_ADVANSYS=m
2782 +CONFIG_SCSI_ARCMSR=m
2783 +CONFIG_SCSI_ESAS2R=m
2784 +CONFIG_MEGARAID_NEWGEN=y
2785 +CONFIG_MEGARAID_MM=m
2786 +CONFIG_MEGARAID_MAILBOX=m
2787 +CONFIG_MEGARAID_LEGACY=m
2788 +CONFIG_MEGARAID_SAS=m
2789 +CONFIG_SCSI_MPT3SAS=m
2790 +CONFIG_SCSI_MPT2SAS_MAX_SGE=128
2791 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128
2792 +CONFIG_SCSI_MPT2SAS=m
2793 +CONFIG_SCSI_SMARTPQI=m
2794 +CONFIG_SCSI_UFSHCD=m
2795 +CONFIG_SCSI_UFSHCD_PCI=m
2796 +CONFIG_SCSI_UFS_DWC_TC_PCI=m
2797 +CONFIG_SCSI_UFSHCD_PLATFORM=m
2798 +CONFIG_SCSI_UFS_CDNS_PLATFORM=m
2799 +CONFIG_SCSI_UFS_DWC_TC_PLATFORM=m
2800 +CONFIG_SCSI_UFS_BSG=y
2801 +CONFIG_SCSI_UFS_CRYPTO=y
2802 +CONFIG_SCSI_HPTIOP=m
2803 +CONFIG_SCSI_BUSLOGIC=m
2804 +CONFIG_SCSI_FLASHPOINT=y
2805 +CONFIG_SCSI_MYRB=m
2806 +CONFIG_SCSI_MYRS=m
2807 +CONFIG_VMWARE_PVSCSI=m
2808 +CONFIG_XEN_SCSI_FRONTEND=m
2809 +CONFIG_HYPERV_STORAGE=m
2810 +CONFIG_LIBFC=m
2811 +CONFIG_LIBFCOE=m
2812 +CONFIG_FCOE=m
2813 +CONFIG_FCOE_FNIC=m
2814 +CONFIG_SCSI_SNIC=m
2815 +# CONFIG_SCSI_SNIC_DEBUG_FS is not set
2816 +CONFIG_SCSI_DMX3191D=m
2817 +CONFIG_SCSI_FDOMAIN=m
2818 +CONFIG_SCSI_FDOMAIN_PCI=m
2819 +CONFIG_SCSI_ISCI=m
2820 +CONFIG_SCSI_IPS=m
2821 +CONFIG_SCSI_INITIO=m
2822 +CONFIG_SCSI_INIA100=m
2823 +CONFIG_SCSI_PPA=m
2824 +CONFIG_SCSI_IMM=m
2825 +# CONFIG_SCSI_IZIP_EPP16 is not set
2826 +# CONFIG_SCSI_IZIP_SLOW_CTR is not set
2827 +CONFIG_SCSI_STEX=m
2828 +CONFIG_SCSI_SYM53C8XX_2=m
2829 +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
2830 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
2831 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
2832 +CONFIG_SCSI_SYM53C8XX_MMIO=y
2833 +CONFIG_SCSI_IPR=m
2834 +CONFIG_SCSI_IPR_TRACE=y
2835 +CONFIG_SCSI_IPR_DUMP=y
2836 +CONFIG_SCSI_QLOGIC_1280=m
2837 +CONFIG_SCSI_QLA_FC=m
2838 +CONFIG_TCM_QLA2XXX=m
2839 +# CONFIG_TCM_QLA2XXX_DEBUG is not set
2840 +CONFIG_SCSI_QLA_ISCSI=m
2841 +CONFIG_QEDI=m
2842 +CONFIG_QEDF=m
2843 +CONFIG_SCSI_LPFC=m
2844 +# CONFIG_SCSI_LPFC_DEBUG_FS is not set
2845 +CONFIG_SCSI_DC395x=m
2846 +CONFIG_SCSI_AM53C974=m
2847 +CONFIG_SCSI_WD719X=m
2848 +CONFIG_SCSI_DEBUG=m
2849 +CONFIG_SCSI_PMCRAID=m
2850 +CONFIG_SCSI_PM8001=m
2851 +CONFIG_SCSI_BFA_FC=m
2852 +CONFIG_SCSI_VIRTIO=m
2853 +CONFIG_SCSI_CHELSIO_FCOE=m
2854 +CONFIG_SCSI_LOWLEVEL_PCMCIA=y
2855 +CONFIG_PCMCIA_AHA152X=m
2856 +CONFIG_PCMCIA_FDOMAIN=m
2857 +CONFIG_PCMCIA_QLOGIC=m
2858 +CONFIG_PCMCIA_SYM53C500=m
2859 +CONFIG_SCSI_DH=y
2860 +CONFIG_SCSI_DH_RDAC=m
2861 +CONFIG_SCSI_DH_HP_SW=m
2862 +CONFIG_SCSI_DH_EMC=m
2863 +CONFIG_SCSI_DH_ALUA=m
2864 +# end of SCSI device support
2866 +CONFIG_ATA=y
2867 +CONFIG_SATA_HOST=y
2868 +CONFIG_PATA_TIMINGS=y
2869 +CONFIG_ATA_VERBOSE_ERROR=y
2870 +CONFIG_ATA_FORCE=y
2871 +CONFIG_ATA_ACPI=y
2872 +CONFIG_SATA_ZPODD=y
2873 +CONFIG_SATA_PMP=y
2876 +# Controllers with non-SFF native interface
2878 +CONFIG_SATA_AHCI=m
2879 +CONFIG_SATA_MOBILE_LPM_POLICY=3
2880 +CONFIG_SATA_AHCI_PLATFORM=m
2881 +CONFIG_SATA_INIC162X=m
2882 +CONFIG_SATA_ACARD_AHCI=m
2883 +CONFIG_SATA_SIL24=m
2884 +CONFIG_ATA_SFF=y
2887 +# SFF controllers with custom DMA interface
2889 +CONFIG_PDC_ADMA=m
2890 +CONFIG_SATA_QSTOR=m
2891 +CONFIG_SATA_SX4=m
2892 +CONFIG_ATA_BMDMA=y
2895 +# SATA SFF controllers with BMDMA
2897 +CONFIG_ATA_PIIX=y
2898 +CONFIG_SATA_DWC=m
2899 +CONFIG_SATA_DWC_OLD_DMA=y
2900 +# CONFIG_SATA_DWC_DEBUG is not set
2901 +CONFIG_SATA_MV=m
2902 +CONFIG_SATA_NV=m
2903 +CONFIG_SATA_PROMISE=m
2904 +CONFIG_SATA_SIL=m
2905 +CONFIG_SATA_SIS=m
2906 +CONFIG_SATA_SVW=m
2907 +CONFIG_SATA_ULI=m
2908 +CONFIG_SATA_VIA=m
2909 +CONFIG_SATA_VITESSE=m
2912 +# PATA SFF controllers with BMDMA
2914 +CONFIG_PATA_ALI=m
2915 +CONFIG_PATA_AMD=m
2916 +CONFIG_PATA_ARTOP=m
2917 +CONFIG_PATA_ATIIXP=m
2918 +CONFIG_PATA_ATP867X=m
2919 +CONFIG_PATA_CMD64X=m
2920 +CONFIG_PATA_CYPRESS=m
2921 +CONFIG_PATA_EFAR=m
2922 +CONFIG_PATA_HPT366=m
2923 +CONFIG_PATA_HPT37X=m
2924 +CONFIG_PATA_HPT3X2N=m
2925 +CONFIG_PATA_HPT3X3=m
2926 +# CONFIG_PATA_HPT3X3_DMA is not set
2927 +CONFIG_PATA_IT8213=m
2928 +CONFIG_PATA_IT821X=m
2929 +CONFIG_PATA_JMICRON=m
2930 +CONFIG_PATA_MARVELL=m
2931 +CONFIG_PATA_NETCELL=m
2932 +CONFIG_PATA_NINJA32=m
2933 +CONFIG_PATA_NS87415=m
2934 +CONFIG_PATA_OLDPIIX=m
2935 +CONFIG_PATA_OPTIDMA=m
2936 +CONFIG_PATA_PDC2027X=m
2937 +CONFIG_PATA_PDC_OLD=m
2938 +CONFIG_PATA_RADISYS=m
2939 +CONFIG_PATA_RDC=m
2940 +CONFIG_PATA_SCH=m
2941 +CONFIG_PATA_SERVERWORKS=m
2942 +CONFIG_PATA_SIL680=m
2943 +CONFIG_PATA_SIS=y
2944 +CONFIG_PATA_TOSHIBA=m
2945 +CONFIG_PATA_TRIFLEX=m
2946 +CONFIG_PATA_VIA=m
2947 +CONFIG_PATA_WINBOND=m
2950 +# PIO-only SFF controllers
2952 +CONFIG_PATA_CMD640_PCI=m
2953 +CONFIG_PATA_MPIIX=m
2954 +CONFIG_PATA_NS87410=m
2955 +CONFIG_PATA_OPTI=m
2956 +CONFIG_PATA_PCMCIA=m
2957 +CONFIG_PATA_PLATFORM=m
2958 +CONFIG_PATA_RZ1000=m
2961 +# Generic fallback / legacy drivers
2963 +CONFIG_PATA_ACPI=m
2964 +CONFIG_ATA_GENERIC=y
2965 +CONFIG_PATA_LEGACY=m
2966 +CONFIG_MD=y
2967 +CONFIG_BLK_DEV_MD=y
2968 +CONFIG_MD_AUTODETECT=y
2969 +CONFIG_MD_LINEAR=m
2970 +CONFIG_MD_RAID0=m
2971 +CONFIG_MD_RAID1=m
2972 +CONFIG_MD_RAID10=m
2973 +CONFIG_MD_RAID456=m
2974 +CONFIG_MD_MULTIPATH=m
2975 +CONFIG_MD_FAULTY=m
2976 +CONFIG_MD_CLUSTER=m
2977 +CONFIG_BCACHE=m
2978 +# CONFIG_BCACHE_DEBUG is not set
2979 +# CONFIG_BCACHE_CLOSURES_DEBUG is not set
2980 +CONFIG_BCACHE_ASYNC_REGISTRATION=y
2981 +CONFIG_BLK_DEV_DM_BUILTIN=y
2982 +CONFIG_BLK_DEV_DM=y
2983 +# CONFIG_DM_DEBUG is not set
2984 +CONFIG_DM_BUFIO=m
2985 +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
2986 +CONFIG_DM_BIO_PRISON=m
2987 +CONFIG_DM_PERSISTENT_DATA=m
2988 +CONFIG_DM_UNSTRIPED=m
2989 +CONFIG_DM_CRYPT=m
2990 +CONFIG_DM_SNAPSHOT=m
2991 +CONFIG_DM_THIN_PROVISIONING=m
2992 +CONFIG_DM_CACHE=m
2993 +CONFIG_DM_CACHE_SMQ=m
2994 +CONFIG_DM_WRITECACHE=m
2995 +CONFIG_DM_EBS=m
2996 +CONFIG_DM_ERA=m
2997 +CONFIG_DM_CLONE=m
2998 +CONFIG_DM_MIRROR=m
2999 +CONFIG_DM_LOG_USERSPACE=m
3000 +CONFIG_DM_RAID=m
3001 +CONFIG_DM_ZERO=m
3002 +CONFIG_DM_MULTIPATH=m
3003 +CONFIG_DM_MULTIPATH_QL=m
3004 +CONFIG_DM_MULTIPATH_ST=m
3005 +CONFIG_DM_MULTIPATH_HST=m
3006 +CONFIG_DM_MULTIPATH_IOA=m
3007 +CONFIG_DM_DELAY=m
3008 +# CONFIG_DM_DUST is not set
3009 +CONFIG_DM_INIT=y
3010 +CONFIG_DM_UEVENT=y
3011 +CONFIG_DM_FLAKEY=m
3012 +CONFIG_DM_VERITY=m
3013 +CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
3014 +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_SECONDARY_KEYRING is not set
3015 +# CONFIG_DM_VERITY_FEC is not set
3016 +CONFIG_DM_SWITCH=m
3017 +CONFIG_DM_LOG_WRITES=m
3018 +CONFIG_DM_INTEGRITY=m
3019 +CONFIG_DM_ZONED=m
3020 +CONFIG_TARGET_CORE=m
3021 +CONFIG_TCM_IBLOCK=m
3022 +CONFIG_TCM_FILEIO=m
3023 +CONFIG_TCM_PSCSI=m
3024 +CONFIG_TCM_USER2=m
3025 +CONFIG_LOOPBACK_TARGET=m
3026 +CONFIG_TCM_FC=m
3027 +CONFIG_ISCSI_TARGET=m
3028 +CONFIG_ISCSI_TARGET_CXGB4=m
3029 +CONFIG_SBP_TARGET=m
3030 +CONFIG_FUSION=y
3031 +CONFIG_FUSION_SPI=m
3032 +CONFIG_FUSION_FC=m
3033 +CONFIG_FUSION_SAS=m
3034 +CONFIG_FUSION_MAX_SGE=128
3035 +CONFIG_FUSION_CTL=m
3036 +CONFIG_FUSION_LAN=m
3037 +CONFIG_FUSION_LOGGING=y
3040 +# IEEE 1394 (FireWire) support
3042 +CONFIG_FIREWIRE=m
3043 +CONFIG_FIREWIRE_OHCI=m
3044 +CONFIG_FIREWIRE_SBP2=m
3045 +CONFIG_FIREWIRE_NET=m
3046 +CONFIG_FIREWIRE_NOSY=m
3047 +# end of IEEE 1394 (FireWire) support
3049 +CONFIG_MACINTOSH_DRIVERS=y
3050 +CONFIG_MAC_EMUMOUSEBTN=m
3051 +CONFIG_NETDEVICES=y
3052 +CONFIG_MII=m
3053 +CONFIG_NET_CORE=y
3054 +CONFIG_BONDING=m
3055 +CONFIG_DUMMY=m
3056 +CONFIG_WIREGUARD=m
3057 +# CONFIG_WIREGUARD_DEBUG is not set
3058 +CONFIG_EQUALIZER=m
3059 +CONFIG_NET_FC=y
3060 +CONFIG_IFB=m
3061 +CONFIG_NET_TEAM=m
3062 +CONFIG_NET_TEAM_MODE_BROADCAST=m
3063 +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
3064 +CONFIG_NET_TEAM_MODE_RANDOM=m
3065 +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
3066 +CONFIG_NET_TEAM_MODE_LOADBALANCE=m
3067 +CONFIG_MACVLAN=m
3068 +CONFIG_MACVTAP=m
3069 +CONFIG_IPVLAN_L3S=y
3070 +CONFIG_IPVLAN=m
3071 +CONFIG_IPVTAP=m
3072 +CONFIG_VXLAN=m
3073 +CONFIG_GENEVE=m
3074 +CONFIG_BAREUDP=m
3075 +CONFIG_GTP=m
3076 +CONFIG_MACSEC=m
3077 +CONFIG_NETCONSOLE=m
3078 +CONFIG_NETCONSOLE_DYNAMIC=y
3079 +CONFIG_NETPOLL=y
3080 +CONFIG_NET_POLL_CONTROLLER=y
3081 +CONFIG_NTB_NETDEV=m
3082 +CONFIG_RIONET=m
3083 +CONFIG_RIONET_TX_SIZE=128
3084 +CONFIG_RIONET_RX_SIZE=128
3085 +CONFIG_TUN=y
3086 +CONFIG_TAP=m
3087 +# CONFIG_TUN_VNET_CROSS_LE is not set
3088 +CONFIG_VETH=m
3089 +CONFIG_VIRTIO_NET=m
3090 +CONFIG_NLMON=m
3091 +CONFIG_NET_VRF=m
3092 +CONFIG_VSOCKMON=m
3093 +CONFIG_MHI_NET=m
3094 +CONFIG_SUNGEM_PHY=m
3095 +CONFIG_ARCNET=m
3096 +CONFIG_ARCNET_1201=m
3097 +CONFIG_ARCNET_1051=m
3098 +CONFIG_ARCNET_RAW=m
3099 +CONFIG_ARCNET_CAP=m
3100 +CONFIG_ARCNET_COM90xx=m
3101 +CONFIG_ARCNET_COM90xxIO=m
3102 +CONFIG_ARCNET_RIM_I=m
3103 +CONFIG_ARCNET_COM20020=m
3104 +CONFIG_ARCNET_COM20020_PCI=m
3105 +CONFIG_ARCNET_COM20020_CS=m
3106 +CONFIG_ATM_DRIVERS=y
3107 +CONFIG_ATM_DUMMY=m
3108 +CONFIG_ATM_TCP=m
3109 +CONFIG_ATM_LANAI=m
3110 +CONFIG_ATM_ENI=m
3111 +# CONFIG_ATM_ENI_DEBUG is not set
3112 +# CONFIG_ATM_ENI_TUNE_BURST is not set
3113 +CONFIG_ATM_FIRESTREAM=m
3114 +CONFIG_ATM_ZATM=m
3115 +# CONFIG_ATM_ZATM_DEBUG is not set
3116 +CONFIG_ATM_NICSTAR=m
3117 +# CONFIG_ATM_NICSTAR_USE_SUNI is not set
3118 +# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
3119 +CONFIG_ATM_IDT77252=m
3120 +# CONFIG_ATM_IDT77252_DEBUG is not set
3121 +# CONFIG_ATM_IDT77252_RCV_ALL is not set
3122 +CONFIG_ATM_IDT77252_USE_SUNI=y
3123 +CONFIG_ATM_AMBASSADOR=m
3124 +# CONFIG_ATM_AMBASSADOR_DEBUG is not set
3125 +CONFIG_ATM_HORIZON=m
3126 +# CONFIG_ATM_HORIZON_DEBUG is not set
3127 +CONFIG_ATM_IA=m
3128 +# CONFIG_ATM_IA_DEBUG is not set
3129 +CONFIG_ATM_FORE200E=m
3130 +# CONFIG_ATM_FORE200E_USE_TASKLET is not set
3131 +CONFIG_ATM_FORE200E_TX_RETRY=16
3132 +CONFIG_ATM_FORE200E_DEBUG=0
3133 +CONFIG_ATM_HE=m
3134 +CONFIG_ATM_HE_USE_SUNI=y
3135 +CONFIG_ATM_SOLOS=m
3136 +CONFIG_CAIF_DRIVERS=y
3137 +CONFIG_CAIF_TTY=m
3138 +CONFIG_CAIF_HSI=m
3139 +CONFIG_CAIF_VIRTIO=m
3142 +# Distributed Switch Architecture drivers
3144 +CONFIG_B53=m
3145 +CONFIG_B53_SPI_DRIVER=m
3146 +CONFIG_B53_MDIO_DRIVER=m
3147 +CONFIG_B53_MMAP_DRIVER=m
3148 +CONFIG_B53_SRAB_DRIVER=m
3149 +CONFIG_B53_SERDES=m
3150 +CONFIG_NET_DSA_BCM_SF2=m
3151 +# CONFIG_NET_DSA_LOOP is not set
3152 +CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK=m
3153 +CONFIG_NET_DSA_LANTIQ_GSWIP=m
3154 +CONFIG_NET_DSA_MT7530=m
3155 +CONFIG_NET_DSA_MV88E6060=m
3156 +CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON=m
3157 +CONFIG_NET_DSA_MICROCHIP_KSZ9477=m
3158 +CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C=m
3159 +CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m
3160 +CONFIG_NET_DSA_MICROCHIP_KSZ8795=m
3161 +CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI=m
3162 +CONFIG_NET_DSA_MV88E6XXX=m
3163 +CONFIG_NET_DSA_MV88E6XXX_PTP=y
3164 +CONFIG_NET_DSA_MSCC_SEVILLE=m
3165 +CONFIG_NET_DSA_AR9331=m
3166 +CONFIG_NET_DSA_SJA1105=m
3167 +CONFIG_NET_DSA_SJA1105_PTP=y
3168 +CONFIG_NET_DSA_SJA1105_TAS=y
3169 +CONFIG_NET_DSA_SJA1105_VL=y
3170 +CONFIG_NET_DSA_XRS700X=m
3171 +CONFIG_NET_DSA_XRS700X_I2C=m
3172 +CONFIG_NET_DSA_XRS700X_MDIO=m
3173 +CONFIG_NET_DSA_QCA8K=m
3174 +CONFIG_NET_DSA_REALTEK_SMI=m
3175 +CONFIG_NET_DSA_SMSC_LAN9303=m
3176 +CONFIG_NET_DSA_SMSC_LAN9303_I2C=m
3177 +CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m
3178 +CONFIG_NET_DSA_VITESSE_VSC73XX=m
3179 +CONFIG_NET_DSA_VITESSE_VSC73XX_SPI=m
3180 +CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM=m
3181 +# end of Distributed Switch Architecture drivers
3183 +CONFIG_ETHERNET=y
3184 +CONFIG_MDIO=m
3185 +CONFIG_NET_VENDOR_3COM=y
3186 +CONFIG_PCMCIA_3C574=m
3187 +CONFIG_PCMCIA_3C589=m
3188 +CONFIG_VORTEX=m
3189 +CONFIG_TYPHOON=m
3190 +CONFIG_NET_VENDOR_ADAPTEC=y
3191 +CONFIG_ADAPTEC_STARFIRE=m
3192 +CONFIG_NET_VENDOR_AGERE=y
3193 +CONFIG_ET131X=m
3194 +CONFIG_NET_VENDOR_ALACRITECH=y
3195 +CONFIG_SLICOSS=m
3196 +CONFIG_NET_VENDOR_ALTEON=y
3197 +CONFIG_ACENIC=m
3198 +# CONFIG_ACENIC_OMIT_TIGON_I is not set
3199 +CONFIG_ALTERA_TSE=m
3200 +CONFIG_NET_VENDOR_AMAZON=y
3201 +CONFIG_ENA_ETHERNET=m
3202 +CONFIG_NET_VENDOR_AMD=y
3203 +CONFIG_AMD8111_ETH=m
3204 +CONFIG_PCNET32=m
3205 +CONFIG_PCMCIA_NMCLAN=m
3206 +CONFIG_AMD_XGBE=m
3207 +CONFIG_AMD_XGBE_DCB=y
3208 +CONFIG_AMD_XGBE_HAVE_ECC=y
3209 +CONFIG_NET_VENDOR_AQUANTIA=y
3210 +CONFIG_AQTION=m
3211 +CONFIG_NET_VENDOR_ARC=y
3212 +CONFIG_NET_VENDOR_ATHEROS=y
3213 +CONFIG_ATL2=m
3214 +CONFIG_ATL1=m
3215 +CONFIG_ATL1E=m
3216 +CONFIG_ATL1C=m
3217 +CONFIG_ALX=m
3218 +CONFIG_NET_VENDOR_BROADCOM=y
3219 +CONFIG_B44=m
3220 +CONFIG_B44_PCI_AUTOSELECT=y
3221 +CONFIG_B44_PCICORE_AUTOSELECT=y
3222 +CONFIG_B44_PCI=y
3223 +CONFIG_BCMGENET=m
3224 +CONFIG_BNX2=m
3225 +CONFIG_CNIC=m
3226 +CONFIG_TIGON3=m
3227 +CONFIG_TIGON3_HWMON=y
3228 +CONFIG_BNX2X=m
3229 +CONFIG_BNX2X_SRIOV=y
3230 +CONFIG_SYSTEMPORT=m
3231 +CONFIG_BNXT=m
3232 +CONFIG_BNXT_SRIOV=y
3233 +CONFIG_BNXT_FLOWER_OFFLOAD=y
3234 +CONFIG_BNXT_DCB=y
3235 +CONFIG_BNXT_HWMON=y
3236 +CONFIG_NET_VENDOR_BROCADE=y
3237 +CONFIG_BNA=m
3238 +CONFIG_NET_VENDOR_CADENCE=y
3239 +CONFIG_MACB=m
3240 +CONFIG_MACB_USE_HWSTAMP=y
3241 +CONFIG_MACB_PCI=m
3242 +CONFIG_NET_VENDOR_CAVIUM=y
3243 +CONFIG_THUNDER_NIC_PF=m
3244 +CONFIG_THUNDER_NIC_VF=m
3245 +CONFIG_THUNDER_NIC_BGX=m
3246 +CONFIG_THUNDER_NIC_RGX=m
3247 +CONFIG_CAVIUM_PTP=m
3248 +CONFIG_LIQUIDIO=m
3249 +CONFIG_LIQUIDIO_VF=m
3250 +CONFIG_NET_VENDOR_CHELSIO=y
3251 +CONFIG_CHELSIO_T1=m
3252 +CONFIG_CHELSIO_T1_1G=y
3253 +CONFIG_CHELSIO_T3=m
3254 +CONFIG_CHELSIO_T4=m
3255 +CONFIG_CHELSIO_T4_DCB=y
3256 +CONFIG_CHELSIO_T4_FCOE=y
3257 +CONFIG_CHELSIO_T4VF=m
3258 +CONFIG_CHELSIO_LIB=m
3259 +CONFIG_CHELSIO_INLINE_CRYPTO=y
3260 +CONFIG_CHELSIO_IPSEC_INLINE=m
3261 +CONFIG_CHELSIO_TLS_DEVICE=m
3262 +CONFIG_NET_VENDOR_CISCO=y
3263 +CONFIG_ENIC=m
3264 +CONFIG_NET_VENDOR_CORTINA=y
3265 +CONFIG_CX_ECAT=m
3266 +CONFIG_DNET=m
3267 +CONFIG_NET_VENDOR_DEC=y
3268 +CONFIG_NET_TULIP=y
3269 +CONFIG_DE2104X=m
3270 +CONFIG_DE2104X_DSL=0
3271 +CONFIG_TULIP=m
3272 +# CONFIG_TULIP_MWI is not set
3273 +# CONFIG_TULIP_MMIO is not set
3274 +# CONFIG_TULIP_NAPI is not set
3275 +CONFIG_DE4X5=m
3276 +CONFIG_WINBOND_840=m
3277 +CONFIG_DM9102=m
3278 +CONFIG_ULI526X=m
3279 +CONFIG_PCMCIA_XIRCOM=m
3280 +CONFIG_NET_VENDOR_DLINK=y
3281 +CONFIG_DL2K=m
3282 +CONFIG_SUNDANCE=m
3283 +# CONFIG_SUNDANCE_MMIO is not set
3284 +CONFIG_NET_VENDOR_EMULEX=y
3285 +CONFIG_BE2NET=m
3286 +CONFIG_BE2NET_HWMON=y
3287 +CONFIG_BE2NET_BE2=y
3288 +CONFIG_BE2NET_BE3=y
3289 +CONFIG_BE2NET_LANCER=y
3290 +CONFIG_BE2NET_SKYHAWK=y
3291 +CONFIG_NET_VENDOR_EZCHIP=y
3292 +CONFIG_NET_VENDOR_FUJITSU=y
3293 +CONFIG_PCMCIA_FMVJ18X=m
3294 +CONFIG_NET_VENDOR_GOOGLE=y
3295 +CONFIG_GVE=m
3296 +CONFIG_NET_VENDOR_HUAWEI=y
3297 +CONFIG_HINIC=m
3298 +CONFIG_NET_VENDOR_I825XX=y
3299 +CONFIG_NET_VENDOR_INTEL=y
3300 +CONFIG_E100=m
3301 +CONFIG_E1000=m
3302 +CONFIG_E1000E=m
3303 +CONFIG_E1000E_HWTS=y
3304 +CONFIG_IGB=m
3305 +CONFIG_IGB_HWMON=y
3306 +CONFIG_IGB_DCA=y
3307 +CONFIG_IGBVF=m
3308 +CONFIG_IXGB=m
3309 +CONFIG_IXGBE=m
3310 +CONFIG_IXGBE_HWMON=y
3311 +CONFIG_IXGBE_DCA=y
3312 +CONFIG_IXGBE_DCB=y
3313 +CONFIG_IXGBE_IPSEC=y
3314 +CONFIG_IXGBEVF=m
3315 +CONFIG_IXGBEVF_IPSEC=y
3316 +CONFIG_I40E=m
3317 +CONFIG_I40E_DCB=y
3318 +CONFIG_IAVF=m
3319 +CONFIG_I40EVF=m
3320 +CONFIG_ICE=m
3321 +CONFIG_FM10K=m
3322 +CONFIG_IGC=m
3323 +CONFIG_JME=m
3324 +CONFIG_NET_VENDOR_MARVELL=y
3325 +CONFIG_MVMDIO=m
3326 +CONFIG_SKGE=m
3327 +# CONFIG_SKGE_DEBUG is not set
3328 +CONFIG_SKGE_GENESIS=y
3329 +CONFIG_SKY2=m
3330 +# CONFIG_SKY2_DEBUG is not set
3331 +CONFIG_PRESTERA=m
3332 +CONFIG_PRESTERA_PCI=m
3333 +CONFIG_NET_VENDOR_MELLANOX=y
3334 +CONFIG_MLX4_EN=m
3335 +CONFIG_MLX4_EN_DCB=y
3336 +CONFIG_MLX4_CORE=m
3337 +CONFIG_MLX4_DEBUG=y
3338 +CONFIG_MLX4_CORE_GEN2=y
3339 +CONFIG_MLX5_CORE=m
3340 +CONFIG_MLX5_ACCEL=y
3341 +CONFIG_MLX5_FPGA=y
3342 +CONFIG_MLX5_CORE_EN=y
3343 +CONFIG_MLX5_EN_ARFS=y
3344 +CONFIG_MLX5_EN_RXNFC=y
3345 +CONFIG_MLX5_MPFS=y
3346 +CONFIG_MLX5_ESWITCH=y
3347 +CONFIG_MLX5_CLS_ACT=y
3348 +CONFIG_MLX5_TC_CT=y
3349 +CONFIG_MLX5_CORE_EN_DCB=y
3350 +CONFIG_MLX5_CORE_IPOIB=y
3351 +CONFIG_MLX5_FPGA_IPSEC=y
3352 +CONFIG_MLX5_IPSEC=y
3353 +CONFIG_MLX5_EN_IPSEC=y
3354 +CONFIG_MLX5_FPGA_TLS=y
3355 +CONFIG_MLX5_TLS=y
3356 +CONFIG_MLX5_EN_TLS=y
3357 +CONFIG_MLX5_SW_STEERING=y
3358 +CONFIG_MLX5_SF=y
3359 +CONFIG_MLX5_SF_MANAGER=y
3360 +CONFIG_MLXSW_CORE=m
3361 +CONFIG_MLXSW_CORE_HWMON=y
3362 +CONFIG_MLXSW_CORE_THERMAL=y
3363 +CONFIG_MLXSW_PCI=m
3364 +CONFIG_MLXSW_I2C=m
3365 +CONFIG_MLXSW_SWITCHIB=m
3366 +CONFIG_MLXSW_SWITCHX2=m
3367 +CONFIG_MLXSW_SPECTRUM=m
3368 +CONFIG_MLXSW_SPECTRUM_DCB=y
3369 +CONFIG_MLXSW_MINIMAL=m
3370 +CONFIG_MLXFW=m
3371 +CONFIG_NET_VENDOR_MICREL=y
3372 +CONFIG_KS8842=m
3373 +CONFIG_KS8851=m
3374 +CONFIG_KS8851_MLL=m
3375 +CONFIG_KSZ884X_PCI=m
3376 +CONFIG_NET_VENDOR_MICROCHIP=y
3377 +CONFIG_ENC28J60=m
3378 +# CONFIG_ENC28J60_WRITEVERIFY is not set
3379 +CONFIG_ENCX24J600=m
3380 +CONFIG_LAN743X=m
3381 +CONFIG_NET_VENDOR_MICROSEMI=y
3382 +CONFIG_MSCC_OCELOT_SWITCH_LIB=m
3383 +CONFIG_NET_VENDOR_MYRI=y
3384 +CONFIG_MYRI10GE=m
3385 +CONFIG_MYRI10GE_DCA=y
3386 +CONFIG_FEALNX=m
3387 +CONFIG_NET_VENDOR_NATSEMI=y
3388 +CONFIG_NATSEMI=m
3389 +CONFIG_NS83820=m
3390 +CONFIG_NET_VENDOR_NETERION=y
3391 +CONFIG_S2IO=m
3392 +CONFIG_VXGE=m
3393 +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
3394 +CONFIG_NET_VENDOR_NETRONOME=y
3395 +CONFIG_NFP=m
3396 +CONFIG_NFP_APP_FLOWER=y
3397 +CONFIG_NFP_APP_ABM_NIC=y
3398 +# CONFIG_NFP_DEBUG is not set
3399 +CONFIG_NET_VENDOR_NI=y
3400 +CONFIG_NI_XGE_MANAGEMENT_ENET=m
3401 +CONFIG_NET_VENDOR_8390=y
3402 +CONFIG_PCMCIA_AXNET=m
3403 +CONFIG_NE2K_PCI=m
3404 +CONFIG_PCMCIA_PCNET=m
3405 +CONFIG_NET_VENDOR_NVIDIA=y
3406 +CONFIG_FORCEDETH=m
3407 +CONFIG_NET_VENDOR_OKI=y
3408 +CONFIG_ETHOC=m
3409 +CONFIG_NET_VENDOR_PACKET_ENGINES=y
3410 +CONFIG_HAMACHI=m
3411 +CONFIG_YELLOWFIN=m
3412 +CONFIG_NET_VENDOR_PENSANDO=y
3413 +CONFIG_IONIC=m
3414 +CONFIG_NET_VENDOR_QLOGIC=y
3415 +CONFIG_QLA3XXX=m
3416 +CONFIG_QLCNIC=m
3417 +CONFIG_QLCNIC_SRIOV=y
3418 +CONFIG_QLCNIC_DCB=y
3419 +CONFIG_QLCNIC_HWMON=y
3420 +CONFIG_NETXEN_NIC=m
3421 +CONFIG_QED=m
3422 +CONFIG_QED_LL2=y
3423 +CONFIG_QED_SRIOV=y
3424 +CONFIG_QEDE=m
3425 +CONFIG_QED_RDMA=y
3426 +CONFIG_QED_ISCSI=y
3427 +CONFIG_QED_FCOE=y
3428 +CONFIG_QED_OOO=y
3429 +CONFIG_NET_VENDOR_QUALCOMM=y
3430 +CONFIG_QCOM_EMAC=m
3431 +CONFIG_RMNET=m
3432 +CONFIG_NET_VENDOR_RDC=y
3433 +CONFIG_R6040=m
3434 +CONFIG_NET_VENDOR_REALTEK=y
3435 +CONFIG_ATP=m
3436 +CONFIG_8139CP=m
3437 +CONFIG_8139TOO=m
3438 +CONFIG_8139TOO_PIO=y
3439 +# CONFIG_8139TOO_TUNE_TWISTER is not set
3440 +CONFIG_8139TOO_8129=y
3441 +# CONFIG_8139_OLD_RX_RESET is not set
3442 +CONFIG_R8169=m
3443 +CONFIG_NET_VENDOR_RENESAS=y
3444 +CONFIG_NET_VENDOR_ROCKER=y
3445 +CONFIG_ROCKER=m
3446 +CONFIG_NET_VENDOR_SAMSUNG=y
3447 +CONFIG_SXGBE_ETH=m
3448 +CONFIG_NET_VENDOR_SEEQ=y
3449 +CONFIG_NET_VENDOR_SOLARFLARE=y
3450 +CONFIG_SFC=m
3451 +CONFIG_SFC_MTD=y
3452 +CONFIG_SFC_MCDI_MON=y
3453 +CONFIG_SFC_SRIOV=y
3454 +CONFIG_SFC_MCDI_LOGGING=y
3455 +CONFIG_SFC_FALCON=m
3456 +CONFIG_SFC_FALCON_MTD=y
3457 +CONFIG_NET_VENDOR_SILAN=y
3458 +CONFIG_SC92031=m
3459 +CONFIG_NET_VENDOR_SIS=y
3460 +CONFIG_SIS900=m
3461 +CONFIG_SIS190=m
3462 +CONFIG_NET_VENDOR_SMSC=y
3463 +CONFIG_PCMCIA_SMC91C92=m
3464 +CONFIG_EPIC100=m
3465 +CONFIG_SMSC911X=m
3466 +CONFIG_SMSC9420=m
3467 +CONFIG_NET_VENDOR_SOCIONEXT=y
3468 +CONFIG_NET_VENDOR_STMICRO=y
3469 +CONFIG_STMMAC_ETH=m
3470 +# CONFIG_STMMAC_SELFTESTS is not set
3471 +CONFIG_STMMAC_PLATFORM=m
3472 +CONFIG_DWMAC_GENERIC=m
3473 +CONFIG_DWMAC_INTEL=m
3474 +CONFIG_STMMAC_PCI=m
3475 +CONFIG_NET_VENDOR_SUN=y
3476 +CONFIG_HAPPYMEAL=m
3477 +CONFIG_SUNGEM=m
3478 +CONFIG_CASSINI=m
3479 +CONFIG_NIU=m
3480 +CONFIG_NET_VENDOR_SYNOPSYS=y
3481 +CONFIG_DWC_XLGMAC=m
3482 +CONFIG_DWC_XLGMAC_PCI=m
3483 +CONFIG_NET_VENDOR_TEHUTI=y
3484 +CONFIG_TEHUTI=m
3485 +CONFIG_NET_VENDOR_TI=y
3486 +# CONFIG_TI_CPSW_PHY_SEL is not set
3487 +CONFIG_TLAN=m
3488 +CONFIG_NET_VENDOR_VIA=y
3489 +CONFIG_VIA_RHINE=m
3490 +CONFIG_VIA_RHINE_MMIO=y
3491 +CONFIG_VIA_VELOCITY=m
3492 +CONFIG_NET_VENDOR_WIZNET=y
3493 +CONFIG_WIZNET_W5100=m
3494 +CONFIG_WIZNET_W5300=m
3495 +# CONFIG_WIZNET_BUS_DIRECT is not set
3496 +# CONFIG_WIZNET_BUS_INDIRECT is not set
3497 +CONFIG_WIZNET_BUS_ANY=y
3498 +CONFIG_WIZNET_W5100_SPI=m
3499 +CONFIG_NET_VENDOR_XILINX=y
3500 +CONFIG_XILINX_EMACLITE=m
3501 +CONFIG_XILINX_AXI_EMAC=m
3502 +CONFIG_XILINX_LL_TEMAC=m
3503 +CONFIG_NET_VENDOR_XIRCOM=y
3504 +CONFIG_PCMCIA_XIRC2PS=m
3505 +CONFIG_FDDI=y
3506 +CONFIG_DEFXX=m
3507 +# CONFIG_DEFXX_MMIO is not set
3508 +CONFIG_SKFP=m
3509 +# CONFIG_HIPPI is not set
3510 +CONFIG_NET_SB1000=m
3511 +CONFIG_PHYLINK=m
3512 +CONFIG_PHYLIB=m
3513 +CONFIG_SWPHY=y
3514 +CONFIG_LED_TRIGGER_PHY=y
3515 +CONFIG_FIXED_PHY=m
3516 +CONFIG_SFP=m
3519 +# MII PHY device drivers
3521 +CONFIG_AMD_PHY=m
3522 +CONFIG_ADIN_PHY=m
3523 +CONFIG_AQUANTIA_PHY=m
3524 +CONFIG_AX88796B_PHY=m
3525 +CONFIG_BROADCOM_PHY=m
3526 +CONFIG_BCM54140_PHY=m
3527 +CONFIG_BCM7XXX_PHY=m
3528 +CONFIG_BCM84881_PHY=m
3529 +CONFIG_BCM87XX_PHY=m
3530 +CONFIG_BCM_NET_PHYLIB=m
3531 +CONFIG_CICADA_PHY=m
3532 +CONFIG_CORTINA_PHY=m
3533 +CONFIG_DAVICOM_PHY=m
3534 +CONFIG_ICPLUS_PHY=m
3535 +CONFIG_LXT_PHY=m
3536 +CONFIG_INTEL_XWAY_PHY=m
3537 +CONFIG_LSI_ET1011C_PHY=m
3538 +CONFIG_MARVELL_PHY=m
3539 +CONFIG_MARVELL_10G_PHY=m
3540 +CONFIG_MICREL_PHY=m
3541 +CONFIG_MICROCHIP_PHY=m
3542 +CONFIG_MICROCHIP_T1_PHY=m
3543 +CONFIG_MICROSEMI_PHY=m
3544 +CONFIG_NATIONAL_PHY=m
3545 +CONFIG_NXP_TJA11XX_PHY=m
3546 +CONFIG_AT803X_PHY=m
3547 +CONFIG_QSEMI_PHY=m
3548 +CONFIG_REALTEK_PHY=m
3549 +CONFIG_RENESAS_PHY=m
3550 +CONFIG_ROCKCHIP_PHY=m
3551 +CONFIG_SMSC_PHY=m
3552 +CONFIG_STE10XP=m
3553 +CONFIG_TERANETICS_PHY=m
3554 +CONFIG_DP83822_PHY=m
3555 +CONFIG_DP83TC811_PHY=m
3556 +CONFIG_DP83848_PHY=m
3557 +CONFIG_DP83867_PHY=m
3558 +CONFIG_DP83869_PHY=m
3559 +CONFIG_VITESSE_PHY=m
3560 +CONFIG_XILINX_GMII2RGMII=m
3561 +CONFIG_MICREL_KS8995MA=m
3562 +CONFIG_MDIO_DEVICE=m
3563 +CONFIG_MDIO_BUS=m
3564 +CONFIG_MDIO_DEVRES=m
3565 +CONFIG_MDIO_BITBANG=m
3566 +CONFIG_MDIO_BCM_UNIMAC=m
3567 +CONFIG_MDIO_CAVIUM=m
3568 +CONFIG_MDIO_GPIO=m
3569 +CONFIG_MDIO_I2C=m
3570 +CONFIG_MDIO_MVUSB=m
3571 +CONFIG_MDIO_MSCC_MIIM=m
3572 +CONFIG_MDIO_THUNDER=m
3575 +# MDIO Multiplexers
3579 +# PCS device drivers
3581 +CONFIG_PCS_XPCS=m
3582 +CONFIG_PCS_LYNX=m
3583 +# end of PCS device drivers
3585 +CONFIG_PLIP=m
3586 +CONFIG_PPP=y
3587 +CONFIG_PPP_BSDCOMP=m
3588 +CONFIG_PPP_DEFLATE=m
3589 +CONFIG_PPP_FILTER=y
3590 +CONFIG_PPP_MPPE=m
3591 +CONFIG_PPP_MULTILINK=y
3592 +CONFIG_PPPOATM=m
3593 +CONFIG_PPPOE=m
3594 +CONFIG_PPTP=m
3595 +CONFIG_PPPOL2TP=m
3596 +CONFIG_PPP_ASYNC=m
3597 +CONFIG_PPP_SYNC_TTY=m
3598 +CONFIG_SLIP=m
3599 +CONFIG_SLHC=y
3600 +CONFIG_SLIP_COMPRESSED=y
3601 +CONFIG_SLIP_SMART=y
3602 +CONFIG_SLIP_MODE_SLIP6=y
3603 +CONFIG_USB_NET_DRIVERS=m
3604 +CONFIG_USB_CATC=m
3605 +CONFIG_USB_KAWETH=m
3606 +CONFIG_USB_PEGASUS=m
3607 +CONFIG_USB_RTL8150=m
3608 +CONFIG_USB_RTL8152=m
3609 +CONFIG_USB_LAN78XX=m
3610 +CONFIG_USB_USBNET=m
3611 +CONFIG_USB_NET_AX8817X=m
3612 +CONFIG_USB_NET_AX88179_178A=m
3613 +CONFIG_USB_NET_CDCETHER=m
3614 +CONFIG_USB_NET_CDC_EEM=m
3615 +CONFIG_USB_NET_CDC_NCM=m
3616 +CONFIG_USB_NET_HUAWEI_CDC_NCM=m
3617 +CONFIG_USB_NET_CDC_MBIM=m
3618 +CONFIG_USB_NET_DM9601=m
3619 +CONFIG_USB_NET_SR9700=m
3620 +CONFIG_USB_NET_SR9800=m
3621 +CONFIG_USB_NET_SMSC75XX=m
3622 +CONFIG_USB_NET_SMSC95XX=m
3623 +CONFIG_USB_NET_GL620A=m
3624 +CONFIG_USB_NET_NET1080=m
3625 +CONFIG_USB_NET_PLUSB=m
3626 +CONFIG_USB_NET_MCS7830=m
3627 +CONFIG_USB_NET_RNDIS_HOST=m
3628 +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
3629 +CONFIG_USB_NET_CDC_SUBSET=m
3630 +CONFIG_USB_ALI_M5632=y
3631 +CONFIG_USB_AN2720=y
3632 +CONFIG_USB_BELKIN=y
3633 +CONFIG_USB_ARMLINUX=y
3634 +CONFIG_USB_EPSON2888=y
3635 +CONFIG_USB_KC2190=y
3636 +CONFIG_USB_NET_ZAURUS=m
3637 +CONFIG_USB_NET_CX82310_ETH=m
3638 +CONFIG_USB_NET_KALMIA=m
3639 +CONFIG_USB_NET_QMI_WWAN=m
3640 +CONFIG_USB_HSO=m
3641 +CONFIG_USB_NET_INT51X1=m
3642 +CONFIG_USB_CDC_PHONET=m
3643 +CONFIG_USB_IPHETH=m
3644 +CONFIG_USB_SIERRA_NET=m
3645 +CONFIG_USB_VL600=m
3646 +CONFIG_USB_NET_CH9200=m
3647 +CONFIG_USB_NET_AQC111=m
3648 +CONFIG_USB_RTL8153_ECM=m
3649 +CONFIG_WLAN=y
3650 +CONFIG_WLAN_VENDOR_ADMTEK=y
3651 +CONFIG_ADM8211=m
3652 +CONFIG_ATH_COMMON=m
3653 +CONFIG_WLAN_VENDOR_ATH=y
3654 +# CONFIG_ATH_DEBUG is not set
3655 +CONFIG_ATH5K=m
3656 +# CONFIG_ATH5K_DEBUG is not set
3657 +CONFIG_ATH5K_PCI=y
3658 +CONFIG_ATH9K_HW=m
3659 +CONFIG_ATH9K_COMMON=m
3660 +CONFIG_ATH9K_COMMON_DEBUG=y
3661 +CONFIG_ATH9K_BTCOEX_SUPPORT=y
3662 +CONFIG_ATH9K=m
3663 +CONFIG_ATH9K_PCI=y
3664 +CONFIG_ATH9K_AHB=y
3665 +CONFIG_ATH9K_DEBUGFS=y
3666 +CONFIG_ATH9K_STATION_STATISTICS=y
3667 +# CONFIG_ATH9K_DYNACK is not set
3668 +CONFIG_ATH9K_WOW=y
3669 +CONFIG_ATH9K_RFKILL=y
3670 +CONFIG_ATH9K_CHANNEL_CONTEXT=y
3671 +CONFIG_ATH9K_PCOEM=y
3672 +CONFIG_ATH9K_PCI_NO_EEPROM=m
3673 +CONFIG_ATH9K_HTC=m
3674 +CONFIG_ATH9K_HTC_DEBUGFS=y
3675 +CONFIG_ATH9K_HWRNG=y
3676 +CONFIG_ATH9K_COMMON_SPECTRAL=y
3677 +CONFIG_CARL9170=m
3678 +CONFIG_CARL9170_LEDS=y
3679 +# CONFIG_CARL9170_DEBUGFS is not set
3680 +CONFIG_CARL9170_WPC=y
3681 +CONFIG_CARL9170_HWRNG=y
3682 +CONFIG_ATH6KL=m
3683 +CONFIG_ATH6KL_SDIO=m
3684 +CONFIG_ATH6KL_USB=m
3685 +# CONFIG_ATH6KL_DEBUG is not set
3686 +CONFIG_AR5523=m
3687 +CONFIG_WIL6210=m
3688 +CONFIG_WIL6210_ISR_COR=y
3689 +CONFIG_WIL6210_DEBUGFS=y
3690 +CONFIG_ATH10K=m
3691 +CONFIG_ATH10K_CE=y
3692 +CONFIG_ATH10K_PCI=m
3693 +CONFIG_ATH10K_SDIO=m
3694 +CONFIG_ATH10K_USB=m
3695 +# CONFIG_ATH10K_DEBUG is not set
3696 +CONFIG_ATH10K_DEBUGFS=y
3697 +CONFIG_ATH10K_SPECTRAL=y
3698 +CONFIG_WCN36XX=m
3699 +# CONFIG_WCN36XX_DEBUGFS is not set
3700 +CONFIG_ATH11K=m
3701 +CONFIG_ATH11K_AHB=m
3702 +CONFIG_ATH11K_PCI=m
3703 +# CONFIG_ATH11K_DEBUG is not set
3704 +CONFIG_ATH11K_DEBUGFS=y
3705 +CONFIG_ATH11K_SPECTRAL=y
3706 +CONFIG_WLAN_VENDOR_ATMEL=y
3707 +CONFIG_ATMEL=m
3708 +CONFIG_PCI_ATMEL=m
3709 +CONFIG_PCMCIA_ATMEL=m
3710 +CONFIG_AT76C50X_USB=m
3711 +CONFIG_WLAN_VENDOR_BROADCOM=y
3712 +CONFIG_B43=m
3713 +CONFIG_B43_BCMA=y
3714 +CONFIG_B43_SSB=y
3715 +CONFIG_B43_BUSES_BCMA_AND_SSB=y
3716 +# CONFIG_B43_BUSES_BCMA is not set
3717 +# CONFIG_B43_BUSES_SSB is not set
3718 +CONFIG_B43_PCI_AUTOSELECT=y
3719 +CONFIG_B43_PCICORE_AUTOSELECT=y
3720 +# CONFIG_B43_SDIO is not set
3721 +CONFIG_B43_BCMA_PIO=y
3722 +CONFIG_B43_PIO=y
3723 +CONFIG_B43_PHY_G=y
3724 +CONFIG_B43_PHY_N=y
3725 +CONFIG_B43_PHY_LP=y
3726 +CONFIG_B43_PHY_HT=y
3727 +CONFIG_B43_LEDS=y
3728 +CONFIG_B43_HWRNG=y
3729 +# CONFIG_B43_DEBUG is not set
3730 +CONFIG_B43LEGACY=m
3731 +CONFIG_B43LEGACY_PCI_AUTOSELECT=y
3732 +CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
3733 +CONFIG_B43LEGACY_LEDS=y
3734 +CONFIG_B43LEGACY_HWRNG=y
3735 +# CONFIG_B43LEGACY_DEBUG is not set
3736 +CONFIG_B43LEGACY_DMA=y
3737 +CONFIG_B43LEGACY_PIO=y
3738 +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
3739 +# CONFIG_B43LEGACY_DMA_MODE is not set
3740 +# CONFIG_B43LEGACY_PIO_MODE is not set
3741 +CONFIG_BRCMUTIL=m
3742 +CONFIG_BRCMSMAC=m
3743 +CONFIG_BRCMFMAC=m
3744 +CONFIG_BRCMFMAC_PROTO_BCDC=y
3745 +CONFIG_BRCMFMAC_PROTO_MSGBUF=y
3746 +CONFIG_BRCMFMAC_SDIO=y
3747 +CONFIG_BRCMFMAC_USB=y
3748 +CONFIG_BRCMFMAC_PCIE=y
3749 +CONFIG_BRCM_TRACING=y
3750 +# CONFIG_BRCMDBG is not set
3751 +CONFIG_WLAN_VENDOR_CISCO=y
3752 +CONFIG_AIRO=m
3753 +CONFIG_AIRO_CS=m
3754 +CONFIG_WLAN_VENDOR_INTEL=y
3755 +CONFIG_IPW2100=m
3756 +CONFIG_IPW2100_MONITOR=y
3757 +# CONFIG_IPW2100_DEBUG is not set
3758 +CONFIG_IPW2200=m
3759 +CONFIG_IPW2200_MONITOR=y
3760 +CONFIG_IPW2200_RADIOTAP=y
3761 +CONFIG_IPW2200_PROMISCUOUS=y
3762 +CONFIG_IPW2200_QOS=y
3763 +# CONFIG_IPW2200_DEBUG is not set
3764 +CONFIG_LIBIPW=m
3765 +# CONFIG_LIBIPW_DEBUG is not set
3766 +CONFIG_IWLEGACY=m
3767 +CONFIG_IWL4965=m
3768 +CONFIG_IWL3945=m
3771 +# iwl3945 / iwl4965 Debugging Options
3773 +# CONFIG_IWLEGACY_DEBUG is not set
3774 +CONFIG_IWLEGACY_DEBUGFS=y
3775 +# end of iwl3945 / iwl4965 Debugging Options
3777 +CONFIG_IWLWIFI=m
3778 +CONFIG_IWLWIFI_LEDS=y
3779 +CONFIG_IWLDVM=m
3780 +CONFIG_IWLMVM=m
3781 +CONFIG_IWLWIFI_OPMODE_MODULAR=y
3782 +# CONFIG_IWLWIFI_BCAST_FILTERING is not set
3785 +# Debugging Options
3787 +# CONFIG_IWLWIFI_DEBUG is not set
3788 +CONFIG_IWLWIFI_DEBUGFS=y
3789 +# end of Debugging Options
3791 +CONFIG_WLAN_VENDOR_INTERSIL=y
3792 +CONFIG_HOSTAP=m
3793 +CONFIG_HOSTAP_FIRMWARE=y
3794 +CONFIG_HOSTAP_FIRMWARE_NVRAM=y
3795 +CONFIG_HOSTAP_PLX=m
3796 +CONFIG_HOSTAP_PCI=m
3797 +CONFIG_HOSTAP_CS=m
3798 +CONFIG_HERMES=m
3799 +# CONFIG_HERMES_PRISM is not set
3800 +CONFIG_HERMES_CACHE_FW_ON_INIT=y
3801 +CONFIG_PLX_HERMES=m
3802 +CONFIG_TMD_HERMES=m
3803 +CONFIG_NORTEL_HERMES=m
3804 +CONFIG_PCMCIA_HERMES=m
3805 +CONFIG_PCMCIA_SPECTRUM=m
3806 +CONFIG_ORINOCO_USB=m
3807 +CONFIG_P54_COMMON=m
3808 +CONFIG_P54_USB=m
3809 +CONFIG_P54_PCI=m
3810 +CONFIG_P54_SPI=m
3811 +# CONFIG_P54_SPI_DEFAULT_EEPROM is not set
3812 +CONFIG_P54_LEDS=y
3813 +# CONFIG_PRISM54 is not set
3814 +CONFIG_WLAN_VENDOR_MARVELL=y
3815 +CONFIG_LIBERTAS=m
3816 +CONFIG_LIBERTAS_USB=m
3817 +CONFIG_LIBERTAS_CS=m
3818 +CONFIG_LIBERTAS_SDIO=m
3819 +CONFIG_LIBERTAS_SPI=m
3820 +# CONFIG_LIBERTAS_DEBUG is not set
3821 +CONFIG_LIBERTAS_MESH=y
3822 +CONFIG_LIBERTAS_THINFIRM=m
3823 +# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
3824 +CONFIG_LIBERTAS_THINFIRM_USB=m
3825 +CONFIG_MWIFIEX=m
3826 +CONFIG_MWIFIEX_SDIO=m
3827 +CONFIG_MWIFIEX_PCIE=m
3828 +CONFIG_MWIFIEX_USB=m
3829 +CONFIG_MWL8K=m
3830 +CONFIG_WLAN_VENDOR_MEDIATEK=y
3831 +CONFIG_MT7601U=m
3832 +CONFIG_MT76_CORE=m
3833 +CONFIG_MT76_LEDS=y
3834 +CONFIG_MT76_USB=m
3835 +CONFIG_MT76_SDIO=m
3836 +CONFIG_MT76x02_LIB=m
3837 +CONFIG_MT76x02_USB=m
3838 +CONFIG_MT76_CONNAC_LIB=m
3839 +CONFIG_MT76x0_COMMON=m
3840 +CONFIG_MT76x0U=m
3841 +CONFIG_MT76x0E=m
3842 +CONFIG_MT76x2_COMMON=m
3843 +CONFIG_MT76x2E=m
3844 +CONFIG_MT76x2U=m
3845 +CONFIG_MT7603E=m
3846 +CONFIG_MT7615_COMMON=m
3847 +CONFIG_MT7615E=m
3848 +CONFIG_MT7663_USB_SDIO_COMMON=m
3849 +CONFIG_MT7663U=m
3850 +CONFIG_MT7663S=m
3851 +CONFIG_MT7915E=m
3852 +CONFIG_MT7921E=m
3853 +CONFIG_WLAN_VENDOR_MICROCHIP=y
3854 +CONFIG_WILC1000=m
3855 +CONFIG_WILC1000_SDIO=m
3856 +CONFIG_WILC1000_SPI=m
3857 +CONFIG_WILC1000_HW_OOB_INTR=y
3858 +CONFIG_WLAN_VENDOR_RALINK=y
3859 +CONFIG_RT2X00=m
3860 +CONFIG_RT2400PCI=m
3861 +CONFIG_RT2500PCI=m
3862 +CONFIG_RT61PCI=m
3863 +CONFIG_RT2800PCI=m
3864 +CONFIG_RT2800PCI_RT33XX=y
3865 +CONFIG_RT2800PCI_RT35XX=y
3866 +CONFIG_RT2800PCI_RT53XX=y
3867 +CONFIG_RT2800PCI_RT3290=y
3868 +CONFIG_RT2500USB=m
3869 +CONFIG_RT73USB=m
3870 +CONFIG_RT2800USB=m
3871 +CONFIG_RT2800USB_RT33XX=y
3872 +CONFIG_RT2800USB_RT35XX=y
3873 +CONFIG_RT2800USB_RT3573=y
3874 +CONFIG_RT2800USB_RT53XX=y
3875 +CONFIG_RT2800USB_RT55XX=y
3876 +CONFIG_RT2800USB_UNKNOWN=y
3877 +CONFIG_RT2800_LIB=m
3878 +CONFIG_RT2800_LIB_MMIO=m
3879 +CONFIG_RT2X00_LIB_MMIO=m
3880 +CONFIG_RT2X00_LIB_PCI=m
3881 +CONFIG_RT2X00_LIB_USB=m
3882 +CONFIG_RT2X00_LIB=m
3883 +CONFIG_RT2X00_LIB_FIRMWARE=y
3884 +CONFIG_RT2X00_LIB_CRYPTO=y
3885 +CONFIG_RT2X00_LIB_LEDS=y
3886 +# CONFIG_RT2X00_LIB_DEBUGFS is not set
3887 +# CONFIG_RT2X00_DEBUG is not set
3888 +CONFIG_WLAN_VENDOR_REALTEK=y
3889 +CONFIG_RTL8180=m
3890 +CONFIG_RTL8187=m
3891 +CONFIG_RTL8187_LEDS=y
3892 +CONFIG_RTL_CARDS=m
3893 +CONFIG_RTL8192CE=m
3894 +CONFIG_RTL8192SE=m
3895 +CONFIG_RTL8192DE=m
3896 +CONFIG_RTL8723AE=m
3897 +CONFIG_RTL8723BE=m
3898 +CONFIG_RTL8188EE=m
3899 +CONFIG_RTL8192EE=m
3900 +CONFIG_RTL8821AE=m
3901 +CONFIG_RTL8192CU=m
3902 +CONFIG_RTLWIFI=m
3903 +CONFIG_RTLWIFI_PCI=m
3904 +CONFIG_RTLWIFI_USB=m
3905 +# CONFIG_RTLWIFI_DEBUG is not set
3906 +CONFIG_RTL8192C_COMMON=m
3907 +CONFIG_RTL8723_COMMON=m
3908 +CONFIG_RTLBTCOEXIST=m
3909 +CONFIG_RTL8XXXU=m
3910 +CONFIG_RTL8XXXU_UNTESTED=y
3911 +CONFIG_RTW88=m
3912 +CONFIG_RTW88_CORE=m
3913 +CONFIG_RTW88_PCI=m
3914 +CONFIG_RTW88_8822B=m
3915 +CONFIG_RTW88_8822C=m
3916 +CONFIG_RTW88_8723D=m
3917 +CONFIG_RTW88_8821C=m
3918 +CONFIG_RTW88_8822BE=m
3919 +CONFIG_RTW88_8822CE=m
3920 +CONFIG_RTW88_8723DE=m
3921 +CONFIG_RTW88_8821CE=m
3922 +CONFIG_RTW88_DEBUG=y
3923 +CONFIG_RTW88_DEBUGFS=y
3924 +CONFIG_WLAN_VENDOR_RSI=y
3925 +CONFIG_RSI_91X=m
3926 +# CONFIG_RSI_DEBUGFS is not set
3927 +CONFIG_RSI_SDIO=m
3928 +CONFIG_RSI_USB=m
3929 +CONFIG_RSI_COEX=y
3930 +CONFIG_WLAN_VENDOR_ST=y
3931 +CONFIG_CW1200=m
3932 +CONFIG_CW1200_WLAN_SDIO=m
3933 +CONFIG_CW1200_WLAN_SPI=m
3934 +CONFIG_WLAN_VENDOR_TI=y
3935 +CONFIG_WL1251=m
3936 +CONFIG_WL1251_SPI=m
3937 +CONFIG_WL1251_SDIO=m
3938 +CONFIG_WL12XX=m
3939 +CONFIG_WL18XX=m
3940 +CONFIG_WLCORE=m
3941 +CONFIG_WLCORE_SDIO=m
3942 +CONFIG_WILINK_PLATFORM_DATA=y
3943 +CONFIG_WLAN_VENDOR_ZYDAS=y
3944 +CONFIG_USB_ZD1201=m
3945 +CONFIG_ZD1211RW=m
3946 +# CONFIG_ZD1211RW_DEBUG is not set
3947 +CONFIG_WLAN_VENDOR_QUANTENNA=y
3948 +CONFIG_QTNFMAC=m
3949 +CONFIG_QTNFMAC_PCIE=m
3950 +CONFIG_PCMCIA_RAYCS=m
3951 +CONFIG_PCMCIA_WL3501=m
3952 +CONFIG_MAC80211_HWSIM=m
3953 +CONFIG_USB_NET_RNDIS_WLAN=m
3954 +CONFIG_VIRT_WIFI=m
3955 +CONFIG_WAN=y
3956 +CONFIG_LANMEDIA=m
3957 +CONFIG_HDLC=m
3958 +CONFIG_HDLC_RAW=m
3959 +CONFIG_HDLC_RAW_ETH=m
3960 +CONFIG_HDLC_CISCO=m
3961 +CONFIG_HDLC_FR=m
3962 +CONFIG_HDLC_PPP=m
3963 +CONFIG_HDLC_X25=m
3964 +CONFIG_PCI200SYN=m
3965 +CONFIG_WANXL=m
3966 +CONFIG_PC300TOO=m
3967 +CONFIG_FARSYNC=m
3968 +CONFIG_LAPBETHER=m
3969 +CONFIG_SBNI=m
3970 +# CONFIG_SBNI_MULTILINE is not set
3971 +CONFIG_IEEE802154_DRIVERS=m
3972 +CONFIG_IEEE802154_FAKELB=m
3973 +CONFIG_IEEE802154_AT86RF230=m
3974 +CONFIG_IEEE802154_AT86RF230_DEBUGFS=y
3975 +CONFIG_IEEE802154_MRF24J40=m
3976 +CONFIG_IEEE802154_CC2520=m
3977 +CONFIG_IEEE802154_ATUSB=m
3978 +CONFIG_IEEE802154_ADF7242=m
3979 +CONFIG_IEEE802154_CA8210=m
3980 +CONFIG_IEEE802154_CA8210_DEBUGFS=y
3981 +CONFIG_IEEE802154_MCR20A=m
3982 +CONFIG_IEEE802154_HWSIM=m
3983 +CONFIG_XEN_NETDEV_FRONTEND=y
3984 +CONFIG_XEN_NETDEV_BACKEND=m
3985 +CONFIG_VMXNET3=m
3986 +CONFIG_FUJITSU_ES=m
3987 +CONFIG_USB4_NET=m
3988 +CONFIG_HYPERV_NET=m
3989 +CONFIG_NETDEVSIM=m
3990 +CONFIG_NET_FAILOVER=m
3991 +CONFIG_ISDN=y
3992 +CONFIG_ISDN_CAPI=y
3993 +CONFIG_CAPI_TRACE=y
3994 +CONFIG_ISDN_CAPI_MIDDLEWARE=y
3995 +CONFIG_MISDN=m
3996 +CONFIG_MISDN_DSP=m
3997 +CONFIG_MISDN_L1OIP=m
4000 +# mISDN hardware drivers
4002 +CONFIG_MISDN_HFCPCI=m
4003 +CONFIG_MISDN_HFCMULTI=m
4004 +CONFIG_MISDN_HFCUSB=m
4005 +CONFIG_MISDN_AVMFRITZ=m
4006 +CONFIG_MISDN_SPEEDFAX=m
4007 +CONFIG_MISDN_INFINEON=m
4008 +CONFIG_MISDN_W6692=m
4009 +CONFIG_MISDN_NETJET=m
4010 +CONFIG_MISDN_HDLC=m
4011 +CONFIG_MISDN_IPAC=m
4012 +CONFIG_MISDN_ISAR=m
4013 +CONFIG_NVM=y
4014 +CONFIG_NVM_PBLK=m
4015 +# CONFIG_NVM_PBLK_DEBUG is not set
4018 +# Input device support
4020 +CONFIG_INPUT=y
4021 +CONFIG_INPUT_LEDS=m
4022 +CONFIG_INPUT_FF_MEMLESS=m
4023 +CONFIG_INPUT_SPARSEKMAP=m
4024 +CONFIG_INPUT_MATRIXKMAP=m
4027 +# Userland interfaces
4029 +CONFIG_INPUT_MOUSEDEV=y
4030 +CONFIG_INPUT_MOUSEDEV_PSAUX=y
4031 +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
4032 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
4033 +CONFIG_INPUT_JOYDEV=m
4034 +CONFIG_INPUT_EVDEV=y
4035 +# CONFIG_INPUT_EVBUG is not set
4038 +# Input Device Drivers
4040 +CONFIG_INPUT_KEYBOARD=y
4041 +CONFIG_KEYBOARD_ADC=m
4042 +CONFIG_KEYBOARD_ADP5520=m
4043 +CONFIG_KEYBOARD_ADP5588=m
4044 +CONFIG_KEYBOARD_ADP5589=m
4045 +CONFIG_KEYBOARD_APPLESPI=m
4046 +CONFIG_KEYBOARD_ATKBD=y
4047 +CONFIG_KEYBOARD_QT1050=m
4048 +CONFIG_KEYBOARD_QT1070=m
4049 +CONFIG_KEYBOARD_QT2160=m
4050 +CONFIG_KEYBOARD_DLINK_DIR685=m
4051 +CONFIG_KEYBOARD_LKKBD=m
4052 +CONFIG_KEYBOARD_GPIO=m
4053 +CONFIG_KEYBOARD_GPIO_POLLED=m
4054 +CONFIG_KEYBOARD_TCA6416=m
4055 +CONFIG_KEYBOARD_TCA8418=m
4056 +CONFIG_KEYBOARD_MATRIX=m
4057 +CONFIG_KEYBOARD_LM8323=m
4058 +CONFIG_KEYBOARD_LM8333=m
4059 +CONFIG_KEYBOARD_MAX7359=m
4060 +CONFIG_KEYBOARD_MCS=m
4061 +CONFIG_KEYBOARD_MPR121=m
4062 +CONFIG_KEYBOARD_NEWTON=m
4063 +CONFIG_KEYBOARD_OPENCORES=m
4064 +CONFIG_KEYBOARD_SAMSUNG=m
4065 +CONFIG_KEYBOARD_STOWAWAY=m
4066 +CONFIG_KEYBOARD_SUNKBD=m
4067 +CONFIG_KEYBOARD_IQS62X=m
4068 +CONFIG_KEYBOARD_TM2_TOUCHKEY=m
4069 +CONFIG_KEYBOARD_TWL4030=m
4070 +CONFIG_KEYBOARD_XTKBD=m
4071 +CONFIG_KEYBOARD_CROS_EC=m
4072 +CONFIG_KEYBOARD_MTK_PMIC=m
4073 +CONFIG_INPUT_MOUSE=y
4074 +CONFIG_MOUSE_PS2=m
4075 +CONFIG_MOUSE_PS2_ALPS=y
4076 +CONFIG_MOUSE_PS2_BYD=y
4077 +CONFIG_MOUSE_PS2_LOGIPS2PP=y
4078 +CONFIG_MOUSE_PS2_SYNAPTICS=y
4079 +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
4080 +CONFIG_MOUSE_PS2_CYPRESS=y
4081 +CONFIG_MOUSE_PS2_LIFEBOOK=y
4082 +CONFIG_MOUSE_PS2_TRACKPOINT=y
4083 +CONFIG_MOUSE_PS2_ELANTECH=y
4084 +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y
4085 +CONFIG_MOUSE_PS2_SENTELIC=y
4086 +CONFIG_MOUSE_PS2_TOUCHKIT=y
4087 +CONFIG_MOUSE_PS2_FOCALTECH=y
4088 +CONFIG_MOUSE_PS2_VMMOUSE=y
4089 +CONFIG_MOUSE_PS2_SMBUS=y
4090 +CONFIG_MOUSE_SERIAL=m
4091 +CONFIG_MOUSE_APPLETOUCH=m
4092 +CONFIG_MOUSE_BCM5974=m
4093 +CONFIG_MOUSE_CYAPA=m
4094 +CONFIG_MOUSE_ELAN_I2C=m
4095 +CONFIG_MOUSE_ELAN_I2C_I2C=y
4096 +CONFIG_MOUSE_ELAN_I2C_SMBUS=y
4097 +CONFIG_MOUSE_VSXXXAA=m
4098 +CONFIG_MOUSE_GPIO=m
4099 +CONFIG_MOUSE_SYNAPTICS_I2C=m
4100 +CONFIG_MOUSE_SYNAPTICS_USB=m
4101 +CONFIG_INPUT_JOYSTICK=y
4102 +CONFIG_JOYSTICK_ANALOG=m
4103 +CONFIG_JOYSTICK_A3D=m
4104 +CONFIG_JOYSTICK_ADC=m
4105 +CONFIG_JOYSTICK_ADI=m
4106 +CONFIG_JOYSTICK_COBRA=m
4107 +CONFIG_JOYSTICK_GF2K=m
4108 +CONFIG_JOYSTICK_GRIP=m
4109 +CONFIG_JOYSTICK_GRIP_MP=m
4110 +CONFIG_JOYSTICK_GUILLEMOT=m
4111 +CONFIG_JOYSTICK_INTERACT=m
4112 +CONFIG_JOYSTICK_SIDEWINDER=m
4113 +CONFIG_JOYSTICK_TMDC=m
4114 +CONFIG_JOYSTICK_IFORCE=m
4115 +CONFIG_JOYSTICK_IFORCE_USB=m
4116 +CONFIG_JOYSTICK_IFORCE_232=m
4117 +CONFIG_JOYSTICK_WARRIOR=m
4118 +CONFIG_JOYSTICK_MAGELLAN=m
4119 +CONFIG_JOYSTICK_SPACEORB=m
4120 +CONFIG_JOYSTICK_SPACEBALL=m
4121 +CONFIG_JOYSTICK_STINGER=m
4122 +CONFIG_JOYSTICK_TWIDJOY=m
4123 +CONFIG_JOYSTICK_ZHENHUA=m
4124 +CONFIG_JOYSTICK_DB9=m
4125 +CONFIG_JOYSTICK_GAMECON=m
4126 +CONFIG_JOYSTICK_TURBOGRAFX=m
4127 +CONFIG_JOYSTICK_AS5011=m
4128 +CONFIG_JOYSTICK_JOYDUMP=m
4129 +CONFIG_JOYSTICK_XPAD=m
4130 +CONFIG_JOYSTICK_XPAD_FF=y
4131 +CONFIG_JOYSTICK_XPAD_LEDS=y
4132 +CONFIG_JOYSTICK_WALKERA0701=m
4133 +CONFIG_JOYSTICK_PSXPAD_SPI=m
4134 +CONFIG_JOYSTICK_PSXPAD_SPI_FF=y
4135 +CONFIG_JOYSTICK_PXRC=m
4136 +CONFIG_JOYSTICK_FSIA6B=m
4137 +CONFIG_INPUT_TABLET=y
4138 +CONFIG_TABLET_USB_ACECAD=m
4139 +CONFIG_TABLET_USB_AIPTEK=m
4140 +CONFIG_TABLET_USB_HANWANG=m
4141 +CONFIG_TABLET_USB_KBTAB=m
4142 +CONFIG_TABLET_USB_PEGASUS=m
4143 +CONFIG_TABLET_SERIAL_WACOM4=m
4144 +CONFIG_INPUT_TOUCHSCREEN=y
4145 +CONFIG_TOUCHSCREEN_PROPERTIES=y
4146 +CONFIG_TOUCHSCREEN_88PM860X=m
4147 +CONFIG_TOUCHSCREEN_ADS7846=m
4148 +CONFIG_TOUCHSCREEN_AD7877=m
4149 +CONFIG_TOUCHSCREEN_AD7879=m
4150 +CONFIG_TOUCHSCREEN_AD7879_I2C=m
4151 +CONFIG_TOUCHSCREEN_AD7879_SPI=m
4152 +CONFIG_TOUCHSCREEN_ADC=m
4153 +CONFIG_TOUCHSCREEN_ATMEL_MXT=m
4154 +CONFIG_TOUCHSCREEN_ATMEL_MXT_T37=y
4155 +CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
4156 +CONFIG_TOUCHSCREEN_BU21013=m
4157 +CONFIG_TOUCHSCREEN_BU21029=m
4158 +CONFIG_TOUCHSCREEN_CHIPONE_ICN8505=m
4159 +CONFIG_TOUCHSCREEN_CY8CTMA140=m
4160 +CONFIG_TOUCHSCREEN_CY8CTMG110=m
4161 +CONFIG_TOUCHSCREEN_CYTTSP_CORE=m
4162 +CONFIG_TOUCHSCREEN_CYTTSP_I2C=m
4163 +CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
4164 +CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
4165 +CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
4166 +CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
4167 +CONFIG_TOUCHSCREEN_DA9034=m
4168 +CONFIG_TOUCHSCREEN_DA9052=m
4169 +CONFIG_TOUCHSCREEN_DYNAPRO=m
4170 +CONFIG_TOUCHSCREEN_HAMPSHIRE=m
4171 +CONFIG_TOUCHSCREEN_EETI=m
4172 +CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m
4173 +CONFIG_TOUCHSCREEN_EXC3000=m
4174 +CONFIG_TOUCHSCREEN_FUJITSU=m
4175 +CONFIG_TOUCHSCREEN_GOODIX=m
4176 +CONFIG_TOUCHSCREEN_HIDEEP=m
4177 +CONFIG_TOUCHSCREEN_ILI210X=m
4178 +CONFIG_TOUCHSCREEN_S6SY761=m
4179 +CONFIG_TOUCHSCREEN_GUNZE=m
4180 +CONFIG_TOUCHSCREEN_EKTF2127=m
4181 +CONFIG_TOUCHSCREEN_ELAN=y
4182 +CONFIG_TOUCHSCREEN_ELO=m
4183 +CONFIG_TOUCHSCREEN_WACOM_W8001=m
4184 +CONFIG_TOUCHSCREEN_WACOM_I2C=m
4185 +CONFIG_TOUCHSCREEN_MAX11801=m
4186 +CONFIG_TOUCHSCREEN_MCS5000=m
4187 +CONFIG_TOUCHSCREEN_MMS114=m
4188 +CONFIG_TOUCHSCREEN_MELFAS_MIP4=m
4189 +CONFIG_TOUCHSCREEN_MTOUCH=m
4190 +CONFIG_TOUCHSCREEN_INEXIO=m
4191 +CONFIG_TOUCHSCREEN_MK712=m
4192 +CONFIG_TOUCHSCREEN_PENMOUNT=m
4193 +CONFIG_TOUCHSCREEN_EDT_FT5X06=m
4194 +CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
4195 +CONFIG_TOUCHSCREEN_TOUCHWIN=m
4196 +CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
4197 +CONFIG_TOUCHSCREEN_UCB1400=m
4198 +CONFIG_TOUCHSCREEN_PIXCIR=m
4199 +CONFIG_TOUCHSCREEN_WDT87XX_I2C=m
4200 +CONFIG_TOUCHSCREEN_WM831X=m
4201 +CONFIG_TOUCHSCREEN_WM97XX=m
4202 +CONFIG_TOUCHSCREEN_WM9705=y
4203 +CONFIG_TOUCHSCREEN_WM9712=y
4204 +CONFIG_TOUCHSCREEN_WM9713=y
4205 +CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
4206 +CONFIG_TOUCHSCREEN_MC13783=m
4207 +CONFIG_TOUCHSCREEN_USB_EGALAX=y
4208 +CONFIG_TOUCHSCREEN_USB_PANJIT=y
4209 +CONFIG_TOUCHSCREEN_USB_3M=y
4210 +CONFIG_TOUCHSCREEN_USB_ITM=y
4211 +CONFIG_TOUCHSCREEN_USB_ETURBO=y
4212 +CONFIG_TOUCHSCREEN_USB_GUNZE=y
4213 +CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
4214 +CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
4215 +CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
4216 +CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
4217 +CONFIG_TOUCHSCREEN_USB_GOTOP=y
4218 +CONFIG_TOUCHSCREEN_USB_JASTEC=y
4219 +CONFIG_TOUCHSCREEN_USB_ELO=y
4220 +CONFIG_TOUCHSCREEN_USB_E2I=y
4221 +CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
4222 +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
4223 +CONFIG_TOUCHSCREEN_USB_NEXIO=y
4224 +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y
4225 +CONFIG_TOUCHSCREEN_TOUCHIT213=m
4226 +CONFIG_TOUCHSCREEN_TSC_SERIO=m
4227 +CONFIG_TOUCHSCREEN_TSC200X_CORE=m
4228 +CONFIG_TOUCHSCREEN_TSC2004=m
4229 +CONFIG_TOUCHSCREEN_TSC2005=m
4230 +CONFIG_TOUCHSCREEN_TSC2007=m
4231 +CONFIG_TOUCHSCREEN_TSC2007_IIO=y
4232 +CONFIG_TOUCHSCREEN_PCAP=m
4233 +CONFIG_TOUCHSCREEN_RM_TS=m
4234 +CONFIG_TOUCHSCREEN_SILEAD=m
4235 +CONFIG_TOUCHSCREEN_SIS_I2C=m
4236 +CONFIG_TOUCHSCREEN_ST1232=m
4237 +CONFIG_TOUCHSCREEN_STMFTS=m
4238 +CONFIG_TOUCHSCREEN_SUR40=m
4239 +CONFIG_TOUCHSCREEN_SURFACE3_SPI=m
4240 +CONFIG_TOUCHSCREEN_SX8654=m
4241 +CONFIG_TOUCHSCREEN_TPS6507X=m
4242 +CONFIG_TOUCHSCREEN_ZET6223=m
4243 +CONFIG_TOUCHSCREEN_ZFORCE=m
4244 +CONFIG_TOUCHSCREEN_ROHM_BU21023=m
4245 +CONFIG_TOUCHSCREEN_IQS5XX=m
4246 +CONFIG_TOUCHSCREEN_ZINITIX=m
4247 +CONFIG_INPUT_MISC=y
4248 +CONFIG_INPUT_88PM860X_ONKEY=m
4249 +CONFIG_INPUT_88PM80X_ONKEY=m
4250 +CONFIG_INPUT_AD714X=m
4251 +CONFIG_INPUT_AD714X_I2C=m
4252 +CONFIG_INPUT_AD714X_SPI=m
4253 +CONFIG_INPUT_ARIZONA_HAPTICS=m
4254 +CONFIG_INPUT_BMA150=m
4255 +CONFIG_INPUT_E3X0_BUTTON=m
4256 +CONFIG_INPUT_PCSPKR=m
4257 +CONFIG_INPUT_MAX77693_HAPTIC=m
4258 +CONFIG_INPUT_MAX8925_ONKEY=m
4259 +CONFIG_INPUT_MAX8997_HAPTIC=m
4260 +CONFIG_INPUT_MC13783_PWRBUTTON=m
4261 +CONFIG_INPUT_MMA8450=m
4262 +CONFIG_INPUT_APANEL=m
4263 +CONFIG_INPUT_GPIO_BEEPER=m
4264 +CONFIG_INPUT_GPIO_DECODER=m
4265 +CONFIG_INPUT_GPIO_VIBRA=m
4266 +CONFIG_INPUT_ATLAS_BTNS=m
4267 +CONFIG_INPUT_ATI_REMOTE2=m
4268 +CONFIG_INPUT_KEYSPAN_REMOTE=m
4269 +CONFIG_INPUT_KXTJ9=m
4270 +CONFIG_INPUT_POWERMATE=m
4271 +CONFIG_INPUT_YEALINK=m
4272 +CONFIG_INPUT_CM109=m
4273 +CONFIG_INPUT_REGULATOR_HAPTIC=m
4274 +CONFIG_INPUT_RETU_PWRBUTTON=m
4275 +CONFIG_INPUT_AXP20X_PEK=m
4276 +CONFIG_INPUT_TWL4030_PWRBUTTON=m
4277 +CONFIG_INPUT_TWL4030_VIBRA=m
4278 +CONFIG_INPUT_TWL6040_VIBRA=m
4279 +CONFIG_INPUT_UINPUT=y
4280 +CONFIG_INPUT_PALMAS_PWRBUTTON=m
4281 +CONFIG_INPUT_PCF50633_PMU=m
4282 +CONFIG_INPUT_PCF8574=m
4283 +CONFIG_INPUT_PWM_BEEPER=m
4284 +CONFIG_INPUT_PWM_VIBRA=m
4285 +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
4286 +CONFIG_INPUT_DA7280_HAPTICS=m
4287 +CONFIG_INPUT_DA9052_ONKEY=m
4288 +CONFIG_INPUT_DA9055_ONKEY=m
4289 +CONFIG_INPUT_DA9063_ONKEY=m
4290 +CONFIG_INPUT_WM831X_ON=m
4291 +CONFIG_INPUT_PCAP=m
4292 +CONFIG_INPUT_ADXL34X=m
4293 +CONFIG_INPUT_ADXL34X_I2C=m
4294 +CONFIG_INPUT_ADXL34X_SPI=m
4295 +CONFIG_INPUT_IMS_PCU=m
4296 +CONFIG_INPUT_IQS269A=m
4297 +CONFIG_INPUT_CMA3000=m
4298 +CONFIG_INPUT_CMA3000_I2C=m
4299 +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
4300 +CONFIG_INPUT_IDEAPAD_SLIDEBAR=m
4301 +CONFIG_INPUT_SOC_BUTTON_ARRAY=m
4302 +CONFIG_INPUT_DRV260X_HAPTICS=m
4303 +CONFIG_INPUT_DRV2665_HAPTICS=m
4304 +CONFIG_INPUT_DRV2667_HAPTICS=m
4305 +CONFIG_INPUT_RAVE_SP_PWRBUTTON=m
4306 +CONFIG_RMI4_CORE=m
4307 +CONFIG_RMI4_I2C=m
4308 +CONFIG_RMI4_SPI=m
4309 +CONFIG_RMI4_SMB=m
4310 +CONFIG_RMI4_F03=y
4311 +CONFIG_RMI4_F03_SERIO=m
4312 +CONFIG_RMI4_2D_SENSOR=y
4313 +CONFIG_RMI4_F11=y
4314 +CONFIG_RMI4_F12=y
4315 +CONFIG_RMI4_F30=y
4316 +CONFIG_RMI4_F34=y
4317 +CONFIG_RMI4_F3A=y
4318 +CONFIG_RMI4_F54=y
4319 +CONFIG_RMI4_F55=y
4322 +# Hardware I/O ports
4324 +CONFIG_SERIO=y
4325 +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
4326 +CONFIG_SERIO_I8042=y
4327 +CONFIG_SERIO_SERPORT=m
4328 +CONFIG_SERIO_CT82C710=m
4329 +CONFIG_SERIO_PARKBD=m
4330 +CONFIG_SERIO_PCIPS2=m
4331 +CONFIG_SERIO_LIBPS2=y
4332 +CONFIG_SERIO_RAW=m
4333 +CONFIG_SERIO_ALTERA_PS2=m
4334 +CONFIG_SERIO_PS2MULT=m
4335 +CONFIG_SERIO_ARC_PS2=m
4336 +CONFIG_HYPERV_KEYBOARD=m
4337 +CONFIG_SERIO_GPIO_PS2=m
4338 +CONFIG_USERIO=m
4339 +CONFIG_GAMEPORT=m
4340 +CONFIG_GAMEPORT_NS558=m
4341 +CONFIG_GAMEPORT_L4=m
4342 +CONFIG_GAMEPORT_EMU10K1=m
4343 +CONFIG_GAMEPORT_FM801=m
4344 +# end of Hardware I/O ports
4345 +# end of Input device support
4348 +# Character devices
4350 +CONFIG_TTY=y
4351 +CONFIG_VT=y
4352 +CONFIG_CONSOLE_TRANSLATIONS=y
4353 +CONFIG_VT_CONSOLE=y
4354 +CONFIG_VT_CONSOLE_SLEEP=y
4355 +CONFIG_HW_CONSOLE=y
4356 +CONFIG_VT_HW_CONSOLE_BINDING=y
4357 +CONFIG_UNIX98_PTYS=y
4358 +CONFIG_LEGACY_PTYS=y
4359 +CONFIG_LEGACY_PTY_COUNT=0
4360 +CONFIG_LDISC_AUTOLOAD=y
4363 +# Serial drivers
4365 +CONFIG_SERIAL_EARLYCON=y
4366 +CONFIG_SERIAL_8250=y
4367 +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
4368 +CONFIG_SERIAL_8250_PNP=y
4369 +CONFIG_SERIAL_8250_16550A_VARIANTS=y
4370 +CONFIG_SERIAL_8250_FINTEK=y
4371 +CONFIG_SERIAL_8250_CONSOLE=y
4372 +CONFIG_SERIAL_8250_DMA=y
4373 +CONFIG_SERIAL_8250_PCI=y
4374 +CONFIG_SERIAL_8250_EXAR=m
4375 +CONFIG_SERIAL_8250_CS=m
4376 +CONFIG_SERIAL_8250_MEN_MCB=m
4377 +CONFIG_SERIAL_8250_NR_UARTS=48
4378 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32
4379 +CONFIG_SERIAL_8250_EXTENDED=y
4380 +CONFIG_SERIAL_8250_MANY_PORTS=y
4381 +CONFIG_SERIAL_8250_SHARE_IRQ=y
4382 +# CONFIG_SERIAL_8250_DETECT_IRQ is not set
4383 +CONFIG_SERIAL_8250_RSA=y
4384 +CONFIG_SERIAL_8250_DWLIB=y
4385 +CONFIG_SERIAL_8250_DW=m
4386 +CONFIG_SERIAL_8250_RT288X=y
4387 +CONFIG_SERIAL_8250_LPSS=m
4388 +CONFIG_SERIAL_8250_MID=m
4391 +# Non-8250 serial port support
4393 +CONFIG_SERIAL_KGDB_NMI=y
4394 +CONFIG_SERIAL_MAX3100=m
4395 +CONFIG_SERIAL_MAX310X=y
4396 +CONFIG_SERIAL_UARTLITE=m
4397 +CONFIG_SERIAL_UARTLITE_NR_UARTS=1
4398 +CONFIG_SERIAL_CORE=y
4399 +CONFIG_SERIAL_CORE_CONSOLE=y
4400 +CONFIG_CONSOLE_POLL=y
4401 +CONFIG_SERIAL_JSM=m
4402 +CONFIG_SERIAL_LANTIQ=m
4403 +CONFIG_SERIAL_SCCNXP=y
4404 +CONFIG_SERIAL_SCCNXP_CONSOLE=y
4405 +CONFIG_SERIAL_SC16IS7XX_CORE=m
4406 +CONFIG_SERIAL_SC16IS7XX=m
4407 +CONFIG_SERIAL_SC16IS7XX_I2C=y
4408 +CONFIG_SERIAL_SC16IS7XX_SPI=y
4409 +CONFIG_SERIAL_BCM63XX=m
4410 +CONFIG_SERIAL_ALTERA_JTAGUART=m
4411 +CONFIG_SERIAL_ALTERA_UART=m
4412 +CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
4413 +CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
4414 +CONFIG_SERIAL_ARC=m
4415 +CONFIG_SERIAL_ARC_NR_PORTS=1
4416 +CONFIG_SERIAL_RP2=m
4417 +CONFIG_SERIAL_RP2_NR_UARTS=32
4418 +CONFIG_SERIAL_FSL_LPUART=m
4419 +CONFIG_SERIAL_FSL_LINFLEXUART=m
4420 +CONFIG_SERIAL_MEN_Z135=m
4421 +CONFIG_SERIAL_SPRD=m
4422 +# end of Serial drivers
4424 +CONFIG_SERIAL_MCTRL_GPIO=y
4425 +CONFIG_SERIAL_NONSTANDARD=y
4426 +CONFIG_ROCKETPORT=m
4427 +CONFIG_CYCLADES=m
4428 +# CONFIG_CYZ_INTR is not set
4429 +CONFIG_MOXA_INTELLIO=m
4430 +CONFIG_MOXA_SMARTIO=m
4431 +CONFIG_SYNCLINK_GT=m
4432 +CONFIG_ISI=m
4433 +CONFIG_N_HDLC=m
4434 +CONFIG_N_GSM=m
4435 +CONFIG_NOZOMI=m
4436 +CONFIG_NULL_TTY=m
4437 +CONFIG_TRACE_ROUTER=m
4438 +CONFIG_TRACE_SINK=m
4439 +CONFIG_HVC_DRIVER=y
4440 +CONFIG_HVC_IRQ=y
4441 +CONFIG_HVC_XEN=y
4442 +CONFIG_HVC_XEN_FRONTEND=y
4443 +CONFIG_SERIAL_DEV_BUS=y
4444 +CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
4445 +CONFIG_TTY_PRINTK=y
4446 +CONFIG_TTY_PRINTK_LEVEL=6
4447 +CONFIG_PRINTER=m
4448 +# CONFIG_LP_CONSOLE is not set
4449 +CONFIG_PPDEV=m
4450 +CONFIG_VIRTIO_CONSOLE=y
4451 +CONFIG_IPMI_HANDLER=m
4452 +CONFIG_IPMI_DMI_DECODE=y
4453 +CONFIG_IPMI_PLAT_DATA=y
4454 +# CONFIG_IPMI_PANIC_EVENT is not set
4455 +CONFIG_IPMI_DEVICE_INTERFACE=m
4456 +CONFIG_IPMI_SI=m
4457 +CONFIG_IPMI_SSIF=m
4458 +CONFIG_IPMI_WATCHDOG=m
4459 +CONFIG_IPMI_POWEROFF=m
4460 +CONFIG_HW_RANDOM=y
4461 +CONFIG_HW_RANDOM_TIMERIOMEM=m
4462 +CONFIG_HW_RANDOM_INTEL=m
4463 +CONFIG_HW_RANDOM_AMD=m
4464 +CONFIG_HW_RANDOM_BA431=m
4465 +CONFIG_HW_RANDOM_VIA=m
4466 +CONFIG_HW_RANDOM_VIRTIO=m
4467 +CONFIG_HW_RANDOM_XIPHERA=m
4468 +CONFIG_APPLICOM=m
4471 +# PCMCIA character devices
4473 +CONFIG_SYNCLINK_CS=m
4474 +CONFIG_CARDMAN_4000=m
4475 +CONFIG_CARDMAN_4040=m
4476 +CONFIG_SCR24X=m
4477 +CONFIG_IPWIRELESS=m
4478 +# end of PCMCIA character devices
4480 +CONFIG_MWAVE=m
4481 +CONFIG_DEVMEM=y
4482 +# CONFIG_DEVKMEM is not set
4483 +CONFIG_NVRAM=m
4484 +CONFIG_RAW_DRIVER=m
4485 +CONFIG_MAX_RAW_DEVS=256
4486 +CONFIG_DEVPORT=y
4487 +CONFIG_HPET=y
4488 +CONFIG_HPET_MMAP=y
4489 +CONFIG_HPET_MMAP_DEFAULT=y
4490 +CONFIG_HANGCHECK_TIMER=m
4491 +CONFIG_UV_MMTIMER=m
4492 +CONFIG_TCG_TPM=y
4493 +CONFIG_HW_RANDOM_TPM=y
4494 +CONFIG_TCG_TIS_CORE=y
4495 +CONFIG_TCG_TIS=y
4496 +CONFIG_TCG_TIS_SPI=m
4497 +CONFIG_TCG_TIS_SPI_CR50=y
4498 +CONFIG_TCG_TIS_I2C_CR50=m
4499 +CONFIG_TCG_TIS_I2C_ATMEL=m
4500 +CONFIG_TCG_TIS_I2C_INFINEON=m
4501 +CONFIG_TCG_TIS_I2C_NUVOTON=m
4502 +CONFIG_TCG_NSC=m
4503 +CONFIG_TCG_ATMEL=m
4504 +CONFIG_TCG_INFINEON=m
4505 +CONFIG_TCG_XEN=m
4506 +CONFIG_TCG_CRB=y
4507 +CONFIG_TCG_VTPM_PROXY=m
4508 +CONFIG_TCG_TIS_ST33ZP24=m
4509 +CONFIG_TCG_TIS_ST33ZP24_I2C=m
4510 +CONFIG_TCG_TIS_ST33ZP24_SPI=m
4511 +CONFIG_TELCLOCK=m
4512 +CONFIG_XILLYBUS=m
4513 +CONFIG_XILLYBUS_PCIE=m
4514 +# end of Character devices
4516 +CONFIG_RANDOM_TRUST_CPU=y
4517 +CONFIG_RANDOM_TRUST_BOOTLOADER=y
4520 +# I2C support
4522 +CONFIG_I2C=y
4523 +CONFIG_ACPI_I2C_OPREGION=y
4524 +CONFIG_I2C_BOARDINFO=y
4525 +CONFIG_I2C_COMPAT=y
4526 +CONFIG_I2C_CHARDEV=y
4527 +CONFIG_I2C_MUX=m
4530 +# Multiplexer I2C Chip support
4532 +CONFIG_I2C_MUX_GPIO=m
4533 +CONFIG_I2C_MUX_LTC4306=m
4534 +CONFIG_I2C_MUX_PCA9541=m
4535 +CONFIG_I2C_MUX_PCA954x=m
4536 +CONFIG_I2C_MUX_REG=m
4537 +CONFIG_I2C_MUX_MLXCPLD=m
4538 +# end of Multiplexer I2C Chip support
4540 +CONFIG_I2C_HELPER_AUTO=y
4541 +CONFIG_I2C_SMBUS=m
4542 +CONFIG_I2C_ALGOBIT=m
4543 +CONFIG_I2C_ALGOPCA=m
4546 +# I2C Hardware Bus support
4550 +# PC SMBus host controller drivers
4552 +CONFIG_I2C_ALI1535=m
4553 +CONFIG_I2C_ALI1563=m
4554 +CONFIG_I2C_ALI15X3=m
4555 +CONFIG_I2C_AMD756=m
4556 +CONFIG_I2C_AMD756_S4882=m
4557 +CONFIG_I2C_AMD8111=m
4558 +CONFIG_I2C_AMD_MP2=m
4559 +CONFIG_I2C_I801=m
4560 +CONFIG_I2C_ISCH=m
4561 +CONFIG_I2C_ISMT=m
4562 +CONFIG_I2C_PIIX4=m
4563 +CONFIG_I2C_CHT_WC=m
4564 +CONFIG_I2C_NFORCE2=m
4565 +CONFIG_I2C_NFORCE2_S4985=m
4566 +CONFIG_I2C_NVIDIA_GPU=m
4567 +CONFIG_I2C_SIS5595=m
4568 +CONFIG_I2C_SIS630=m
4569 +CONFIG_I2C_SIS96X=m
4570 +CONFIG_I2C_VIA=m
4571 +CONFIG_I2C_VIAPRO=m
4574 +# ACPI drivers
4576 +CONFIG_I2C_SCMI=m
4579 +# I2C system bus drivers (mostly embedded / system-on-chip)
4581 +CONFIG_I2C_CBUS_GPIO=m
4582 +CONFIG_I2C_DESIGNWARE_CORE=y
4583 +# CONFIG_I2C_DESIGNWARE_SLAVE is not set
4584 +CONFIG_I2C_DESIGNWARE_PLATFORM=y
4585 +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
4586 +CONFIG_I2C_DESIGNWARE_PCI=m
4587 +# CONFIG_I2C_EMEV2 is not set
4588 +CONFIG_I2C_GPIO=m
4589 +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
4590 +CONFIG_I2C_KEMPLD=m
4591 +CONFIG_I2C_OCORES=m
4592 +CONFIG_I2C_PCA_PLATFORM=m
4593 +CONFIG_I2C_SIMTEC=m
4594 +CONFIG_I2C_XILINX=m
4597 +# External I2C/SMBus adapter drivers
4599 +CONFIG_I2C_DIOLAN_U2C=m
4600 +CONFIG_I2C_DLN2=m
4601 +CONFIG_I2C_PARPORT=m
4602 +CONFIG_I2C_ROBOTFUZZ_OSIF=m
4603 +CONFIG_I2C_TAOS_EVM=m
4604 +CONFIG_I2C_TINY_USB=m
4605 +CONFIG_I2C_VIPERBOARD=m
4608 +# Other I2C/SMBus bus drivers
4610 +CONFIG_I2C_MLXCPLD=m
4611 +CONFIG_I2C_CROS_EC_TUNNEL=m
4612 +# end of I2C Hardware Bus support
4614 +CONFIG_I2C_STUB=m
4615 +# CONFIG_I2C_SLAVE is not set
4616 +# CONFIG_I2C_DEBUG_CORE is not set
4617 +# CONFIG_I2C_DEBUG_ALGO is not set
4618 +# CONFIG_I2C_DEBUG_BUS is not set
4619 +# end of I2C support
4621 +CONFIG_I3C=m
4622 +CONFIG_CDNS_I3C_MASTER=m
4623 +CONFIG_DW_I3C_MASTER=m
4624 +CONFIG_SVC_I3C_MASTER=m
4625 +CONFIG_MIPI_I3C_HCI=m
4626 +CONFIG_SPI=y
4627 +# CONFIG_SPI_DEBUG is not set
4628 +CONFIG_SPI_MASTER=y
4629 +CONFIG_SPI_MEM=y
4632 +# SPI Master Controller Drivers
4634 +CONFIG_SPI_ALTERA=m
4635 +CONFIG_SPI_AXI_SPI_ENGINE=m
4636 +CONFIG_SPI_BITBANG=m
4637 +CONFIG_SPI_BUTTERFLY=m
4638 +CONFIG_SPI_CADENCE=m
4639 +CONFIG_SPI_DESIGNWARE=m
4640 +CONFIG_SPI_DW_DMA=y
4641 +CONFIG_SPI_DW_PCI=m
4642 +CONFIG_SPI_DW_MMIO=m
4643 +CONFIG_SPI_DLN2=m
4644 +CONFIG_SPI_NXP_FLEXSPI=m
4645 +CONFIG_SPI_GPIO=m
4646 +CONFIG_SPI_LM70_LLP=m
4647 +CONFIG_SPI_LANTIQ_SSC=m
4648 +CONFIG_SPI_OC_TINY=m
4649 +CONFIG_SPI_PXA2XX=m
4650 +CONFIG_SPI_PXA2XX_PCI=m
4651 +# CONFIG_SPI_ROCKCHIP is not set
4652 +CONFIG_SPI_SC18IS602=m
4653 +CONFIG_SPI_SIFIVE=m
4654 +CONFIG_SPI_MXIC=m
4655 +CONFIG_SPI_XCOMM=m
4656 +# CONFIG_SPI_XILINX is not set
4657 +CONFIG_SPI_ZYNQMP_GQSPI=m
4658 +CONFIG_SPI_AMD=m
4661 +# SPI Multiplexer support
4663 +CONFIG_SPI_MUX=m
4666 +# SPI Protocol Masters
4668 +CONFIG_SPI_SPIDEV=m
4669 +CONFIG_SPI_LOOPBACK_TEST=m
4670 +CONFIG_SPI_TLE62X0=m
4671 +CONFIG_SPI_SLAVE=y
4672 +CONFIG_SPI_SLAVE_TIME=m
4673 +CONFIG_SPI_SLAVE_SYSTEM_CONTROL=m
4674 +CONFIG_SPI_DYNAMIC=y
4675 +CONFIG_SPMI=m
4676 +CONFIG_HSI=m
4677 +CONFIG_HSI_BOARDINFO=y
4680 +# HSI controllers
4684 +# HSI clients
4686 +CONFIG_HSI_CHAR=m
4687 +CONFIG_PPS=y
4688 +# CONFIG_PPS_DEBUG is not set
4691 +# PPS clients support
4693 +# CONFIG_PPS_CLIENT_KTIMER is not set
4694 +CONFIG_PPS_CLIENT_LDISC=m
4695 +CONFIG_PPS_CLIENT_PARPORT=m
4696 +CONFIG_PPS_CLIENT_GPIO=m
4699 +# PPS generators support
4703 +# PTP clock support
4705 +CONFIG_PTP_1588_CLOCK=y
4706 +CONFIG_DP83640_PHY=m
4707 +CONFIG_PTP_1588_CLOCK_INES=m
4708 +CONFIG_PTP_1588_CLOCK_KVM=m
4709 +CONFIG_PTP_1588_CLOCK_IDT82P33=m
4710 +CONFIG_PTP_1588_CLOCK_IDTCM=m
4711 +CONFIG_PTP_1588_CLOCK_VMW=m
4712 +CONFIG_PTP_1588_CLOCK_OCP=m
4713 +# end of PTP clock support
4715 +CONFIG_PINCTRL=y
4716 +CONFIG_PINMUX=y
4717 +CONFIG_PINCONF=y
4718 +CONFIG_GENERIC_PINCONF=y
4719 +# CONFIG_DEBUG_PINCTRL is not set
4720 +CONFIG_PINCTRL_AMD=y
4721 +CONFIG_PINCTRL_DA9062=m
4722 +CONFIG_PINCTRL_MCP23S08_I2C=m
4723 +CONFIG_PINCTRL_MCP23S08_SPI=m
4724 +CONFIG_PINCTRL_MCP23S08=m
4725 +CONFIG_PINCTRL_SX150X=y
4726 +CONFIG_PINCTRL_BAYTRAIL=y
4727 +CONFIG_PINCTRL_CHERRYVIEW=y
4728 +CONFIG_PINCTRL_LYNXPOINT=m
4729 +CONFIG_PINCTRL_INTEL=y
4730 +CONFIG_PINCTRL_ALDERLAKE=m
4731 +CONFIG_PINCTRL_BROXTON=m
4732 +CONFIG_PINCTRL_CANNONLAKE=m
4733 +CONFIG_PINCTRL_CEDARFORK=m
4734 +CONFIG_PINCTRL_DENVERTON=m
4735 +CONFIG_PINCTRL_ELKHARTLAKE=m
4736 +CONFIG_PINCTRL_EMMITSBURG=m
4737 +CONFIG_PINCTRL_GEMINILAKE=m
4738 +CONFIG_PINCTRL_ICELAKE=m
4739 +CONFIG_PINCTRL_JASPERLAKE=m
4740 +CONFIG_PINCTRL_LAKEFIELD=m
4741 +CONFIG_PINCTRL_LEWISBURG=m
4742 +CONFIG_PINCTRL_SUNRISEPOINT=m
4743 +CONFIG_PINCTRL_TIGERLAKE=m
4746 +# Renesas pinctrl drivers
4748 +# end of Renesas pinctrl drivers
4750 +CONFIG_PINCTRL_MADERA=m
4751 +CONFIG_PINCTRL_CS47L15=y
4752 +CONFIG_PINCTRL_CS47L35=y
4753 +CONFIG_PINCTRL_CS47L85=y
4754 +CONFIG_PINCTRL_CS47L90=y
4755 +CONFIG_PINCTRL_CS47L92=y
4756 +CONFIG_GPIOLIB=y
4757 +CONFIG_GPIOLIB_FASTPATH_LIMIT=512
4758 +CONFIG_GPIO_ACPI=y
4759 +CONFIG_GPIOLIB_IRQCHIP=y
4760 +# CONFIG_DEBUG_GPIO is not set
4761 +CONFIG_GPIO_SYSFS=y
4762 +CONFIG_GPIO_CDEV=y
4763 +# CONFIG_GPIO_CDEV_V1 is not set
4764 +CONFIG_GPIO_GENERIC=m
4765 +CONFIG_GPIO_MAX730X=m
4768 +# Memory mapped GPIO drivers
4770 +CONFIG_GPIO_AMDPT=m
4771 +CONFIG_GPIO_DWAPB=m
4772 +CONFIG_GPIO_EXAR=m
4773 +CONFIG_GPIO_GENERIC_PLATFORM=m
4774 +CONFIG_GPIO_ICH=m
4775 +CONFIG_GPIO_MB86S7X=m
4776 +CONFIG_GPIO_MENZ127=m
4777 +CONFIG_GPIO_SIOX=m
4778 +CONFIG_GPIO_VX855=m
4779 +CONFIG_GPIO_AMD_FCH=m
4780 +# end of Memory mapped GPIO drivers
4783 +# Port-mapped I/O GPIO drivers
4785 +CONFIG_GPIO_104_DIO_48E=m
4786 +CONFIG_GPIO_104_IDIO_16=m
4787 +CONFIG_GPIO_104_IDI_48=m
4788 +CONFIG_GPIO_F7188X=m
4789 +CONFIG_GPIO_GPIO_MM=m
4790 +CONFIG_GPIO_IT87=m
4791 +CONFIG_GPIO_SCH=m
4792 +CONFIG_GPIO_SCH311X=m
4793 +CONFIG_GPIO_WINBOND=m
4794 +CONFIG_GPIO_WS16C48=m
4795 +# end of Port-mapped I/O GPIO drivers
4798 +# I2C GPIO expanders
4800 +CONFIG_GPIO_ADP5588=m
4801 +CONFIG_GPIO_MAX7300=m
4802 +CONFIG_GPIO_MAX732X=m
4803 +CONFIG_GPIO_PCA953X=m
4804 +CONFIG_GPIO_PCA953X_IRQ=y
4805 +CONFIG_GPIO_PCA9570=m
4806 +CONFIG_GPIO_PCF857X=m
4807 +CONFIG_GPIO_TPIC2810=m
4808 +# end of I2C GPIO expanders
4811 +# MFD GPIO expanders
4813 +CONFIG_GPIO_ADP5520=m
4814 +CONFIG_GPIO_ARIZONA=m
4815 +CONFIG_GPIO_BD9571MWV=m
4816 +CONFIG_GPIO_CRYSTAL_COVE=y
4817 +CONFIG_GPIO_DA9052=m
4818 +CONFIG_GPIO_DA9055=m
4819 +CONFIG_GPIO_DLN2=m
4820 +CONFIG_GPIO_JANZ_TTL=m
4821 +CONFIG_GPIO_KEMPLD=m
4822 +CONFIG_GPIO_LP3943=m
4823 +CONFIG_GPIO_LP873X=m
4824 +CONFIG_GPIO_MADERA=m
4825 +CONFIG_GPIO_PALMAS=y
4826 +CONFIG_GPIO_RC5T583=y
4827 +CONFIG_GPIO_TPS65086=m
4828 +CONFIG_GPIO_TPS6586X=y
4829 +CONFIG_GPIO_TPS65910=y
4830 +CONFIG_GPIO_TPS65912=m
4831 +CONFIG_GPIO_TPS68470=y
4832 +CONFIG_GPIO_TQMX86=m
4833 +CONFIG_GPIO_TWL4030=m
4834 +CONFIG_GPIO_TWL6040=m
4835 +CONFIG_GPIO_UCB1400=m
4836 +CONFIG_GPIO_WHISKEY_COVE=m
4837 +CONFIG_GPIO_WM831X=m
4838 +CONFIG_GPIO_WM8350=m
4839 +CONFIG_GPIO_WM8994=m
4840 +# end of MFD GPIO expanders
4843 +# PCI GPIO expanders
4845 +CONFIG_GPIO_AMD8111=m
4846 +CONFIG_GPIO_ML_IOH=m
4847 +CONFIG_GPIO_PCI_IDIO_16=m
4848 +CONFIG_GPIO_PCIE_IDIO_24=m
4849 +CONFIG_GPIO_RDC321X=m
4850 +# end of PCI GPIO expanders
4853 +# SPI GPIO expanders
4855 +CONFIG_GPIO_MAX3191X=m
4856 +CONFIG_GPIO_MAX7301=m
4857 +CONFIG_GPIO_MC33880=m
4858 +CONFIG_GPIO_PISOSR=m
4859 +CONFIG_GPIO_XRA1403=m
4860 +# end of SPI GPIO expanders
4863 +# USB GPIO expanders
4865 +CONFIG_GPIO_VIPERBOARD=m
4866 +# end of USB GPIO expanders
4869 +# Virtual GPIO drivers
4871 +CONFIG_GPIO_AGGREGATOR=m
4872 +# CONFIG_GPIO_MOCKUP is not set
4873 +# end of Virtual GPIO drivers
4875 +CONFIG_W1=m
4876 +CONFIG_W1_CON=y
4879 +# 1-wire Bus Masters
4881 +CONFIG_W1_MASTER_MATROX=m
4882 +CONFIG_W1_MASTER_DS2490=m
4883 +CONFIG_W1_MASTER_DS2482=m
4884 +CONFIG_W1_MASTER_DS1WM=m
4885 +CONFIG_W1_MASTER_GPIO=m
4886 +CONFIG_W1_MASTER_SGI=m
4887 +# end of 1-wire Bus Masters
4890 +# 1-wire Slaves
4892 +CONFIG_W1_SLAVE_THERM=m
4893 +CONFIG_W1_SLAVE_SMEM=m
4894 +CONFIG_W1_SLAVE_DS2405=m
4895 +CONFIG_W1_SLAVE_DS2408=m
4896 +CONFIG_W1_SLAVE_DS2408_READBACK=y
4897 +CONFIG_W1_SLAVE_DS2413=m
4898 +CONFIG_W1_SLAVE_DS2406=m
4899 +CONFIG_W1_SLAVE_DS2423=m
4900 +CONFIG_W1_SLAVE_DS2805=m
4901 +CONFIG_W1_SLAVE_DS2430=m
4902 +CONFIG_W1_SLAVE_DS2431=m
4903 +CONFIG_W1_SLAVE_DS2433=m
4904 +# CONFIG_W1_SLAVE_DS2433_CRC is not set
4905 +CONFIG_W1_SLAVE_DS2438=m
4906 +CONFIG_W1_SLAVE_DS250X=m
4907 +CONFIG_W1_SLAVE_DS2780=m
4908 +CONFIG_W1_SLAVE_DS2781=m
4909 +CONFIG_W1_SLAVE_DS28E04=m
4910 +CONFIG_W1_SLAVE_DS28E17=m
4911 +# end of 1-wire Slaves
4913 +CONFIG_POWER_RESET=y
4914 +CONFIG_POWER_RESET_MT6323=y
4915 +CONFIG_POWER_RESET_RESTART=y
4916 +CONFIG_POWER_SUPPLY=y
4917 +# CONFIG_POWER_SUPPLY_DEBUG is not set
4918 +CONFIG_POWER_SUPPLY_HWMON=y
4919 +CONFIG_PDA_POWER=m
4920 +CONFIG_GENERIC_ADC_BATTERY=m
4921 +CONFIG_MAX8925_POWER=m
4922 +CONFIG_WM831X_BACKUP=m
4923 +CONFIG_WM831X_POWER=m
4924 +CONFIG_WM8350_POWER=m
4925 +CONFIG_TEST_POWER=m
4926 +CONFIG_BATTERY_88PM860X=m
4927 +CONFIG_CHARGER_ADP5061=m
4928 +CONFIG_BATTERY_CW2015=m
4929 +CONFIG_BATTERY_DS2760=m
4930 +CONFIG_BATTERY_DS2780=m
4931 +CONFIG_BATTERY_DS2781=m
4932 +CONFIG_BATTERY_DS2782=m
4933 +CONFIG_BATTERY_SBS=m
4934 +CONFIG_CHARGER_SBS=m
4935 +CONFIG_MANAGER_SBS=m
4936 +CONFIG_BATTERY_BQ27XXX=m
4937 +CONFIG_BATTERY_BQ27XXX_I2C=m
4938 +CONFIG_BATTERY_BQ27XXX_HDQ=m
4939 +# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set
4940 +CONFIG_BATTERY_DA9030=m
4941 +CONFIG_BATTERY_DA9052=m
4942 +CONFIG_CHARGER_DA9150=m
4943 +CONFIG_BATTERY_DA9150=m
4944 +CONFIG_CHARGER_AXP20X=m
4945 +CONFIG_BATTERY_AXP20X=m
4946 +CONFIG_AXP20X_POWER=m
4947 +CONFIG_AXP288_CHARGER=m
4948 +CONFIG_AXP288_FUEL_GAUGE=m
4949 +CONFIG_BATTERY_MAX17040=m
4950 +CONFIG_BATTERY_MAX17042=m
4951 +CONFIG_BATTERY_MAX1721X=m
4952 +CONFIG_BATTERY_TWL4030_MADC=m
4953 +CONFIG_CHARGER_88PM860X=m
4954 +CONFIG_CHARGER_PCF50633=m
4955 +CONFIG_BATTERY_RX51=m
4956 +CONFIG_CHARGER_ISP1704=m
4957 +CONFIG_CHARGER_MAX8903=m
4958 +CONFIG_CHARGER_TWL4030=m
4959 +CONFIG_CHARGER_LP8727=m
4960 +CONFIG_CHARGER_LP8788=m
4961 +CONFIG_CHARGER_GPIO=m
4962 +CONFIG_CHARGER_MANAGER=y
4963 +CONFIG_CHARGER_LT3651=m
4964 +CONFIG_CHARGER_LTC4162L=m
4965 +CONFIG_CHARGER_MAX14577=m
4966 +CONFIG_CHARGER_MAX77693=m
4967 +CONFIG_CHARGER_MAX8997=m
4968 +CONFIG_CHARGER_MAX8998=m
4969 +CONFIG_CHARGER_MP2629=m
4970 +CONFIG_CHARGER_BQ2415X=m
4971 +CONFIG_CHARGER_BQ24190=m
4972 +CONFIG_CHARGER_BQ24257=m
4973 +CONFIG_CHARGER_BQ24735=m
4974 +CONFIG_CHARGER_BQ2515X=m
4975 +CONFIG_CHARGER_BQ25890=m
4976 +CONFIG_CHARGER_BQ25980=m
4977 +CONFIG_CHARGER_BQ256XX=m
4978 +CONFIG_CHARGER_SMB347=m
4979 +CONFIG_CHARGER_TPS65090=m
4980 +CONFIG_BATTERY_GAUGE_LTC2941=m
4981 +CONFIG_BATTERY_RT5033=m
4982 +CONFIG_CHARGER_RT9455=m
4983 +CONFIG_CHARGER_CROS_USBPD=m
4984 +CONFIG_CHARGER_BD99954=m
4985 +CONFIG_CHARGER_WILCO=m
4986 +CONFIG_HWMON=y
4987 +CONFIG_HWMON_VID=m
4988 +# CONFIG_HWMON_DEBUG_CHIP is not set
4991 +# Native drivers
4993 +CONFIG_SENSORS_ABITUGURU=m
4994 +CONFIG_SENSORS_ABITUGURU3=m
4995 +CONFIG_SENSORS_AD7314=m
4996 +CONFIG_SENSORS_AD7414=m
4997 +CONFIG_SENSORS_AD7418=m
4998 +CONFIG_SENSORS_ADM1021=m
4999 +CONFIG_SENSORS_ADM1025=m
5000 +CONFIG_SENSORS_ADM1026=m
5001 +CONFIG_SENSORS_ADM1029=m
5002 +CONFIG_SENSORS_ADM1031=m
5003 +CONFIG_SENSORS_ADM1177=m
5004 +CONFIG_SENSORS_ADM9240=m
5005 +CONFIG_SENSORS_ADT7X10=m
5006 +CONFIG_SENSORS_ADT7310=m
5007 +CONFIG_SENSORS_ADT7410=m
5008 +CONFIG_SENSORS_ADT7411=m
5009 +CONFIG_SENSORS_ADT7462=m
5010 +CONFIG_SENSORS_ADT7470=m
5011 +CONFIG_SENSORS_ADT7475=m
5012 +CONFIG_SENSORS_AHT10=m
5013 +CONFIG_SENSORS_AS370=m
5014 +CONFIG_SENSORS_ASC7621=m
5015 +CONFIG_SENSORS_AXI_FAN_CONTROL=m
5016 +CONFIG_SENSORS_K8TEMP=m
5017 +CONFIG_SENSORS_K10TEMP=m
5018 +CONFIG_SENSORS_FAM15H_POWER=m
5019 +CONFIG_SENSORS_AMD_ENERGY=m
5020 +CONFIG_SENSORS_APPLESMC=m
5021 +CONFIG_SENSORS_ASB100=m
5022 +CONFIG_SENSORS_ASPEED=m
5023 +CONFIG_SENSORS_ATXP1=m
5024 +CONFIG_SENSORS_CORSAIR_CPRO=m
5025 +CONFIG_SENSORS_CORSAIR_PSU=m
5026 +CONFIG_SENSORS_DRIVETEMP=m
5027 +CONFIG_SENSORS_DS620=m
5028 +CONFIG_SENSORS_DS1621=m
5029 +CONFIG_SENSORS_DELL_SMM=m
5030 +CONFIG_SENSORS_DA9052_ADC=m
5031 +CONFIG_SENSORS_DA9055=m
5032 +CONFIG_SENSORS_I5K_AMB=m
5033 +CONFIG_SENSORS_F71805F=m
5034 +CONFIG_SENSORS_F71882FG=m
5035 +CONFIG_SENSORS_F75375S=m
5036 +CONFIG_SENSORS_MC13783_ADC=m
5037 +CONFIG_SENSORS_FSCHMD=m
5038 +CONFIG_SENSORS_FTSTEUTATES=m
5039 +CONFIG_SENSORS_GL518SM=m
5040 +CONFIG_SENSORS_GL520SM=m
5041 +CONFIG_SENSORS_G760A=m
5042 +CONFIG_SENSORS_G762=m
5043 +CONFIG_SENSORS_HIH6130=m
5044 +CONFIG_SENSORS_IBMAEM=m
5045 +CONFIG_SENSORS_IBMPEX=m
5046 +CONFIG_SENSORS_IIO_HWMON=m
5047 +CONFIG_SENSORS_I5500=m
5048 +CONFIG_SENSORS_CORETEMP=m
5049 +CONFIG_SENSORS_IT87=m
5050 +CONFIG_SENSORS_JC42=m
5051 +CONFIG_SENSORS_POWR1220=m
5052 +CONFIG_SENSORS_LINEAGE=m
5053 +CONFIG_SENSORS_LTC2945=m
5054 +CONFIG_SENSORS_LTC2947=m
5055 +CONFIG_SENSORS_LTC2947_I2C=m
5056 +CONFIG_SENSORS_LTC2947_SPI=m
5057 +CONFIG_SENSORS_LTC2990=m
5058 +CONFIG_SENSORS_LTC2992=m
5059 +CONFIG_SENSORS_LTC4151=m
5060 +CONFIG_SENSORS_LTC4215=m
5061 +CONFIG_SENSORS_LTC4222=m
5062 +CONFIG_SENSORS_LTC4245=m
5063 +CONFIG_SENSORS_LTC4260=m
5064 +CONFIG_SENSORS_LTC4261=m
5065 +CONFIG_SENSORS_MAX1111=m
5066 +CONFIG_SENSORS_MAX127=m
5067 +CONFIG_SENSORS_MAX16065=m
5068 +CONFIG_SENSORS_MAX1619=m
5069 +CONFIG_SENSORS_MAX1668=m
5070 +CONFIG_SENSORS_MAX197=m
5071 +CONFIG_SENSORS_MAX31722=m
5072 +CONFIG_SENSORS_MAX31730=m
5073 +CONFIG_SENSORS_MAX6621=m
5074 +CONFIG_SENSORS_MAX6639=m
5075 +CONFIG_SENSORS_MAX6642=m
5076 +CONFIG_SENSORS_MAX6650=m
5077 +CONFIG_SENSORS_MAX6697=m
5078 +CONFIG_SENSORS_MAX31790=m
5079 +CONFIG_SENSORS_MCP3021=m
5080 +CONFIG_SENSORS_MLXREG_FAN=m
5081 +CONFIG_SENSORS_TC654=m
5082 +CONFIG_SENSORS_TPS23861=m
5083 +CONFIG_SENSORS_MENF21BMC_HWMON=m
5084 +CONFIG_SENSORS_MR75203=m
5085 +CONFIG_SENSORS_ADCXX=m
5086 +CONFIG_SENSORS_LM63=m
5087 +CONFIG_SENSORS_LM70=m
5088 +CONFIG_SENSORS_LM73=m
5089 +CONFIG_SENSORS_LM75=m
5090 +CONFIG_SENSORS_LM77=m
5091 +CONFIG_SENSORS_LM78=m
5092 +CONFIG_SENSORS_LM80=m
5093 +CONFIG_SENSORS_LM83=m
5094 +CONFIG_SENSORS_LM85=m
5095 +CONFIG_SENSORS_LM87=m
5096 +CONFIG_SENSORS_LM90=m
5097 +CONFIG_SENSORS_LM92=m
5098 +CONFIG_SENSORS_LM93=m
5099 +CONFIG_SENSORS_LM95234=m
5100 +CONFIG_SENSORS_LM95241=m
5101 +CONFIG_SENSORS_LM95245=m
5102 +CONFIG_SENSORS_PC87360=m
5103 +CONFIG_SENSORS_PC87427=m
5104 +CONFIG_SENSORS_NTC_THERMISTOR=m
5105 +CONFIG_SENSORS_NCT6683=m
5106 +CONFIG_SENSORS_NCT6775=m
5107 +CONFIG_SENSORS_NCT7802=m
5108 +CONFIG_SENSORS_NCT7904=m
5109 +CONFIG_SENSORS_NPCM7XX=m
5110 +CONFIG_SENSORS_PCF8591=m
5111 +CONFIG_PMBUS=m
5112 +CONFIG_SENSORS_PMBUS=m
5113 +CONFIG_SENSORS_ADM1266=m
5114 +CONFIG_SENSORS_ADM1275=m
5115 +CONFIG_SENSORS_BEL_PFE=m
5116 +CONFIG_SENSORS_IBM_CFFPS=m
5117 +CONFIG_SENSORS_INSPUR_IPSPS=m
5118 +CONFIG_SENSORS_IR35221=m
5119 +CONFIG_SENSORS_IR38064=m
5120 +CONFIG_SENSORS_IRPS5401=m
5121 +CONFIG_SENSORS_ISL68137=m
5122 +CONFIG_SENSORS_LM25066=m
5123 +CONFIG_SENSORS_LTC2978=m
5124 +CONFIG_SENSORS_LTC2978_REGULATOR=y
5125 +CONFIG_SENSORS_LTC3815=m
5126 +CONFIG_SENSORS_MAX16064=m
5127 +CONFIG_SENSORS_MAX16601=m
5128 +CONFIG_SENSORS_MAX20730=m
5129 +CONFIG_SENSORS_MAX20751=m
5130 +CONFIG_SENSORS_MAX31785=m
5131 +CONFIG_SENSORS_MAX34440=m
5132 +CONFIG_SENSORS_MAX8688=m
5133 +CONFIG_SENSORS_MP2975=m
5134 +CONFIG_SENSORS_PM6764TR=m
5135 +CONFIG_SENSORS_PXE1610=m
5136 +CONFIG_SENSORS_Q54SJ108A2=m
5137 +CONFIG_SENSORS_TPS40422=m
5138 +CONFIG_SENSORS_TPS53679=m
5139 +CONFIG_SENSORS_UCD9000=m
5140 +CONFIG_SENSORS_UCD9200=m
5141 +CONFIG_SENSORS_XDPE122=m
5142 +CONFIG_SENSORS_ZL6100=m
5143 +CONFIG_SENSORS_SBTSI=m
5144 +CONFIG_SENSORS_SHT15=m
5145 +CONFIG_SENSORS_SHT21=m
5146 +CONFIG_SENSORS_SHT3x=m
5147 +CONFIG_SENSORS_SHTC1=m
5148 +CONFIG_SENSORS_SIS5595=m
5149 +CONFIG_SENSORS_DME1737=m
5150 +CONFIG_SENSORS_EMC1403=m
5151 +CONFIG_SENSORS_EMC2103=m
5152 +CONFIG_SENSORS_EMC6W201=m
5153 +CONFIG_SENSORS_SMSC47M1=m
5154 +CONFIG_SENSORS_SMSC47M192=m
5155 +CONFIG_SENSORS_SMSC47B397=m
5156 +CONFIG_SENSORS_SCH56XX_COMMON=m
5157 +CONFIG_SENSORS_SCH5627=m
5158 +CONFIG_SENSORS_SCH5636=m
5159 +CONFIG_SENSORS_STTS751=m
5160 +CONFIG_SENSORS_SMM665=m
5161 +CONFIG_SENSORS_ADC128D818=m
5162 +CONFIG_SENSORS_ADS7828=m
5163 +CONFIG_SENSORS_ADS7871=m
5164 +CONFIG_SENSORS_AMC6821=m
5165 +CONFIG_SENSORS_INA209=m
5166 +CONFIG_SENSORS_INA2XX=m
5167 +CONFIG_SENSORS_INA3221=m
5168 +CONFIG_SENSORS_TC74=m
5169 +CONFIG_SENSORS_THMC50=m
5170 +CONFIG_SENSORS_TMP102=m
5171 +CONFIG_SENSORS_TMP103=m
5172 +CONFIG_SENSORS_TMP108=m
5173 +CONFIG_SENSORS_TMP401=m
5174 +CONFIG_SENSORS_TMP421=m
5175 +CONFIG_SENSORS_TMP513=m
5176 +CONFIG_SENSORS_VIA_CPUTEMP=m
5177 +CONFIG_SENSORS_VIA686A=m
5178 +CONFIG_SENSORS_VT1211=m
5179 +CONFIG_SENSORS_VT8231=m
5180 +CONFIG_SENSORS_W83773G=m
5181 +CONFIG_SENSORS_W83781D=m
5182 +CONFIG_SENSORS_W83791D=m
5183 +CONFIG_SENSORS_W83792D=m
5184 +CONFIG_SENSORS_W83793=m
5185 +CONFIG_SENSORS_W83795=m
5186 +# CONFIG_SENSORS_W83795_FANCTRL is not set
5187 +CONFIG_SENSORS_W83L785TS=m
5188 +CONFIG_SENSORS_W83L786NG=m
5189 +CONFIG_SENSORS_W83627HF=m
5190 +CONFIG_SENSORS_W83627EHF=m
5191 +CONFIG_SENSORS_WM831X=m
5192 +CONFIG_SENSORS_WM8350=m
5193 +CONFIG_SENSORS_XGENE=m
5194 +CONFIG_SENSORS_INTEL_M10_BMC_HWMON=m
5197 +# ACPI drivers
5199 +CONFIG_SENSORS_ACPI_POWER=m
5200 +CONFIG_SENSORS_ATK0110=m
5201 +CONFIG_THERMAL=y
5202 +CONFIG_THERMAL_NETLINK=y
5203 +CONFIG_THERMAL_STATISTICS=y
5204 +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
5205 +CONFIG_THERMAL_HWMON=y
5206 +CONFIG_THERMAL_WRITABLE_TRIPS=y
5207 +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
5208 +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
5209 +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
5210 +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
5211 +CONFIG_THERMAL_GOV_FAIR_SHARE=y
5212 +CONFIG_THERMAL_GOV_STEP_WISE=y
5213 +CONFIG_THERMAL_GOV_BANG_BANG=y
5214 +CONFIG_THERMAL_GOV_USER_SPACE=y
5215 +CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
5216 +CONFIG_DEVFREQ_THERMAL=y
5217 +CONFIG_THERMAL_EMULATION=y
5220 +# Intel thermal drivers
5222 +CONFIG_INTEL_POWERCLAMP=m
5223 +CONFIG_X86_THERMAL_VECTOR=y
5224 +CONFIG_X86_PKG_TEMP_THERMAL=m
5225 +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
5226 +CONFIG_INTEL_SOC_DTS_THERMAL=m
5229 +# ACPI INT340X thermal drivers
5231 +CONFIG_INT340X_THERMAL=m
5232 +CONFIG_ACPI_THERMAL_REL=m
5233 +CONFIG_INT3406_THERMAL=m
5234 +CONFIG_PROC_THERMAL_MMIO_RAPL=m
5235 +# end of ACPI INT340X thermal drivers
5237 +CONFIG_INTEL_BXT_PMIC_THERMAL=m
5238 +CONFIG_INTEL_PCH_THERMAL=m
5239 +# end of Intel thermal drivers
5241 +CONFIG_GENERIC_ADC_THERMAL=m
5242 +CONFIG_WATCHDOG=y
5243 +CONFIG_WATCHDOG_CORE=y
5244 +# CONFIG_WATCHDOG_NOWAYOUT is not set
5245 +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
5246 +CONFIG_WATCHDOG_OPEN_TIMEOUT=0
5247 +CONFIG_WATCHDOG_SYSFS=y
5250 +# Watchdog Pretimeout Governors
5252 +CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
5253 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_SEL=m
5254 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=y
5255 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=m
5256 +CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y
5257 +# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC is not set
5260 +# Watchdog Device Drivers
5262 +CONFIG_SOFT_WATCHDOG=m
5263 +CONFIG_SOFT_WATCHDOG_PRETIMEOUT=y
5264 +CONFIG_DA9052_WATCHDOG=m
5265 +CONFIG_DA9055_WATCHDOG=m
5266 +CONFIG_DA9063_WATCHDOG=m
5267 +CONFIG_DA9062_WATCHDOG=m
5268 +CONFIG_MENF21BMC_WATCHDOG=m
5269 +CONFIG_MENZ069_WATCHDOG=m
5270 +CONFIG_WDAT_WDT=m
5271 +CONFIG_WM831X_WATCHDOG=m
5272 +CONFIG_WM8350_WATCHDOG=m
5273 +CONFIG_XILINX_WATCHDOG=m
5274 +CONFIG_ZIIRAVE_WATCHDOG=m
5275 +CONFIG_RAVE_SP_WATCHDOG=m
5276 +CONFIG_MLX_WDT=m
5277 +CONFIG_CADENCE_WATCHDOG=m
5278 +CONFIG_DW_WATCHDOG=m
5279 +CONFIG_TWL4030_WATCHDOG=m
5280 +CONFIG_MAX63XX_WATCHDOG=m
5281 +CONFIG_RETU_WATCHDOG=m
5282 +CONFIG_ACQUIRE_WDT=m
5283 +CONFIG_ADVANTECH_WDT=m
5284 +CONFIG_ALIM1535_WDT=m
5285 +CONFIG_ALIM7101_WDT=m
5286 +CONFIG_EBC_C384_WDT=m
5287 +CONFIG_F71808E_WDT=m
5288 +CONFIG_SP5100_TCO=m
5289 +CONFIG_SBC_FITPC2_WATCHDOG=m
5290 +CONFIG_EUROTECH_WDT=m
5291 +CONFIG_IB700_WDT=m
5292 +CONFIG_IBMASR=m
5293 +CONFIG_WAFER_WDT=m
5294 +CONFIG_I6300ESB_WDT=m
5295 +CONFIG_IE6XX_WDT=m
5296 +CONFIG_ITCO_WDT=m
5297 +CONFIG_ITCO_VENDOR_SUPPORT=y
5298 +CONFIG_IT8712F_WDT=m
5299 +CONFIG_IT87_WDT=m
5300 +CONFIG_HP_WATCHDOG=m
5301 +CONFIG_HPWDT_NMI_DECODING=y
5302 +CONFIG_KEMPLD_WDT=m
5303 +CONFIG_SC1200_WDT=m
5304 +CONFIG_PC87413_WDT=m
5305 +CONFIG_NV_TCO=m
5306 +CONFIG_60XX_WDT=m
5307 +CONFIG_CPU5_WDT=m
5308 +CONFIG_SMSC_SCH311X_WDT=m
5309 +CONFIG_SMSC37B787_WDT=m
5310 +CONFIG_TQMX86_WDT=m
5311 +CONFIG_VIA_WDT=m
5312 +CONFIG_W83627HF_WDT=m
5313 +CONFIG_W83877F_WDT=m
5314 +CONFIG_W83977F_WDT=m
5315 +CONFIG_MACHZ_WDT=m
5316 +CONFIG_SBC_EPX_C3_WATCHDOG=m
5317 +CONFIG_INTEL_MEI_WDT=m
5318 +CONFIG_NI903X_WDT=m
5319 +CONFIG_NIC7018_WDT=m
5320 +CONFIG_MEN_A21_WDT=m
5321 +CONFIG_XEN_WDT=m
5324 +# PCI-based Watchdog Cards
5326 +CONFIG_PCIPCWATCHDOG=m
5327 +CONFIG_WDTPCI=m
5330 +# USB-based Watchdog Cards
5332 +CONFIG_USBPCWATCHDOG=m
5333 +CONFIG_SSB_POSSIBLE=y
5334 +CONFIG_SSB=m
5335 +CONFIG_SSB_SPROM=y
5336 +CONFIG_SSB_BLOCKIO=y
5337 +CONFIG_SSB_PCIHOST_POSSIBLE=y
5338 +CONFIG_SSB_PCIHOST=y
5339 +CONFIG_SSB_B43_PCI_BRIDGE=y
5340 +CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
5341 +# CONFIG_SSB_PCMCIAHOST is not set
5342 +CONFIG_SSB_SDIOHOST_POSSIBLE=y
5343 +CONFIG_SSB_SDIOHOST=y
5344 +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
5345 +CONFIG_SSB_DRIVER_PCICORE=y
5346 +CONFIG_SSB_DRIVER_GPIO=y
5347 +CONFIG_BCMA_POSSIBLE=y
5348 +CONFIG_BCMA=m
5349 +CONFIG_BCMA_BLOCKIO=y
5350 +CONFIG_BCMA_HOST_PCI_POSSIBLE=y
5351 +CONFIG_BCMA_HOST_PCI=y
5352 +CONFIG_BCMA_HOST_SOC=y
5353 +CONFIG_BCMA_DRIVER_PCI=y
5354 +CONFIG_BCMA_SFLASH=y
5355 +CONFIG_BCMA_DRIVER_GMAC_CMN=y
5356 +CONFIG_BCMA_DRIVER_GPIO=y
5357 +# CONFIG_BCMA_DEBUG is not set
5360 +# Multifunction device drivers
5362 +CONFIG_MFD_CORE=y
5363 +CONFIG_MFD_AS3711=y
5364 +CONFIG_PMIC_ADP5520=y
5365 +CONFIG_MFD_AAT2870_CORE=y
5366 +CONFIG_MFD_BCM590XX=m
5367 +CONFIG_MFD_BD9571MWV=m
5368 +CONFIG_MFD_AXP20X=m
5369 +CONFIG_MFD_AXP20X_I2C=m
5370 +CONFIG_MFD_CROS_EC_DEV=m
5371 +CONFIG_MFD_MADERA=m
5372 +CONFIG_MFD_MADERA_I2C=m
5373 +CONFIG_MFD_MADERA_SPI=m
5374 +CONFIG_MFD_CS47L15=y
5375 +CONFIG_MFD_CS47L35=y
5376 +CONFIG_MFD_CS47L85=y
5377 +CONFIG_MFD_CS47L90=y
5378 +CONFIG_MFD_CS47L92=y
5379 +CONFIG_PMIC_DA903X=y
5380 +CONFIG_PMIC_DA9052=y
5381 +CONFIG_MFD_DA9052_SPI=y
5382 +CONFIG_MFD_DA9052_I2C=y
5383 +CONFIG_MFD_DA9055=y
5384 +CONFIG_MFD_DA9062=m
5385 +CONFIG_MFD_DA9063=y
5386 +CONFIG_MFD_DA9150=m
5387 +CONFIG_MFD_DLN2=m
5388 +CONFIG_MFD_MC13XXX=m
5389 +CONFIG_MFD_MC13XXX_SPI=m
5390 +CONFIG_MFD_MC13XXX_I2C=m
5391 +CONFIG_MFD_MP2629=m
5392 +CONFIG_HTC_PASIC3=m
5393 +CONFIG_HTC_I2CPLD=y
5394 +CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m
5395 +CONFIG_LPC_ICH=m
5396 +CONFIG_LPC_SCH=m
5397 +CONFIG_INTEL_SOC_PMIC=y
5398 +CONFIG_INTEL_SOC_PMIC_BXTWC=m
5399 +CONFIG_INTEL_SOC_PMIC_CHTWC=y
5400 +CONFIG_INTEL_SOC_PMIC_CHTDC_TI=m
5401 +CONFIG_INTEL_SOC_PMIC_MRFLD=m
5402 +CONFIG_MFD_INTEL_LPSS=m
5403 +CONFIG_MFD_INTEL_LPSS_ACPI=m
5404 +CONFIG_MFD_INTEL_LPSS_PCI=m
5405 +CONFIG_MFD_INTEL_PMC_BXT=m
5406 +CONFIG_MFD_INTEL_PMT=m
5407 +CONFIG_MFD_IQS62X=m
5408 +CONFIG_MFD_JANZ_CMODIO=m
5409 +CONFIG_MFD_KEMPLD=m
5410 +CONFIG_MFD_88PM800=m
5411 +CONFIG_MFD_88PM805=m
5412 +CONFIG_MFD_88PM860X=y
5413 +CONFIG_MFD_MAX14577=y
5414 +CONFIG_MFD_MAX77693=y
5415 +CONFIG_MFD_MAX77843=y
5416 +CONFIG_MFD_MAX8907=m
5417 +CONFIG_MFD_MAX8925=y
5418 +CONFIG_MFD_MAX8997=y
5419 +CONFIG_MFD_MAX8998=y
5420 +CONFIG_MFD_MT6360=m
5421 +CONFIG_MFD_MT6397=m
5422 +CONFIG_MFD_MENF21BMC=m
5423 +CONFIG_EZX_PCAP=y
5424 +CONFIG_MFD_VIPERBOARD=m
5425 +CONFIG_MFD_RETU=m
5426 +CONFIG_MFD_PCF50633=m
5427 +CONFIG_PCF50633_ADC=m
5428 +CONFIG_PCF50633_GPIO=m
5429 +CONFIG_UCB1400_CORE=m
5430 +CONFIG_MFD_RDC321X=m
5431 +CONFIG_MFD_RT5033=m
5432 +CONFIG_MFD_RC5T583=y
5433 +CONFIG_MFD_SEC_CORE=y
5434 +CONFIG_MFD_SI476X_CORE=m
5435 +CONFIG_MFD_SM501=m
5436 +CONFIG_MFD_SM501_GPIO=y
5437 +CONFIG_MFD_SKY81452=m
5438 +CONFIG_ABX500_CORE=y
5439 +CONFIG_AB3100_CORE=y
5440 +CONFIG_AB3100_OTP=m
5441 +CONFIG_MFD_SYSCON=y
5442 +CONFIG_MFD_TI_AM335X_TSCADC=m
5443 +CONFIG_MFD_LP3943=m
5444 +CONFIG_MFD_LP8788=y
5445 +CONFIG_MFD_TI_LMU=m
5446 +CONFIG_MFD_PALMAS=y
5447 +CONFIG_TPS6105X=m
5448 +CONFIG_TPS65010=m
5449 +CONFIG_TPS6507X=m
5450 +CONFIG_MFD_TPS65086=m
5451 +CONFIG_MFD_TPS65090=y
5452 +CONFIG_MFD_TPS68470=y
5453 +CONFIG_MFD_TI_LP873X=m
5454 +CONFIG_MFD_TPS6586X=y
5455 +CONFIG_MFD_TPS65910=y
5456 +CONFIG_MFD_TPS65912=y
5457 +CONFIG_MFD_TPS65912_I2C=y
5458 +CONFIG_MFD_TPS65912_SPI=y
5459 +CONFIG_MFD_TPS80031=y
5460 +CONFIG_TWL4030_CORE=y
5461 +CONFIG_MFD_TWL4030_AUDIO=y
5462 +CONFIG_TWL6040_CORE=y
5463 +CONFIG_MFD_WL1273_CORE=m
5464 +CONFIG_MFD_LM3533=m
5465 +CONFIG_MFD_TQMX86=m
5466 +CONFIG_MFD_VX855=m
5467 +CONFIG_MFD_ARIZONA=y
5468 +CONFIG_MFD_ARIZONA_I2C=m
5469 +CONFIG_MFD_ARIZONA_SPI=m
5470 +CONFIG_MFD_CS47L24=y
5471 +CONFIG_MFD_WM5102=y
5472 +CONFIG_MFD_WM5110=y
5473 +CONFIG_MFD_WM8997=y
5474 +CONFIG_MFD_WM8998=y
5475 +CONFIG_MFD_WM8400=y
5476 +CONFIG_MFD_WM831X=y
5477 +CONFIG_MFD_WM831X_I2C=y
5478 +CONFIG_MFD_WM831X_SPI=y
5479 +CONFIG_MFD_WM8350=y
5480 +CONFIG_MFD_WM8350_I2C=y
5481 +CONFIG_MFD_WM8994=m
5482 +CONFIG_MFD_WCD934X=m
5483 +CONFIG_RAVE_SP_CORE=m
5484 +CONFIG_MFD_INTEL_M10_BMC=m
5485 +# end of Multifunction device drivers
5487 +CONFIG_REGULATOR=y
5488 +# CONFIG_REGULATOR_DEBUG is not set
5489 +CONFIG_REGULATOR_FIXED_VOLTAGE=m
5490 +CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
5491 +CONFIG_REGULATOR_USERSPACE_CONSUMER=m
5492 +CONFIG_REGULATOR_88PG86X=m
5493 +CONFIG_REGULATOR_88PM800=m
5494 +CONFIG_REGULATOR_88PM8607=m
5495 +CONFIG_REGULATOR_ACT8865=m
5496 +CONFIG_REGULATOR_AD5398=m
5497 +CONFIG_REGULATOR_AAT2870=m
5498 +CONFIG_REGULATOR_ARIZONA_LDO1=m
5499 +CONFIG_REGULATOR_ARIZONA_MICSUPP=m
5500 +CONFIG_REGULATOR_AS3711=m
5501 +CONFIG_REGULATOR_AXP20X=m
5502 +CONFIG_REGULATOR_BCM590XX=m
5503 +CONFIG_REGULATOR_BD9571MWV=m
5504 +CONFIG_REGULATOR_DA903X=m
5505 +CONFIG_REGULATOR_DA9052=m
5506 +CONFIG_REGULATOR_DA9055=m
5507 +CONFIG_REGULATOR_DA9062=m
5508 +CONFIG_REGULATOR_DA9210=m
5509 +CONFIG_REGULATOR_DA9211=m
5510 +CONFIG_REGULATOR_FAN53555=m
5511 +CONFIG_REGULATOR_GPIO=m
5512 +CONFIG_REGULATOR_ISL9305=m
5513 +CONFIG_REGULATOR_ISL6271A=m
5514 +CONFIG_REGULATOR_LM363X=m
5515 +CONFIG_REGULATOR_LP3971=m
5516 +CONFIG_REGULATOR_LP3972=m
5517 +CONFIG_REGULATOR_LP872X=m
5518 +CONFIG_REGULATOR_LP8755=m
5519 +CONFIG_REGULATOR_LP8788=m
5520 +CONFIG_REGULATOR_LTC3589=m
5521 +CONFIG_REGULATOR_LTC3676=m
5522 +CONFIG_REGULATOR_MAX14577=m
5523 +CONFIG_REGULATOR_MAX1586=m
5524 +CONFIG_REGULATOR_MAX8649=m
5525 +CONFIG_REGULATOR_MAX8660=m
5526 +CONFIG_REGULATOR_MAX8907=m
5527 +CONFIG_REGULATOR_MAX8925=m
5528 +CONFIG_REGULATOR_MAX8952=m
5529 +CONFIG_REGULATOR_MAX8997=m
5530 +CONFIG_REGULATOR_MAX8998=m
5531 +CONFIG_REGULATOR_MAX77693=m
5532 +CONFIG_REGULATOR_MAX77826=m
5533 +CONFIG_REGULATOR_MC13XXX_CORE=m
5534 +CONFIG_REGULATOR_MC13783=m
5535 +CONFIG_REGULATOR_MC13892=m
5536 +CONFIG_REGULATOR_MP8859=m
5537 +CONFIG_REGULATOR_MT6311=m
5538 +CONFIG_REGULATOR_MT6315=m
5539 +CONFIG_REGULATOR_MT6323=m
5540 +CONFIG_REGULATOR_MT6358=m
5541 +CONFIG_REGULATOR_MT6360=m
5542 +CONFIG_REGULATOR_MT6397=m
5543 +CONFIG_REGULATOR_PALMAS=m
5544 +CONFIG_REGULATOR_PCA9450=m
5545 +CONFIG_REGULATOR_PCAP=m
5546 +CONFIG_REGULATOR_PCF50633=m
5547 +CONFIG_REGULATOR_PV88060=m
5548 +CONFIG_REGULATOR_PV88080=m
5549 +CONFIG_REGULATOR_PV88090=m
5550 +CONFIG_REGULATOR_PWM=m
5551 +CONFIG_REGULATOR_QCOM_SPMI=m
5552 +CONFIG_REGULATOR_QCOM_USB_VBUS=m
5553 +CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY=m
5554 +CONFIG_REGULATOR_RC5T583=m
5555 +CONFIG_REGULATOR_RT4801=m
5556 +CONFIG_REGULATOR_RT5033=m
5557 +CONFIG_REGULATOR_RTMV20=m
5558 +CONFIG_REGULATOR_S2MPA01=m
5559 +CONFIG_REGULATOR_S2MPS11=m
5560 +CONFIG_REGULATOR_S5M8767=m
5561 +CONFIG_REGULATOR_SKY81452=m
5562 +CONFIG_REGULATOR_SLG51000=m
5563 +CONFIG_REGULATOR_TPS51632=m
5564 +CONFIG_REGULATOR_TPS6105X=m
5565 +CONFIG_REGULATOR_TPS62360=m
5566 +CONFIG_REGULATOR_TPS65023=m
5567 +CONFIG_REGULATOR_TPS6507X=m
5568 +CONFIG_REGULATOR_TPS65086=m
5569 +CONFIG_REGULATOR_TPS65090=m
5570 +CONFIG_REGULATOR_TPS65132=m
5571 +CONFIG_REGULATOR_TPS6524X=m
5572 +CONFIG_REGULATOR_TPS6586X=m
5573 +CONFIG_REGULATOR_TPS65910=m
5574 +CONFIG_REGULATOR_TPS65912=m
5575 +CONFIG_REGULATOR_TPS80031=m
5576 +CONFIG_REGULATOR_TWL4030=m
5577 +CONFIG_REGULATOR_WM831X=m
5578 +CONFIG_REGULATOR_WM8350=m
5579 +CONFIG_REGULATOR_WM8400=m
5580 +CONFIG_REGULATOR_WM8994=m
5581 +CONFIG_REGULATOR_QCOM_LABIBB=m
5582 +CONFIG_RC_CORE=m
5583 +CONFIG_RC_MAP=m
5584 +CONFIG_LIRC=y
5585 +CONFIG_RC_DECODERS=y
5586 +CONFIG_IR_NEC_DECODER=m
5587 +CONFIG_IR_RC5_DECODER=m
5588 +CONFIG_IR_RC6_DECODER=m
5589 +CONFIG_IR_JVC_DECODER=m
5590 +CONFIG_IR_SONY_DECODER=m
5591 +CONFIG_IR_SANYO_DECODER=m
5592 +CONFIG_IR_SHARP_DECODER=m
5593 +CONFIG_IR_MCE_KBD_DECODER=m
5594 +CONFIG_IR_XMP_DECODER=m
5595 +CONFIG_IR_IMON_DECODER=m
5596 +CONFIG_IR_RCMM_DECODER=m
5597 +CONFIG_RC_DEVICES=y
5598 +CONFIG_RC_ATI_REMOTE=m
5599 +CONFIG_IR_ENE=m
5600 +CONFIG_IR_IMON=m
5601 +CONFIG_IR_IMON_RAW=m
5602 +CONFIG_IR_MCEUSB=m
5603 +CONFIG_IR_ITE_CIR=m
5604 +CONFIG_IR_FINTEK=m
5605 +CONFIG_IR_NUVOTON=m
5606 +CONFIG_IR_REDRAT3=m
5607 +CONFIG_IR_STREAMZAP=m
5608 +CONFIG_IR_WINBOND_CIR=m
5609 +CONFIG_IR_IGORPLUGUSB=m
5610 +CONFIG_IR_IGUANA=m
5611 +CONFIG_IR_TTUSBIR=m
5612 +CONFIG_RC_LOOPBACK=m
5613 +CONFIG_IR_SERIAL=m
5614 +CONFIG_IR_SERIAL_TRANSMITTER=y
5615 +CONFIG_IR_SIR=m
5616 +CONFIG_RC_XBOX_DVD=m
5617 +CONFIG_IR_TOY=m
5618 +CONFIG_CEC_CORE=m
5619 +CONFIG_CEC_NOTIFIER=y
5620 +CONFIG_CEC_PIN=y
5621 +CONFIG_MEDIA_CEC_RC=y
5622 +# CONFIG_CEC_PIN_ERROR_INJ is not set
5623 +CONFIG_MEDIA_CEC_SUPPORT=y
5624 +CONFIG_CEC_CH7322=m
5625 +CONFIG_CEC_CROS_EC=m
5626 +CONFIG_CEC_GPIO=m
5627 +CONFIG_CEC_SECO=m
5628 +CONFIG_CEC_SECO_RC=y
5629 +CONFIG_USB_PULSE8_CEC=m
5630 +CONFIG_USB_RAINSHADOW_CEC=m
5631 +CONFIG_MEDIA_SUPPORT=m
5632 +CONFIG_MEDIA_SUPPORT_FILTER=y
5633 +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
5636 +# Media device types
5638 +CONFIG_MEDIA_CAMERA_SUPPORT=y
5639 +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
5640 +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
5641 +CONFIG_MEDIA_RADIO_SUPPORT=y
5642 +CONFIG_MEDIA_SDR_SUPPORT=y
5643 +CONFIG_MEDIA_PLATFORM_SUPPORT=y
5644 +CONFIG_MEDIA_TEST_SUPPORT=y
5645 +# end of Media device types
5647 +CONFIG_VIDEO_DEV=m
5648 +CONFIG_MEDIA_CONTROLLER=y
5649 +CONFIG_DVB_CORE=m
5652 +# Video4Linux options
5654 +CONFIG_VIDEO_V4L2=m
5655 +CONFIG_VIDEO_V4L2_I2C=y
5656 +CONFIG_VIDEO_V4L2_SUBDEV_API=y
5657 +# CONFIG_VIDEO_ADV_DEBUG is not set
5658 +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
5659 +CONFIG_VIDEO_TUNER=m
5660 +CONFIG_V4L2_MEM2MEM_DEV=m
5661 +CONFIG_V4L2_FLASH_LED_CLASS=m
5662 +CONFIG_V4L2_FWNODE=m
5663 +CONFIG_VIDEOBUF_GEN=m
5664 +CONFIG_VIDEOBUF_DMA_SG=m
5665 +CONFIG_VIDEOBUF_VMALLOC=m
5666 +# end of Video4Linux options
5669 +# Media controller options
5671 +CONFIG_MEDIA_CONTROLLER_DVB=y
5672 +CONFIG_MEDIA_CONTROLLER_REQUEST_API=y
5675 +# Please notice that the enabled Media controller Request API is EXPERIMENTAL
5677 +# end of Media controller options
5680 +# Digital TV options
5682 +# CONFIG_DVB_MMAP is not set
5683 +CONFIG_DVB_NET=y
5684 +CONFIG_DVB_MAX_ADAPTERS=8
5685 +CONFIG_DVB_DYNAMIC_MINORS=y
5686 +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
5687 +# CONFIG_DVB_ULE_DEBUG is not set
5688 +# end of Digital TV options
5691 +# Media drivers
5695 +# Drivers filtered as selected at 'Filter media drivers'
5697 +CONFIG_TTPCI_EEPROM=m
5698 +CONFIG_MEDIA_USB_SUPPORT=y
5701 +# Webcam devices
5703 +CONFIG_USB_VIDEO_CLASS=m
5704 +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
5705 +CONFIG_USB_GSPCA=m
5706 +CONFIG_USB_M5602=m
5707 +CONFIG_USB_STV06XX=m
5708 +CONFIG_USB_GL860=m
5709 +CONFIG_USB_GSPCA_BENQ=m
5710 +CONFIG_USB_GSPCA_CONEX=m
5711 +CONFIG_USB_GSPCA_CPIA1=m
5712 +CONFIG_USB_GSPCA_DTCS033=m
5713 +CONFIG_USB_GSPCA_ETOMS=m
5714 +CONFIG_USB_GSPCA_FINEPIX=m
5715 +CONFIG_USB_GSPCA_JEILINJ=m
5716 +CONFIG_USB_GSPCA_JL2005BCD=m
5717 +CONFIG_USB_GSPCA_KINECT=m
5718 +CONFIG_USB_GSPCA_KONICA=m
5719 +CONFIG_USB_GSPCA_MARS=m
5720 +CONFIG_USB_GSPCA_MR97310A=m
5721 +CONFIG_USB_GSPCA_NW80X=m
5722 +CONFIG_USB_GSPCA_OV519=m
5723 +CONFIG_USB_GSPCA_OV534=m
5724 +CONFIG_USB_GSPCA_OV534_9=m
5725 +CONFIG_USB_GSPCA_PAC207=m
5726 +CONFIG_USB_GSPCA_PAC7302=m
5727 +CONFIG_USB_GSPCA_PAC7311=m
5728 +CONFIG_USB_GSPCA_SE401=m
5729 +CONFIG_USB_GSPCA_SN9C2028=m
5730 +CONFIG_USB_GSPCA_SN9C20X=m
5731 +CONFIG_USB_GSPCA_SONIXB=m
5732 +CONFIG_USB_GSPCA_SONIXJ=m
5733 +CONFIG_USB_GSPCA_SPCA500=m
5734 +CONFIG_USB_GSPCA_SPCA501=m
5735 +CONFIG_USB_GSPCA_SPCA505=m
5736 +CONFIG_USB_GSPCA_SPCA506=m
5737 +CONFIG_USB_GSPCA_SPCA508=m
5738 +CONFIG_USB_GSPCA_SPCA561=m
5739 +CONFIG_USB_GSPCA_SPCA1528=m
5740 +CONFIG_USB_GSPCA_SQ905=m
5741 +CONFIG_USB_GSPCA_SQ905C=m
5742 +CONFIG_USB_GSPCA_SQ930X=m
5743 +CONFIG_USB_GSPCA_STK014=m
5744 +CONFIG_USB_GSPCA_STK1135=m
5745 +CONFIG_USB_GSPCA_STV0680=m
5746 +CONFIG_USB_GSPCA_SUNPLUS=m
5747 +CONFIG_USB_GSPCA_T613=m
5748 +CONFIG_USB_GSPCA_TOPRO=m
5749 +CONFIG_USB_GSPCA_TOUPTEK=m
5750 +CONFIG_USB_GSPCA_TV8532=m
5751 +CONFIG_USB_GSPCA_VC032X=m
5752 +CONFIG_USB_GSPCA_VICAM=m
5753 +CONFIG_USB_GSPCA_XIRLINK_CIT=m
5754 +CONFIG_USB_GSPCA_ZC3XX=m
5755 +CONFIG_USB_PWC=m
5756 +# CONFIG_USB_PWC_DEBUG is not set
5757 +CONFIG_USB_PWC_INPUT_EVDEV=y
5758 +CONFIG_VIDEO_CPIA2=m
5759 +CONFIG_USB_ZR364XX=m
5760 +CONFIG_USB_STKWEBCAM=m
5761 +CONFIG_USB_S2255=m
5762 +CONFIG_VIDEO_USBTV=m
5765 +# Analog TV USB devices
5767 +CONFIG_VIDEO_PVRUSB2=m
5768 +CONFIG_VIDEO_PVRUSB2_SYSFS=y
5769 +CONFIG_VIDEO_PVRUSB2_DVB=y
5770 +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
5771 +CONFIG_VIDEO_HDPVR=m
5772 +CONFIG_VIDEO_STK1160_COMMON=m
5773 +CONFIG_VIDEO_STK1160=m
5774 +CONFIG_VIDEO_GO7007=m
5775 +CONFIG_VIDEO_GO7007_USB=m
5776 +CONFIG_VIDEO_GO7007_LOADER=m
5777 +CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m
5780 +# Analog/digital TV USB devices
5782 +CONFIG_VIDEO_AU0828=m
5783 +CONFIG_VIDEO_AU0828_V4L2=y
5784 +CONFIG_VIDEO_AU0828_RC=y
5785 +CONFIG_VIDEO_CX231XX=m
5786 +CONFIG_VIDEO_CX231XX_RC=y
5787 +CONFIG_VIDEO_CX231XX_ALSA=m
5788 +CONFIG_VIDEO_CX231XX_DVB=m
5789 +CONFIG_VIDEO_TM6000=m
5790 +CONFIG_VIDEO_TM6000_ALSA=m
5791 +CONFIG_VIDEO_TM6000_DVB=m
5794 +# Digital TV USB devices
5796 +CONFIG_DVB_USB=m
5797 +# CONFIG_DVB_USB_DEBUG is not set
5798 +CONFIG_DVB_USB_DIB3000MC=m
5799 +CONFIG_DVB_USB_A800=m
5800 +CONFIG_DVB_USB_DIBUSB_MB=m
5801 +# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
5802 +CONFIG_DVB_USB_DIBUSB_MC=m
5803 +CONFIG_DVB_USB_DIB0700=m
5804 +CONFIG_DVB_USB_UMT_010=m
5805 +CONFIG_DVB_USB_CXUSB=m
5806 +CONFIG_DVB_USB_CXUSB_ANALOG=y
5807 +CONFIG_DVB_USB_M920X=m
5808 +CONFIG_DVB_USB_DIGITV=m
5809 +CONFIG_DVB_USB_VP7045=m
5810 +CONFIG_DVB_USB_VP702X=m
5811 +CONFIG_DVB_USB_GP8PSK=m
5812 +CONFIG_DVB_USB_NOVA_T_USB2=m
5813 +CONFIG_DVB_USB_TTUSB2=m
5814 +CONFIG_DVB_USB_DTT200U=m
5815 +CONFIG_DVB_USB_OPERA1=m
5816 +CONFIG_DVB_USB_AF9005=m
5817 +CONFIG_DVB_USB_AF9005_REMOTE=m
5818 +CONFIG_DVB_USB_PCTV452E=m
5819 +CONFIG_DVB_USB_DW2102=m
5820 +CONFIG_DVB_USB_CINERGY_T2=m
5821 +CONFIG_DVB_USB_DTV5100=m
5822 +CONFIG_DVB_USB_AZ6027=m
5823 +CONFIG_DVB_USB_TECHNISAT_USB2=m
5824 +CONFIG_DVB_USB_V2=m
5825 +CONFIG_DVB_USB_AF9015=m
5826 +CONFIG_DVB_USB_AF9035=m
5827 +CONFIG_DVB_USB_ANYSEE=m
5828 +CONFIG_DVB_USB_AU6610=m
5829 +CONFIG_DVB_USB_AZ6007=m
5830 +CONFIG_DVB_USB_CE6230=m
5831 +CONFIG_DVB_USB_EC168=m
5832 +CONFIG_DVB_USB_GL861=m
5833 +CONFIG_DVB_USB_LME2510=m
5834 +CONFIG_DVB_USB_MXL111SF=m
5835 +CONFIG_DVB_USB_RTL28XXU=m
5836 +CONFIG_DVB_USB_DVBSKY=m
5837 +CONFIG_DVB_USB_ZD1301=m
5838 +CONFIG_DVB_TTUSB_BUDGET=m
5839 +CONFIG_DVB_TTUSB_DEC=m
5840 +CONFIG_SMS_USB_DRV=m
5841 +CONFIG_DVB_B2C2_FLEXCOP_USB=m
5842 +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
5843 +CONFIG_DVB_AS102=m
5846 +# Webcam, TV (analog/digital) USB devices
5848 +CONFIG_VIDEO_EM28XX=m
5849 +CONFIG_VIDEO_EM28XX_V4L2=m
5850 +CONFIG_VIDEO_EM28XX_ALSA=m
5851 +CONFIG_VIDEO_EM28XX_DVB=m
5852 +CONFIG_VIDEO_EM28XX_RC=m
5855 +# Software defined radio USB devices
5857 +CONFIG_USB_AIRSPY=m
5858 +CONFIG_USB_HACKRF=m
5859 +CONFIG_USB_MSI2500=m
5860 +CONFIG_MEDIA_PCI_SUPPORT=y
5863 +# Media capture support
5865 +CONFIG_VIDEO_MEYE=m
5866 +CONFIG_VIDEO_SOLO6X10=m
5867 +CONFIG_VIDEO_TW5864=m
5868 +CONFIG_VIDEO_TW68=m
5869 +CONFIG_VIDEO_TW686X=m
5872 +# Media capture/analog TV support
5874 +CONFIG_VIDEO_IVTV=m
5875 +# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set
5876 +CONFIG_VIDEO_IVTV_ALSA=m
5877 +CONFIG_VIDEO_FB_IVTV=m
5878 +CONFIG_VIDEO_FB_IVTV_FORCE_PAT=y
5879 +CONFIG_VIDEO_HEXIUM_GEMINI=m
5880 +CONFIG_VIDEO_HEXIUM_ORION=m
5881 +CONFIG_VIDEO_MXB=m
5882 +CONFIG_VIDEO_DT3155=m
5885 +# Media capture/analog/hybrid TV support
5887 +CONFIG_VIDEO_CX18=m
5888 +CONFIG_VIDEO_CX18_ALSA=m
5889 +CONFIG_VIDEO_CX23885=m
5890 +CONFIG_MEDIA_ALTERA_CI=m
5891 +CONFIG_VIDEO_CX25821=m
5892 +CONFIG_VIDEO_CX25821_ALSA=m
5893 +CONFIG_VIDEO_CX88=m
5894 +CONFIG_VIDEO_CX88_ALSA=m
5895 +CONFIG_VIDEO_CX88_BLACKBIRD=m
5896 +CONFIG_VIDEO_CX88_DVB=m
5897 +CONFIG_VIDEO_CX88_ENABLE_VP3054=y
5898 +CONFIG_VIDEO_CX88_VP3054=m
5899 +CONFIG_VIDEO_CX88_MPEG=m
5900 +CONFIG_VIDEO_BT848=m
5901 +CONFIG_DVB_BT8XX=m
5902 +CONFIG_VIDEO_SAA7134=m
5903 +CONFIG_VIDEO_SAA7134_ALSA=m
5904 +CONFIG_VIDEO_SAA7134_RC=y
5905 +CONFIG_VIDEO_SAA7134_DVB=m
5906 +CONFIG_VIDEO_SAA7134_GO7007=m
5907 +CONFIG_VIDEO_SAA7164=m
5908 +CONFIG_VIDEO_COBALT=m
5911 +# Media digital TV PCI Adapters
5913 +CONFIG_DVB_AV7110_IR=y
5914 +CONFIG_DVB_AV7110=m
5915 +CONFIG_DVB_AV7110_OSD=y
5916 +CONFIG_DVB_BUDGET_CORE=m
5917 +CONFIG_DVB_BUDGET=m
5918 +CONFIG_DVB_BUDGET_CI=m
5919 +CONFIG_DVB_BUDGET_AV=m
5920 +CONFIG_DVB_BUDGET_PATCH=m
5921 +CONFIG_DVB_B2C2_FLEXCOP_PCI=m
5922 +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
5923 +CONFIG_DVB_PLUTO2=m
5924 +CONFIG_DVB_DM1105=m
5925 +CONFIG_DVB_PT1=m
5926 +CONFIG_DVB_PT3=m
5927 +CONFIG_MANTIS_CORE=m
5928 +CONFIG_DVB_MANTIS=m
5929 +CONFIG_DVB_HOPPER=m
5930 +CONFIG_DVB_NGENE=m
5931 +CONFIG_DVB_DDBRIDGE=m
5932 +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set
5933 +CONFIG_DVB_SMIPCIE=m
5934 +CONFIG_DVB_NETUP_UNIDVB=m
5935 +CONFIG_VIDEO_IPU3_CIO2=m
5936 +CONFIG_CIO2_BRIDGE=y
5937 +# CONFIG_VIDEO_PCI_SKELETON is not set
5938 +CONFIG_RADIO_ADAPTERS=y
5939 +CONFIG_RADIO_TEA575X=m
5940 +CONFIG_RADIO_SI470X=m
5941 +CONFIG_USB_SI470X=m
5942 +CONFIG_I2C_SI470X=m
5943 +CONFIG_RADIO_SI4713=m
5944 +CONFIG_USB_SI4713=m
5945 +CONFIG_PLATFORM_SI4713=m
5946 +CONFIG_I2C_SI4713=m
5947 +CONFIG_RADIO_SI476X=m
5948 +CONFIG_USB_MR800=m
5949 +CONFIG_USB_DSBR=m
5950 +CONFIG_RADIO_MAXIRADIO=m
5951 +CONFIG_RADIO_SHARK=m
5952 +CONFIG_RADIO_SHARK2=m
5953 +CONFIG_USB_KEENE=m
5954 +CONFIG_USB_RAREMONO=m
5955 +CONFIG_USB_MA901=m
5956 +CONFIG_RADIO_TEA5764=m
5957 +CONFIG_RADIO_SAA7706H=m
5958 +CONFIG_RADIO_TEF6862=m
5959 +CONFIG_RADIO_WL1273=m
5960 +CONFIG_RADIO_WL128X=m
5961 +CONFIG_MEDIA_COMMON_OPTIONS=y
5964 +# common driver options
5966 +CONFIG_VIDEO_CX2341X=m
5967 +CONFIG_VIDEO_TVEEPROM=m
5968 +CONFIG_CYPRESS_FIRMWARE=m
5969 +CONFIG_VIDEOBUF2_CORE=m
5970 +CONFIG_VIDEOBUF2_V4L2=m
5971 +CONFIG_VIDEOBUF2_MEMOPS=m
5972 +CONFIG_VIDEOBUF2_DMA_CONTIG=m
5973 +CONFIG_VIDEOBUF2_VMALLOC=m
5974 +CONFIG_VIDEOBUF2_DMA_SG=m
5975 +CONFIG_VIDEOBUF2_DVB=m
5976 +CONFIG_DVB_B2C2_FLEXCOP=m
5977 +CONFIG_VIDEO_SAA7146=m
5978 +CONFIG_VIDEO_SAA7146_VV=m
5979 +CONFIG_SMS_SIANO_MDTV=m
5980 +CONFIG_SMS_SIANO_RC=y
5981 +CONFIG_SMS_SIANO_DEBUGFS=y
5982 +CONFIG_VIDEO_V4L2_TPG=m
5983 +CONFIG_V4L_PLATFORM_DRIVERS=y
5984 +CONFIG_VIDEO_CAFE_CCIC=m
5985 +CONFIG_VIDEO_VIA_CAMERA=m
5986 +CONFIG_VIDEO_CADENCE=y
5987 +CONFIG_VIDEO_CADENCE_CSI2RX=m
5988 +CONFIG_VIDEO_CADENCE_CSI2TX=m
5989 +CONFIG_VIDEO_ASPEED=m
5990 +CONFIG_V4L_MEM2MEM_DRIVERS=y
5991 +CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
5992 +CONFIG_DVB_PLATFORM_DRIVERS=y
5993 +CONFIG_SDR_PLATFORM_DRIVERS=y
5996 +# MMC/SDIO DVB adapters
5998 +CONFIG_SMS_SDIO_DRV=m
5999 +CONFIG_V4L_TEST_DRIVERS=y
6000 +CONFIG_VIDEO_VIMC=m
6001 +CONFIG_VIDEO_VIVID=m
6002 +CONFIG_VIDEO_VIVID_CEC=y
6003 +CONFIG_VIDEO_VIVID_MAX_DEVS=64
6004 +CONFIG_VIDEO_VIM2M=m
6005 +CONFIG_VIDEO_VICODEC=m
6006 +# CONFIG_DVB_TEST_DRIVERS is not set
6009 +# FireWire (IEEE 1394) Adapters
6011 +CONFIG_DVB_FIREDTV=m
6012 +CONFIG_DVB_FIREDTV_INPUT=y
6013 +# end of Media drivers
6016 +# Media ancillary drivers
6018 +CONFIG_MEDIA_ATTACH=y
6021 +# IR I2C driver auto-selected by 'Autoselect ancillary drivers'
6023 +CONFIG_VIDEO_IR_I2C=m
6026 +# Audio decoders, processors and mixers
6028 +CONFIG_VIDEO_TVAUDIO=m
6029 +CONFIG_VIDEO_TDA7432=m
6030 +CONFIG_VIDEO_TDA9840=m
6031 +CONFIG_VIDEO_TDA1997X=m
6032 +CONFIG_VIDEO_TEA6415C=m
6033 +CONFIG_VIDEO_TEA6420=m
6034 +CONFIG_VIDEO_MSP3400=m
6035 +CONFIG_VIDEO_CS3308=m
6036 +CONFIG_VIDEO_CS5345=m
6037 +CONFIG_VIDEO_CS53L32A=m
6038 +CONFIG_VIDEO_TLV320AIC23B=m
6039 +CONFIG_VIDEO_UDA1342=m
6040 +CONFIG_VIDEO_WM8775=m
6041 +CONFIG_VIDEO_WM8739=m
6042 +CONFIG_VIDEO_VP27SMPX=m
6043 +CONFIG_VIDEO_SONY_BTF_MPX=m
6044 +# end of Audio decoders, processors and mixers
6047 +# RDS decoders
6049 +CONFIG_VIDEO_SAA6588=m
6050 +# end of RDS decoders
6053 +# Video decoders
6055 +CONFIG_VIDEO_ADV7180=m
6056 +CONFIG_VIDEO_ADV7183=m
6057 +CONFIG_VIDEO_ADV7604=m
6058 +CONFIG_VIDEO_ADV7604_CEC=y
6059 +CONFIG_VIDEO_ADV7842=m
6060 +CONFIG_VIDEO_ADV7842_CEC=y
6061 +CONFIG_VIDEO_BT819=m
6062 +CONFIG_VIDEO_BT856=m
6063 +CONFIG_VIDEO_BT866=m
6064 +CONFIG_VIDEO_KS0127=m
6065 +CONFIG_VIDEO_ML86V7667=m
6066 +CONFIG_VIDEO_SAA7110=m
6067 +CONFIG_VIDEO_SAA711X=m
6068 +CONFIG_VIDEO_TC358743=m
6069 +CONFIG_VIDEO_TC358743_CEC=y
6070 +CONFIG_VIDEO_TVP514X=m
6071 +CONFIG_VIDEO_TVP5150=m
6072 +CONFIG_VIDEO_TVP7002=m
6073 +CONFIG_VIDEO_TW2804=m
6074 +CONFIG_VIDEO_TW9903=m
6075 +CONFIG_VIDEO_TW9906=m
6076 +CONFIG_VIDEO_TW9910=m
6077 +CONFIG_VIDEO_VPX3220=m
6080 +# Video and audio decoders
6082 +CONFIG_VIDEO_SAA717X=m
6083 +CONFIG_VIDEO_CX25840=m
6084 +# end of Video decoders
6087 +# Video encoders
6089 +CONFIG_VIDEO_SAA7127=m
6090 +CONFIG_VIDEO_SAA7185=m
6091 +CONFIG_VIDEO_ADV7170=m
6092 +CONFIG_VIDEO_ADV7175=m
6093 +CONFIG_VIDEO_ADV7343=m
6094 +CONFIG_VIDEO_ADV7393=m
6095 +CONFIG_VIDEO_ADV7511=m
6096 +CONFIG_VIDEO_ADV7511_CEC=y
6097 +CONFIG_VIDEO_AD9389B=m
6098 +CONFIG_VIDEO_AK881X=m
6099 +CONFIG_VIDEO_THS8200=m
6100 +# end of Video encoders
6103 +# Video improvement chips
6105 +CONFIG_VIDEO_UPD64031A=m
6106 +CONFIG_VIDEO_UPD64083=m
6107 +# end of Video improvement chips
6110 +# Audio/Video compression chips
6112 +CONFIG_VIDEO_SAA6752HS=m
6113 +# end of Audio/Video compression chips
6116 +# SDR tuner chips
6118 +CONFIG_SDR_MAX2175=m
6119 +# end of SDR tuner chips
6122 +# Miscellaneous helper chips
6124 +CONFIG_VIDEO_THS7303=m
6125 +CONFIG_VIDEO_M52790=m
6126 +CONFIG_VIDEO_I2C=m
6127 +CONFIG_VIDEO_ST_MIPID02=m
6128 +# end of Miscellaneous helper chips
6131 +# Camera sensor devices
6133 +CONFIG_VIDEO_APTINA_PLL=m
6134 +CONFIG_VIDEO_CCS_PLL=m
6135 +CONFIG_VIDEO_HI556=m
6136 +CONFIG_VIDEO_IMX214=m
6137 +CONFIG_VIDEO_IMX219=m
6138 +CONFIG_VIDEO_IMX258=m
6139 +CONFIG_VIDEO_IMX274=m
6140 +CONFIG_VIDEO_IMX290=m
6141 +CONFIG_VIDEO_IMX319=m
6142 +CONFIG_VIDEO_IMX355=m
6143 +CONFIG_VIDEO_OV02A10=m
6144 +CONFIG_VIDEO_OV2640=m
6145 +CONFIG_VIDEO_OV2659=m
6146 +CONFIG_VIDEO_OV2680=m
6147 +CONFIG_VIDEO_OV2685=m
6148 +CONFIG_VIDEO_OV2740=m
6149 +CONFIG_VIDEO_OV5647=m
6150 +CONFIG_VIDEO_OV5648=m
6151 +CONFIG_VIDEO_OV6650=m
6152 +CONFIG_VIDEO_OV5670=m
6153 +CONFIG_VIDEO_OV5675=m
6154 +CONFIG_VIDEO_OV5695=m
6155 +CONFIG_VIDEO_OV7251=m
6156 +CONFIG_VIDEO_OV772X=m
6157 +CONFIG_VIDEO_OV7640=m
6158 +CONFIG_VIDEO_OV7670=m
6159 +CONFIG_VIDEO_OV7740=m
6160 +CONFIG_VIDEO_OV8856=m
6161 +CONFIG_VIDEO_OV8865=m
6162 +CONFIG_VIDEO_OV9640=m
6163 +CONFIG_VIDEO_OV9650=m
6164 +CONFIG_VIDEO_OV9734=m
6165 +CONFIG_VIDEO_OV13858=m
6166 +CONFIG_VIDEO_VS6624=m
6167 +CONFIG_VIDEO_MT9M001=m
6168 +CONFIG_VIDEO_MT9M032=m
6169 +CONFIG_VIDEO_MT9M111=m
6170 +CONFIG_VIDEO_MT9P031=m
6171 +CONFIG_VIDEO_MT9T001=m
6172 +CONFIG_VIDEO_MT9T112=m
6173 +CONFIG_VIDEO_MT9V011=m
6174 +CONFIG_VIDEO_MT9V032=m
6175 +CONFIG_VIDEO_MT9V111=m
6176 +CONFIG_VIDEO_SR030PC30=m
6177 +CONFIG_VIDEO_NOON010PC30=m
6178 +CONFIG_VIDEO_M5MOLS=m
6179 +CONFIG_VIDEO_MAX9271_LIB=m
6180 +CONFIG_VIDEO_RDACM20=m
6181 +CONFIG_VIDEO_RDACM21=m
6182 +CONFIG_VIDEO_RJ54N1=m
6183 +CONFIG_VIDEO_S5K6AA=m
6184 +CONFIG_VIDEO_S5K6A3=m
6185 +CONFIG_VIDEO_S5K4ECGX=m
6186 +CONFIG_VIDEO_S5K5BAF=m
6187 +CONFIG_VIDEO_CCS=m
6188 +CONFIG_VIDEO_ET8EK8=m
6189 +CONFIG_VIDEO_S5C73M3=m
6190 +# end of Camera sensor devices
6193 +# Lens drivers
6195 +CONFIG_VIDEO_AD5820=m
6196 +CONFIG_VIDEO_AK7375=m
6197 +CONFIG_VIDEO_DW9714=m
6198 +CONFIG_VIDEO_DW9768=m
6199 +CONFIG_VIDEO_DW9807_VCM=m
6200 +# end of Lens drivers
6203 +# Flash devices
6205 +CONFIG_VIDEO_ADP1653=m
6206 +CONFIG_VIDEO_LM3560=m
6207 +CONFIG_VIDEO_LM3646=m
6208 +# end of Flash devices
6211 +# SPI helper chips
6213 +CONFIG_VIDEO_GS1662=m
6214 +# end of SPI helper chips
6217 +# Media SPI Adapters
6219 +CONFIG_CXD2880_SPI_DRV=m
6220 +# end of Media SPI Adapters
6222 +CONFIG_MEDIA_TUNER=m
6225 +# Customize TV tuners
6227 +CONFIG_MEDIA_TUNER_SIMPLE=m
6228 +CONFIG_MEDIA_TUNER_TDA18250=m
6229 +CONFIG_MEDIA_TUNER_TDA8290=m
6230 +CONFIG_MEDIA_TUNER_TDA827X=m
6231 +CONFIG_MEDIA_TUNER_TDA18271=m
6232 +CONFIG_MEDIA_TUNER_TDA9887=m
6233 +CONFIG_MEDIA_TUNER_TEA5761=m
6234 +CONFIG_MEDIA_TUNER_TEA5767=m
6235 +CONFIG_MEDIA_TUNER_MSI001=m
6236 +CONFIG_MEDIA_TUNER_MT20XX=m
6237 +CONFIG_MEDIA_TUNER_MT2060=m
6238 +CONFIG_MEDIA_TUNER_MT2063=m
6239 +CONFIG_MEDIA_TUNER_MT2266=m
6240 +CONFIG_MEDIA_TUNER_MT2131=m
6241 +CONFIG_MEDIA_TUNER_QT1010=m
6242 +CONFIG_MEDIA_TUNER_XC2028=m
6243 +CONFIG_MEDIA_TUNER_XC5000=m
6244 +CONFIG_MEDIA_TUNER_XC4000=m
6245 +CONFIG_MEDIA_TUNER_MXL5005S=m
6246 +CONFIG_MEDIA_TUNER_MXL5007T=m
6247 +CONFIG_MEDIA_TUNER_MC44S803=m
6248 +CONFIG_MEDIA_TUNER_MAX2165=m
6249 +CONFIG_MEDIA_TUNER_TDA18218=m
6250 +CONFIG_MEDIA_TUNER_FC0011=m
6251 +CONFIG_MEDIA_TUNER_FC0012=m
6252 +CONFIG_MEDIA_TUNER_FC0013=m
6253 +CONFIG_MEDIA_TUNER_TDA18212=m
6254 +CONFIG_MEDIA_TUNER_E4000=m
6255 +CONFIG_MEDIA_TUNER_FC2580=m
6256 +CONFIG_MEDIA_TUNER_M88RS6000T=m
6257 +CONFIG_MEDIA_TUNER_TUA9001=m
6258 +CONFIG_MEDIA_TUNER_SI2157=m
6259 +CONFIG_MEDIA_TUNER_IT913X=m
6260 +CONFIG_MEDIA_TUNER_R820T=m
6261 +CONFIG_MEDIA_TUNER_MXL301RF=m
6262 +CONFIG_MEDIA_TUNER_QM1D1C0042=m
6263 +CONFIG_MEDIA_TUNER_QM1D1B0004=m
6264 +# end of Customize TV tuners
6267 +# Customise DVB Frontends
6271 +# Multistandard (satellite) frontends
6273 +CONFIG_DVB_STB0899=m
6274 +CONFIG_DVB_STB6100=m
6275 +CONFIG_DVB_STV090x=m
6276 +CONFIG_DVB_STV0910=m
6277 +CONFIG_DVB_STV6110x=m
6278 +CONFIG_DVB_STV6111=m
6279 +CONFIG_DVB_MXL5XX=m
6280 +CONFIG_DVB_M88DS3103=m
6283 +# Multistandard (cable + terrestrial) frontends
6285 +CONFIG_DVB_DRXK=m
6286 +CONFIG_DVB_TDA18271C2DD=m
6287 +CONFIG_DVB_SI2165=m
6288 +CONFIG_DVB_MN88472=m
6289 +CONFIG_DVB_MN88473=m
6292 +# DVB-S (satellite) frontends
6294 +CONFIG_DVB_CX24110=m
6295 +CONFIG_DVB_CX24123=m
6296 +CONFIG_DVB_MT312=m
6297 +CONFIG_DVB_ZL10036=m
6298 +CONFIG_DVB_ZL10039=m
6299 +CONFIG_DVB_S5H1420=m
6300 +CONFIG_DVB_STV0288=m
6301 +CONFIG_DVB_STB6000=m
6302 +CONFIG_DVB_STV0299=m
6303 +CONFIG_DVB_STV6110=m
6304 +CONFIG_DVB_STV0900=m
6305 +CONFIG_DVB_TDA8083=m
6306 +CONFIG_DVB_TDA10086=m
6307 +CONFIG_DVB_TDA8261=m
6308 +CONFIG_DVB_VES1X93=m
6309 +CONFIG_DVB_TUNER_ITD1000=m
6310 +CONFIG_DVB_TUNER_CX24113=m
6311 +CONFIG_DVB_TDA826X=m
6312 +CONFIG_DVB_TUA6100=m
6313 +CONFIG_DVB_CX24116=m
6314 +CONFIG_DVB_CX24117=m
6315 +CONFIG_DVB_CX24120=m
6316 +CONFIG_DVB_SI21XX=m
6317 +CONFIG_DVB_TS2020=m
6318 +CONFIG_DVB_DS3000=m
6319 +CONFIG_DVB_MB86A16=m
6320 +CONFIG_DVB_TDA10071=m
6323 +# DVB-T (terrestrial) frontends
6325 +CONFIG_DVB_SP8870=m
6326 +CONFIG_DVB_SP887X=m
6327 +CONFIG_DVB_CX22700=m
6328 +CONFIG_DVB_CX22702=m
6329 +CONFIG_DVB_S5H1432=m
6330 +CONFIG_DVB_DRXD=m
6331 +CONFIG_DVB_L64781=m
6332 +CONFIG_DVB_TDA1004X=m
6333 +CONFIG_DVB_NXT6000=m
6334 +CONFIG_DVB_MT352=m
6335 +CONFIG_DVB_ZL10353=m
6336 +CONFIG_DVB_DIB3000MB=m
6337 +CONFIG_DVB_DIB3000MC=m
6338 +CONFIG_DVB_DIB7000M=m
6339 +CONFIG_DVB_DIB7000P=m
6340 +CONFIG_DVB_DIB9000=m
6341 +CONFIG_DVB_TDA10048=m
6342 +CONFIG_DVB_AF9013=m
6343 +CONFIG_DVB_EC100=m
6344 +CONFIG_DVB_STV0367=m
6345 +CONFIG_DVB_CXD2820R=m
6346 +CONFIG_DVB_CXD2841ER=m
6347 +CONFIG_DVB_RTL2830=m
6348 +CONFIG_DVB_RTL2832=m
6349 +CONFIG_DVB_RTL2832_SDR=m
6350 +CONFIG_DVB_SI2168=m
6351 +CONFIG_DVB_AS102_FE=m
6352 +CONFIG_DVB_ZD1301_DEMOD=m
6353 +CONFIG_DVB_GP8PSK_FE=m
6354 +CONFIG_DVB_CXD2880=m
6357 +# DVB-C (cable) frontends
6359 +CONFIG_DVB_VES1820=m
6360 +CONFIG_DVB_TDA10021=m
6361 +CONFIG_DVB_TDA10023=m
6362 +CONFIG_DVB_STV0297=m
6365 +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
6367 +CONFIG_DVB_NXT200X=m
6368 +CONFIG_DVB_OR51211=m
6369 +CONFIG_DVB_OR51132=m
6370 +CONFIG_DVB_BCM3510=m
6371 +CONFIG_DVB_LGDT330X=m
6372 +CONFIG_DVB_LGDT3305=m
6373 +CONFIG_DVB_LGDT3306A=m
6374 +CONFIG_DVB_LG2160=m
6375 +CONFIG_DVB_S5H1409=m
6376 +CONFIG_DVB_AU8522=m
6377 +CONFIG_DVB_AU8522_DTV=m
6378 +CONFIG_DVB_AU8522_V4L=m
6379 +CONFIG_DVB_S5H1411=m
6380 +CONFIG_DVB_MXL692=m
6383 +# ISDB-T (terrestrial) frontends
6385 +CONFIG_DVB_S921=m
6386 +CONFIG_DVB_DIB8000=m
6387 +CONFIG_DVB_MB86A20S=m
6390 +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
6392 +CONFIG_DVB_TC90522=m
6393 +CONFIG_DVB_MN88443X=m
6396 +# Digital terrestrial only tuners/PLL
6398 +CONFIG_DVB_PLL=m
6399 +CONFIG_DVB_TUNER_DIB0070=m
6400 +CONFIG_DVB_TUNER_DIB0090=m
6403 +# SEC control devices for DVB-S
6405 +CONFIG_DVB_DRX39XYJ=m
6406 +CONFIG_DVB_LNBH25=m
6407 +CONFIG_DVB_LNBH29=m
6408 +CONFIG_DVB_LNBP21=m
6409 +CONFIG_DVB_LNBP22=m
6410 +CONFIG_DVB_ISL6405=m
6411 +CONFIG_DVB_ISL6421=m
6412 +CONFIG_DVB_ISL6423=m
6413 +CONFIG_DVB_A8293=m
6414 +CONFIG_DVB_LGS8GL5=m
6415 +CONFIG_DVB_LGS8GXX=m
6416 +CONFIG_DVB_ATBM8830=m
6417 +CONFIG_DVB_TDA665x=m
6418 +CONFIG_DVB_IX2505V=m
6419 +CONFIG_DVB_M88RS2000=m
6420 +CONFIG_DVB_AF9033=m
6421 +CONFIG_DVB_HORUS3A=m
6422 +CONFIG_DVB_ASCOT2E=m
6423 +CONFIG_DVB_HELENE=m
6426 +# Common Interface (EN50221) controller drivers
6428 +CONFIG_DVB_CXD2099=m
6429 +CONFIG_DVB_SP2=m
6430 +# end of Customise DVB Frontends
6433 +# Tools to develop new frontends
6435 +CONFIG_DVB_DUMMY_FE=m
6436 +# end of Media ancillary drivers
6439 +# Graphics support
6441 +CONFIG_AGP=y
6442 +CONFIG_AGP_AMD64=y
6443 +CONFIG_AGP_INTEL=y
6444 +CONFIG_AGP_SIS=m
6445 +CONFIG_AGP_VIA=y
6446 +CONFIG_INTEL_GTT=y
6447 +CONFIG_VGA_ARB=y
6448 +CONFIG_VGA_ARB_MAX_GPUS=16
6449 +CONFIG_VGA_SWITCHEROO=y
6450 +CONFIG_DRM=m
6451 +CONFIG_DRM_MIPI_DBI=m
6452 +CONFIG_DRM_MIPI_DSI=y
6453 +CONFIG_DRM_DP_AUX_CHARDEV=y
6454 +# CONFIG_DRM_DEBUG_SELFTEST is not set
6455 +CONFIG_DRM_KMS_HELPER=m
6456 +CONFIG_DRM_KMS_FB_HELPER=y
6457 +# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set
6458 +CONFIG_DRM_FBDEV_EMULATION=y
6459 +CONFIG_DRM_FBDEV_OVERALLOC=100
6460 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
6461 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y
6462 +CONFIG_DRM_DP_CEC=y
6463 +CONFIG_DRM_TTM=m
6464 +CONFIG_DRM_VRAM_HELPER=m
6465 +CONFIG_DRM_TTM_HELPER=m
6466 +CONFIG_DRM_GEM_CMA_HELPER=y
6467 +CONFIG_DRM_KMS_CMA_HELPER=y
6468 +CONFIG_DRM_GEM_SHMEM_HELPER=y
6469 +CONFIG_DRM_SCHED=m
6472 +# I2C encoder or helper chips
6474 +CONFIG_DRM_I2C_CH7006=m
6475 +CONFIG_DRM_I2C_SIL164=m
6476 +CONFIG_DRM_I2C_NXP_TDA998X=m
6477 +CONFIG_DRM_I2C_NXP_TDA9950=m
6478 +# end of I2C encoder or helper chips
6481 +# ARM devices
6483 +# end of ARM devices
6485 +CONFIG_DRM_RADEON=m
6486 +# CONFIG_DRM_RADEON_USERPTR is not set
6487 +CONFIG_DRM_AMDGPU=m
6488 +CONFIG_DRM_AMDGPU_SI=y
6489 +CONFIG_DRM_AMDGPU_CIK=y
6490 +CONFIG_DRM_AMDGPU_USERPTR=y
6491 +# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
6494 +# ACP (Audio CoProcessor) Configuration
6496 +CONFIG_DRM_AMD_ACP=y
6497 +# end of ACP (Audio CoProcessor) Configuration
6500 +# Display Engine Configuration
6502 +CONFIG_DRM_AMD_DC=y
6503 +CONFIG_DRM_AMD_DC_DCN=y
6504 +CONFIG_DRM_AMD_DC_HDCP=y
6505 +CONFIG_DRM_AMD_DC_SI=y
6506 +# CONFIG_DEBUG_KERNEL_DC is not set
6507 +# end of Display Engine Configuration
6509 +CONFIG_HSA_AMD=y
6510 +CONFIG_DRM_NOUVEAU=m
6511 +# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set
6512 +CONFIG_NOUVEAU_DEBUG=5
6513 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3
6514 +# CONFIG_NOUVEAU_DEBUG_MMU is not set
6515 +# CONFIG_NOUVEAU_DEBUG_PUSH is not set
6516 +CONFIG_DRM_NOUVEAU_BACKLIGHT=y
6517 +# CONFIG_DRM_NOUVEAU_SVM is not set
6518 +CONFIG_DRM_I915=m
6519 +CONFIG_DRM_I915_FORCE_PROBE=""
6520 +CONFIG_DRM_I915_CAPTURE_ERROR=y
6521 +CONFIG_DRM_I915_COMPRESS_ERROR=y
6522 +CONFIG_DRM_I915_USERPTR=y
6523 +CONFIG_DRM_I915_GVT=y
6524 +CONFIG_DRM_I915_GVT_KVMGT=m
6527 +# drm/i915 Debugging
6529 +# CONFIG_DRM_I915_WERROR is not set
6530 +# CONFIG_DRM_I915_DEBUG is not set
6531 +# CONFIG_DRM_I915_DEBUG_MMIO is not set
6532 +# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set
6533 +# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set
6534 +# CONFIG_DRM_I915_DEBUG_GUC is not set
6535 +# CONFIG_DRM_I915_SELFTEST is not set
6536 +# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set
6537 +# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set
6538 +# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set
6539 +# end of drm/i915 Debugging
6542 +# drm/i915 Profile Guided Optimisation
6544 +CONFIG_DRM_I915_FENCE_TIMEOUT=10000
6545 +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250
6546 +CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500
6547 +CONFIG_DRM_I915_PREEMPT_TIMEOUT=640
6548 +CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000
6549 +CONFIG_DRM_I915_STOP_TIMEOUT=100
6550 +CONFIG_DRM_I915_TIMESLICE_DURATION=1
6551 +# end of drm/i915 Profile Guided Optimisation
6553 +CONFIG_DRM_VGEM=m
6554 +CONFIG_DRM_VKMS=m
6555 +CONFIG_DRM_VMWGFX=m
6556 +CONFIG_DRM_VMWGFX_FBCON=y
6557 +CONFIG_DRM_GMA500=m
6558 +CONFIG_DRM_GMA600=y
6559 +CONFIG_DRM_UDL=m
6560 +CONFIG_DRM_AST=m
6561 +CONFIG_DRM_MGAG200=m
6562 +CONFIG_DRM_QXL=m
6563 +CONFIG_DRM_BOCHS=m
6564 +CONFIG_DRM_VIRTIO_GPU=m
6565 +CONFIG_DRM_PANEL=y
6568 +# Display Panels
6570 +CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m
6571 +# end of Display Panels
6573 +CONFIG_DRM_BRIDGE=y
6574 +CONFIG_DRM_PANEL_BRIDGE=y
6577 +# Display Interface Bridges
6579 +CONFIG_DRM_ANALOGIX_ANX78XX=m
6580 +CONFIG_DRM_ANALOGIX_DP=m
6581 +# end of Display Interface Bridges
6583 +# CONFIG_DRM_ETNAVIV is not set
6584 +CONFIG_DRM_CIRRUS_QEMU=m
6585 +CONFIG_DRM_GM12U320=m
6586 +CONFIG_TINYDRM_HX8357D=m
6587 +CONFIG_TINYDRM_ILI9225=m
6588 +CONFIG_TINYDRM_ILI9341=m
6589 +CONFIG_TINYDRM_ILI9486=m
6590 +CONFIG_TINYDRM_MI0283QT=m
6591 +CONFIG_TINYDRM_REPAPER=m
6592 +CONFIG_TINYDRM_ST7586=m
6593 +CONFIG_TINYDRM_ST7735R=m
6594 +CONFIG_DRM_XEN=y
6595 +CONFIG_DRM_XEN_FRONTEND=m
6596 +CONFIG_DRM_VBOXVIDEO=m
6597 +# CONFIG_DRM_LEGACY is not set
6598 +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
6601 +# Frame buffer Devices
6603 +CONFIG_FB_CMDLINE=y
6604 +CONFIG_FB_NOTIFY=y
6605 +CONFIG_FB=y
6606 +CONFIG_FIRMWARE_EDID=y
6607 +CONFIG_FB_DDC=m
6608 +CONFIG_FB_BOOT_VESA_SUPPORT=y
6609 +CONFIG_FB_CFB_FILLRECT=y
6610 +CONFIG_FB_CFB_COPYAREA=y
6611 +CONFIG_FB_CFB_IMAGEBLIT=y
6612 +CONFIG_FB_SYS_FILLRECT=m
6613 +CONFIG_FB_SYS_COPYAREA=m
6614 +CONFIG_FB_SYS_IMAGEBLIT=m
6615 +# CONFIG_FB_FOREIGN_ENDIAN is not set
6616 +CONFIG_FB_SYS_FOPS=m
6617 +CONFIG_FB_DEFERRED_IO=y
6618 +CONFIG_FB_HECUBA=m
6619 +CONFIG_FB_SVGALIB=m
6620 +CONFIG_FB_BACKLIGHT=m
6621 +CONFIG_FB_MODE_HELPERS=y
6622 +CONFIG_FB_TILEBLITTING=y
6625 +# Frame buffer hardware drivers
6627 +CONFIG_FB_CIRRUS=m
6628 +CONFIG_FB_PM2=m
6629 +CONFIG_FB_PM2_FIFO_DISCONNECT=y
6630 +CONFIG_FB_CYBER2000=m
6631 +CONFIG_FB_CYBER2000_DDC=y
6632 +CONFIG_FB_ARC=m
6633 +CONFIG_FB_ASILIANT=y
6634 +CONFIG_FB_IMSTT=y
6635 +CONFIG_FB_VGA16=m
6636 +CONFIG_FB_UVESA=m
6637 +CONFIG_FB_VESA=y
6638 +CONFIG_FB_EFI=y
6639 +CONFIG_FB_N411=m
6640 +CONFIG_FB_HGA=m
6641 +CONFIG_FB_OPENCORES=m
6642 +CONFIG_FB_S1D13XXX=m
6643 +CONFIG_FB_NVIDIA=m
6644 +CONFIG_FB_NVIDIA_I2C=y
6645 +# CONFIG_FB_NVIDIA_DEBUG is not set
6646 +CONFIG_FB_NVIDIA_BACKLIGHT=y
6647 +CONFIG_FB_RIVA=m
6648 +CONFIG_FB_RIVA_I2C=y
6649 +# CONFIG_FB_RIVA_DEBUG is not set
6650 +CONFIG_FB_RIVA_BACKLIGHT=y
6651 +CONFIG_FB_I740=m
6652 +CONFIG_FB_LE80578=m
6653 +CONFIG_FB_CARILLO_RANCH=m
6654 +CONFIG_FB_INTEL=m
6655 +# CONFIG_FB_INTEL_DEBUG is not set
6656 +CONFIG_FB_INTEL_I2C=y
6657 +CONFIG_FB_MATROX=m
6658 +CONFIG_FB_MATROX_MILLENIUM=y
6659 +CONFIG_FB_MATROX_MYSTIQUE=y
6660 +CONFIG_FB_MATROX_G=y
6661 +CONFIG_FB_MATROX_I2C=m
6662 +CONFIG_FB_MATROX_MAVEN=m
6663 +CONFIG_FB_RADEON=m
6664 +CONFIG_FB_RADEON_I2C=y
6665 +CONFIG_FB_RADEON_BACKLIGHT=y
6666 +# CONFIG_FB_RADEON_DEBUG is not set
6667 +CONFIG_FB_ATY128=m
6668 +CONFIG_FB_ATY128_BACKLIGHT=y
6669 +CONFIG_FB_ATY=m
6670 +CONFIG_FB_ATY_CT=y
6671 +# CONFIG_FB_ATY_GENERIC_LCD is not set
6672 +CONFIG_FB_ATY_GX=y
6673 +CONFIG_FB_ATY_BACKLIGHT=y
6674 +CONFIG_FB_S3=m
6675 +CONFIG_FB_S3_DDC=y
6676 +CONFIG_FB_SAVAGE=m
6677 +CONFIG_FB_SAVAGE_I2C=y
6678 +# CONFIG_FB_SAVAGE_ACCEL is not set
6679 +CONFIG_FB_SIS=m
6680 +CONFIG_FB_SIS_300=y
6681 +CONFIG_FB_SIS_315=y
6682 +CONFIG_FB_VIA=m
6683 +# CONFIG_FB_VIA_DIRECT_PROCFS is not set
6684 +CONFIG_FB_VIA_X_COMPATIBILITY=y
6685 +CONFIG_FB_NEOMAGIC=m
6686 +CONFIG_FB_KYRO=m
6687 +CONFIG_FB_3DFX=m
6688 +# CONFIG_FB_3DFX_ACCEL is not set
6689 +# CONFIG_FB_3DFX_I2C is not set
6690 +CONFIG_FB_VOODOO1=m
6691 +CONFIG_FB_VT8623=m
6692 +CONFIG_FB_TRIDENT=m
6693 +CONFIG_FB_ARK=m
6694 +CONFIG_FB_PM3=m
6695 +CONFIG_FB_CARMINE=m
6696 +CONFIG_FB_CARMINE_DRAM_EVAL=y
6697 +# CONFIG_CARMINE_DRAM_CUSTOM is not set
6698 +CONFIG_FB_SM501=m
6699 +CONFIG_FB_SMSCUFX=m
6700 +CONFIG_FB_UDL=m
6701 +# CONFIG_FB_IBM_GXT4500 is not set
6702 +# CONFIG_FB_VIRTUAL is not set
6703 +CONFIG_XEN_FBDEV_FRONTEND=m
6704 +CONFIG_FB_METRONOME=m
6705 +CONFIG_FB_MB862XX=m
6706 +CONFIG_FB_MB862XX_PCI_GDC=y
6707 +CONFIG_FB_MB862XX_I2C=y
6708 +CONFIG_FB_HYPERV=m
6709 +CONFIG_FB_SIMPLE=y
6710 +CONFIG_FB_SM712=m
6711 +# end of Frame buffer Devices
6714 +# Backlight & LCD device support
6716 +CONFIG_LCD_CLASS_DEVICE=m
6717 +CONFIG_LCD_L4F00242T03=m
6718 +CONFIG_LCD_LMS283GF05=m
6719 +CONFIG_LCD_LTV350QV=m
6720 +CONFIG_LCD_ILI922X=m
6721 +CONFIG_LCD_ILI9320=m
6722 +CONFIG_LCD_TDO24M=m
6723 +CONFIG_LCD_VGG2432A4=m
6724 +CONFIG_LCD_PLATFORM=m
6725 +CONFIG_LCD_AMS369FG06=m
6726 +CONFIG_LCD_LMS501KF03=m
6727 +CONFIG_LCD_HX8357=m
6728 +CONFIG_LCD_OTM3225A=m
6729 +CONFIG_BACKLIGHT_CLASS_DEVICE=y
6730 +CONFIG_BACKLIGHT_KTD253=m
6731 +CONFIG_BACKLIGHT_LM3533=m
6732 +CONFIG_BACKLIGHT_CARILLO_RANCH=m
6733 +CONFIG_BACKLIGHT_PWM=m
6734 +CONFIG_BACKLIGHT_DA903X=m
6735 +CONFIG_BACKLIGHT_DA9052=m
6736 +CONFIG_BACKLIGHT_MAX8925=m
6737 +CONFIG_BACKLIGHT_APPLE=m
6738 +CONFIG_BACKLIGHT_QCOM_WLED=m
6739 +CONFIG_BACKLIGHT_SAHARA=m
6740 +CONFIG_BACKLIGHT_WM831X=m
6741 +CONFIG_BACKLIGHT_ADP5520=m
6742 +CONFIG_BACKLIGHT_ADP8860=m
6743 +CONFIG_BACKLIGHT_ADP8870=m
6744 +CONFIG_BACKLIGHT_88PM860X=m
6745 +CONFIG_BACKLIGHT_PCF50633=m
6746 +CONFIG_BACKLIGHT_AAT2870=m
6747 +CONFIG_BACKLIGHT_LM3630A=m
6748 +CONFIG_BACKLIGHT_LM3639=m
6749 +CONFIG_BACKLIGHT_LP855X=m
6750 +CONFIG_BACKLIGHT_LP8788=m
6751 +CONFIG_BACKLIGHT_PANDORA=m
6752 +CONFIG_BACKLIGHT_SKY81452=m
6753 +CONFIG_BACKLIGHT_AS3711=m
6754 +CONFIG_BACKLIGHT_GPIO=m
6755 +CONFIG_BACKLIGHT_LV5207LP=m
6756 +CONFIG_BACKLIGHT_BD6107=m
6757 +CONFIG_BACKLIGHT_ARCXCNN=m
6758 +CONFIG_BACKLIGHT_RAVE_SP=m
6759 +# end of Backlight & LCD device support
6761 +CONFIG_VGASTATE=m
6762 +CONFIG_VIDEOMODE_HELPERS=y
6763 +CONFIG_HDMI=y
6766 +# Console display driver support
6768 +CONFIG_VGA_CONSOLE=y
6769 +CONFIG_DUMMY_CONSOLE=y
6770 +CONFIG_DUMMY_CONSOLE_COLUMNS=80
6771 +CONFIG_DUMMY_CONSOLE_ROWS=25
6772 +CONFIG_FRAMEBUFFER_CONSOLE=y
6773 +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
6774 +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
6775 +CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y
6776 +# end of Console display driver support
6778 +# CONFIG_LOGO is not set
6779 +# end of Graphics support
6781 +CONFIG_SOUND=m
6782 +CONFIG_SOUND_OSS_CORE=y
6783 +# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
6784 +CONFIG_SND=m
6785 +CONFIG_SND_TIMER=m
6786 +CONFIG_SND_PCM=m
6787 +CONFIG_SND_PCM_ELD=y
6788 +CONFIG_SND_PCM_IEC958=y
6789 +CONFIG_SND_DMAENGINE_PCM=m
6790 +CONFIG_SND_HWDEP=m
6791 +CONFIG_SND_SEQ_DEVICE=m
6792 +CONFIG_SND_RAWMIDI=m
6793 +CONFIG_SND_COMPRESS_OFFLOAD=m
6794 +CONFIG_SND_JACK=y
6795 +CONFIG_SND_JACK_INPUT_DEV=y
6796 +CONFIG_SND_OSSEMUL=y
6797 +CONFIG_SND_MIXER_OSS=m
6798 +# CONFIG_SND_PCM_OSS is not set
6799 +CONFIG_SND_PCM_TIMER=y
6800 +CONFIG_SND_HRTIMER=m
6801 +CONFIG_SND_DYNAMIC_MINORS=y
6802 +CONFIG_SND_MAX_CARDS=32
6803 +CONFIG_SND_SUPPORT_OLD_API=y
6804 +CONFIG_SND_PROC_FS=y
6805 +CONFIG_SND_VERBOSE_PROCFS=y
6806 +# CONFIG_SND_VERBOSE_PRINTK is not set
6807 +# CONFIG_SND_DEBUG is not set
6808 +CONFIG_SND_VMASTER=y
6809 +CONFIG_SND_DMA_SGBUF=y
6810 +CONFIG_SND_SEQUENCER=m
6811 +CONFIG_SND_SEQ_DUMMY=m
6812 +# CONFIG_SND_SEQUENCER_OSS is not set
6813 +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
6814 +CONFIG_SND_SEQ_MIDI_EVENT=m
6815 +CONFIG_SND_SEQ_MIDI=m
6816 +CONFIG_SND_SEQ_MIDI_EMUL=m
6817 +CONFIG_SND_SEQ_VIRMIDI=m
6818 +CONFIG_SND_MPU401_UART=m
6819 +CONFIG_SND_OPL3_LIB=m
6820 +CONFIG_SND_OPL3_LIB_SEQ=m
6821 +CONFIG_SND_VX_LIB=m
6822 +CONFIG_SND_AC97_CODEC=m
6823 +CONFIG_SND_DRIVERS=y
6824 +CONFIG_SND_PCSP=m
6825 +CONFIG_SND_DUMMY=m
6826 +CONFIG_SND_ALOOP=m
6827 +CONFIG_SND_VIRMIDI=m
6828 +CONFIG_SND_MTPAV=m
6829 +CONFIG_SND_MTS64=m
6830 +CONFIG_SND_SERIAL_U16550=m
6831 +CONFIG_SND_MPU401=m
6832 +CONFIG_SND_PORTMAN2X4=m
6833 +CONFIG_SND_AC97_POWER_SAVE=y
6834 +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
6835 +CONFIG_SND_SB_COMMON=m
6836 +CONFIG_SND_PCI=y
6837 +CONFIG_SND_AD1889=m
6838 +CONFIG_SND_ALS300=m
6839 +CONFIG_SND_ALS4000=m
6840 +CONFIG_SND_ALI5451=m
6841 +CONFIG_SND_ASIHPI=m
6842 +CONFIG_SND_ATIIXP=m
6843 +CONFIG_SND_ATIIXP_MODEM=m
6844 +CONFIG_SND_AU8810=m
6845 +CONFIG_SND_AU8820=m
6846 +CONFIG_SND_AU8830=m
6847 +CONFIG_SND_AW2=m
6848 +CONFIG_SND_AZT3328=m
6849 +CONFIG_SND_BT87X=m
6850 +# CONFIG_SND_BT87X_OVERCLOCK is not set
6851 +CONFIG_SND_CA0106=m
6852 +CONFIG_SND_CMIPCI=m
6853 +CONFIG_SND_OXYGEN_LIB=m
6854 +CONFIG_SND_OXYGEN=m
6855 +CONFIG_SND_CS4281=m
6856 +CONFIG_SND_CS46XX=m
6857 +CONFIG_SND_CS46XX_NEW_DSP=y
6858 +CONFIG_SND_CTXFI=m
6859 +CONFIG_SND_DARLA20=m
6860 +CONFIG_SND_GINA20=m
6861 +CONFIG_SND_LAYLA20=m
6862 +CONFIG_SND_DARLA24=m
6863 +CONFIG_SND_GINA24=m
6864 +CONFIG_SND_LAYLA24=m
6865 +CONFIG_SND_MONA=m
6866 +CONFIG_SND_MIA=m
6867 +CONFIG_SND_ECHO3G=m
6868 +CONFIG_SND_INDIGO=m
6869 +CONFIG_SND_INDIGOIO=m
6870 +CONFIG_SND_INDIGODJ=m
6871 +CONFIG_SND_INDIGOIOX=m
6872 +CONFIG_SND_INDIGODJX=m
6873 +CONFIG_SND_EMU10K1=m
6874 +CONFIG_SND_EMU10K1_SEQ=m
6875 +CONFIG_SND_EMU10K1X=m
6876 +CONFIG_SND_ENS1370=m
6877 +CONFIG_SND_ENS1371=m
6878 +CONFIG_SND_ES1938=m
6879 +CONFIG_SND_ES1968=m
6880 +CONFIG_SND_ES1968_INPUT=y
6881 +CONFIG_SND_ES1968_RADIO=y
6882 +CONFIG_SND_FM801=m
6883 +CONFIG_SND_FM801_TEA575X_BOOL=y
6884 +CONFIG_SND_HDSP=m
6885 +CONFIG_SND_HDSPM=m
6886 +CONFIG_SND_ICE1712=m
6887 +CONFIG_SND_ICE1724=m
6888 +CONFIG_SND_INTEL8X0=m
6889 +CONFIG_SND_INTEL8X0M=m
6890 +CONFIG_SND_KORG1212=m
6891 +CONFIG_SND_LOLA=m
6892 +CONFIG_SND_LX6464ES=m
6893 +CONFIG_SND_MAESTRO3=m
6894 +CONFIG_SND_MAESTRO3_INPUT=y
6895 +CONFIG_SND_MIXART=m
6896 +CONFIG_SND_NM256=m
6897 +CONFIG_SND_PCXHR=m
6898 +CONFIG_SND_RIPTIDE=m
6899 +CONFIG_SND_RME32=m
6900 +CONFIG_SND_RME96=m
6901 +CONFIG_SND_RME9652=m
6902 +CONFIG_SND_SONICVIBES=m
6903 +CONFIG_SND_TRIDENT=m
6904 +CONFIG_SND_VIA82XX=m
6905 +CONFIG_SND_VIA82XX_MODEM=m
6906 +CONFIG_SND_VIRTUOSO=m
6907 +CONFIG_SND_VX222=m
6908 +CONFIG_SND_YMFPCI=m
6911 +# HD-Audio
6913 +CONFIG_SND_HDA=m
6914 +CONFIG_SND_HDA_GENERIC_LEDS=y
6915 +CONFIG_SND_HDA_INTEL=m
6916 +CONFIG_SND_HDA_HWDEP=y
6917 +CONFIG_SND_HDA_RECONFIG=y
6918 +CONFIG_SND_HDA_INPUT_BEEP=y
6919 +CONFIG_SND_HDA_INPUT_BEEP_MODE=0
6920 +CONFIG_SND_HDA_PATCH_LOADER=y
6921 +CONFIG_SND_HDA_CODEC_REALTEK=m
6922 +CONFIG_SND_HDA_CODEC_ANALOG=m
6923 +CONFIG_SND_HDA_CODEC_SIGMATEL=m
6924 +CONFIG_SND_HDA_CODEC_VIA=m
6925 +CONFIG_SND_HDA_CODEC_HDMI=m
6926 +CONFIG_SND_HDA_CODEC_CIRRUS=m
6927 +CONFIG_SND_HDA_CODEC_CONEXANT=m
6928 +CONFIG_SND_HDA_CODEC_CA0110=m
6929 +CONFIG_SND_HDA_CODEC_CA0132=m
6930 +CONFIG_SND_HDA_CODEC_CA0132_DSP=y
6931 +CONFIG_SND_HDA_CODEC_CMEDIA=m
6932 +CONFIG_SND_HDA_CODEC_SI3054=m
6933 +CONFIG_SND_HDA_GENERIC=m
6934 +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=1
6935 +# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set
6936 +# end of HD-Audio
6938 +CONFIG_SND_HDA_CORE=m
6939 +CONFIG_SND_HDA_DSP_LOADER=y
6940 +CONFIG_SND_HDA_COMPONENT=y
6941 +CONFIG_SND_HDA_I915=y
6942 +CONFIG_SND_HDA_EXT_CORE=m
6943 +CONFIG_SND_HDA_PREALLOC_SIZE=0
6944 +CONFIG_SND_INTEL_NHLT=y
6945 +CONFIG_SND_INTEL_DSP_CONFIG=m
6946 +CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m
6947 +CONFIG_SND_INTEL_BYT_PREFER_SOF=y
6948 +CONFIG_SND_SPI=y
6949 +CONFIG_SND_USB=y
6950 +CONFIG_SND_USB_AUDIO=m
6951 +CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y
6952 +CONFIG_SND_USB_UA101=m
6953 +CONFIG_SND_USB_USX2Y=m
6954 +CONFIG_SND_USB_CAIAQ=m
6955 +CONFIG_SND_USB_CAIAQ_INPUT=y
6956 +CONFIG_SND_USB_US122L=m
6957 +CONFIG_SND_USB_6FIRE=m
6958 +CONFIG_SND_USB_HIFACE=m
6959 +CONFIG_SND_BCD2000=m
6960 +CONFIG_SND_USB_LINE6=m
6961 +CONFIG_SND_USB_POD=m
6962 +CONFIG_SND_USB_PODHD=m
6963 +CONFIG_SND_USB_TONEPORT=m
6964 +CONFIG_SND_USB_VARIAX=m
6965 +CONFIG_SND_FIREWIRE=y
6966 +CONFIG_SND_FIREWIRE_LIB=m
6967 +CONFIG_SND_DICE=m
6968 +CONFIG_SND_OXFW=m
6969 +CONFIG_SND_ISIGHT=m
6970 +CONFIG_SND_FIREWORKS=m
6971 +CONFIG_SND_BEBOB=m
6972 +CONFIG_SND_FIREWIRE_DIGI00X=m
6973 +CONFIG_SND_FIREWIRE_TASCAM=m
6974 +CONFIG_SND_FIREWIRE_MOTU=m
6975 +CONFIG_SND_FIREFACE=m
6976 +CONFIG_SND_PCMCIA=y
6977 +CONFIG_SND_VXPOCKET=m
6978 +CONFIG_SND_PDAUDIOCF=m
6979 +CONFIG_SND_SOC=m
6980 +CONFIG_SND_SOC_AC97_BUS=y
6981 +CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
6982 +CONFIG_SND_SOC_COMPRESS=y
6983 +CONFIG_SND_SOC_TOPOLOGY=y
6984 +CONFIG_SND_SOC_ACPI=m
6985 +CONFIG_SND_SOC_ADI=m
6986 +CONFIG_SND_SOC_ADI_AXI_I2S=m
6987 +CONFIG_SND_SOC_ADI_AXI_SPDIF=m
6988 +CONFIG_SND_SOC_AMD_ACP=m
6989 +CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH=m
6990 +CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m
6991 +CONFIG_SND_SOC_AMD_ACP3x=m
6992 +CONFIG_SND_SOC_AMD_RV_RT5682_MACH=m
6993 +CONFIG_SND_SOC_AMD_RENOIR=m
6994 +CONFIG_SND_SOC_AMD_RENOIR_MACH=m
6995 +CONFIG_SND_ATMEL_SOC=m
6996 +CONFIG_SND_BCM63XX_I2S_WHISTLER=m
6997 +CONFIG_SND_DESIGNWARE_I2S=m
6998 +CONFIG_SND_DESIGNWARE_PCM=y
7001 +# SoC Audio for Freescale CPUs
7005 +# Common SoC Audio options for Freescale CPUs:
7007 +CONFIG_SND_SOC_FSL_ASRC=m
7008 +CONFIG_SND_SOC_FSL_SAI=m
7009 +CONFIG_SND_SOC_FSL_MQS=m
7010 +CONFIG_SND_SOC_FSL_AUDMIX=m
7011 +CONFIG_SND_SOC_FSL_SSI=m
7012 +CONFIG_SND_SOC_FSL_SPDIF=m
7013 +CONFIG_SND_SOC_FSL_ESAI=m
7014 +CONFIG_SND_SOC_FSL_MICFIL=m
7015 +CONFIG_SND_SOC_FSL_EASRC=m
7016 +CONFIG_SND_SOC_FSL_XCVR=m
7017 +CONFIG_SND_SOC_IMX_AUDMUX=m
7018 +# end of SoC Audio for Freescale CPUs
7020 +CONFIG_SND_I2S_HI6210_I2S=m
7021 +CONFIG_SND_SOC_IMG=y
7022 +CONFIG_SND_SOC_IMG_I2S_IN=m
7023 +CONFIG_SND_SOC_IMG_I2S_OUT=m
7024 +CONFIG_SND_SOC_IMG_PARALLEL_OUT=m
7025 +CONFIG_SND_SOC_IMG_SPDIF_IN=m
7026 +CONFIG_SND_SOC_IMG_SPDIF_OUT=m
7027 +CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m
7028 +CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y
7029 +CONFIG_SND_SOC_INTEL_SST=m
7030 +CONFIG_SND_SOC_INTEL_CATPT=m
7031 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m
7032 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI=m
7033 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=m
7034 +# CONFIG_SND_SOC_INTEL_SKYLAKE is not set
7035 +CONFIG_SND_SOC_INTEL_SKL=m
7036 +CONFIG_SND_SOC_INTEL_APL=m
7037 +CONFIG_SND_SOC_INTEL_KBL=m
7038 +CONFIG_SND_SOC_INTEL_GLK=m
7039 +# CONFIG_SND_SOC_INTEL_CNL is not set
7040 +# CONFIG_SND_SOC_INTEL_CFL is not set
7041 +# CONFIG_SND_SOC_INTEL_CML_H is not set
7042 +# CONFIG_SND_SOC_INTEL_CML_LP is not set
7043 +CONFIG_SND_SOC_INTEL_SKYLAKE_FAMILY=m
7044 +CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m
7045 +# CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC is not set
7046 +CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON=m
7047 +CONFIG_SND_SOC_ACPI_INTEL_MATCH=m
7048 +CONFIG_SND_SOC_INTEL_MACH=y
7049 +# CONFIG_SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES is not set
7050 +CONFIG_SND_SOC_INTEL_HASWELL_MACH=m
7051 +CONFIG_SND_SOC_INTEL_BDW_RT5650_MACH=m
7052 +CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m
7053 +CONFIG_SND_SOC_INTEL_BROADWELL_MACH=m
7054 +CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m
7055 +CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m
7056 +CONFIG_SND_SOC_INTEL_BYTCR_WM5102_MACH=m
7057 +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m
7058 +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m
7059 +CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m
7060 +CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH=m
7061 +CONFIG_SND_SOC_INTEL_BYT_CHT_CX2072X_MACH=m
7062 +CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m
7063 +CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m
7064 +# CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH is not set
7065 +CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m
7066 +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m
7067 +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m
7068 +CONFIG_SND_SOC_INTEL_DA7219_MAX98357A_GENERIC=m
7069 +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON=m
7070 +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m
7071 +CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m
7072 +CONFIG_SND_SOC_INTEL_SOF_WM8804_MACH=m
7073 +CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m
7074 +CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m
7075 +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m
7076 +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH=m
7077 +CONFIG_SND_SOC_INTEL_KBL_RT5660_MACH=m
7078 +CONFIG_SND_SOC_INTEL_GLK_DA7219_MAX98357A_MACH=m
7079 +CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH=m
7080 +CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH=m
7081 +CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH=m
7082 +CONFIG_SND_SOC_INTEL_SOF_PCM512x_MACH=m
7083 +CONFIG_SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH=m
7084 +CONFIG_SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH=m
7085 +CONFIG_SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH=m
7086 +CONFIG_SND_SOC_INTEL_EHL_RT5660_MACH=m
7087 +CONFIG_SND_SOC_MTK_BTCVSD=m
7088 +CONFIG_SND_SOC_SOF_TOPLEVEL=y
7089 +CONFIG_SND_SOC_SOF_PCI_DEV=m
7090 +CONFIG_SND_SOC_SOF_PCI=m
7091 +CONFIG_SND_SOC_SOF_ACPI=m
7092 +CONFIG_SND_SOC_SOF_ACPI_DEV=m
7093 +# CONFIG_SND_SOC_SOF_DEBUG_PROBES is not set
7094 +# CONFIG_SND_SOC_SOF_DEVELOPER_SUPPORT is not set
7095 +CONFIG_SND_SOC_SOF=m
7096 +CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE=y
7097 +CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL=y
7098 +CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC=m
7099 +CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP=m
7100 +CONFIG_SND_SOC_SOF_INTEL_COMMON=m
7101 +CONFIG_SND_SOC_SOF_BAYTRAIL=m
7102 +CONFIG_SND_SOC_SOF_BROADWELL=m
7103 +CONFIG_SND_SOC_SOF_MERRIFIELD=m
7104 +CONFIG_SND_SOC_SOF_INTEL_APL=m
7105 +CONFIG_SND_SOC_SOF_APOLLOLAKE=m
7106 +CONFIG_SND_SOC_SOF_GEMINILAKE=m
7107 +CONFIG_SND_SOC_SOF_INTEL_CNL=m
7108 +CONFIG_SND_SOC_SOF_CANNONLAKE=m
7109 +CONFIG_SND_SOC_SOF_COFFEELAKE=m
7110 +CONFIG_SND_SOC_SOF_COMETLAKE=m
7111 +CONFIG_SND_SOC_SOF_INTEL_ICL=m
7112 +CONFIG_SND_SOC_SOF_ICELAKE=m
7113 +CONFIG_SND_SOC_SOF_JASPERLAKE=m
7114 +CONFIG_SND_SOC_SOF_INTEL_TGL=m
7115 +CONFIG_SND_SOC_SOF_TIGERLAKE=m
7116 +CONFIG_SND_SOC_SOF_ELKHARTLAKE=m
7117 +CONFIG_SND_SOC_SOF_ALDERLAKE=m
7118 +CONFIG_SND_SOC_SOF_HDA_COMMON=m
7119 +CONFIG_SND_SOC_SOF_HDA_LINK=y
7120 +CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC=y
7121 +# CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1 is not set
7122 +CONFIG_SND_SOC_SOF_HDA_LINK_BASELINE=m
7123 +CONFIG_SND_SOC_SOF_HDA=m
7124 +CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=m
7125 +CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE=m
7126 +CONFIG_SND_SOC_SOF_XTENSA=m
7129 +# STMicroelectronics STM32 SOC audio support
7131 +# end of STMicroelectronics STM32 SOC audio support
7133 +CONFIG_SND_SOC_XILINX_I2S=m
7134 +CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=m
7135 +CONFIG_SND_SOC_XILINX_SPDIF=m
7136 +CONFIG_SND_SOC_XTFPGA_I2S=m
7137 +CONFIG_SND_SOC_I2C_AND_SPI=m
7140 +# CODEC drivers
7142 +CONFIG_SND_SOC_ARIZONA=m
7143 +CONFIG_SND_SOC_WM_ADSP=m
7144 +CONFIG_SND_SOC_AC97_CODEC=m
7145 +CONFIG_SND_SOC_ADAU_UTILS=m
7146 +CONFIG_SND_SOC_ADAU1372=m
7147 +CONFIG_SND_SOC_ADAU1372_I2C=m
7148 +CONFIG_SND_SOC_ADAU1372_SPI=m
7149 +CONFIG_SND_SOC_ADAU1701=m
7150 +CONFIG_SND_SOC_ADAU17X1=m
7151 +CONFIG_SND_SOC_ADAU1761=m
7152 +CONFIG_SND_SOC_ADAU1761_I2C=m
7153 +CONFIG_SND_SOC_ADAU1761_SPI=m
7154 +CONFIG_SND_SOC_ADAU7002=m
7155 +CONFIG_SND_SOC_ADAU7118=m
7156 +CONFIG_SND_SOC_ADAU7118_HW=m
7157 +CONFIG_SND_SOC_ADAU7118_I2C=m
7158 +CONFIG_SND_SOC_AK4104=m
7159 +CONFIG_SND_SOC_AK4118=m
7160 +CONFIG_SND_SOC_AK4458=m
7161 +CONFIG_SND_SOC_AK4554=m
7162 +CONFIG_SND_SOC_AK4613=m
7163 +CONFIG_SND_SOC_AK4642=m
7164 +CONFIG_SND_SOC_AK5386=m
7165 +CONFIG_SND_SOC_AK5558=m
7166 +CONFIG_SND_SOC_ALC5623=m
7167 +CONFIG_SND_SOC_BD28623=m
7168 +CONFIG_SND_SOC_BT_SCO=m
7169 +CONFIG_SND_SOC_CROS_EC_CODEC=m
7170 +CONFIG_SND_SOC_CS35L32=m
7171 +CONFIG_SND_SOC_CS35L33=m
7172 +CONFIG_SND_SOC_CS35L34=m
7173 +CONFIG_SND_SOC_CS35L35=m
7174 +CONFIG_SND_SOC_CS35L36=m
7175 +CONFIG_SND_SOC_CS42L42=m
7176 +CONFIG_SND_SOC_CS42L51=m
7177 +CONFIG_SND_SOC_CS42L51_I2C=m
7178 +CONFIG_SND_SOC_CS42L52=m
7179 +CONFIG_SND_SOC_CS42L56=m
7180 +CONFIG_SND_SOC_CS42L73=m
7181 +CONFIG_SND_SOC_CS4234=m
7182 +CONFIG_SND_SOC_CS4265=m
7183 +CONFIG_SND_SOC_CS4270=m
7184 +CONFIG_SND_SOC_CS4271=m
7185 +CONFIG_SND_SOC_CS4271_I2C=m
7186 +CONFIG_SND_SOC_CS4271_SPI=m
7187 +CONFIG_SND_SOC_CS42XX8=m
7188 +CONFIG_SND_SOC_CS42XX8_I2C=m
7189 +CONFIG_SND_SOC_CS43130=m
7190 +CONFIG_SND_SOC_CS4341=m
7191 +CONFIG_SND_SOC_CS4349=m
7192 +CONFIG_SND_SOC_CS53L30=m
7193 +CONFIG_SND_SOC_CX2072X=m
7194 +CONFIG_SND_SOC_DA7213=m
7195 +CONFIG_SND_SOC_DA7219=m
7196 +CONFIG_SND_SOC_DMIC=m
7197 +CONFIG_SND_SOC_HDMI_CODEC=m
7198 +CONFIG_SND_SOC_ES7134=m
7199 +CONFIG_SND_SOC_ES7241=m
7200 +CONFIG_SND_SOC_ES8316=m
7201 +CONFIG_SND_SOC_ES8328=m
7202 +CONFIG_SND_SOC_ES8328_I2C=m
7203 +CONFIG_SND_SOC_ES8328_SPI=m
7204 +CONFIG_SND_SOC_GTM601=m
7205 +CONFIG_SND_SOC_HDAC_HDMI=m
7206 +CONFIG_SND_SOC_HDAC_HDA=m
7207 +CONFIG_SND_SOC_INNO_RK3036=m
7208 +CONFIG_SND_SOC_MAX98088=m
7209 +CONFIG_SND_SOC_MAX98090=m
7210 +CONFIG_SND_SOC_MAX98357A=m
7211 +CONFIG_SND_SOC_MAX98504=m
7212 +CONFIG_SND_SOC_MAX9867=m
7213 +CONFIG_SND_SOC_MAX98927=m
7214 +CONFIG_SND_SOC_MAX98373=m
7215 +CONFIG_SND_SOC_MAX98373_I2C=m
7216 +CONFIG_SND_SOC_MAX98373_SDW=m
7217 +CONFIG_SND_SOC_MAX98390=m
7218 +CONFIG_SND_SOC_MAX9860=m
7219 +CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
7220 +CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
7221 +CONFIG_SND_SOC_PCM1681=m
7222 +CONFIG_SND_SOC_PCM1789=m
7223 +CONFIG_SND_SOC_PCM1789_I2C=m
7224 +CONFIG_SND_SOC_PCM179X=m
7225 +CONFIG_SND_SOC_PCM179X_I2C=m
7226 +CONFIG_SND_SOC_PCM179X_SPI=m
7227 +CONFIG_SND_SOC_PCM186X=m
7228 +CONFIG_SND_SOC_PCM186X_I2C=m
7229 +CONFIG_SND_SOC_PCM186X_SPI=m
7230 +CONFIG_SND_SOC_PCM3060=m
7231 +CONFIG_SND_SOC_PCM3060_I2C=m
7232 +CONFIG_SND_SOC_PCM3060_SPI=m
7233 +CONFIG_SND_SOC_PCM3168A=m
7234 +CONFIG_SND_SOC_PCM3168A_I2C=m
7235 +CONFIG_SND_SOC_PCM3168A_SPI=m
7236 +CONFIG_SND_SOC_PCM5102A=m
7237 +CONFIG_SND_SOC_PCM512x=m
7238 +CONFIG_SND_SOC_PCM512x_I2C=m
7239 +CONFIG_SND_SOC_PCM512x_SPI=m
7240 +CONFIG_SND_SOC_RK3328=m
7241 +CONFIG_SND_SOC_RL6231=m
7242 +CONFIG_SND_SOC_RL6347A=m
7243 +CONFIG_SND_SOC_RT286=m
7244 +CONFIG_SND_SOC_RT298=m
7245 +CONFIG_SND_SOC_RT1011=m
7246 +CONFIG_SND_SOC_RT1015=m
7247 +CONFIG_SND_SOC_RT1308_SDW=m
7248 +CONFIG_SND_SOC_RT5514=m
7249 +CONFIG_SND_SOC_RT5514_SPI=m
7250 +CONFIG_SND_SOC_RT5616=m
7251 +CONFIG_SND_SOC_RT5631=m
7252 +CONFIG_SND_SOC_RT5640=m
7253 +CONFIG_SND_SOC_RT5645=m
7254 +CONFIG_SND_SOC_RT5651=m
7255 +CONFIG_SND_SOC_RT5659=m
7256 +CONFIG_SND_SOC_RT5660=m
7257 +CONFIG_SND_SOC_RT5663=m
7258 +CONFIG_SND_SOC_RT5670=m
7259 +CONFIG_SND_SOC_RT5677=m
7260 +CONFIG_SND_SOC_RT5677_SPI=m
7261 +CONFIG_SND_SOC_RT5682=m
7262 +CONFIG_SND_SOC_RT5682_I2C=m
7263 +CONFIG_SND_SOC_RT5682_SDW=m
7264 +CONFIG_SND_SOC_RT700=m
7265 +CONFIG_SND_SOC_RT700_SDW=m
7266 +CONFIG_SND_SOC_RT711=m
7267 +CONFIG_SND_SOC_RT711_SDW=m
7268 +CONFIG_SND_SOC_RT715=m
7269 +CONFIG_SND_SOC_RT715_SDW=m
7270 +CONFIG_SND_SOC_SGTL5000=m
7271 +CONFIG_SND_SOC_SI476X=m
7272 +CONFIG_SND_SOC_SIGMADSP=m
7273 +CONFIG_SND_SOC_SIGMADSP_I2C=m
7274 +CONFIG_SND_SOC_SIGMADSP_REGMAP=m
7275 +CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
7276 +CONFIG_SND_SOC_SIMPLE_MUX=m
7277 +CONFIG_SND_SOC_SPDIF=m
7278 +CONFIG_SND_SOC_SSM2305=m
7279 +CONFIG_SND_SOC_SSM2602=m
7280 +CONFIG_SND_SOC_SSM2602_SPI=m
7281 +CONFIG_SND_SOC_SSM2602_I2C=m
7282 +CONFIG_SND_SOC_SSM4567=m
7283 +CONFIG_SND_SOC_STA32X=m
7284 +CONFIG_SND_SOC_STA350=m
7285 +CONFIG_SND_SOC_STI_SAS=m
7286 +CONFIG_SND_SOC_TAS2552=m
7287 +CONFIG_SND_SOC_TAS2562=m
7288 +CONFIG_SND_SOC_TAS2764=m
7289 +CONFIG_SND_SOC_TAS2770=m
7290 +CONFIG_SND_SOC_TAS5086=m
7291 +CONFIG_SND_SOC_TAS571X=m
7292 +CONFIG_SND_SOC_TAS5720=m
7293 +CONFIG_SND_SOC_TAS6424=m
7294 +CONFIG_SND_SOC_TDA7419=m
7295 +CONFIG_SND_SOC_TFA9879=m
7296 +CONFIG_SND_SOC_TLV320AIC23=m
7297 +CONFIG_SND_SOC_TLV320AIC23_I2C=m
7298 +CONFIG_SND_SOC_TLV320AIC23_SPI=m
7299 +CONFIG_SND_SOC_TLV320AIC31XX=m
7300 +CONFIG_SND_SOC_TLV320AIC32X4=m
7301 +CONFIG_SND_SOC_TLV320AIC32X4_I2C=m
7302 +CONFIG_SND_SOC_TLV320AIC32X4_SPI=m
7303 +CONFIG_SND_SOC_TLV320AIC3X=m
7304 +CONFIG_SND_SOC_TLV320ADCX140=m
7305 +CONFIG_SND_SOC_TS3A227E=m
7306 +CONFIG_SND_SOC_TSCS42XX=m
7307 +CONFIG_SND_SOC_TSCS454=m
7308 +CONFIG_SND_SOC_UDA1334=m
7309 +CONFIG_SND_SOC_WCD9335=m
7310 +CONFIG_SND_SOC_WCD934X=m
7311 +CONFIG_SND_SOC_WM5102=m
7312 +CONFIG_SND_SOC_WM8510=m
7313 +CONFIG_SND_SOC_WM8523=m
7314 +CONFIG_SND_SOC_WM8524=m
7315 +CONFIG_SND_SOC_WM8580=m
7316 +CONFIG_SND_SOC_WM8711=m
7317 +CONFIG_SND_SOC_WM8728=m
7318 +CONFIG_SND_SOC_WM8731=m
7319 +CONFIG_SND_SOC_WM8737=m
7320 +CONFIG_SND_SOC_WM8741=m
7321 +CONFIG_SND_SOC_WM8750=m
7322 +CONFIG_SND_SOC_WM8753=m
7323 +CONFIG_SND_SOC_WM8770=m
7324 +CONFIG_SND_SOC_WM8776=m
7325 +CONFIG_SND_SOC_WM8782=m
7326 +CONFIG_SND_SOC_WM8804=m
7327 +CONFIG_SND_SOC_WM8804_I2C=m
7328 +CONFIG_SND_SOC_WM8804_SPI=m
7329 +CONFIG_SND_SOC_WM8903=m
7330 +CONFIG_SND_SOC_WM8904=m
7331 +CONFIG_SND_SOC_WM8960=m
7332 +CONFIG_SND_SOC_WM8962=m
7333 +CONFIG_SND_SOC_WM8974=m
7334 +CONFIG_SND_SOC_WM8978=m
7335 +CONFIG_SND_SOC_WM8985=m
7336 +CONFIG_SND_SOC_WSA881X=m
7337 +CONFIG_SND_SOC_ZL38060=m
7338 +CONFIG_SND_SOC_ZX_AUD96P22=m
7339 +CONFIG_SND_SOC_MAX9759=m
7340 +CONFIG_SND_SOC_MT6351=m
7341 +CONFIG_SND_SOC_MT6358=m
7342 +CONFIG_SND_SOC_MT6660=m
7343 +CONFIG_SND_SOC_NAU8315=m
7344 +CONFIG_SND_SOC_NAU8540=m
7345 +CONFIG_SND_SOC_NAU8810=m
7346 +CONFIG_SND_SOC_NAU8822=m
7347 +CONFIG_SND_SOC_NAU8824=m
7348 +CONFIG_SND_SOC_NAU8825=m
7349 +CONFIG_SND_SOC_TPA6130A2=m
7350 +CONFIG_SND_SOC_LPASS_WSA_MACRO=m
7351 +CONFIG_SND_SOC_LPASS_VA_MACRO=m
7352 +CONFIG_SND_SOC_LPASS_RX_MACRO=m
7353 +CONFIG_SND_SOC_LPASS_TX_MACRO=m
7354 +# end of CODEC drivers
7356 +CONFIG_SND_SIMPLE_CARD_UTILS=m
7357 +CONFIG_SND_SIMPLE_CARD=m
7358 +CONFIG_SND_X86=y
7359 +CONFIG_HDMI_LPE_AUDIO=m
7360 +CONFIG_SND_SYNTH_EMUX=m
7361 +CONFIG_SND_XEN_FRONTEND=m
7362 +CONFIG_AC97_BUS=m
7365 +# HID support
7367 +CONFIG_HID=m
7368 +CONFIG_HID_BATTERY_STRENGTH=y
7369 +CONFIG_HIDRAW=y
7370 +CONFIG_UHID=m
7371 +CONFIG_HID_GENERIC=m
7374 +# Special HID drivers
7376 +CONFIG_HID_A4TECH=m
7377 +CONFIG_HID_ACCUTOUCH=m
7378 +CONFIG_HID_ACRUX=m
7379 +CONFIG_HID_ACRUX_FF=y
7380 +CONFIG_HID_APPLE=m
7381 +CONFIG_HID_APPLEIR=m
7382 +CONFIG_HID_ASUS=m
7383 +CONFIG_HID_AUREAL=m
7384 +CONFIG_HID_BELKIN=m
7385 +CONFIG_HID_BETOP_FF=m
7386 +CONFIG_HID_BIGBEN_FF=m
7387 +CONFIG_HID_CHERRY=m
7388 +CONFIG_HID_CHICONY=m
7389 +CONFIG_HID_CORSAIR=m
7390 +CONFIG_HID_COUGAR=m
7391 +CONFIG_HID_MACALLY=m
7392 +CONFIG_HID_PRODIKEYS=m
7393 +CONFIG_HID_CMEDIA=m
7394 +CONFIG_HID_CP2112=m
7395 +CONFIG_HID_CREATIVE_SB0540=m
7396 +CONFIG_HID_CYPRESS=m
7397 +CONFIG_HID_DRAGONRISE=m
7398 +CONFIG_DRAGONRISE_FF=y
7399 +CONFIG_HID_EMS_FF=m
7400 +CONFIG_HID_ELAN=m
7401 +CONFIG_HID_ELECOM=m
7402 +CONFIG_HID_ELO=m
7403 +CONFIG_HID_EZKEY=m
7404 +CONFIG_HID_GEMBIRD=m
7405 +CONFIG_HID_GFRM=m
7406 +CONFIG_HID_GLORIOUS=m
7407 +CONFIG_HID_HOLTEK=m
7408 +CONFIG_HOLTEK_FF=y
7409 +CONFIG_HID_GOOGLE_HAMMER=m
7410 +CONFIG_HID_VIVALDI=m
7411 +CONFIG_HID_GT683R=m
7412 +CONFIG_HID_KEYTOUCH=m
7413 +CONFIG_HID_KYE=m
7414 +CONFIG_HID_UCLOGIC=m
7415 +CONFIG_HID_WALTOP=m
7416 +CONFIG_HID_VIEWSONIC=m
7417 +CONFIG_HID_GYRATION=m
7418 +CONFIG_HID_ICADE=m
7419 +CONFIG_HID_ITE=m
7420 +CONFIG_HID_JABRA=m
7421 +CONFIG_HID_TWINHAN=m
7422 +CONFIG_HID_KENSINGTON=m
7423 +CONFIG_HID_LCPOWER=m
7424 +CONFIG_HID_LED=m
7425 +CONFIG_HID_LENOVO=m
7426 +CONFIG_HID_LOGITECH=m
7427 +CONFIG_HID_LOGITECH_DJ=m
7428 +CONFIG_HID_LOGITECH_HIDPP=m
7429 +CONFIG_LOGITECH_FF=y
7430 +CONFIG_LOGIRUMBLEPAD2_FF=y
7431 +CONFIG_LOGIG940_FF=y
7432 +CONFIG_LOGIWHEELS_FF=y
7433 +CONFIG_HID_MAGICMOUSE=m
7434 +CONFIG_HID_MALTRON=m
7435 +CONFIG_HID_MAYFLASH=m
7436 +CONFIG_HID_REDRAGON=m
7437 +CONFIG_HID_MICROSOFT=m
7438 +CONFIG_HID_MONTEREY=m
7439 +CONFIG_HID_MULTITOUCH=m
7440 +CONFIG_HID_NTI=m
7441 +CONFIG_HID_NTRIG=m
7442 +CONFIG_HID_ORTEK=m
7443 +CONFIG_HID_PANTHERLORD=m
7444 +CONFIG_PANTHERLORD_FF=y
7445 +CONFIG_HID_PENMOUNT=m
7446 +CONFIG_HID_PETALYNX=m
7447 +CONFIG_HID_PICOLCD=m
7448 +CONFIG_HID_PICOLCD_FB=y
7449 +CONFIG_HID_PICOLCD_BACKLIGHT=y
7450 +CONFIG_HID_PICOLCD_LCD=y
7451 +CONFIG_HID_PICOLCD_LEDS=y
7452 +CONFIG_HID_PICOLCD_CIR=y
7453 +CONFIG_HID_PLANTRONICS=m
7454 +CONFIG_HID_PLAYSTATION=m
7455 +CONFIG_PLAYSTATION_FF=y
7456 +CONFIG_HID_PRIMAX=m
7457 +CONFIG_HID_RETRODE=m
7458 +CONFIG_HID_ROCCAT=m
7459 +CONFIG_HID_SAITEK=m
7460 +CONFIG_HID_SAMSUNG=m
7461 +CONFIG_HID_SONY=m
7462 +CONFIG_SONY_FF=y
7463 +CONFIG_HID_SPEEDLINK=m
7464 +CONFIG_HID_STEAM=m
7465 +CONFIG_HID_STEELSERIES=m
7466 +CONFIG_HID_SUNPLUS=m
7467 +CONFIG_HID_RMI=m
7468 +CONFIG_HID_GREENASIA=m
7469 +CONFIG_GREENASIA_FF=y
7470 +CONFIG_HID_HYPERV_MOUSE=m
7471 +CONFIG_HID_SMARTJOYPLUS=m
7472 +CONFIG_SMARTJOYPLUS_FF=y
7473 +CONFIG_HID_TIVO=m
7474 +CONFIG_HID_TOPSEED=m
7475 +CONFIG_HID_THINGM=m
7476 +CONFIG_HID_THRUSTMASTER=m
7477 +CONFIG_THRUSTMASTER_FF=y
7478 +CONFIG_HID_UDRAW_PS3=m
7479 +CONFIG_HID_U2FZERO=m
7480 +CONFIG_HID_WACOM=m
7481 +CONFIG_HID_WIIMOTE=m
7482 +CONFIG_HID_XINMO=m
7483 +CONFIG_HID_ZEROPLUS=m
7484 +CONFIG_ZEROPLUS_FF=y
7485 +CONFIG_HID_ZYDACRON=m
7486 +CONFIG_HID_SENSOR_HUB=m
7487 +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
7488 +CONFIG_HID_ALPS=m
7489 +CONFIG_HID_MCP2221=m
7490 +# end of Special HID drivers
7493 +# USB HID support
7495 +CONFIG_USB_HID=m
7496 +CONFIG_HID_PID=y
7497 +CONFIG_USB_HIDDEV=y
7500 +# USB HID Boot Protocol drivers
7502 +CONFIG_USB_KBD=m
7503 +CONFIG_USB_MOUSE=m
7504 +# end of USB HID Boot Protocol drivers
7505 +# end of USB HID support
7508 +# I2C HID support
7510 +CONFIG_I2C_HID_ACPI=m
7511 +# end of I2C HID support
7513 +CONFIG_I2C_HID_CORE=m
7516 +# Intel ISH HID support
7518 +CONFIG_INTEL_ISH_HID=m
7519 +CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER=m
7520 +# end of Intel ISH HID support
7523 +# AMD SFH HID Support
7525 +CONFIG_AMD_SFH_HID=m
7526 +# end of AMD SFH HID Support
7527 +# end of HID support
7529 +CONFIG_USB_OHCI_LITTLE_ENDIAN=y
7530 +CONFIG_USB_SUPPORT=y
7531 +CONFIG_USB_COMMON=y
7532 +CONFIG_USB_LED_TRIG=y
7533 +CONFIG_USB_ULPI_BUS=m
7534 +CONFIG_USB_CONN_GPIO=m
7535 +CONFIG_USB_ARCH_HAS_HCD=y
7536 +CONFIG_USB=y
7537 +CONFIG_USB_PCI=y
7538 +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
7541 +# Miscellaneous USB options
7543 +CONFIG_USB_DEFAULT_PERSIST=y
7544 +# CONFIG_USB_FEW_INIT_RETRIES is not set
7545 +CONFIG_USB_DYNAMIC_MINORS=y
7546 +# CONFIG_USB_OTG is not set
7547 +# CONFIG_USB_OTG_PRODUCTLIST is not set
7548 +# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set
7549 +CONFIG_USB_LEDS_TRIGGER_USBPORT=m
7550 +CONFIG_USB_AUTOSUSPEND_DELAY=2
7551 +CONFIG_USB_MON=m
7554 +# USB Host Controller Drivers
7556 +CONFIG_USB_C67X00_HCD=m
7557 +CONFIG_USB_XHCI_HCD=y
7558 +CONFIG_USB_XHCI_DBGCAP=y
7559 +CONFIG_USB_XHCI_PCI=m
7560 +CONFIG_USB_XHCI_PCI_RENESAS=m
7561 +CONFIG_USB_XHCI_PLATFORM=m
7562 +CONFIG_USB_EHCI_HCD=y
7563 +CONFIG_USB_EHCI_ROOT_HUB_TT=y
7564 +CONFIG_USB_EHCI_TT_NEWSCHED=y
7565 +CONFIG_USB_EHCI_PCI=y
7566 +CONFIG_USB_EHCI_FSL=m
7567 +CONFIG_USB_EHCI_HCD_PLATFORM=y
7568 +CONFIG_USB_OXU210HP_HCD=m
7569 +CONFIG_USB_ISP116X_HCD=m
7570 +CONFIG_USB_FOTG210_HCD=m
7571 +CONFIG_USB_MAX3421_HCD=m
7572 +CONFIG_USB_OHCI_HCD=y
7573 +CONFIG_USB_OHCI_HCD_PCI=y
7574 +CONFIG_USB_OHCI_HCD_PLATFORM=y
7575 +CONFIG_USB_UHCI_HCD=y
7576 +CONFIG_USB_U132_HCD=m
7577 +CONFIG_USB_SL811_HCD=m
7578 +CONFIG_USB_SL811_HCD_ISO=y
7579 +CONFIG_USB_SL811_CS=m
7580 +CONFIG_USB_R8A66597_HCD=m
7581 +CONFIG_USB_HCD_BCMA=m
7582 +CONFIG_USB_HCD_SSB=m
7583 +# CONFIG_USB_HCD_TEST_MODE is not set
7586 +# USB Device Class drivers
7588 +CONFIG_USB_ACM=m
7589 +CONFIG_USB_PRINTER=m
7590 +CONFIG_USB_WDM=m
7591 +CONFIG_USB_TMC=m
7594 +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
7598 +# also be needed; see USB_STORAGE Help for more info
7600 +CONFIG_USB_STORAGE=m
7601 +# CONFIG_USB_STORAGE_DEBUG is not set
7602 +CONFIG_USB_STORAGE_REALTEK=m
7603 +CONFIG_REALTEK_AUTOPM=y
7604 +CONFIG_USB_STORAGE_DATAFAB=m
7605 +CONFIG_USB_STORAGE_FREECOM=m
7606 +CONFIG_USB_STORAGE_ISD200=m
7607 +CONFIG_USB_STORAGE_USBAT=m
7608 +CONFIG_USB_STORAGE_SDDR09=m
7609 +CONFIG_USB_STORAGE_SDDR55=m
7610 +CONFIG_USB_STORAGE_JUMPSHOT=m
7611 +CONFIG_USB_STORAGE_ALAUDA=m
7612 +CONFIG_USB_STORAGE_ONETOUCH=m
7613 +CONFIG_USB_STORAGE_KARMA=m
7614 +CONFIG_USB_STORAGE_CYPRESS_ATACB=m
7615 +CONFIG_USB_STORAGE_ENE_UB6250=m
7616 +CONFIG_USB_UAS=m
7619 +# USB Imaging devices
7621 +CONFIG_USB_MDC800=m
7622 +CONFIG_USB_MICROTEK=m
7623 +CONFIG_USBIP_CORE=m
7624 +CONFIG_USBIP_VHCI_HCD=m
7625 +CONFIG_USBIP_VHCI_HC_PORTS=8
7626 +CONFIG_USBIP_VHCI_NR_HCS=1
7627 +CONFIG_USBIP_HOST=m
7628 +CONFIG_USBIP_VUDC=m
7629 +# CONFIG_USBIP_DEBUG is not set
7630 +CONFIG_USB_CDNS_SUPPORT=m
7631 +CONFIG_USB_CDNS_HOST=y
7632 +CONFIG_USB_CDNS3=m
7633 +CONFIG_USB_CDNS3_GADGET=y
7634 +CONFIG_USB_CDNS3_HOST=y
7635 +CONFIG_USB_CDNS3_PCI_WRAP=m
7636 +CONFIG_USB_CDNSP_PCI=m
7637 +CONFIG_USB_CDNSP_GADGET=y
7638 +CONFIG_USB_CDNSP_HOST=y
7639 +CONFIG_USB_MUSB_HDRC=m
7640 +# CONFIG_USB_MUSB_HOST is not set
7641 +# CONFIG_USB_MUSB_GADGET is not set
7642 +CONFIG_USB_MUSB_DUAL_ROLE=y
7645 +# Platform Glue Layer
7649 +# MUSB DMA mode
7651 +CONFIG_MUSB_PIO_ONLY=y
7652 +CONFIG_USB_DWC3=m
7653 +CONFIG_USB_DWC3_ULPI=y
7654 +# CONFIG_USB_DWC3_HOST is not set
7655 +# CONFIG_USB_DWC3_GADGET is not set
7656 +CONFIG_USB_DWC3_DUAL_ROLE=y
7659 +# Platform Glue Driver Support
7661 +CONFIG_USB_DWC3_PCI=m
7662 +CONFIG_USB_DWC3_HAPS=m
7663 +CONFIG_USB_DWC2=y
7664 +CONFIG_USB_DWC2_HOST=y
7667 +# Gadget/Dual-role mode requires USB Gadget support to be enabled
7669 +CONFIG_USB_DWC2_PCI=m
7670 +# CONFIG_USB_DWC2_DEBUG is not set
7671 +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
7672 +CONFIG_USB_CHIPIDEA=m
7673 +CONFIG_USB_CHIPIDEA_UDC=y
7674 +CONFIG_USB_CHIPIDEA_HOST=y
7675 +CONFIG_USB_CHIPIDEA_PCI=m
7676 +CONFIG_USB_CHIPIDEA_MSM=m
7677 +CONFIG_USB_CHIPIDEA_GENERIC=m
7678 +CONFIG_USB_ISP1760=m
7679 +CONFIG_USB_ISP1760_HCD=y
7680 +CONFIG_USB_ISP1761_UDC=y
7681 +# CONFIG_USB_ISP1760_HOST_ROLE is not set
7682 +# CONFIG_USB_ISP1760_GADGET_ROLE is not set
7683 +CONFIG_USB_ISP1760_DUAL_ROLE=y
7686 +# USB port drivers
7688 +CONFIG_USB_USS720=m
7689 +CONFIG_USB_SERIAL=m
7690 +CONFIG_USB_SERIAL_GENERIC=y
7691 +CONFIG_USB_SERIAL_SIMPLE=m
7692 +CONFIG_USB_SERIAL_AIRCABLE=m
7693 +CONFIG_USB_SERIAL_ARK3116=m
7694 +CONFIG_USB_SERIAL_BELKIN=m
7695 +CONFIG_USB_SERIAL_CH341=m
7696 +CONFIG_USB_SERIAL_WHITEHEAT=m
7697 +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
7698 +CONFIG_USB_SERIAL_CP210X=m
7699 +CONFIG_USB_SERIAL_CYPRESS_M8=m
7700 +CONFIG_USB_SERIAL_EMPEG=m
7701 +CONFIG_USB_SERIAL_FTDI_SIO=m
7702 +CONFIG_USB_SERIAL_VISOR=m
7703 +CONFIG_USB_SERIAL_IPAQ=m
7704 +CONFIG_USB_SERIAL_IR=m
7705 +CONFIG_USB_SERIAL_EDGEPORT=m
7706 +CONFIG_USB_SERIAL_EDGEPORT_TI=m
7707 +CONFIG_USB_SERIAL_F81232=m
7708 +CONFIG_USB_SERIAL_F8153X=m
7709 +CONFIG_USB_SERIAL_GARMIN=m
7710 +CONFIG_USB_SERIAL_IPW=m
7711 +CONFIG_USB_SERIAL_IUU=m
7712 +CONFIG_USB_SERIAL_KEYSPAN_PDA=m
7713 +CONFIG_USB_SERIAL_KEYSPAN=m
7714 +CONFIG_USB_SERIAL_KLSI=m
7715 +CONFIG_USB_SERIAL_KOBIL_SCT=m
7716 +CONFIG_USB_SERIAL_MCT_U232=m
7717 +CONFIG_USB_SERIAL_METRO=m
7718 +CONFIG_USB_SERIAL_MOS7720=m
7719 +CONFIG_USB_SERIAL_MOS7715_PARPORT=y
7720 +CONFIG_USB_SERIAL_MOS7840=m
7721 +CONFIG_USB_SERIAL_MXUPORT=m
7722 +CONFIG_USB_SERIAL_NAVMAN=m
7723 +CONFIG_USB_SERIAL_PL2303=m
7724 +CONFIG_USB_SERIAL_OTI6858=m
7725 +CONFIG_USB_SERIAL_QCAUX=m
7726 +CONFIG_USB_SERIAL_QUALCOMM=m
7727 +CONFIG_USB_SERIAL_SPCP8X5=m
7728 +CONFIG_USB_SERIAL_SAFE=m
7729 +# CONFIG_USB_SERIAL_SAFE_PADDED is not set
7730 +CONFIG_USB_SERIAL_SIERRAWIRELESS=m
7731 +CONFIG_USB_SERIAL_SYMBOL=m
7732 +CONFIG_USB_SERIAL_TI=m
7733 +CONFIG_USB_SERIAL_CYBERJACK=m
7734 +CONFIG_USB_SERIAL_WWAN=m
7735 +CONFIG_USB_SERIAL_OPTION=m
7736 +CONFIG_USB_SERIAL_OMNINET=m
7737 +CONFIG_USB_SERIAL_OPTICON=m
7738 +CONFIG_USB_SERIAL_XSENS_MT=m
7739 +CONFIG_USB_SERIAL_WISHBONE=m
7740 +CONFIG_USB_SERIAL_SSU100=m
7741 +CONFIG_USB_SERIAL_QT2=m
7742 +CONFIG_USB_SERIAL_UPD78F0730=m
7743 +CONFIG_USB_SERIAL_XR=m
7744 +CONFIG_USB_SERIAL_DEBUG=m
7747 +# USB Miscellaneous drivers
7749 +CONFIG_USB_EMI62=m
7750 +CONFIG_USB_EMI26=m
7751 +CONFIG_USB_ADUTUX=m
7752 +CONFIG_USB_SEVSEG=m
7753 +CONFIG_USB_LEGOTOWER=m
7754 +CONFIG_USB_LCD=m
7755 +CONFIG_USB_CYPRESS_CY7C63=m
7756 +CONFIG_USB_CYTHERM=m
7757 +CONFIG_USB_IDMOUSE=m
7758 +CONFIG_USB_FTDI_ELAN=m
7759 +CONFIG_USB_APPLEDISPLAY=m
7760 +CONFIG_APPLE_MFI_FASTCHARGE=m
7761 +CONFIG_USB_SISUSBVGA=m
7762 +CONFIG_USB_LD=m
7763 +CONFIG_USB_TRANCEVIBRATOR=m
7764 +CONFIG_USB_IOWARRIOR=m
7765 +CONFIG_USB_TEST=m
7766 +CONFIG_USB_EHSET_TEST_FIXTURE=m
7767 +CONFIG_USB_ISIGHTFW=m
7768 +CONFIG_USB_YUREX=m
7769 +CONFIG_USB_EZUSB_FX2=m
7770 +CONFIG_USB_HUB_USB251XB=m
7771 +CONFIG_USB_HSIC_USB3503=m
7772 +CONFIG_USB_HSIC_USB4604=m
7773 +CONFIG_USB_LINK_LAYER_TEST=m
7774 +CONFIG_USB_CHAOSKEY=m
7775 +CONFIG_USB_ATM=m
7776 +CONFIG_USB_SPEEDTOUCH=m
7777 +CONFIG_USB_CXACRU=m
7778 +CONFIG_USB_UEAGLEATM=m
7779 +CONFIG_USB_XUSBATM=m
7782 +# USB Physical Layer drivers
7784 +CONFIG_USB_PHY=y
7785 +CONFIG_NOP_USB_XCEIV=m
7786 +CONFIG_USB_GPIO_VBUS=m
7787 +CONFIG_TAHVO_USB=m
7788 +CONFIG_TAHVO_USB_HOST_BY_DEFAULT=y
7789 +CONFIG_USB_ISP1301=m
7790 +# end of USB Physical Layer drivers
7792 +CONFIG_USB_GADGET=m
7793 +# CONFIG_USB_GADGET_DEBUG is not set
7794 +# CONFIG_USB_GADGET_DEBUG_FILES is not set
7795 +# CONFIG_USB_GADGET_DEBUG_FS is not set
7796 +CONFIG_USB_GADGET_VBUS_DRAW=2
7797 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
7798 +CONFIG_U_SERIAL_CONSOLE=y
7801 +# USB Peripheral Controller
7803 +CONFIG_USB_FOTG210_UDC=m
7804 +CONFIG_USB_GR_UDC=m
7805 +CONFIG_USB_R8A66597=m
7806 +CONFIG_USB_PXA27X=m
7807 +CONFIG_USB_MV_UDC=m
7808 +CONFIG_USB_MV_U3D=m
7809 +CONFIG_USB_SNP_CORE=m
7810 +# CONFIG_USB_M66592 is not set
7811 +CONFIG_USB_BDC_UDC=m
7812 +CONFIG_USB_AMD5536UDC=m
7813 +CONFIG_USB_NET2272=m
7814 +CONFIG_USB_NET2272_DMA=y
7815 +CONFIG_USB_NET2280=m
7816 +CONFIG_USB_GOKU=m
7817 +CONFIG_USB_EG20T=m
7818 +CONFIG_USB_MAX3420_UDC=m
7819 +# CONFIG_USB_DUMMY_HCD is not set
7820 +# end of USB Peripheral Controller
7822 +CONFIG_USB_LIBCOMPOSITE=m
7823 +CONFIG_USB_F_ACM=m
7824 +CONFIG_USB_F_SS_LB=m
7825 +CONFIG_USB_U_SERIAL=m
7826 +CONFIG_USB_U_ETHER=m
7827 +CONFIG_USB_U_AUDIO=m
7828 +CONFIG_USB_F_SERIAL=m
7829 +CONFIG_USB_F_OBEX=m
7830 +CONFIG_USB_F_NCM=m
7831 +CONFIG_USB_F_ECM=m
7832 +CONFIG_USB_F_PHONET=m
7833 +CONFIG_USB_F_EEM=m
7834 +CONFIG_USB_F_SUBSET=m
7835 +CONFIG_USB_F_RNDIS=m
7836 +CONFIG_USB_F_MASS_STORAGE=m
7837 +CONFIG_USB_F_FS=m
7838 +CONFIG_USB_F_UAC1=m
7839 +CONFIG_USB_F_UAC1_LEGACY=m
7840 +CONFIG_USB_F_UAC2=m
7841 +CONFIG_USB_F_UVC=m
7842 +CONFIG_USB_F_MIDI=m
7843 +CONFIG_USB_F_HID=m
7844 +CONFIG_USB_F_PRINTER=m
7845 +CONFIG_USB_F_TCM=m
7846 +CONFIG_USB_CONFIGFS=m
7847 +CONFIG_USB_CONFIGFS_SERIAL=y
7848 +CONFIG_USB_CONFIGFS_ACM=y
7849 +CONFIG_USB_CONFIGFS_OBEX=y
7850 +CONFIG_USB_CONFIGFS_NCM=y
7851 +CONFIG_USB_CONFIGFS_ECM=y
7852 +CONFIG_USB_CONFIGFS_ECM_SUBSET=y
7853 +CONFIG_USB_CONFIGFS_RNDIS=y
7854 +CONFIG_USB_CONFIGFS_EEM=y
7855 +CONFIG_USB_CONFIGFS_PHONET=y
7856 +CONFIG_USB_CONFIGFS_MASS_STORAGE=y
7857 +CONFIG_USB_CONFIGFS_F_LB_SS=y
7858 +CONFIG_USB_CONFIGFS_F_FS=y
7859 +CONFIG_USB_CONFIGFS_F_UAC1=y
7860 +CONFIG_USB_CONFIGFS_F_UAC1_LEGACY=y
7861 +CONFIG_USB_CONFIGFS_F_UAC2=y
7862 +CONFIG_USB_CONFIGFS_F_MIDI=y
7863 +CONFIG_USB_CONFIGFS_F_HID=y
7864 +CONFIG_USB_CONFIGFS_F_UVC=y
7865 +CONFIG_USB_CONFIGFS_F_PRINTER=y
7866 +CONFIG_USB_CONFIGFS_F_TCM=y
7869 +# USB Gadget precomposed configurations
7871 +CONFIG_USB_ZERO=m
7872 +CONFIG_USB_AUDIO=m
7873 +CONFIG_GADGET_UAC1=y
7874 +# CONFIG_GADGET_UAC1_LEGACY is not set
7875 +CONFIG_USB_ETH=m
7876 +CONFIG_USB_ETH_RNDIS=y
7877 +CONFIG_USB_ETH_EEM=y
7878 +CONFIG_USB_G_NCM=m
7879 +CONFIG_USB_GADGETFS=m
7880 +CONFIG_USB_FUNCTIONFS=m
7881 +CONFIG_USB_FUNCTIONFS_ETH=y
7882 +CONFIG_USB_FUNCTIONFS_RNDIS=y
7883 +CONFIG_USB_FUNCTIONFS_GENERIC=y
7884 +CONFIG_USB_MASS_STORAGE=m
7885 +CONFIG_USB_GADGET_TARGET=m
7886 +CONFIG_USB_G_SERIAL=m
7887 +CONFIG_USB_MIDI_GADGET=m
7888 +CONFIG_USB_G_PRINTER=m
7889 +CONFIG_USB_CDC_COMPOSITE=m
7890 +CONFIG_USB_G_NOKIA=m
7891 +CONFIG_USB_G_ACM_MS=m
7892 +# CONFIG_USB_G_MULTI is not set
7893 +CONFIG_USB_G_HID=m
7894 +CONFIG_USB_G_DBGP=m
7895 +# CONFIG_USB_G_DBGP_PRINTK is not set
7896 +CONFIG_USB_G_DBGP_SERIAL=y
7897 +CONFIG_USB_G_WEBCAM=m
7898 +CONFIG_USB_RAW_GADGET=m
7899 +# end of USB Gadget precomposed configurations
7901 +CONFIG_TYPEC=m
7902 +CONFIG_TYPEC_TCPM=m
7903 +CONFIG_TYPEC_TCPCI=m
7904 +CONFIG_TYPEC_RT1711H=m
7905 +CONFIG_TYPEC_MT6360=m
7906 +CONFIG_TYPEC_TCPCI_MAXIM=m
7907 +CONFIG_TYPEC_FUSB302=m
7908 +# CONFIG_TYPEC_WCOVE is not set
7909 +CONFIG_TYPEC_UCSI=m
7910 +CONFIG_UCSI_CCG=m
7911 +CONFIG_UCSI_ACPI=m
7912 +CONFIG_TYPEC_HD3SS3220=m
7913 +CONFIG_TYPEC_TPS6598X=m
7914 +CONFIG_TYPEC_STUSB160X=m
7917 +# USB Type-C Multiplexer/DeMultiplexer Switch support
7919 +CONFIG_TYPEC_MUX_PI3USB30532=m
7920 +CONFIG_TYPEC_MUX_INTEL_PMC=m
7921 +# end of USB Type-C Multiplexer/DeMultiplexer Switch support
7924 +# USB Type-C Alternate Mode drivers
7926 +CONFIG_TYPEC_DP_ALTMODE=m
7927 +CONFIG_TYPEC_NVIDIA_ALTMODE=m
7928 +# end of USB Type-C Alternate Mode drivers
7930 +CONFIG_USB_ROLE_SWITCH=y
7931 +CONFIG_USB_ROLES_INTEL_XHCI=m
7932 +CONFIG_MMC=y
7933 +CONFIG_MMC_BLOCK=m
7934 +CONFIG_MMC_BLOCK_MINORS=8
7935 +CONFIG_SDIO_UART=m
7936 +# CONFIG_MMC_TEST is not set
7937 +CONFIG_MMC_CRYPTO=y
7940 +# MMC/SD/SDIO Host Controller Drivers
7942 +# CONFIG_MMC_DEBUG is not set
7943 +CONFIG_MMC_SDHCI=m
7944 +CONFIG_MMC_SDHCI_IO_ACCESSORS=y
7945 +CONFIG_MMC_SDHCI_PCI=m
7946 +CONFIG_MMC_RICOH_MMC=y
7947 +CONFIG_MMC_SDHCI_ACPI=m
7948 +CONFIG_MMC_SDHCI_PLTFM=m
7949 +CONFIG_MMC_SDHCI_F_SDH30=m
7950 +CONFIG_MMC_WBSD=m
7951 +CONFIG_MMC_ALCOR=m
7952 +CONFIG_MMC_TIFM_SD=m
7953 +CONFIG_MMC_SPI=m
7954 +CONFIG_MMC_SDRICOH_CS=m
7955 +CONFIG_MMC_CB710=m
7956 +CONFIG_MMC_VIA_SDMMC=m
7957 +CONFIG_MMC_VUB300=m
7958 +CONFIG_MMC_USHC=m
7959 +CONFIG_MMC_USDHI6ROL0=m
7960 +CONFIG_MMC_REALTEK_PCI=m
7961 +CONFIG_MMC_REALTEK_USB=m
7962 +CONFIG_MMC_CQHCI=m
7963 +# CONFIG_MMC_HSQ is not set
7964 +CONFIG_MMC_TOSHIBA_PCI=m
7965 +CONFIG_MMC_MTK=m
7966 +CONFIG_MMC_SDHCI_XENON=m
7967 +CONFIG_MEMSTICK=m
7968 +# CONFIG_MEMSTICK_DEBUG is not set
7971 +# MemoryStick drivers
7973 +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
7974 +CONFIG_MSPRO_BLOCK=m
7975 +CONFIG_MS_BLOCK=m
7978 +# MemoryStick Host Controller Drivers
7980 +CONFIG_MEMSTICK_TIFM_MS=m
7981 +CONFIG_MEMSTICK_JMICRON_38X=m
7982 +CONFIG_MEMSTICK_R592=m
7983 +CONFIG_MEMSTICK_REALTEK_PCI=m
7984 +CONFIG_MEMSTICK_REALTEK_USB=m
7985 +CONFIG_NEW_LEDS=y
7986 +CONFIG_LEDS_CLASS=y
7987 +CONFIG_LEDS_CLASS_FLASH=m
7988 +CONFIG_LEDS_CLASS_MULTICOLOR=m
7989 +CONFIG_LEDS_BRIGHTNESS_HW_CHANGED=y
7992 +# LED drivers
7994 +CONFIG_LEDS_88PM860X=m
7995 +CONFIG_LEDS_APU=m
7996 +CONFIG_LEDS_AS3645A=m
7997 +CONFIG_LEDS_LM3530=m
7998 +CONFIG_LEDS_LM3532=m
7999 +CONFIG_LEDS_LM3533=m
8000 +CONFIG_LEDS_LM3642=m
8001 +CONFIG_LEDS_LM3601X=m
8002 +CONFIG_LEDS_MT6323=m
8003 +CONFIG_LEDS_PCA9532=m
8004 +CONFIG_LEDS_PCA9532_GPIO=y
8005 +CONFIG_LEDS_GPIO=m
8006 +CONFIG_LEDS_LP3944=m
8007 +CONFIG_LEDS_LP3952=m
8008 +CONFIG_LEDS_LP50XX=m
8009 +CONFIG_LEDS_LP8788=m
8010 +CONFIG_LEDS_CLEVO_MAIL=m
8011 +CONFIG_LEDS_PCA955X=m
8012 +CONFIG_LEDS_PCA955X_GPIO=y
8013 +CONFIG_LEDS_PCA963X=m
8014 +CONFIG_LEDS_WM831X_STATUS=m
8015 +CONFIG_LEDS_WM8350=m
8016 +CONFIG_LEDS_DA903X=m
8017 +CONFIG_LEDS_DA9052=m
8018 +CONFIG_LEDS_DAC124S085=m
8019 +CONFIG_LEDS_PWM=m
8020 +CONFIG_LEDS_REGULATOR=m
8021 +CONFIG_LEDS_BD2802=m
8022 +CONFIG_LEDS_INTEL_SS4200=m
8023 +CONFIG_LEDS_ADP5520=m
8024 +CONFIG_LEDS_MC13783=m
8025 +CONFIG_LEDS_TCA6507=m
8026 +CONFIG_LEDS_TLC591XX=m
8027 +CONFIG_LEDS_MAX8997=m
8028 +CONFIG_LEDS_LM355x=m
8029 +CONFIG_LEDS_MENF21BMC=m
8032 +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
8034 +CONFIG_LEDS_BLINKM=m
8035 +CONFIG_LEDS_MLXCPLD=m
8036 +CONFIG_LEDS_MLXREG=m
8037 +CONFIG_LEDS_USER=m
8038 +CONFIG_LEDS_NIC78BX=m
8039 +CONFIG_LEDS_TI_LMU_COMMON=m
8040 +CONFIG_LEDS_LM36274=m
8041 +CONFIG_LEDS_TPS6105X=m
8042 +CONFIG_LEDS_SGM3140=m
8045 +# Flash and Torch LED drivers
8047 +CONFIG_LEDS_RT8515=m
8050 +# LED Triggers
8052 +CONFIG_LEDS_TRIGGERS=y
8053 +CONFIG_LEDS_TRIGGER_TIMER=m
8054 +CONFIG_LEDS_TRIGGER_ONESHOT=m
8055 +CONFIG_LEDS_TRIGGER_DISK=y
8056 +CONFIG_LEDS_TRIGGER_MTD=y
8057 +CONFIG_LEDS_TRIGGER_HEARTBEAT=m
8058 +CONFIG_LEDS_TRIGGER_BACKLIGHT=m
8059 +CONFIG_LEDS_TRIGGER_CPU=y
8060 +CONFIG_LEDS_TRIGGER_ACTIVITY=m
8061 +CONFIG_LEDS_TRIGGER_GPIO=m
8062 +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
8065 +# iptables trigger is under Netfilter config (LED target)
8067 +CONFIG_LEDS_TRIGGER_TRANSIENT=m
8068 +CONFIG_LEDS_TRIGGER_CAMERA=m
8069 +CONFIG_LEDS_TRIGGER_PANIC=y
8070 +CONFIG_LEDS_TRIGGER_NETDEV=m
8071 +CONFIG_LEDS_TRIGGER_PATTERN=m
8072 +CONFIG_LEDS_TRIGGER_AUDIO=m
8073 +CONFIG_LEDS_TRIGGER_TTY=m
8076 +# LED Blink
8078 +CONFIG_LEDS_BLINK=y
8079 +# CONFIG_ACCESSIBILITY is not set
8080 +CONFIG_INFINIBAND=m
8081 +CONFIG_INFINIBAND_USER_MAD=m
8082 +CONFIG_INFINIBAND_USER_ACCESS=m
8083 +CONFIG_INFINIBAND_USER_MEM=y
8084 +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
8085 +CONFIG_INFINIBAND_ADDR_TRANS=y
8086 +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
8087 +CONFIG_INFINIBAND_VIRT_DMA=y
8088 +CONFIG_INFINIBAND_MTHCA=m
8089 +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
8090 +CONFIG_INFINIBAND_QIB=m
8091 +CONFIG_INFINIBAND_QIB_DCA=y
8092 +CONFIG_INFINIBAND_CXGB4=m
8093 +CONFIG_INFINIBAND_EFA=m
8094 +CONFIG_INFINIBAND_I40IW=m
8095 +CONFIG_MLX4_INFINIBAND=m
8096 +CONFIG_MLX5_INFINIBAND=m
8097 +CONFIG_INFINIBAND_OCRDMA=m
8098 +CONFIG_INFINIBAND_VMWARE_PVRDMA=m
8099 +CONFIG_INFINIBAND_USNIC=m
8100 +CONFIG_INFINIBAND_BNXT_RE=m
8101 +CONFIG_INFINIBAND_HFI1=m
8102 +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
8103 +# CONFIG_SDMA_VERBOSITY is not set
8104 +CONFIG_INFINIBAND_QEDR=m
8105 +CONFIG_INFINIBAND_RDMAVT=m
8106 +CONFIG_RDMA_RXE=m
8107 +CONFIG_RDMA_SIW=m
8108 +CONFIG_INFINIBAND_IPOIB=m
8109 +CONFIG_INFINIBAND_IPOIB_CM=y
8110 +# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
8111 +CONFIG_INFINIBAND_SRP=m
8112 +CONFIG_INFINIBAND_SRPT=m
8113 +CONFIG_INFINIBAND_ISER=m
8114 +CONFIG_INFINIBAND_ISERT=m
8115 +CONFIG_INFINIBAND_RTRS=m
8116 +CONFIG_INFINIBAND_RTRS_CLIENT=m
8117 +CONFIG_INFINIBAND_RTRS_SERVER=m
8118 +CONFIG_INFINIBAND_OPA_VNIC=m
8119 +CONFIG_EDAC_ATOMIC_SCRUB=y
8120 +CONFIG_EDAC_SUPPORT=y
8121 +CONFIG_EDAC=y
8122 +# CONFIG_EDAC_LEGACY_SYSFS is not set
8123 +# CONFIG_EDAC_DEBUG is not set
8124 +CONFIG_EDAC_DECODE_MCE=m
8125 +CONFIG_EDAC_GHES=y
8126 +CONFIG_EDAC_AMD64=m
8127 +CONFIG_EDAC_E752X=m
8128 +CONFIG_EDAC_I82975X=m
8129 +CONFIG_EDAC_I3000=m
8130 +CONFIG_EDAC_I3200=m
8131 +CONFIG_EDAC_IE31200=m
8132 +CONFIG_EDAC_X38=m
8133 +CONFIG_EDAC_I5400=m
8134 +CONFIG_EDAC_I7CORE=m
8135 +CONFIG_EDAC_I5000=m
8136 +CONFIG_EDAC_I5100=m
8137 +CONFIG_EDAC_I7300=m
8138 +CONFIG_EDAC_SBRIDGE=m
8139 +CONFIG_EDAC_SKX=m
8140 +CONFIG_EDAC_I10NM=m
8141 +CONFIG_EDAC_PND2=m
8142 +CONFIG_EDAC_IGEN6=m
8143 +CONFIG_RTC_LIB=y
8144 +CONFIG_RTC_MC146818_LIB=y
8145 +CONFIG_RTC_CLASS=y
8146 +CONFIG_RTC_HCTOSYS=y
8147 +CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
8148 +CONFIG_RTC_SYSTOHC=y
8149 +CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
8150 +# CONFIG_RTC_DEBUG is not set
8151 +CONFIG_RTC_NVMEM=y
8154 +# RTC interfaces
8156 +CONFIG_RTC_INTF_SYSFS=y
8157 +CONFIG_RTC_INTF_PROC=y
8158 +CONFIG_RTC_INTF_DEV=y
8159 +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
8160 +# CONFIG_RTC_DRV_TEST is not set
8163 +# I2C RTC drivers
8165 +CONFIG_RTC_DRV_88PM860X=m
8166 +CONFIG_RTC_DRV_88PM80X=m
8167 +CONFIG_RTC_DRV_ABB5ZES3=m
8168 +CONFIG_RTC_DRV_ABEOZ9=m
8169 +CONFIG_RTC_DRV_ABX80X=m
8170 +CONFIG_RTC_DRV_DS1307=m
8171 +CONFIG_RTC_DRV_DS1307_CENTURY=y
8172 +CONFIG_RTC_DRV_DS1374=m
8173 +CONFIG_RTC_DRV_DS1374_WDT=y
8174 +CONFIG_RTC_DRV_DS1672=m
8175 +CONFIG_RTC_DRV_LP8788=m
8176 +CONFIG_RTC_DRV_MAX6900=m
8177 +CONFIG_RTC_DRV_MAX8907=m
8178 +CONFIG_RTC_DRV_MAX8925=m
8179 +CONFIG_RTC_DRV_MAX8998=m
8180 +CONFIG_RTC_DRV_MAX8997=m
8181 +CONFIG_RTC_DRV_RS5C372=m
8182 +CONFIG_RTC_DRV_ISL1208=m
8183 +CONFIG_RTC_DRV_ISL12022=m
8184 +CONFIG_RTC_DRV_X1205=m
8185 +CONFIG_RTC_DRV_PCF8523=m
8186 +CONFIG_RTC_DRV_PCF85063=m
8187 +CONFIG_RTC_DRV_PCF85363=m
8188 +CONFIG_RTC_DRV_PCF8563=m
8189 +CONFIG_RTC_DRV_PCF8583=m
8190 +CONFIG_RTC_DRV_M41T80=m
8191 +CONFIG_RTC_DRV_M41T80_WDT=y
8192 +CONFIG_RTC_DRV_BQ32K=m
8193 +CONFIG_RTC_DRV_PALMAS=m
8194 +CONFIG_RTC_DRV_TPS6586X=m
8195 +CONFIG_RTC_DRV_TPS65910=m
8196 +CONFIG_RTC_DRV_TPS80031=m
8197 +CONFIG_RTC_DRV_RC5T583=m
8198 +CONFIG_RTC_DRV_S35390A=m
8199 +CONFIG_RTC_DRV_FM3130=m
8200 +CONFIG_RTC_DRV_RX8010=m
8201 +CONFIG_RTC_DRV_RX8581=m
8202 +CONFIG_RTC_DRV_RX8025=m
8203 +CONFIG_RTC_DRV_EM3027=m
8204 +CONFIG_RTC_DRV_RV3028=m
8205 +CONFIG_RTC_DRV_RV3032=m
8206 +CONFIG_RTC_DRV_RV8803=m
8207 +CONFIG_RTC_DRV_S5M=m
8208 +CONFIG_RTC_DRV_SD3078=m
8211 +# SPI RTC drivers
8213 +CONFIG_RTC_DRV_M41T93=m
8214 +CONFIG_RTC_DRV_M41T94=m
8215 +CONFIG_RTC_DRV_DS1302=m
8216 +CONFIG_RTC_DRV_DS1305=m
8217 +CONFIG_RTC_DRV_DS1343=m
8218 +CONFIG_RTC_DRV_DS1347=m
8219 +CONFIG_RTC_DRV_DS1390=m
8220 +CONFIG_RTC_DRV_MAX6916=m
8221 +CONFIG_RTC_DRV_R9701=m
8222 +CONFIG_RTC_DRV_RX4581=m
8223 +CONFIG_RTC_DRV_RS5C348=m
8224 +CONFIG_RTC_DRV_MAX6902=m
8225 +CONFIG_RTC_DRV_PCF2123=m
8226 +CONFIG_RTC_DRV_MCP795=m
8227 +CONFIG_RTC_I2C_AND_SPI=y
8230 +# SPI and I2C RTC drivers
8232 +CONFIG_RTC_DRV_DS3232=m
8233 +CONFIG_RTC_DRV_DS3232_HWMON=y
8234 +CONFIG_RTC_DRV_PCF2127=m
8235 +CONFIG_RTC_DRV_RV3029C2=m
8236 +CONFIG_RTC_DRV_RV3029_HWMON=y
8237 +CONFIG_RTC_DRV_RX6110=m
8240 +# Platform RTC drivers
8242 +CONFIG_RTC_DRV_CMOS=y
8243 +CONFIG_RTC_DRV_DS1286=m
8244 +CONFIG_RTC_DRV_DS1511=m
8245 +CONFIG_RTC_DRV_DS1553=m
8246 +CONFIG_RTC_DRV_DS1685_FAMILY=m
8247 +CONFIG_RTC_DRV_DS1685=y
8248 +# CONFIG_RTC_DRV_DS1689 is not set
8249 +# CONFIG_RTC_DRV_DS17285 is not set
8250 +# CONFIG_RTC_DRV_DS17485 is not set
8251 +# CONFIG_RTC_DRV_DS17885 is not set
8252 +CONFIG_RTC_DRV_DS1742=m
8253 +CONFIG_RTC_DRV_DS2404=m
8254 +CONFIG_RTC_DRV_DA9052=m
8255 +CONFIG_RTC_DRV_DA9055=m
8256 +CONFIG_RTC_DRV_DA9063=m
8257 +CONFIG_RTC_DRV_STK17TA8=m
8258 +CONFIG_RTC_DRV_M48T86=m
8259 +CONFIG_RTC_DRV_M48T35=m
8260 +CONFIG_RTC_DRV_M48T59=m
8261 +CONFIG_RTC_DRV_MSM6242=m
8262 +CONFIG_RTC_DRV_BQ4802=m
8263 +CONFIG_RTC_DRV_RP5C01=m
8264 +CONFIG_RTC_DRV_V3020=m
8265 +CONFIG_RTC_DRV_WM831X=m
8266 +CONFIG_RTC_DRV_WM8350=m
8267 +CONFIG_RTC_DRV_PCF50633=m
8268 +CONFIG_RTC_DRV_CROS_EC=m
8271 +# on-CPU RTC drivers
8273 +CONFIG_RTC_DRV_FTRTC010=m
8274 +CONFIG_RTC_DRV_PCAP=m
8275 +CONFIG_RTC_DRV_MC13XXX=m
8276 +CONFIG_RTC_DRV_MT6397=m
8279 +# HID Sensor RTC drivers
8281 +CONFIG_RTC_DRV_HID_SENSOR_TIME=m
8282 +CONFIG_RTC_DRV_WILCO_EC=m
8283 +CONFIG_DMADEVICES=y
8284 +# CONFIG_DMADEVICES_DEBUG is not set
8287 +# DMA Devices
8289 +CONFIG_DMA_ENGINE=y
8290 +CONFIG_DMA_VIRTUAL_CHANNELS=y
8291 +CONFIG_DMA_ACPI=y
8292 +CONFIG_ALTERA_MSGDMA=m
8293 +CONFIG_INTEL_IDMA64=m
8294 +CONFIG_INTEL_IDXD=m
8295 +CONFIG_INTEL_IDXD_SVM=y
8296 +CONFIG_INTEL_IOATDMA=m
8297 +CONFIG_PLX_DMA=m
8298 +CONFIG_XILINX_ZYNQMP_DPDMA=m
8299 +CONFIG_QCOM_HIDMA_MGMT=m
8300 +CONFIG_QCOM_HIDMA=m
8301 +CONFIG_DW_DMAC_CORE=m
8302 +CONFIG_DW_DMAC=m
8303 +CONFIG_DW_DMAC_PCI=m
8304 +CONFIG_DW_EDMA=m
8305 +CONFIG_DW_EDMA_PCIE=m
8306 +CONFIG_HSU_DMA=m
8307 +CONFIG_SF_PDMA=m
8308 +CONFIG_INTEL_LDMA=y
8311 +# DMA Clients
8313 +CONFIG_ASYNC_TX_DMA=y
8314 +# CONFIG_DMATEST is not set
8315 +CONFIG_DMA_ENGINE_RAID=y
8318 +# DMABUF options
8320 +CONFIG_SYNC_FILE=y
8321 +CONFIG_SW_SYNC=y
8322 +CONFIG_UDMABUF=y
8323 +# CONFIG_DMABUF_MOVE_NOTIFY is not set
8324 +# CONFIG_DMABUF_DEBUG is not set
8325 +# CONFIG_DMABUF_SELFTESTS is not set
8326 +CONFIG_DMABUF_HEAPS=y
8327 +CONFIG_DMABUF_HEAPS_SYSTEM=y
8328 +# end of DMABUF options
8330 +CONFIG_DCA=m
8331 +CONFIG_AUXDISPLAY=y
8332 +CONFIG_CHARLCD=m
8333 +CONFIG_HD44780_COMMON=m
8334 +CONFIG_HD44780=m
8335 +CONFIG_KS0108=m
8336 +CONFIG_KS0108_PORT=0x378
8337 +CONFIG_KS0108_DELAY=2
8338 +CONFIG_CFAG12864B=m
8339 +CONFIG_CFAG12864B_RATE=20
8340 +CONFIG_IMG_ASCII_LCD=m
8341 +CONFIG_LCD2S=m
8342 +CONFIG_PARPORT_PANEL=m
8343 +CONFIG_PANEL_PARPORT=0
8344 +CONFIG_PANEL_PROFILE=5
8345 +# CONFIG_PANEL_CHANGE_MESSAGE is not set
8346 +# CONFIG_CHARLCD_BL_OFF is not set
8347 +# CONFIG_CHARLCD_BL_ON is not set
8348 +CONFIG_CHARLCD_BL_FLASH=y
8349 +CONFIG_PANEL=m
8350 +CONFIG_UIO=m
8351 +CONFIG_UIO_CIF=m
8352 +CONFIG_UIO_PDRV_GENIRQ=m
8353 +CONFIG_UIO_DMEM_GENIRQ=m
8354 +CONFIG_UIO_AEC=m
8355 +CONFIG_UIO_SERCOS3=m
8356 +CONFIG_UIO_PCI_GENERIC=m
8357 +CONFIG_UIO_NETX=m
8358 +CONFIG_UIO_PRUSS=m
8359 +CONFIG_UIO_MF624=m
8360 +CONFIG_UIO_HV_GENERIC=m
8361 +CONFIG_VFIO_IOMMU_TYPE1=y
8362 +CONFIG_VFIO_VIRQFD=y
8363 +CONFIG_VFIO=y
8364 +CONFIG_VFIO_NOIOMMU=y
8365 +CONFIG_VFIO_PCI=y
8366 +CONFIG_VFIO_PCI_VGA=y
8367 +CONFIG_VFIO_PCI_MMAP=y
8368 +CONFIG_VFIO_PCI_INTX=y
8369 +CONFIG_VFIO_PCI_IGD=y
8370 +CONFIG_VFIO_MDEV=m
8371 +CONFIG_VFIO_MDEV_DEVICE=m
8372 +CONFIG_IRQ_BYPASS_MANAGER=y
8373 +CONFIG_VIRT_DRIVERS=y
8374 +CONFIG_VBOXGUEST=m
8375 +CONFIG_NITRO_ENCLAVES=m
8376 +CONFIG_ACRN_HSM=m
8377 +CONFIG_VIRTIO=y
8378 +CONFIG_VIRTIO_PCI_LIB=y
8379 +CONFIG_VIRTIO_MENU=y
8380 +CONFIG_VIRTIO_PCI=y
8381 +CONFIG_VIRTIO_PCI_LEGACY=y
8382 +CONFIG_VIRTIO_VDPA=m
8383 +CONFIG_VIRTIO_PMEM=m
8384 +CONFIG_VIRTIO_BALLOON=y
8385 +CONFIG_VIRTIO_MEM=m
8386 +CONFIG_VIRTIO_INPUT=m
8387 +CONFIG_VIRTIO_MMIO=y
8388 +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
8389 +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
8390 +CONFIG_VDPA=m
8391 +CONFIG_VDPA_SIM=m
8392 +CONFIG_VDPA_SIM_NET=m
8393 +CONFIG_IFCVF=m
8394 +CONFIG_MLX5_VDPA=y
8395 +CONFIG_MLX5_VDPA_NET=m
8396 +CONFIG_VHOST_IOTLB=m
8397 +CONFIG_VHOST_RING=m
8398 +CONFIG_VHOST=m
8399 +CONFIG_VHOST_MENU=y
8400 +CONFIG_VHOST_NET=m
8401 +CONFIG_VHOST_SCSI=m
8402 +CONFIG_VHOST_VSOCK=m
8403 +CONFIG_VHOST_VDPA=m
8404 +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
8407 +# Microsoft Hyper-V guest support
8409 +CONFIG_HYPERV=m
8410 +CONFIG_HYPERV_TIMER=y
8411 +CONFIG_HYPERV_UTILS=m
8412 +CONFIG_HYPERV_BALLOON=m
8413 +# end of Microsoft Hyper-V guest support
8416 +# Xen driver support
8418 +CONFIG_XEN_BALLOON=y
8419 +CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y
8420 +CONFIG_XEN_MEMORY_HOTPLUG_LIMIT=512
8421 +CONFIG_XEN_SCRUB_PAGES_DEFAULT=y
8422 +CONFIG_XEN_DEV_EVTCHN=m
8423 +CONFIG_XEN_BACKEND=y
8424 +CONFIG_XENFS=m
8425 +CONFIG_XEN_COMPAT_XENFS=y
8426 +CONFIG_XEN_SYS_HYPERVISOR=y
8427 +CONFIG_XEN_XENBUS_FRONTEND=y
8428 +CONFIG_XEN_GNTDEV=m
8429 +CONFIG_XEN_GNTDEV_DMABUF=y
8430 +CONFIG_XEN_GRANT_DEV_ALLOC=m
8431 +CONFIG_XEN_GRANT_DMA_ALLOC=y
8432 +CONFIG_SWIOTLB_XEN=y
8433 +CONFIG_XEN_PCIDEV_BACKEND=m
8434 +CONFIG_XEN_PVCALLS_FRONTEND=m
8435 +# CONFIG_XEN_PVCALLS_BACKEND is not set
8436 +CONFIG_XEN_SCSI_BACKEND=m
8437 +CONFIG_XEN_PRIVCMD=m
8438 +CONFIG_XEN_ACPI_PROCESSOR=y
8439 +CONFIG_XEN_MCE_LOG=y
8440 +CONFIG_XEN_HAVE_PVMMU=y
8441 +CONFIG_XEN_EFI=y
8442 +CONFIG_XEN_AUTO_XLATE=y
8443 +CONFIG_XEN_ACPI=y
8444 +CONFIG_XEN_SYMS=y
8445 +CONFIG_XEN_HAVE_VPMU=y
8446 +CONFIG_XEN_FRONT_PGDIR_SHBUF=m
8447 +CONFIG_XEN_UNPOPULATED_ALLOC=y
8448 +# end of Xen driver support
8450 +CONFIG_GREYBUS=m
8451 +CONFIG_GREYBUS_ES2=m
8452 +CONFIG_STAGING=y
8453 +CONFIG_PRISM2_USB=m
8454 +CONFIG_COMEDI=m
8455 +# CONFIG_COMEDI_DEBUG is not set
8456 +CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
8457 +CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
8458 +CONFIG_COMEDI_MISC_DRIVERS=y
8459 +CONFIG_COMEDI_BOND=m
8460 +CONFIG_COMEDI_TEST=m
8461 +CONFIG_COMEDI_PARPORT=m
8462 +CONFIG_COMEDI_ISA_DRIVERS=y
8463 +CONFIG_COMEDI_PCL711=m
8464 +CONFIG_COMEDI_PCL724=m
8465 +CONFIG_COMEDI_PCL726=m
8466 +CONFIG_COMEDI_PCL730=m
8467 +CONFIG_COMEDI_PCL812=m
8468 +CONFIG_COMEDI_PCL816=m
8469 +CONFIG_COMEDI_PCL818=m
8470 +CONFIG_COMEDI_PCM3724=m
8471 +CONFIG_COMEDI_AMPLC_DIO200_ISA=m
8472 +CONFIG_COMEDI_AMPLC_PC236_ISA=m
8473 +CONFIG_COMEDI_AMPLC_PC263_ISA=m
8474 +CONFIG_COMEDI_RTI800=m
8475 +CONFIG_COMEDI_RTI802=m
8476 +CONFIG_COMEDI_DAC02=m
8477 +CONFIG_COMEDI_DAS16M1=m
8478 +CONFIG_COMEDI_DAS08_ISA=m
8479 +CONFIG_COMEDI_DAS16=m
8480 +CONFIG_COMEDI_DAS800=m
8481 +CONFIG_COMEDI_DAS1800=m
8482 +CONFIG_COMEDI_DAS6402=m
8483 +CONFIG_COMEDI_DT2801=m
8484 +CONFIG_COMEDI_DT2811=m
8485 +CONFIG_COMEDI_DT2814=m
8486 +CONFIG_COMEDI_DT2815=m
8487 +CONFIG_COMEDI_DT2817=m
8488 +CONFIG_COMEDI_DT282X=m
8489 +CONFIG_COMEDI_DMM32AT=m
8490 +CONFIG_COMEDI_FL512=m
8491 +CONFIG_COMEDI_AIO_AIO12_8=m
8492 +CONFIG_COMEDI_AIO_IIRO_16=m
8493 +CONFIG_COMEDI_II_PCI20KC=m
8494 +CONFIG_COMEDI_C6XDIGIO=m
8495 +CONFIG_COMEDI_MPC624=m
8496 +CONFIG_COMEDI_ADQ12B=m
8497 +CONFIG_COMEDI_NI_AT_A2150=m
8498 +CONFIG_COMEDI_NI_AT_AO=m
8499 +CONFIG_COMEDI_NI_ATMIO=m
8500 +CONFIG_COMEDI_NI_ATMIO16D=m
8501 +CONFIG_COMEDI_NI_LABPC_ISA=m
8502 +CONFIG_COMEDI_PCMAD=m
8503 +CONFIG_COMEDI_PCMDA12=m
8504 +CONFIG_COMEDI_PCMMIO=m
8505 +CONFIG_COMEDI_PCMUIO=m
8506 +CONFIG_COMEDI_MULTIQ3=m
8507 +CONFIG_COMEDI_S526=m
8508 +CONFIG_COMEDI_PCI_DRIVERS=m
8509 +CONFIG_COMEDI_8255_PCI=m
8510 +CONFIG_COMEDI_ADDI_WATCHDOG=m
8511 +CONFIG_COMEDI_ADDI_APCI_1032=m
8512 +CONFIG_COMEDI_ADDI_APCI_1500=m
8513 +CONFIG_COMEDI_ADDI_APCI_1516=m
8514 +CONFIG_COMEDI_ADDI_APCI_1564=m
8515 +CONFIG_COMEDI_ADDI_APCI_16XX=m
8516 +CONFIG_COMEDI_ADDI_APCI_2032=m
8517 +CONFIG_COMEDI_ADDI_APCI_2200=m
8518 +CONFIG_COMEDI_ADDI_APCI_3120=m
8519 +CONFIG_COMEDI_ADDI_APCI_3501=m
8520 +CONFIG_COMEDI_ADDI_APCI_3XXX=m
8521 +CONFIG_COMEDI_ADL_PCI6208=m
8522 +CONFIG_COMEDI_ADL_PCI7X3X=m
8523 +CONFIG_COMEDI_ADL_PCI8164=m
8524 +CONFIG_COMEDI_ADL_PCI9111=m
8525 +CONFIG_COMEDI_ADL_PCI9118=m
8526 +CONFIG_COMEDI_ADV_PCI1710=m
8527 +CONFIG_COMEDI_ADV_PCI1720=m
8528 +CONFIG_COMEDI_ADV_PCI1723=m
8529 +CONFIG_COMEDI_ADV_PCI1724=m
8530 +CONFIG_COMEDI_ADV_PCI1760=m
8531 +CONFIG_COMEDI_ADV_PCI_DIO=m
8532 +CONFIG_COMEDI_AMPLC_DIO200_PCI=m
8533 +CONFIG_COMEDI_AMPLC_PC236_PCI=m
8534 +CONFIG_COMEDI_AMPLC_PC263_PCI=m
8535 +CONFIG_COMEDI_AMPLC_PCI224=m
8536 +CONFIG_COMEDI_AMPLC_PCI230=m
8537 +CONFIG_COMEDI_CONTEC_PCI_DIO=m
8538 +CONFIG_COMEDI_DAS08_PCI=m
8539 +CONFIG_COMEDI_DT3000=m
8540 +CONFIG_COMEDI_DYNA_PCI10XX=m
8541 +CONFIG_COMEDI_GSC_HPDI=m
8542 +CONFIG_COMEDI_MF6X4=m
8543 +CONFIG_COMEDI_ICP_MULTI=m
8544 +CONFIG_COMEDI_DAQBOARD2000=m
8545 +CONFIG_COMEDI_JR3_PCI=m
8546 +CONFIG_COMEDI_KE_COUNTER=m
8547 +CONFIG_COMEDI_CB_PCIDAS64=m
8548 +CONFIG_COMEDI_CB_PCIDAS=m
8549 +CONFIG_COMEDI_CB_PCIDDA=m
8550 +CONFIG_COMEDI_CB_PCIMDAS=m
8551 +CONFIG_COMEDI_CB_PCIMDDA=m
8552 +CONFIG_COMEDI_ME4000=m
8553 +CONFIG_COMEDI_ME_DAQ=m
8554 +CONFIG_COMEDI_NI_6527=m
8555 +CONFIG_COMEDI_NI_65XX=m
8556 +CONFIG_COMEDI_NI_660X=m
8557 +CONFIG_COMEDI_NI_670X=m
8558 +CONFIG_COMEDI_NI_LABPC_PCI=m
8559 +CONFIG_COMEDI_NI_PCIDIO=m
8560 +CONFIG_COMEDI_NI_PCIMIO=m
8561 +CONFIG_COMEDI_RTD520=m
8562 +CONFIG_COMEDI_S626=m
8563 +CONFIG_COMEDI_MITE=m
8564 +CONFIG_COMEDI_NI_TIOCMD=m
8565 +CONFIG_COMEDI_PCMCIA_DRIVERS=m
8566 +CONFIG_COMEDI_CB_DAS16_CS=m
8567 +CONFIG_COMEDI_DAS08_CS=m
8568 +CONFIG_COMEDI_NI_DAQ_700_CS=m
8569 +CONFIG_COMEDI_NI_DAQ_DIO24_CS=m
8570 +CONFIG_COMEDI_NI_LABPC_CS=m
8571 +CONFIG_COMEDI_NI_MIO_CS=m
8572 +CONFIG_COMEDI_QUATECH_DAQP_CS=m
8573 +CONFIG_COMEDI_USB_DRIVERS=m
8574 +CONFIG_COMEDI_DT9812=m
8575 +CONFIG_COMEDI_NI_USB6501=m
8576 +CONFIG_COMEDI_USBDUX=m
8577 +CONFIG_COMEDI_USBDUXFAST=m
8578 +CONFIG_COMEDI_USBDUXSIGMA=m
8579 +CONFIG_COMEDI_VMK80XX=m
8580 +CONFIG_COMEDI_8254=m
8581 +CONFIG_COMEDI_8255=m
8582 +CONFIG_COMEDI_8255_SA=m
8583 +CONFIG_COMEDI_KCOMEDILIB=m
8584 +CONFIG_COMEDI_AMPLC_DIO200=m
8585 +CONFIG_COMEDI_AMPLC_PC236=m
8586 +CONFIG_COMEDI_DAS08=m
8587 +CONFIG_COMEDI_ISADMA=m
8588 +CONFIG_COMEDI_NI_LABPC=m
8589 +CONFIG_COMEDI_NI_LABPC_ISADMA=m
8590 +CONFIG_COMEDI_NI_TIO=m
8591 +CONFIG_COMEDI_NI_ROUTING=m
8592 +CONFIG_RTL8192U=m
8593 +CONFIG_RTLLIB=m
8594 +CONFIG_RTLLIB_CRYPTO_CCMP=m
8595 +CONFIG_RTLLIB_CRYPTO_TKIP=m
8596 +CONFIG_RTLLIB_CRYPTO_WEP=m
8597 +CONFIG_RTL8192E=m
8598 +CONFIG_RTL8723BS=m
8599 +CONFIG_R8712U=m
8600 +CONFIG_R8188EU=m
8601 +CONFIG_88EU_AP_MODE=y
8602 +CONFIG_RTS5208=m
8603 +CONFIG_VT6655=m
8604 +CONFIG_VT6656=m
8607 +# IIO staging drivers
8611 +# Accelerometers
8613 +CONFIG_ADIS16203=m
8614 +CONFIG_ADIS16240=m
8615 +# end of Accelerometers
8618 +# Analog to digital converters
8620 +CONFIG_AD7816=m
8621 +CONFIG_AD7280=m
8622 +# end of Analog to digital converters
8625 +# Analog digital bi-direction converters
8627 +CONFIG_ADT7316=m
8628 +CONFIG_ADT7316_SPI=m
8629 +CONFIG_ADT7316_I2C=m
8630 +# end of Analog digital bi-direction converters
8633 +# Capacitance to digital converters
8635 +CONFIG_AD7150=m
8636 +CONFIG_AD7746=m
8637 +# end of Capacitance to digital converters
8640 +# Direct Digital Synthesis
8642 +CONFIG_AD9832=m
8643 +CONFIG_AD9834=m
8644 +# end of Direct Digital Synthesis
8647 +# Network Analyzer, Impedance Converters
8649 +CONFIG_AD5933=m
8650 +# end of Network Analyzer, Impedance Converters
8653 +# Active energy metering IC
8655 +CONFIG_ADE7854=m
8656 +CONFIG_ADE7854_I2C=m
8657 +CONFIG_ADE7854_SPI=m
8658 +# end of Active energy metering IC
8661 +# Resolver to digital converters
8663 +CONFIG_AD2S1210=m
8664 +# end of Resolver to digital converters
8665 +# end of IIO staging drivers
8667 +CONFIG_FB_SM750=m
8668 +CONFIG_STAGING_MEDIA=y
8669 +CONFIG_INTEL_ATOMISP=y
8670 +CONFIG_VIDEO_ATOMISP=m
8671 +# CONFIG_VIDEO_ATOMISP_ISP2401 is not set
8672 +CONFIG_VIDEO_ATOMISP_OV2722=m
8673 +CONFIG_VIDEO_ATOMISP_GC2235=m
8674 +CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER=m
8675 +CONFIG_VIDEO_ATOMISP_MT9M114=m
8676 +CONFIG_VIDEO_ATOMISP_GC0310=m
8677 +CONFIG_VIDEO_ATOMISP_OV2680=m
8678 +CONFIG_VIDEO_ATOMISP_OV5693=m
8679 +CONFIG_VIDEO_ATOMISP_LM3554=m
8680 +CONFIG_VIDEO_ZORAN=m
8681 +CONFIG_VIDEO_ZORAN_DC30=m
8682 +CONFIG_VIDEO_ZORAN_ZR36060=m
8683 +CONFIG_VIDEO_ZORAN_BUZ=m
8684 +CONFIG_VIDEO_ZORAN_DC10=m
8685 +CONFIG_VIDEO_ZORAN_LML33=m
8686 +CONFIG_VIDEO_ZORAN_LML33R10=m
8687 +CONFIG_VIDEO_ZORAN_AVS6EYES=m
8688 +CONFIG_VIDEO_IPU3_IMGU=m
8691 +# Android
8693 +CONFIG_ASHMEM=m
8694 +# end of Android
8696 +CONFIG_LTE_GDM724X=m
8697 +CONFIG_FIREWIRE_SERIAL=m
8698 +CONFIG_FWTTY_MAX_TOTAL_PORTS=64
8699 +CONFIG_FWTTY_MAX_CARD_PORTS=32
8700 +CONFIG_GS_FPGABOOT=m
8701 +CONFIG_UNISYSSPAR=y
8702 +CONFIG_UNISYS_VISORNIC=m
8703 +CONFIG_UNISYS_VISORINPUT=m
8704 +CONFIG_UNISYS_VISORHBA=m
8705 +CONFIG_FB_TFT=m
8706 +CONFIG_FB_TFT_AGM1264K_FL=m
8707 +CONFIG_FB_TFT_BD663474=m
8708 +CONFIG_FB_TFT_HX8340BN=m
8709 +CONFIG_FB_TFT_HX8347D=m
8710 +CONFIG_FB_TFT_HX8353D=m
8711 +CONFIG_FB_TFT_HX8357D=m
8712 +CONFIG_FB_TFT_ILI9163=m
8713 +CONFIG_FB_TFT_ILI9320=m
8714 +CONFIG_FB_TFT_ILI9325=m
8715 +CONFIG_FB_TFT_ILI9340=m
8716 +CONFIG_FB_TFT_ILI9341=m
8717 +CONFIG_FB_TFT_ILI9481=m
8718 +CONFIG_FB_TFT_ILI9486=m
8719 +CONFIG_FB_TFT_PCD8544=m
8720 +CONFIG_FB_TFT_RA8875=m
8721 +CONFIG_FB_TFT_S6D02A1=m
8722 +CONFIG_FB_TFT_S6D1121=m
8723 +CONFIG_FB_TFT_SEPS525=m
8724 +CONFIG_FB_TFT_SH1106=m
8725 +CONFIG_FB_TFT_SSD1289=m
8726 +CONFIG_FB_TFT_SSD1305=m
8727 +CONFIG_FB_TFT_SSD1306=m
8728 +CONFIG_FB_TFT_SSD1331=m
8729 +CONFIG_FB_TFT_SSD1351=m
8730 +CONFIG_FB_TFT_ST7735R=m
8731 +CONFIG_FB_TFT_ST7789V=m
8732 +CONFIG_FB_TFT_TINYLCD=m
8733 +CONFIG_FB_TFT_TLS8204=m
8734 +CONFIG_FB_TFT_UC1611=m
8735 +CONFIG_FB_TFT_UC1701=m
8736 +CONFIG_FB_TFT_UPD161704=m
8737 +CONFIG_FB_TFT_WATTEROTT=m
8738 +CONFIG_MOST_COMPONENTS=m
8739 +CONFIG_MOST_NET=m
8740 +CONFIG_MOST_SOUND=m
8741 +CONFIG_MOST_VIDEO=m
8742 +CONFIG_MOST_I2C=m
8743 +CONFIG_KS7010=m
8744 +CONFIG_GREYBUS_AUDIO=m
8745 +CONFIG_GREYBUS_AUDIO_APB_CODEC=m
8746 +CONFIG_GREYBUS_BOOTROM=m
8747 +CONFIG_GREYBUS_FIRMWARE=m
8748 +CONFIG_GREYBUS_HID=m
8749 +CONFIG_GREYBUS_LIGHT=m
8750 +CONFIG_GREYBUS_LOG=m
8751 +CONFIG_GREYBUS_LOOPBACK=m
8752 +CONFIG_GREYBUS_POWER=m
8753 +CONFIG_GREYBUS_RAW=m
8754 +CONFIG_GREYBUS_VIBRATOR=m
8755 +CONFIG_GREYBUS_BRIDGED_PHY=m
8756 +CONFIG_GREYBUS_GPIO=m
8757 +CONFIG_GREYBUS_I2C=m
8758 +CONFIG_GREYBUS_PWM=m
8759 +CONFIG_GREYBUS_SDIO=m
8760 +CONFIG_GREYBUS_SPI=m
8761 +CONFIG_GREYBUS_UART=m
8762 +CONFIG_GREYBUS_USB=m
8763 +CONFIG_PI433=m
8766 +# Gasket devices
8768 +CONFIG_STAGING_GASKET_FRAMEWORK=m
8769 +CONFIG_STAGING_APEX_DRIVER=m
8770 +# end of Gasket devices
8772 +CONFIG_FIELDBUS_DEV=m
8773 +CONFIG_KPC2000=y
8774 +CONFIG_KPC2000_CORE=m
8775 +CONFIG_KPC2000_SPI=m
8776 +CONFIG_KPC2000_I2C=m
8777 +CONFIG_KPC2000_DMA=m
8778 +CONFIG_QLGE=m
8779 +CONFIG_WIMAX=m
8780 +CONFIG_WIMAX_DEBUG_LEVEL=8
8781 +CONFIG_WIMAX_I2400M=m
8782 +CONFIG_WIMAX_I2400M_USB=m
8783 +CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
8784 +CONFIG_WFX=m
8785 +CONFIG_SPMI_HISI3670=m
8786 +CONFIG_X86_PLATFORM_DEVICES=y
8787 +CONFIG_ACPI_WMI=m
8788 +CONFIG_WMI_BMOF=m
8789 +CONFIG_HUAWEI_WMI=m
8790 +CONFIG_UV_SYSFS=m
8791 +CONFIG_INTEL_WMI_SBL_FW_UPDATE=m
8792 +CONFIG_INTEL_WMI_THUNDERBOLT=m
8793 +CONFIG_MXM_WMI=m
8794 +CONFIG_PEAQ_WMI=m
8795 +CONFIG_XIAOMI_WMI=m
8796 +CONFIG_ACERHDF=m
8797 +CONFIG_ACER_WIRELESS=m
8798 +CONFIG_ACER_WMI=m
8799 +CONFIG_AMD_PMC=m
8800 +CONFIG_APPLE_GMUX=m
8801 +CONFIG_ASUS_LAPTOP=m
8802 +CONFIG_ASUS_WIRELESS=m
8803 +CONFIG_ASUS_WMI=m
8804 +CONFIG_ASUS_NB_WMI=m
8805 +CONFIG_EEEPC_LAPTOP=m
8806 +CONFIG_EEEPC_WMI=m
8807 +CONFIG_X86_PLATFORM_DRIVERS_DELL=y
8808 +CONFIG_ALIENWARE_WMI=m
8809 +CONFIG_DCDBAS=m
8810 +CONFIG_DELL_LAPTOP=m
8811 +CONFIG_DELL_RBU=m
8812 +CONFIG_DELL_RBTN=m
8813 +CONFIG_DELL_SMBIOS=m
8814 +CONFIG_DELL_SMBIOS_WMI=y
8815 +CONFIG_DELL_SMBIOS_SMM=y
8816 +CONFIG_DELL_SMO8800=m
8817 +CONFIG_DELL_WMI=m
8818 +CONFIG_DELL_WMI_AIO=m
8819 +CONFIG_DELL_WMI_DESCRIPTOR=m
8820 +CONFIG_DELL_WMI_LED=m
8821 +CONFIG_DELL_WMI_SYSMAN=m
8822 +CONFIG_AMILO_RFKILL=m
8823 +CONFIG_FUJITSU_LAPTOP=m
8824 +CONFIG_FUJITSU_TABLET=m
8825 +CONFIG_GPD_POCKET_FAN=m
8826 +CONFIG_HP_ACCEL=m
8827 +CONFIG_HP_WIRELESS=m
8828 +CONFIG_HP_WMI=m
8829 +CONFIG_IBM_RTL=m
8830 +CONFIG_IDEAPAD_LAPTOP=m
8831 +CONFIG_SENSORS_HDAPS=m
8832 +CONFIG_THINKPAD_ACPI=m
8833 +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
8834 +CONFIG_THINKPAD_ACPI_DEBUGFACILITIES=y
8835 +# CONFIG_THINKPAD_ACPI_DEBUG is not set
8836 +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
8837 +CONFIG_THINKPAD_ACPI_VIDEO=y
8838 +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
8839 +CONFIG_INTEL_ATOMISP2_LED=m
8840 +CONFIG_INTEL_CHT_INT33FE=m
8841 +CONFIG_INTEL_HID_EVENT=m
8842 +CONFIG_INTEL_INT0002_VGPIO=m
8843 +CONFIG_INTEL_MENLOW=m
8844 +CONFIG_INTEL_OAKTRAIL=m
8845 +CONFIG_INTEL_VBTN=m
8846 +CONFIG_MSI_LAPTOP=m
8847 +CONFIG_MSI_WMI=m
8848 +CONFIG_PCENGINES_APU2=m
8849 +CONFIG_SAMSUNG_LAPTOP=m
8850 +CONFIG_SAMSUNG_Q10=m
8851 +CONFIG_ACPI_TOSHIBA=m
8852 +CONFIG_TOSHIBA_BT_RFKILL=m
8853 +CONFIG_TOSHIBA_HAPS=m
8854 +# CONFIG_TOSHIBA_WMI is not set
8855 +CONFIG_ACPI_CMPC=m
8856 +CONFIG_COMPAL_LAPTOP=m
8857 +CONFIG_LG_LAPTOP=m
8858 +CONFIG_PANASONIC_LAPTOP=m
8859 +CONFIG_SONY_LAPTOP=m
8860 +CONFIG_SONYPI_COMPAT=y
8861 +CONFIG_SYSTEM76_ACPI=m
8862 +CONFIG_TOPSTAR_LAPTOP=m
8863 +CONFIG_I2C_MULTI_INSTANTIATE=m
8864 +CONFIG_MLX_PLATFORM=m
8865 +CONFIG_TOUCHSCREEN_DMI=y
8866 +CONFIG_INTEL_IPS=m
8867 +CONFIG_INTEL_RST=m
8868 +CONFIG_INTEL_SMARTCONNECT=m
8871 +# Intel Speed Select Technology interface support
8873 +CONFIG_INTEL_SPEED_SELECT_INTERFACE=m
8874 +# end of Intel Speed Select Technology interface support
8876 +CONFIG_INTEL_TURBO_MAX_3=y
8877 +CONFIG_INTEL_UNCORE_FREQ_CONTROL=m
8878 +CONFIG_INTEL_BXTWC_PMIC_TMU=m
8879 +CONFIG_INTEL_CHTDC_TI_PWRBTN=m
8880 +CONFIG_INTEL_MRFLD_PWRBTN=m
8881 +CONFIG_INTEL_PMC_CORE=y
8882 +CONFIG_INTEL_PMT_CLASS=m
8883 +CONFIG_INTEL_PMT_TELEMETRY=m
8884 +CONFIG_INTEL_PMT_CRASHLOG=m
8885 +CONFIG_INTEL_PUNIT_IPC=m
8886 +CONFIG_INTEL_SCU_IPC=y
8887 +CONFIG_INTEL_SCU=y
8888 +CONFIG_INTEL_SCU_PCI=y
8889 +CONFIG_INTEL_SCU_PLATFORM=m
8890 +CONFIG_INTEL_SCU_IPC_UTIL=m
8891 +CONFIG_INTEL_TELEMETRY=m
8892 +CONFIG_PMC_ATOM=y
8893 +CONFIG_CHROME_PLATFORMS=y
8894 +CONFIG_CHROMEOS_LAPTOP=m
8895 +CONFIG_CHROMEOS_PSTORE=m
8896 +CONFIG_CHROMEOS_TBMC=m
8897 +CONFIG_CROS_EC=m
8898 +CONFIG_CROS_EC_I2C=m
8899 +CONFIG_CROS_EC_ISHTP=m
8900 +CONFIG_CROS_EC_SPI=m
8901 +CONFIG_CROS_EC_LPC=m
8902 +CONFIG_CROS_EC_PROTO=y
8903 +CONFIG_CROS_KBD_LED_BACKLIGHT=m
8904 +CONFIG_CROS_EC_CHARDEV=m
8905 +CONFIG_CROS_EC_LIGHTBAR=m
8906 +CONFIG_CROS_EC_DEBUGFS=m
8907 +CONFIG_CROS_EC_SENSORHUB=m
8908 +CONFIG_CROS_EC_SYSFS=m
8909 +CONFIG_CROS_EC_TYPEC=m
8910 +CONFIG_CROS_USBPD_LOGGER=m
8911 +CONFIG_CROS_USBPD_NOTIFY=m
8912 +CONFIG_WILCO_EC=m
8913 +CONFIG_WILCO_EC_DEBUGFS=m
8914 +CONFIG_WILCO_EC_EVENTS=m
8915 +CONFIG_WILCO_EC_TELEMETRY=m
8916 +CONFIG_MELLANOX_PLATFORM=y
8917 +CONFIG_MLXREG_HOTPLUG=m
8918 +CONFIG_MLXREG_IO=m
8919 +CONFIG_SURFACE_PLATFORMS=y
8920 +CONFIG_SURFACE3_WMI=m
8921 +CONFIG_SURFACE_3_BUTTON=m
8922 +CONFIG_SURFACE_3_POWER_OPREGION=m
8923 +CONFIG_SURFACE_ACPI_NOTIFY=m
8924 +CONFIG_SURFACE_AGGREGATOR_CDEV=m
8925 +CONFIG_SURFACE_GPE=m
8926 +CONFIG_SURFACE_HOTPLUG=m
8927 +CONFIG_SURFACE_PRO3_BUTTON=m
8928 +CONFIG_SURFACE_AGGREGATOR=m
8929 +CONFIG_SURFACE_AGGREGATOR_BUS=y
8930 +# CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION is not set
8931 +CONFIG_HAVE_CLK=y
8932 +CONFIG_CLKDEV_LOOKUP=y
8933 +CONFIG_HAVE_CLK_PREPARE=y
8934 +CONFIG_COMMON_CLK=y
8935 +CONFIG_COMMON_CLK_WM831X=m
8936 +CONFIG_COMMON_CLK_MAX9485=m
8937 +CONFIG_COMMON_CLK_SI5341=m
8938 +CONFIG_COMMON_CLK_SI5351=m
8939 +CONFIG_COMMON_CLK_SI544=m
8940 +CONFIG_COMMON_CLK_CDCE706=m
8941 +CONFIG_COMMON_CLK_CS2000_CP=m
8942 +CONFIG_COMMON_CLK_S2MPS11=m
8943 +CONFIG_CLK_TWL6040=m
8944 +CONFIG_COMMON_CLK_PALMAS=m
8945 +CONFIG_COMMON_CLK_PWM=m
8946 +CONFIG_XILINX_VCU=m
8947 +CONFIG_HWSPINLOCK=y
8950 +# Clock Source drivers
8952 +CONFIG_CLKEVT_I8253=y
8953 +CONFIG_I8253_LOCK=y
8954 +CONFIG_CLKBLD_I8253=y
8955 +# end of Clock Source drivers
8957 +CONFIG_MAILBOX=y
8958 +CONFIG_PCC=y
8959 +CONFIG_ALTERA_MBOX=m
8960 +CONFIG_IOMMU_IOVA=y
8961 +CONFIG_IOASID=y
8962 +CONFIG_IOMMU_API=y
8963 +CONFIG_IOMMU_SUPPORT=y
8966 +# Generic IOMMU Pagetable Support
8968 +CONFIG_IOMMU_IO_PGTABLE=y
8969 +# end of Generic IOMMU Pagetable Support
8971 +# CONFIG_IOMMU_DEBUGFS is not set
8972 +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
8973 +CONFIG_IOMMU_DMA=y
8974 +CONFIG_AMD_IOMMU=y
8975 +CONFIG_AMD_IOMMU_V2=m
8976 +CONFIG_DMAR_TABLE=y
8977 +CONFIG_INTEL_IOMMU=y
8978 +CONFIG_INTEL_IOMMU_SVM=y
8979 +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
8980 +CONFIG_INTEL_IOMMU_FLOPPY_WA=y
8981 +# CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON is not set
8982 +CONFIG_IRQ_REMAP=y
8983 +CONFIG_HYPERV_IOMMU=y
8986 +# Remoteproc drivers
8988 +CONFIG_REMOTEPROC=y
8989 +CONFIG_REMOTEPROC_CDEV=y
8990 +# end of Remoteproc drivers
8993 +# Rpmsg drivers
8995 +CONFIG_RPMSG=m
8996 +CONFIG_RPMSG_CHAR=m
8997 +CONFIG_RPMSG_NS=m
8998 +CONFIG_RPMSG_QCOM_GLINK=m
8999 +CONFIG_RPMSG_QCOM_GLINK_RPM=m
9000 +CONFIG_RPMSG_VIRTIO=m
9001 +# end of Rpmsg drivers
9003 +CONFIG_SOUNDWIRE=m
9006 +# SoundWire Devices
9008 +CONFIG_SOUNDWIRE_CADENCE=m
9009 +CONFIG_SOUNDWIRE_INTEL=m
9010 +CONFIG_SOUNDWIRE_QCOM=m
9011 +CONFIG_SOUNDWIRE_GENERIC_ALLOCATION=m
9014 +# SOC (System On Chip) specific Drivers
9018 +# Amlogic SoC drivers
9020 +# end of Amlogic SoC drivers
9023 +# Broadcom SoC drivers
9025 +# end of Broadcom SoC drivers
9028 +# NXP/Freescale QorIQ SoC drivers
9030 +# end of NXP/Freescale QorIQ SoC drivers
9033 +# i.MX SoC drivers
9035 +# end of i.MX SoC drivers
9038 +# Enable LiteX SoC Builder specific drivers
9040 +# end of Enable LiteX SoC Builder specific drivers
9043 +# Qualcomm SoC drivers
9045 +CONFIG_QCOM_QMI_HELPERS=m
9046 +# end of Qualcomm SoC drivers
9048 +CONFIG_SOC_TI=y
9051 +# Xilinx SoC drivers
9053 +# end of Xilinx SoC drivers
9054 +# end of SOC (System On Chip) specific Drivers
9056 +CONFIG_PM_DEVFREQ=y
9059 +# DEVFREQ Governors
9061 +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
9062 +CONFIG_DEVFREQ_GOV_PERFORMANCE=y
9063 +CONFIG_DEVFREQ_GOV_POWERSAVE=y
9064 +CONFIG_DEVFREQ_GOV_USERSPACE=y
9065 +CONFIG_DEVFREQ_GOV_PASSIVE=y
9068 +# DEVFREQ Drivers
9070 +CONFIG_PM_DEVFREQ_EVENT=y
9071 +CONFIG_EXTCON=y
9074 +# Extcon Device Drivers
9076 +CONFIG_EXTCON_ADC_JACK=m
9077 +CONFIG_EXTCON_ARIZONA=m
9078 +CONFIG_EXTCON_AXP288=m
9079 +CONFIG_EXTCON_FSA9480=m
9080 +CONFIG_EXTCON_GPIO=m
9081 +CONFIG_EXTCON_INTEL_INT3496=m
9082 +CONFIG_EXTCON_INTEL_CHT_WC=m
9083 +CONFIG_EXTCON_INTEL_MRFLD=m
9084 +CONFIG_EXTCON_MAX14577=m
9085 +CONFIG_EXTCON_MAX3355=m
9086 +CONFIG_EXTCON_MAX77693=m
9087 +CONFIG_EXTCON_MAX77843=m
9088 +CONFIG_EXTCON_MAX8997=m
9089 +CONFIG_EXTCON_PALMAS=m
9090 +CONFIG_EXTCON_PTN5150=m
9091 +CONFIG_EXTCON_RT8973A=m
9092 +CONFIG_EXTCON_SM5502=m
9093 +CONFIG_EXTCON_USB_GPIO=m
9094 +CONFIG_EXTCON_USBC_CROS_EC=m
9095 +CONFIG_EXTCON_USBC_TUSB320=m
9096 +CONFIG_MEMORY=y
9097 +CONFIG_FPGA_DFL_EMIF=m
9098 +CONFIG_IIO=m
9099 +CONFIG_IIO_BUFFER=y
9100 +CONFIG_IIO_BUFFER_CB=m
9101 +CONFIG_IIO_BUFFER_DMA=m
9102 +CONFIG_IIO_BUFFER_DMAENGINE=m
9103 +CONFIG_IIO_BUFFER_HW_CONSUMER=m
9104 +CONFIG_IIO_KFIFO_BUF=m
9105 +CONFIG_IIO_TRIGGERED_BUFFER=m
9106 +CONFIG_IIO_CONFIGFS=m
9107 +CONFIG_IIO_TRIGGER=y
9108 +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
9109 +CONFIG_IIO_SW_DEVICE=m
9110 +CONFIG_IIO_SW_TRIGGER=m
9111 +CONFIG_IIO_TRIGGERED_EVENT=m
9114 +# Accelerometers
9116 +CONFIG_ADIS16201=m
9117 +CONFIG_ADIS16209=m
9118 +CONFIG_ADXL372=m
9119 +CONFIG_ADXL372_SPI=m
9120 +CONFIG_ADXL372_I2C=m
9121 +CONFIG_BMA220=m
9122 +CONFIG_BMA400=m
9123 +CONFIG_BMA400_I2C=m
9124 +CONFIG_BMA400_SPI=m
9125 +CONFIG_BMC150_ACCEL=m
9126 +CONFIG_BMC150_ACCEL_I2C=m
9127 +CONFIG_BMC150_ACCEL_SPI=m
9128 +CONFIG_DA280=m
9129 +CONFIG_DA311=m
9130 +CONFIG_DMARD09=m
9131 +CONFIG_DMARD10=m
9132 +CONFIG_HID_SENSOR_ACCEL_3D=m
9133 +CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m
9134 +CONFIG_IIO_ST_ACCEL_3AXIS=m
9135 +CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
9136 +CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
9137 +CONFIG_KXSD9=m
9138 +CONFIG_KXSD9_SPI=m
9139 +CONFIG_KXSD9_I2C=m
9140 +CONFIG_KXCJK1013=m
9141 +CONFIG_MC3230=m
9142 +CONFIG_MMA7455=m
9143 +CONFIG_MMA7455_I2C=m
9144 +CONFIG_MMA7455_SPI=m
9145 +CONFIG_MMA7660=m
9146 +CONFIG_MMA8452=m
9147 +CONFIG_MMA9551_CORE=m
9148 +CONFIG_MMA9551=m
9149 +CONFIG_MMA9553=m
9150 +CONFIG_MXC4005=m
9151 +CONFIG_MXC6255=m
9152 +CONFIG_SCA3000=m
9153 +CONFIG_STK8312=m
9154 +CONFIG_STK8BA50=m
9155 +# end of Accelerometers
9158 +# Analog to digital converters
9160 +CONFIG_AD_SIGMA_DELTA=m
9161 +CONFIG_AD7091R5=m
9162 +CONFIG_AD7124=m
9163 +CONFIG_AD7192=m
9164 +CONFIG_AD7266=m
9165 +CONFIG_AD7291=m
9166 +CONFIG_AD7292=m
9167 +CONFIG_AD7298=m
9168 +CONFIG_AD7476=m
9169 +CONFIG_AD7606=m
9170 +CONFIG_AD7606_IFACE_PARALLEL=m
9171 +CONFIG_AD7606_IFACE_SPI=m
9172 +CONFIG_AD7766=m
9173 +CONFIG_AD7768_1=m
9174 +CONFIG_AD7780=m
9175 +CONFIG_AD7791=m
9176 +CONFIG_AD7793=m
9177 +CONFIG_AD7887=m
9178 +CONFIG_AD7923=m
9179 +CONFIG_AD7949=m
9180 +CONFIG_AD799X=m
9181 +CONFIG_AD9467=m
9182 +CONFIG_ADI_AXI_ADC=m
9183 +CONFIG_AXP20X_ADC=m
9184 +CONFIG_AXP288_ADC=m
9185 +CONFIG_CC10001_ADC=m
9186 +CONFIG_DA9150_GPADC=m
9187 +CONFIG_DLN2_ADC=m
9188 +CONFIG_HI8435=m
9189 +CONFIG_HX711=m
9190 +CONFIG_INA2XX_ADC=m
9191 +CONFIG_INTEL_MRFLD_ADC=m
9192 +CONFIG_LP8788_ADC=m
9193 +CONFIG_LTC2471=m
9194 +CONFIG_LTC2485=m
9195 +CONFIG_LTC2496=m
9196 +CONFIG_LTC2497=m
9197 +CONFIG_MAX1027=m
9198 +CONFIG_MAX11100=m
9199 +CONFIG_MAX1118=m
9200 +CONFIG_MAX1241=m
9201 +CONFIG_MAX1363=m
9202 +CONFIG_MAX9611=m
9203 +CONFIG_MCP320X=m
9204 +CONFIG_MCP3422=m
9205 +CONFIG_MCP3911=m
9206 +CONFIG_MEDIATEK_MT6360_ADC=m
9207 +CONFIG_MEN_Z188_ADC=m
9208 +CONFIG_MP2629_ADC=m
9209 +CONFIG_NAU7802=m
9210 +CONFIG_PALMAS_GPADC=m
9211 +CONFIG_QCOM_VADC_COMMON=m
9212 +CONFIG_QCOM_SPMI_IADC=m
9213 +CONFIG_QCOM_SPMI_VADC=m
9214 +CONFIG_QCOM_SPMI_ADC5=m
9215 +CONFIG_STX104=m
9216 +CONFIG_TI_ADC081C=m
9217 +CONFIG_TI_ADC0832=m
9218 +CONFIG_TI_ADC084S021=m
9219 +CONFIG_TI_ADC12138=m
9220 +CONFIG_TI_ADC108S102=m
9221 +CONFIG_TI_ADC128S052=m
9222 +CONFIG_TI_ADC161S626=m
9223 +CONFIG_TI_ADS1015=m
9224 +CONFIG_TI_ADS7950=m
9225 +CONFIG_TI_AM335X_ADC=m
9226 +CONFIG_TI_TLC4541=m
9227 +CONFIG_TWL4030_MADC=m
9228 +CONFIG_TWL6030_GPADC=m
9229 +CONFIG_VIPERBOARD_ADC=m
9230 +CONFIG_XILINX_XADC=m
9231 +# end of Analog to digital converters
9234 +# Analog Front Ends
9236 +# end of Analog Front Ends
9239 +# Amplifiers
9241 +CONFIG_AD8366=m
9242 +CONFIG_HMC425=m
9243 +# end of Amplifiers
9246 +# Chemical Sensors
9248 +CONFIG_ATLAS_PH_SENSOR=m
9249 +CONFIG_ATLAS_EZO_SENSOR=m
9250 +CONFIG_BME680=m
9251 +CONFIG_BME680_I2C=m
9252 +CONFIG_BME680_SPI=m
9253 +CONFIG_CCS811=m
9254 +CONFIG_IAQCORE=m
9255 +CONFIG_PMS7003=m
9256 +CONFIG_SCD30_CORE=m
9257 +CONFIG_SCD30_I2C=m
9258 +CONFIG_SCD30_SERIAL=m
9259 +CONFIG_SENSIRION_SGP30=m
9260 +CONFIG_SPS30=m
9261 +CONFIG_VZ89X=m
9262 +# end of Chemical Sensors
9264 +CONFIG_IIO_CROS_EC_SENSORS_CORE=m
9265 +CONFIG_IIO_CROS_EC_SENSORS=m
9266 +CONFIG_IIO_CROS_EC_SENSORS_LID_ANGLE=m
9269 +# Hid Sensor IIO Common
9271 +CONFIG_HID_SENSOR_IIO_COMMON=m
9272 +CONFIG_HID_SENSOR_IIO_TRIGGER=m
9273 +# end of Hid Sensor IIO Common
9275 +CONFIG_IIO_MS_SENSORS_I2C=m
9278 +# SSP Sensor Common
9280 +CONFIG_IIO_SSP_SENSORS_COMMONS=m
9281 +CONFIG_IIO_SSP_SENSORHUB=m
9282 +# end of SSP Sensor Common
9284 +CONFIG_IIO_ST_SENSORS_I2C=m
9285 +CONFIG_IIO_ST_SENSORS_SPI=m
9286 +CONFIG_IIO_ST_SENSORS_CORE=m
9289 +# Digital to analog converters
9291 +CONFIG_AD5064=m
9292 +CONFIG_AD5360=m
9293 +CONFIG_AD5380=m
9294 +CONFIG_AD5421=m
9295 +CONFIG_AD5446=m
9296 +CONFIG_AD5449=m
9297 +CONFIG_AD5592R_BASE=m
9298 +CONFIG_AD5592R=m
9299 +CONFIG_AD5593R=m
9300 +CONFIG_AD5504=m
9301 +CONFIG_AD5624R_SPI=m
9302 +CONFIG_AD5686=m
9303 +CONFIG_AD5686_SPI=m
9304 +CONFIG_AD5696_I2C=m
9305 +CONFIG_AD5755=m
9306 +CONFIG_AD5758=m
9307 +CONFIG_AD5761=m
9308 +CONFIG_AD5764=m
9309 +CONFIG_AD5766=m
9310 +CONFIG_AD5770R=m
9311 +CONFIG_AD5791=m
9312 +CONFIG_AD7303=m
9313 +CONFIG_AD8801=m
9314 +CONFIG_CIO_DAC=m
9315 +CONFIG_DS4424=m
9316 +CONFIG_LTC1660=m
9317 +CONFIG_LTC2632=m
9318 +CONFIG_M62332=m
9319 +CONFIG_MAX517=m
9320 +CONFIG_MCP4725=m
9321 +CONFIG_MCP4922=m
9322 +CONFIG_TI_DAC082S085=m
9323 +CONFIG_TI_DAC5571=m
9324 +CONFIG_TI_DAC7311=m
9325 +CONFIG_TI_DAC7612=m
9326 +# end of Digital to analog converters
9329 +# IIO dummy driver
9331 +CONFIG_IIO_SIMPLE_DUMMY=m
9332 +# CONFIG_IIO_SIMPLE_DUMMY_EVENTS is not set
9333 +# CONFIG_IIO_SIMPLE_DUMMY_BUFFER is not set
9334 +# end of IIO dummy driver
9337 +# Frequency Synthesizers DDS/PLL
9341 +# Clock Generator/Distribution
9343 +CONFIG_AD9523=m
9344 +# end of Clock Generator/Distribution
9347 +# Phase-Locked Loop (PLL) frequency synthesizers
9349 +CONFIG_ADF4350=m
9350 +CONFIG_ADF4371=m
9351 +# end of Phase-Locked Loop (PLL) frequency synthesizers
9352 +# end of Frequency Synthesizers DDS/PLL
9355 +# Digital gyroscope sensors
9357 +CONFIG_ADIS16080=m
9358 +CONFIG_ADIS16130=m
9359 +CONFIG_ADIS16136=m
9360 +CONFIG_ADIS16260=m
9361 +CONFIG_ADXRS290=m
9362 +CONFIG_ADXRS450=m
9363 +CONFIG_BMG160=m
9364 +CONFIG_BMG160_I2C=m
9365 +CONFIG_BMG160_SPI=m
9366 +CONFIG_FXAS21002C=m
9367 +CONFIG_FXAS21002C_I2C=m
9368 +CONFIG_FXAS21002C_SPI=m
9369 +CONFIG_HID_SENSOR_GYRO_3D=m
9370 +CONFIG_MPU3050=m
9371 +CONFIG_MPU3050_I2C=m
9372 +CONFIG_IIO_ST_GYRO_3AXIS=m
9373 +CONFIG_IIO_ST_GYRO_I2C_3AXIS=m
9374 +CONFIG_IIO_ST_GYRO_SPI_3AXIS=m
9375 +CONFIG_ITG3200=m
9376 +# end of Digital gyroscope sensors
9379 +# Health Sensors
9383 +# Heart Rate Monitors
9385 +CONFIG_AFE4403=m
9386 +CONFIG_AFE4404=m
9387 +CONFIG_MAX30100=m
9388 +CONFIG_MAX30102=m
9389 +# end of Heart Rate Monitors
9390 +# end of Health Sensors
9393 +# Humidity sensors
9395 +CONFIG_AM2315=m
9396 +CONFIG_DHT11=m
9397 +CONFIG_HDC100X=m
9398 +CONFIG_HDC2010=m
9399 +CONFIG_HID_SENSOR_HUMIDITY=m
9400 +CONFIG_HTS221=m
9401 +CONFIG_HTS221_I2C=m
9402 +CONFIG_HTS221_SPI=m
9403 +CONFIG_HTU21=m
9404 +CONFIG_SI7005=m
9405 +CONFIG_SI7020=m
9406 +# end of Humidity sensors
9409 +# Inertial measurement units
9411 +CONFIG_ADIS16400=m
9412 +CONFIG_ADIS16460=m
9413 +CONFIG_ADIS16475=m
9414 +CONFIG_ADIS16480=m
9415 +CONFIG_BMI160=m
9416 +CONFIG_BMI160_I2C=m
9417 +CONFIG_BMI160_SPI=m
9418 +CONFIG_FXOS8700=m
9419 +CONFIG_FXOS8700_I2C=m
9420 +CONFIG_FXOS8700_SPI=m
9421 +CONFIG_KMX61=m
9422 +CONFIG_INV_ICM42600=m
9423 +CONFIG_INV_ICM42600_I2C=m
9424 +CONFIG_INV_ICM42600_SPI=m
9425 +CONFIG_INV_MPU6050_IIO=m
9426 +CONFIG_INV_MPU6050_I2C=m
9427 +CONFIG_INV_MPU6050_SPI=m
9428 +CONFIG_IIO_ST_LSM6DSX=m
9429 +CONFIG_IIO_ST_LSM6DSX_I2C=m
9430 +CONFIG_IIO_ST_LSM6DSX_SPI=m
9431 +CONFIG_IIO_ST_LSM6DSX_I3C=m
9432 +# end of Inertial measurement units
9434 +CONFIG_IIO_ADIS_LIB=m
9435 +CONFIG_IIO_ADIS_LIB_BUFFER=y
9438 +# Light sensors
9440 +CONFIG_ACPI_ALS=m
9441 +CONFIG_ADJD_S311=m
9442 +CONFIG_ADUX1020=m
9443 +CONFIG_AL3010=m
9444 +CONFIG_AL3320A=m
9445 +CONFIG_APDS9300=m
9446 +CONFIG_APDS9960=m
9447 +CONFIG_AS73211=m
9448 +CONFIG_BH1750=m
9449 +CONFIG_BH1780=m
9450 +CONFIG_CM32181=m
9451 +CONFIG_CM3232=m
9452 +CONFIG_CM3323=m
9453 +CONFIG_CM36651=m
9454 +CONFIG_IIO_CROS_EC_LIGHT_PROX=m
9455 +CONFIG_GP2AP002=m
9456 +CONFIG_GP2AP020A00F=m
9457 +CONFIG_IQS621_ALS=m
9458 +CONFIG_SENSORS_ISL29018=m
9459 +CONFIG_SENSORS_ISL29028=m
9460 +CONFIG_ISL29125=m
9461 +CONFIG_HID_SENSOR_ALS=m
9462 +CONFIG_HID_SENSOR_PROX=m
9463 +CONFIG_JSA1212=m
9464 +CONFIG_RPR0521=m
9465 +CONFIG_SENSORS_LM3533=m
9466 +CONFIG_LTR501=m
9467 +CONFIG_LV0104CS=m
9468 +CONFIG_MAX44000=m
9469 +CONFIG_MAX44009=m
9470 +CONFIG_NOA1305=m
9471 +CONFIG_OPT3001=m
9472 +CONFIG_PA12203001=m
9473 +CONFIG_SI1133=m
9474 +CONFIG_SI1145=m
9475 +CONFIG_STK3310=m
9476 +CONFIG_ST_UVIS25=m
9477 +CONFIG_ST_UVIS25_I2C=m
9478 +CONFIG_ST_UVIS25_SPI=m
9479 +CONFIG_TCS3414=m
9480 +CONFIG_TCS3472=m
9481 +CONFIG_SENSORS_TSL2563=m
9482 +CONFIG_TSL2583=m
9483 +CONFIG_TSL2772=m
9484 +CONFIG_TSL4531=m
9485 +CONFIG_US5182D=m
9486 +CONFIG_VCNL4000=m
9487 +CONFIG_VCNL4035=m
9488 +CONFIG_VEML6030=m
9489 +CONFIG_VEML6070=m
9490 +CONFIG_VL6180=m
9491 +CONFIG_ZOPT2201=m
9492 +# end of Light sensors
9495 +# Magnetometer sensors
9497 +CONFIG_AK8975=m
9498 +CONFIG_AK09911=m
9499 +CONFIG_BMC150_MAGN=m
9500 +CONFIG_BMC150_MAGN_I2C=m
9501 +CONFIG_BMC150_MAGN_SPI=m
9502 +CONFIG_MAG3110=m
9503 +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
9504 +CONFIG_MMC35240=m
9505 +CONFIG_IIO_ST_MAGN_3AXIS=m
9506 +CONFIG_IIO_ST_MAGN_I2C_3AXIS=m
9507 +CONFIG_IIO_ST_MAGN_SPI_3AXIS=m
9508 +CONFIG_SENSORS_HMC5843=m
9509 +CONFIG_SENSORS_HMC5843_I2C=m
9510 +CONFIG_SENSORS_HMC5843_SPI=m
9511 +CONFIG_SENSORS_RM3100=m
9512 +CONFIG_SENSORS_RM3100_I2C=m
9513 +CONFIG_SENSORS_RM3100_SPI=m
9514 +CONFIG_YAMAHA_YAS530=m
9515 +# end of Magnetometer sensors
9518 +# Multiplexers
9520 +# end of Multiplexers
9523 +# Inclinometer sensors
9525 +CONFIG_HID_SENSOR_INCLINOMETER_3D=m
9526 +CONFIG_HID_SENSOR_DEVICE_ROTATION=m
9527 +# end of Inclinometer sensors
9530 +# Triggers - standalone
9532 +CONFIG_IIO_HRTIMER_TRIGGER=m
9533 +CONFIG_IIO_INTERRUPT_TRIGGER=m
9534 +CONFIG_IIO_TIGHTLOOP_TRIGGER=m
9535 +CONFIG_IIO_SYSFS_TRIGGER=m
9536 +# end of Triggers - standalone
9539 +# Linear and angular position sensors
9541 +CONFIG_IQS624_POS=m
9542 +CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE=m
9543 +# end of Linear and angular position sensors
9546 +# Digital potentiometers
9548 +CONFIG_AD5272=m
9549 +CONFIG_DS1803=m
9550 +CONFIG_MAX5432=m
9551 +CONFIG_MAX5481=m
9552 +CONFIG_MAX5487=m
9553 +CONFIG_MCP4018=m
9554 +CONFIG_MCP4131=m
9555 +CONFIG_MCP4531=m
9556 +CONFIG_MCP41010=m
9557 +CONFIG_TPL0102=m
9558 +# end of Digital potentiometers
9561 +# Digital potentiostats
9563 +CONFIG_LMP91000=m
9564 +# end of Digital potentiostats
9567 +# Pressure sensors
9569 +CONFIG_ABP060MG=m
9570 +CONFIG_BMP280=m
9571 +CONFIG_BMP280_I2C=m
9572 +CONFIG_BMP280_SPI=m
9573 +CONFIG_IIO_CROS_EC_BARO=m
9574 +CONFIG_DLHL60D=m
9575 +CONFIG_DPS310=m
9576 +CONFIG_HID_SENSOR_PRESS=m
9577 +CONFIG_HP03=m
9578 +CONFIG_ICP10100=m
9579 +CONFIG_MPL115=m
9580 +CONFIG_MPL115_I2C=m
9581 +CONFIG_MPL115_SPI=m
9582 +CONFIG_MPL3115=m
9583 +CONFIG_MS5611=m
9584 +CONFIG_MS5611_I2C=m
9585 +CONFIG_MS5611_SPI=m
9586 +CONFIG_MS5637=m
9587 +CONFIG_IIO_ST_PRESS=m
9588 +CONFIG_IIO_ST_PRESS_I2C=m
9589 +CONFIG_IIO_ST_PRESS_SPI=m
9590 +CONFIG_T5403=m
9591 +CONFIG_HP206C=m
9592 +CONFIG_ZPA2326=m
9593 +CONFIG_ZPA2326_I2C=m
9594 +CONFIG_ZPA2326_SPI=m
9595 +# end of Pressure sensors
9598 +# Lightning sensors
9600 +CONFIG_AS3935=m
9601 +# end of Lightning sensors
9604 +# Proximity and distance sensors
9606 +CONFIG_ISL29501=m
9607 +CONFIG_LIDAR_LITE_V2=m
9608 +CONFIG_MB1232=m
9609 +CONFIG_PING=m
9610 +CONFIG_RFD77402=m
9611 +CONFIG_SRF04=m
9612 +CONFIG_SX9310=m
9613 +CONFIG_SX9500=m
9614 +CONFIG_SRF08=m
9615 +CONFIG_VCNL3020=m
9616 +CONFIG_VL53L0X_I2C=m
9617 +# end of Proximity and distance sensors
9620 +# Resolver to digital converters
9622 +CONFIG_AD2S90=m
9623 +CONFIG_AD2S1200=m
9624 +# end of Resolver to digital converters
9627 +# Temperature sensors
9629 +CONFIG_IQS620AT_TEMP=m
9630 +CONFIG_LTC2983=m
9631 +CONFIG_MAXIM_THERMOCOUPLE=m
9632 +CONFIG_HID_SENSOR_TEMP=m
9633 +CONFIG_MLX90614=m
9634 +CONFIG_MLX90632=m
9635 +CONFIG_TMP006=m
9636 +CONFIG_TMP007=m
9637 +CONFIG_TSYS01=m
9638 +CONFIG_TSYS02D=m
9639 +CONFIG_MAX31856=m
9640 +# end of Temperature sensors
9642 +CONFIG_NTB=m
9643 +CONFIG_NTB_MSI=y
9644 +# CONFIG_NTB_AMD is not set
9645 +CONFIG_NTB_IDT=m
9646 +CONFIG_NTB_INTEL=m
9647 +CONFIG_NTB_EPF=m
9648 +CONFIG_NTB_SWITCHTEC=m
9649 +CONFIG_NTB_PINGPONG=m
9650 +CONFIG_NTB_TOOL=m
9651 +CONFIG_NTB_PERF=m
9652 +# CONFIG_NTB_MSI_TEST is not set
9653 +CONFIG_NTB_TRANSPORT=m
9654 +CONFIG_VME_BUS=y
9657 +# VME Bridge Drivers
9659 +CONFIG_VME_CA91CX42=m
9660 +CONFIG_VME_TSI148=m
9661 +CONFIG_VME_FAKE=m
9664 +# VME Board Drivers
9666 +CONFIG_VMIVME_7805=m
9669 +# VME Device Drivers
9671 +CONFIG_VME_USER=m
9672 +CONFIG_PWM=y
9673 +CONFIG_PWM_SYSFS=y
9674 +# CONFIG_PWM_DEBUG is not set
9675 +CONFIG_PWM_CRC=y
9676 +CONFIG_PWM_CROS_EC=m
9677 +CONFIG_PWM_DWC=m
9678 +CONFIG_PWM_IQS620A=m
9679 +CONFIG_PWM_LP3943=m
9680 +CONFIG_PWM_LPSS=y
9681 +CONFIG_PWM_LPSS_PCI=y
9682 +CONFIG_PWM_LPSS_PLATFORM=y
9683 +CONFIG_PWM_PCA9685=m
9684 +CONFIG_PWM_TWL=m
9685 +CONFIG_PWM_TWL_LED=m
9688 +# IRQ chip support
9690 +CONFIG_MADERA_IRQ=m
9691 +# end of IRQ chip support
9693 +CONFIG_IPACK_BUS=m
9694 +CONFIG_BOARD_TPCI200=m
9695 +CONFIG_SERIAL_IPOCTAL=m
9696 +CONFIG_RESET_CONTROLLER=y
9697 +CONFIG_RESET_BRCMSTB_RESCAL=y
9698 +CONFIG_RESET_TI_SYSCON=m
9701 +# PHY Subsystem
9703 +CONFIG_GENERIC_PHY=y
9704 +CONFIG_USB_LGM_PHY=m
9705 +CONFIG_BCM_KONA_USB2_PHY=m
9706 +CONFIG_PHY_PXA_28NM_HSIC=m
9707 +CONFIG_PHY_PXA_28NM_USB2=m
9708 +CONFIG_PHY_CPCAP_USB=m
9709 +CONFIG_PHY_QCOM_USB_HS=m
9710 +CONFIG_PHY_QCOM_USB_HSIC=m
9711 +CONFIG_PHY_SAMSUNG_USB2=m
9712 +CONFIG_PHY_TUSB1210=m
9713 +CONFIG_PHY_INTEL_LGM_EMMC=m
9714 +# end of PHY Subsystem
9716 +CONFIG_POWERCAP=y
9717 +CONFIG_INTEL_RAPL_CORE=m
9718 +CONFIG_INTEL_RAPL=m
9719 +CONFIG_IDLE_INJECT=y
9720 +CONFIG_DTPM=y
9721 +CONFIG_DTPM_CPU=y
9722 +CONFIG_MCB=m
9723 +CONFIG_MCB_PCI=m
9724 +CONFIG_MCB_LPC=m
9727 +# Performance monitor support
9729 +# end of Performance monitor support
9731 +CONFIG_RAS=y
9732 +CONFIG_RAS_CEC=y
9733 +# CONFIG_RAS_CEC_DEBUG is not set
9734 +CONFIG_USB4=m
9735 +# CONFIG_USB4_DEBUGFS_WRITE is not set
9736 +# CONFIG_USB4_DMA_TEST is not set
9739 +# Android
9741 +CONFIG_ANDROID=y
9742 +CONFIG_ANDROID_BINDER_IPC=m
9743 +CONFIG_ANDROID_BINDERFS=m
9744 +CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
9745 +# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set
9746 +# end of Android
9748 +CONFIG_LIBNVDIMM=y
9749 +CONFIG_BLK_DEV_PMEM=m
9750 +CONFIG_ND_BLK=m
9751 +CONFIG_ND_CLAIM=y
9752 +CONFIG_ND_BTT=m
9753 +CONFIG_BTT=y
9754 +CONFIG_ND_PFN=m
9755 +CONFIG_NVDIMM_PFN=y
9756 +CONFIG_NVDIMM_DAX=y
9757 +CONFIG_NVDIMM_KEYS=y
9758 +CONFIG_DAX_DRIVER=y
9759 +CONFIG_DAX=y
9760 +CONFIG_DEV_DAX=m
9761 +CONFIG_DEV_DAX_PMEM=m
9762 +CONFIG_DEV_DAX_HMEM=m
9763 +CONFIG_DEV_DAX_HMEM_DEVICES=y
9764 +CONFIG_DEV_DAX_KMEM=m
9765 +CONFIG_DEV_DAX_PMEM_COMPAT=m
9766 +CONFIG_NVMEM=y
9767 +CONFIG_NVMEM_SYSFS=y
9768 +CONFIG_NVMEM_SPMI_SDAM=m
9769 +CONFIG_RAVE_SP_EEPROM=m
9770 +CONFIG_NVMEM_RMEM=m
9773 +# HW tracing support
9775 +CONFIG_STM=m
9776 +CONFIG_STM_PROTO_BASIC=m
9777 +CONFIG_STM_PROTO_SYS_T=m
9778 +CONFIG_STM_DUMMY=m
9779 +CONFIG_STM_SOURCE_CONSOLE=m
9780 +CONFIG_STM_SOURCE_HEARTBEAT=m
9781 +CONFIG_INTEL_TH=m
9782 +CONFIG_INTEL_TH_PCI=m
9783 +CONFIG_INTEL_TH_ACPI=m
9784 +CONFIG_INTEL_TH_GTH=m
9785 +CONFIG_INTEL_TH_STH=m
9786 +CONFIG_INTEL_TH_MSU=m
9787 +CONFIG_INTEL_TH_PTI=m
9788 +# CONFIG_INTEL_TH_DEBUG is not set
9789 +# end of HW tracing support
9791 +CONFIG_FPGA=m
9792 +CONFIG_ALTERA_PR_IP_CORE=m
9793 +CONFIG_FPGA_MGR_ALTERA_PS_SPI=m
9794 +CONFIG_FPGA_MGR_ALTERA_CVP=m
9795 +CONFIG_FPGA_MGR_XILINX_SPI=m
9796 +CONFIG_FPGA_MGR_MACHXO2_SPI=m
9797 +CONFIG_FPGA_BRIDGE=m
9798 +CONFIG_ALTERA_FREEZE_BRIDGE=m
9799 +CONFIG_XILINX_PR_DECOUPLER=m
9800 +CONFIG_FPGA_REGION=m
9801 +CONFIG_FPGA_DFL=m
9802 +CONFIG_FPGA_DFL_FME=m
9803 +CONFIG_FPGA_DFL_FME_MGR=m
9804 +CONFIG_FPGA_DFL_FME_BRIDGE=m
9805 +CONFIG_FPGA_DFL_FME_REGION=m
9806 +CONFIG_FPGA_DFL_AFU=m
9807 +CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000=m
9808 +CONFIG_FPGA_DFL_PCI=m
9809 +CONFIG_TEE=m
9812 +# TEE drivers
9814 +CONFIG_AMDTEE=m
9815 +# end of TEE drivers
9817 +CONFIG_MULTIPLEXER=m
9820 +# Multiplexer drivers
9822 +CONFIG_MUX_ADG792A=m
9823 +CONFIG_MUX_ADGS1408=m
9824 +CONFIG_MUX_GPIO=m
9825 +# end of Multiplexer drivers
9827 +CONFIG_PM_OPP=y
9828 +CONFIG_UNISYS_VISORBUS=m
9829 +CONFIG_SIOX=m
9830 +CONFIG_SIOX_BUS_GPIO=m
9831 +CONFIG_SLIMBUS=m
9832 +CONFIG_SLIM_QCOM_CTRL=m
9833 +CONFIG_INTERCONNECT=y
9834 +CONFIG_COUNTER=m
9835 +CONFIG_104_QUAD_8=m
9836 +CONFIG_MOST=m
9837 +CONFIG_MOST_USB_HDM=m
9838 +CONFIG_MOST_CDEV=m
9839 +# end of Device Drivers
9842 +# File systems
9844 +CONFIG_DCACHE_WORD_ACCESS=y
9845 +CONFIG_VALIDATE_FS_PARSER=y
9846 +CONFIG_FS_IOMAP=y
9847 +# CONFIG_EXT2_FS is not set
9848 +# CONFIG_EXT3_FS is not set
9849 +CONFIG_EXT4_FS=y
9850 +CONFIG_EXT4_USE_FOR_EXT2=y
9851 +CONFIG_EXT4_FS_POSIX_ACL=y
9852 +CONFIG_EXT4_FS_SECURITY=y
9853 +# CONFIG_EXT4_DEBUG is not set
9854 +CONFIG_JBD2=y
9855 +# CONFIG_JBD2_DEBUG is not set
9856 +CONFIG_FS_MBCACHE=y
9857 +CONFIG_REISERFS_FS=m
9858 +# CONFIG_REISERFS_CHECK is not set
9859 +# CONFIG_REISERFS_PROC_INFO is not set
9860 +CONFIG_REISERFS_FS_XATTR=y
9861 +CONFIG_REISERFS_FS_POSIX_ACL=y
9862 +CONFIG_REISERFS_FS_SECURITY=y
9863 +CONFIG_JFS_FS=m
9864 +CONFIG_JFS_POSIX_ACL=y
9865 +CONFIG_JFS_SECURITY=y
9866 +# CONFIG_JFS_DEBUG is not set
9867 +CONFIG_JFS_STATISTICS=y
9868 +CONFIG_XFS_FS=m
9869 +CONFIG_XFS_SUPPORT_V4=y
9870 +CONFIG_XFS_QUOTA=y
9871 +CONFIG_XFS_POSIX_ACL=y
9872 +CONFIG_XFS_RT=y
9873 +# CONFIG_XFS_ONLINE_SCRUB is not set
9874 +# CONFIG_XFS_WARN is not set
9875 +# CONFIG_XFS_DEBUG is not set
9876 +CONFIG_GFS2_FS=m
9877 +CONFIG_GFS2_FS_LOCKING_DLM=y
9878 +CONFIG_OCFS2_FS=m
9879 +CONFIG_OCFS2_FS_O2CB=m
9880 +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
9881 +CONFIG_OCFS2_FS_STATS=y
9882 +CONFIG_OCFS2_DEBUG_MASKLOG=y
9883 +# CONFIG_OCFS2_DEBUG_FS is not set
9884 +CONFIG_BTRFS_FS=m
9885 +CONFIG_BTRFS_FS_POSIX_ACL=y
9886 +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
9887 +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
9888 +# CONFIG_BTRFS_DEBUG is not set
9889 +# CONFIG_BTRFS_ASSERT is not set
9890 +# CONFIG_BTRFS_FS_REF_VERIFY is not set
9891 +CONFIG_NILFS2_FS=m
9892 +CONFIG_F2FS_FS=m
9893 +CONFIG_F2FS_STAT_FS=y
9894 +CONFIG_F2FS_FS_XATTR=y
9895 +CONFIG_F2FS_FS_POSIX_ACL=y
9896 +CONFIG_F2FS_FS_SECURITY=y
9897 +# CONFIG_F2FS_CHECK_FS is not set
9898 +# CONFIG_F2FS_FAULT_INJECTION is not set
9899 +CONFIG_F2FS_FS_COMPRESSION=y
9900 +CONFIG_F2FS_FS_LZO=y
9901 +CONFIG_F2FS_FS_LZ4=y
9902 +CONFIG_F2FS_FS_LZ4HC=y
9903 +CONFIG_F2FS_FS_ZSTD=y
9904 +CONFIG_F2FS_FS_LZORLE=y
9905 +CONFIG_ZONEFS_FS=m
9906 +CONFIG_FS_DAX=y
9907 +CONFIG_FS_DAX_PMD=y
9908 +CONFIG_FS_POSIX_ACL=y
9909 +CONFIG_EXPORTFS=y
9910 +CONFIG_EXPORTFS_BLOCK_OPS=y
9911 +CONFIG_FILE_LOCKING=y
9912 +CONFIG_MANDATORY_FILE_LOCKING=y
9913 +CONFIG_FS_ENCRYPTION=y
9914 +CONFIG_FS_ENCRYPTION_ALGS=y
9915 +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
9916 +CONFIG_FS_VERITY=y
9917 +# CONFIG_FS_VERITY_DEBUG is not set
9918 +CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
9919 +CONFIG_FSNOTIFY=y
9920 +CONFIG_DNOTIFY=y
9921 +CONFIG_INOTIFY_USER=y
9922 +CONFIG_FANOTIFY=y
9923 +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
9924 +CONFIG_QUOTA=y
9925 +CONFIG_QUOTA_NETLINK_INTERFACE=y
9926 +# CONFIG_PRINT_QUOTA_WARNING is not set
9927 +# CONFIG_QUOTA_DEBUG is not set
9928 +CONFIG_QUOTA_TREE=m
9929 +CONFIG_QFMT_V1=m
9930 +CONFIG_QFMT_V2=m
9931 +CONFIG_QUOTACTL=y
9932 +CONFIG_AUTOFS4_FS=m
9933 +CONFIG_AUTOFS_FS=m
9934 +CONFIG_FUSE_FS=y
9935 +CONFIG_CUSE=m
9936 +CONFIG_VIRTIO_FS=m
9937 +CONFIG_FUSE_DAX=y
9938 +CONFIG_OVERLAY_FS=m
9939 +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set
9940 +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y
9941 +# CONFIG_OVERLAY_FS_INDEX is not set
9942 +CONFIG_OVERLAY_FS_XINO_AUTO=y
9943 +# CONFIG_OVERLAY_FS_METACOPY is not set
9946 +# Caches
9948 +CONFIG_FSCACHE=m
9949 +CONFIG_FSCACHE_STATS=y
9950 +# CONFIG_FSCACHE_HISTOGRAM is not set
9951 +# CONFIG_FSCACHE_DEBUG is not set
9952 +# CONFIG_FSCACHE_OBJECT_LIST is not set
9953 +CONFIG_CACHEFILES=m
9954 +# CONFIG_CACHEFILES_DEBUG is not set
9955 +# CONFIG_CACHEFILES_HISTOGRAM is not set
9956 +# end of Caches
9959 +# CD-ROM/DVD Filesystems
9961 +CONFIG_ISO9660_FS=m
9962 +CONFIG_JOLIET=y
9963 +CONFIG_ZISOFS=y
9964 +CONFIG_UDF_FS=m
9965 +# end of CD-ROM/DVD Filesystems
9968 +# DOS/FAT/EXFAT/NT Filesystems
9970 +CONFIG_FAT_FS=y
9971 +CONFIG_MSDOS_FS=m
9972 +CONFIG_VFAT_FS=y
9973 +CONFIG_FAT_DEFAULT_CODEPAGE=437
9974 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
9975 +# CONFIG_FAT_DEFAULT_UTF8 is not set
9976 +CONFIG_EXFAT_FS=m
9977 +CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
9978 +# CONFIG_NTFS_FS is not set
9979 +CONFIG_NTFS3_FS=m
9980 +# CONFIG_NTFS3_64BIT_CLUSTER is not set
9981 +CONFIG_NTFS3_LZX_XPRESS=y
9982 +# CONFIG_NTFS3_FS_POSIX_ACL is not set
9983 +# end of DOS/FAT/EXFAT/NT Filesystems
9986 +# Pseudo filesystems
9988 +CONFIG_PROC_FS=y
9989 +CONFIG_PROC_KCORE=y
9990 +CONFIG_PROC_VMCORE=y
9991 +CONFIG_PROC_VMCORE_DEVICE_DUMP=y
9992 +CONFIG_PROC_SYSCTL=y
9993 +CONFIG_PROC_PAGE_MONITOR=y
9994 +CONFIG_PROC_CHILDREN=y
9995 +CONFIG_PROC_PID_ARCH_STATUS=y
9996 +CONFIG_PROC_CPU_RESCTRL=y
9997 +CONFIG_KERNFS=y
9998 +CONFIG_SYSFS=y
9999 +CONFIG_TMPFS=y
10000 +CONFIG_TMPFS_POSIX_ACL=y
10001 +CONFIG_TMPFS_XATTR=y
10002 +CONFIG_TMPFS_INODE64=y
10003 +CONFIG_HUGETLBFS=y
10004 +CONFIG_HUGETLB_PAGE=y
10005 +CONFIG_MEMFD_CREATE=y
10006 +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
10007 +CONFIG_CONFIGFS_FS=y
10008 +CONFIG_EFIVAR_FS=y
10009 +# end of Pseudo filesystems
10011 +CONFIG_MISC_FILESYSTEMS=y
10012 +CONFIG_ORANGEFS_FS=m
10013 +CONFIG_ADFS_FS=m
10014 +# CONFIG_ADFS_FS_RW is not set
10015 +CONFIG_AFFS_FS=m
10016 +CONFIG_ECRYPT_FS=y
10017 +CONFIG_ECRYPT_FS_MESSAGING=y
10018 +CONFIG_HFS_FS=m
10019 +CONFIG_HFSPLUS_FS=m
10020 +CONFIG_BEFS_FS=m
10021 +# CONFIG_BEFS_DEBUG is not set
10022 +CONFIG_BFS_FS=m
10023 +CONFIG_EFS_FS=m
10024 +CONFIG_JFFS2_FS=m
10025 +CONFIG_JFFS2_FS_DEBUG=0
10026 +CONFIG_JFFS2_FS_WRITEBUFFER=y
10027 +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
10028 +# CONFIG_JFFS2_SUMMARY is not set
10029 +CONFIG_JFFS2_FS_XATTR=y
10030 +CONFIG_JFFS2_FS_POSIX_ACL=y
10031 +CONFIG_JFFS2_FS_SECURITY=y
10032 +CONFIG_JFFS2_COMPRESSION_OPTIONS=y
10033 +CONFIG_JFFS2_ZLIB=y
10034 +CONFIG_JFFS2_LZO=y
10035 +CONFIG_JFFS2_RTIME=y
10036 +# CONFIG_JFFS2_RUBIN is not set
10037 +# CONFIG_JFFS2_CMODE_NONE is not set
10038 +# CONFIG_JFFS2_CMODE_PRIORITY is not set
10039 +# CONFIG_JFFS2_CMODE_SIZE is not set
10040 +CONFIG_JFFS2_CMODE_FAVOURLZO=y
10041 +CONFIG_UBIFS_FS=m
10042 +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
10043 +CONFIG_UBIFS_FS_LZO=y
10044 +CONFIG_UBIFS_FS_ZLIB=y
10045 +CONFIG_UBIFS_FS_ZSTD=y
10046 +# CONFIG_UBIFS_ATIME_SUPPORT is not set
10047 +CONFIG_UBIFS_FS_XATTR=y
10048 +CONFIG_UBIFS_FS_SECURITY=y
10049 +CONFIG_UBIFS_FS_AUTHENTICATION=y
10050 +CONFIG_CRAMFS=m
10051 +CONFIG_CRAMFS_BLOCKDEV=y
10052 +CONFIG_CRAMFS_MTD=y
10053 +CONFIG_SQUASHFS=y
10054 +# CONFIG_SQUASHFS_FILE_CACHE is not set
10055 +CONFIG_SQUASHFS_FILE_DIRECT=y
10056 +CONFIG_SQUASHFS_DECOMP_SINGLE=y
10057 +# CONFIG_SQUASHFS_DECOMP_MULTI is not set
10058 +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
10059 +CONFIG_SQUASHFS_XATTR=y
10060 +CONFIG_SQUASHFS_ZLIB=y
10061 +CONFIG_SQUASHFS_LZ4=y
10062 +CONFIG_SQUASHFS_LZO=y
10063 +CONFIG_SQUASHFS_XZ=y
10064 +CONFIG_SQUASHFS_ZSTD=y
10065 +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
10066 +# CONFIG_SQUASHFS_EMBEDDED is not set
10067 +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
10068 +CONFIG_VXFS_FS=m
10069 +CONFIG_MINIX_FS=m
10070 +CONFIG_OMFS_FS=m
10071 +CONFIG_HPFS_FS=m
10072 +CONFIG_QNX4FS_FS=m
10073 +CONFIG_QNX6FS_FS=m
10074 +# CONFIG_QNX6FS_DEBUG is not set
10075 +CONFIG_ROMFS_FS=m
10076 +CONFIG_ROMFS_BACKED_BY_BLOCK=y
10077 +# CONFIG_ROMFS_BACKED_BY_MTD is not set
10078 +# CONFIG_ROMFS_BACKED_BY_BOTH is not set
10079 +CONFIG_ROMFS_ON_BLOCK=y
10080 +CONFIG_PSTORE=y
10081 +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
10082 +# CONFIG_PSTORE_DEFLATE_COMPRESS is not set
10083 +# CONFIG_PSTORE_LZO_COMPRESS is not set
10084 +# CONFIG_PSTORE_LZ4_COMPRESS is not set
10085 +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set
10086 +# CONFIG_PSTORE_842_COMPRESS is not set
10087 +CONFIG_PSTORE_ZSTD_COMPRESS=y
10088 +CONFIG_PSTORE_COMPRESS=y
10089 +CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y
10090 +CONFIG_PSTORE_COMPRESS_DEFAULT="zstd"
10091 +# CONFIG_PSTORE_CONSOLE is not set
10092 +# CONFIG_PSTORE_PMSG is not set
10093 +CONFIG_PSTORE_RAM=m
10094 +CONFIG_PSTORE_ZONE=m
10095 +CONFIG_PSTORE_BLK=m
10096 +CONFIG_PSTORE_BLK_BLKDEV=""
10097 +CONFIG_PSTORE_BLK_KMSG_SIZE=64
10098 +CONFIG_PSTORE_BLK_MAX_REASON=2
10099 +CONFIG_SYSV_FS=m
10100 +CONFIG_UFS_FS=m
10101 +# CONFIG_UFS_FS_WRITE is not set
10102 +# CONFIG_UFS_DEBUG is not set
10103 +CONFIG_EROFS_FS=m
10104 +# CONFIG_EROFS_FS_DEBUG is not set
10105 +CONFIG_EROFS_FS_XATTR=y
10106 +CONFIG_EROFS_FS_POSIX_ACL=y
10107 +CONFIG_EROFS_FS_SECURITY=y
10108 +CONFIG_EROFS_FS_ZIP=y
10109 +CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT=1
10110 +CONFIG_VBOXSF_FS=m
10111 +CONFIG_NETWORK_FILESYSTEMS=y
10112 +CONFIG_NFS_FS=m
10113 +CONFIG_NFS_V2=m
10114 +CONFIG_NFS_V3=m
10115 +CONFIG_NFS_V3_ACL=y
10116 +CONFIG_NFS_V4=m
10117 +CONFIG_NFS_SWAP=y
10118 +CONFIG_NFS_V4_1=y
10119 +CONFIG_NFS_V4_2=y
10120 +CONFIG_PNFS_FILE_LAYOUT=m
10121 +CONFIG_PNFS_BLOCK=m
10122 +CONFIG_PNFS_FLEXFILE_LAYOUT=m
10123 +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
10124 +CONFIG_NFS_V4_1_MIGRATION=y
10125 +CONFIG_NFS_V4_SECURITY_LABEL=y
10126 +CONFIG_NFS_FSCACHE=y
10127 +# CONFIG_NFS_USE_LEGACY_DNS is not set
10128 +CONFIG_NFS_USE_KERNEL_DNS=y
10129 +CONFIG_NFS_DEBUG=y
10130 +CONFIG_NFS_DISABLE_UDP_SUPPORT=y
10131 +# CONFIG_NFS_V4_2_READ_PLUS is not set
10132 +CONFIG_NFSD=m
10133 +CONFIG_NFSD_V2_ACL=y
10134 +CONFIG_NFSD_V3=y
10135 +CONFIG_NFSD_V3_ACL=y
10136 +CONFIG_NFSD_V4=y
10137 +CONFIG_NFSD_PNFS=y
10138 +CONFIG_NFSD_BLOCKLAYOUT=y
10139 +CONFIG_NFSD_SCSILAYOUT=y
10140 +CONFIG_NFSD_FLEXFILELAYOUT=y
10141 +CONFIG_NFSD_V4_2_INTER_SSC=y
10142 +CONFIG_NFSD_V4_SECURITY_LABEL=y
10143 +CONFIG_GRACE_PERIOD=m
10144 +CONFIG_LOCKD=m
10145 +CONFIG_LOCKD_V4=y
10146 +CONFIG_NFS_ACL_SUPPORT=m
10147 +CONFIG_NFS_COMMON=y
10148 +CONFIG_NFS_V4_2_SSC_HELPER=m
10149 +CONFIG_SUNRPC=m
10150 +CONFIG_SUNRPC_GSS=m
10151 +CONFIG_SUNRPC_BACKCHANNEL=y
10152 +CONFIG_SUNRPC_SWAP=y
10153 +CONFIG_RPCSEC_GSS_KRB5=m
10154 +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set
10155 +CONFIG_SUNRPC_DEBUG=y
10156 +CONFIG_SUNRPC_XPRT_RDMA=m
10157 +CONFIG_CEPH_FS=m
10158 +CONFIG_CEPH_FSCACHE=y
10159 +CONFIG_CEPH_FS_POSIX_ACL=y
10160 +CONFIG_CEPH_FS_SECURITY_LABEL=y
10161 +CONFIG_CIFS=m
10162 +# CONFIG_CIFS_STATS2 is not set
10163 +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
10164 +CONFIG_CIFS_WEAK_PW_HASH=y
10165 +CONFIG_CIFS_UPCALL=y
10166 +CONFIG_CIFS_XATTR=y
10167 +CONFIG_CIFS_POSIX=y
10168 +CONFIG_CIFS_DEBUG=y
10169 +# CONFIG_CIFS_DEBUG2 is not set
10170 +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
10171 +CONFIG_CIFS_DFS_UPCALL=y
10172 +CONFIG_CIFS_SWN_UPCALL=y
10173 +# CONFIG_CIFS_SMB_DIRECT is not set
10174 +CONFIG_CIFS_FSCACHE=y
10175 +CONFIG_CODA_FS=m
10176 +CONFIG_AFS_FS=m
10177 +# CONFIG_AFS_DEBUG is not set
10178 +CONFIG_AFS_FSCACHE=y
10179 +# CONFIG_AFS_DEBUG_CURSOR is not set
10180 +CONFIG_9P_FS=m
10181 +CONFIG_9P_FSCACHE=y
10182 +CONFIG_9P_FS_POSIX_ACL=y
10183 +CONFIG_9P_FS_SECURITY=y
10184 +CONFIG_NLS=y
10185 +CONFIG_NLS_DEFAULT="utf8"
10186 +CONFIG_NLS_CODEPAGE_437=y
10187 +CONFIG_NLS_CODEPAGE_737=m
10188 +CONFIG_NLS_CODEPAGE_775=m
10189 +CONFIG_NLS_CODEPAGE_850=m
10190 +CONFIG_NLS_CODEPAGE_852=m
10191 +CONFIG_NLS_CODEPAGE_855=m
10192 +CONFIG_NLS_CODEPAGE_857=m
10193 +CONFIG_NLS_CODEPAGE_860=m
10194 +CONFIG_NLS_CODEPAGE_861=m
10195 +CONFIG_NLS_CODEPAGE_862=m
10196 +CONFIG_NLS_CODEPAGE_863=m
10197 +CONFIG_NLS_CODEPAGE_864=m
10198 +CONFIG_NLS_CODEPAGE_865=m
10199 +CONFIG_NLS_CODEPAGE_866=m
10200 +CONFIG_NLS_CODEPAGE_869=m
10201 +CONFIG_NLS_CODEPAGE_936=m
10202 +CONFIG_NLS_CODEPAGE_950=m
10203 +CONFIG_NLS_CODEPAGE_932=m
10204 +CONFIG_NLS_CODEPAGE_949=m
10205 +CONFIG_NLS_CODEPAGE_874=m
10206 +CONFIG_NLS_ISO8859_8=m
10207 +CONFIG_NLS_CODEPAGE_1250=m
10208 +CONFIG_NLS_CODEPAGE_1251=m
10209 +CONFIG_NLS_ASCII=m
10210 +CONFIG_NLS_ISO8859_1=m
10211 +CONFIG_NLS_ISO8859_2=m
10212 +CONFIG_NLS_ISO8859_3=m
10213 +CONFIG_NLS_ISO8859_4=m
10214 +CONFIG_NLS_ISO8859_5=m
10215 +CONFIG_NLS_ISO8859_6=m
10216 +CONFIG_NLS_ISO8859_7=m
10217 +CONFIG_NLS_ISO8859_9=m
10218 +CONFIG_NLS_ISO8859_13=m
10219 +CONFIG_NLS_ISO8859_14=m
10220 +CONFIG_NLS_ISO8859_15=m
10221 +CONFIG_NLS_KOI8_R=m
10222 +CONFIG_NLS_KOI8_U=m
10223 +CONFIG_NLS_MAC_ROMAN=m
10224 +CONFIG_NLS_MAC_CELTIC=m
10225 +CONFIG_NLS_MAC_CENTEURO=m
10226 +CONFIG_NLS_MAC_CROATIAN=m
10227 +CONFIG_NLS_MAC_CYRILLIC=m
10228 +CONFIG_NLS_MAC_GAELIC=m
10229 +CONFIG_NLS_MAC_GREEK=m
10230 +CONFIG_NLS_MAC_ICELAND=m
10231 +CONFIG_NLS_MAC_INUIT=m
10232 +CONFIG_NLS_MAC_ROMANIAN=m
10233 +CONFIG_NLS_MAC_TURKISH=m
10234 +CONFIG_NLS_UTF8=m
10235 +CONFIG_DLM=m
10236 +# CONFIG_DLM_DEBUG is not set
10237 +CONFIG_UNICODE=y
10238 +# CONFIG_UNICODE_NORMALIZATION_SELFTEST is not set
10239 +CONFIG_IO_WQ=y
10240 +# end of File systems
10243 +# Security options
10245 +CONFIG_KEYS=y
10246 +CONFIG_KEYS_REQUEST_CACHE=y
10247 +CONFIG_PERSISTENT_KEYRINGS=y
10248 +CONFIG_TRUSTED_KEYS=y
10249 +CONFIG_ENCRYPTED_KEYS=y
10250 +CONFIG_KEY_DH_OPERATIONS=y
10251 +CONFIG_KEY_NOTIFICATIONS=y
10252 +CONFIG_SECURITY_DMESG_RESTRICT=y
10253 +CONFIG_SECURITY=y
10254 +CONFIG_SECURITYFS=y
10255 +CONFIG_SECURITY_NETWORK=y
10256 +CONFIG_PAGE_TABLE_ISOLATION=y
10257 +CONFIG_SECURITY_INFINIBAND=y
10258 +CONFIG_SECURITY_NETWORK_XFRM=y
10259 +CONFIG_SECURITY_PATH=y
10260 +CONFIG_INTEL_TXT=y
10261 +CONFIG_LSM_MMAP_MIN_ADDR=0
10262 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
10263 +CONFIG_HARDENED_USERCOPY=y
10264 +CONFIG_HARDENED_USERCOPY_FALLBACK=y
10265 +# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set
10266 +CONFIG_FORTIFY_SOURCE=y
10267 +# CONFIG_STATIC_USERMODEHELPER is not set
10268 +CONFIG_SECURITY_SELINUX=y
10269 +CONFIG_SECURITY_SELINUX_BOOTPARAM=y
10270 +# CONFIG_SECURITY_SELINUX_DISABLE is not set
10271 +CONFIG_SECURITY_SELINUX_DEVELOP=y
10272 +CONFIG_SECURITY_SELINUX_AVC_STATS=y
10273 +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
10274 +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9
10275 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256
10276 +CONFIG_SECURITY_SMACK=y
10277 +# CONFIG_SECURITY_SMACK_BRINGUP is not set
10278 +CONFIG_SECURITY_SMACK_NETFILTER=y
10279 +CONFIG_SECURITY_SMACK_APPEND_SIGNALS=y
10280 +CONFIG_SECURITY_TOMOYO=y
10281 +CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048
10282 +CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024
10283 +# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set
10284 +CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init"
10285 +CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init"
10286 +# CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING is not set
10287 +CONFIG_SECURITY_APPARMOR=y
10288 +CONFIG_SECURITY_APPARMOR_HASH=y
10289 +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y
10290 +# CONFIG_SECURITY_APPARMOR_DEBUG is not set
10291 +# CONFIG_SECURITY_LOADPIN is not set
10292 +CONFIG_SECURITY_YAMA=y
10293 +CONFIG_SECURITY_SAFESETID=y
10294 +CONFIG_SECURITY_LOCKDOWN_LSM=y
10295 +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
10296 +CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y
10297 +# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set
10298 +# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set
10299 +CONFIG_INTEGRITY=y
10300 +CONFIG_INTEGRITY_SIGNATURE=y
10301 +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
10302 +CONFIG_INTEGRITY_TRUSTED_KEYRING=y
10303 +CONFIG_INTEGRITY_PLATFORM_KEYRING=y
10304 +CONFIG_LOAD_UEFI_KEYS=y
10305 +CONFIG_INTEGRITY_AUDIT=y
10306 +CONFIG_IMA=y
10307 +CONFIG_IMA_MEASURE_PCR_IDX=10
10308 +CONFIG_IMA_LSM_RULES=y
10309 +# CONFIG_IMA_TEMPLATE is not set
10310 +CONFIG_IMA_NG_TEMPLATE=y
10311 +# CONFIG_IMA_SIG_TEMPLATE is not set
10312 +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
10313 +CONFIG_IMA_DEFAULT_HASH_SHA1=y
10314 +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set
10315 +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set
10316 +CONFIG_IMA_DEFAULT_HASH="sha1"
10317 +# CONFIG_IMA_WRITE_POLICY is not set
10318 +# CONFIG_IMA_READ_POLICY is not set
10319 +CONFIG_IMA_APPRAISE=y
10320 +# CONFIG_IMA_ARCH_POLICY is not set
10321 +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set
10322 +CONFIG_IMA_APPRAISE_BOOTPARAM=y
10323 +CONFIG_IMA_APPRAISE_MODSIG=y
10324 +CONFIG_IMA_TRUSTED_KEYRING=y
10325 +# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set
10326 +# CONFIG_IMA_BLACKLIST_KEYRING is not set
10327 +# CONFIG_IMA_LOAD_X509 is not set
10328 +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y
10329 +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y
10330 +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set
10331 +CONFIG_EVM=y
10332 +CONFIG_EVM_ATTR_FSUUID=y
10333 +CONFIG_EVM_EXTRA_SMACK_XATTRS=y
10334 +CONFIG_EVM_ADD_XATTRS=y
10335 +# CONFIG_EVM_LOAD_X509 is not set
10336 +# CONFIG_DEFAULT_SECURITY_SELINUX is not set
10337 +# CONFIG_DEFAULT_SECURITY_SMACK is not set
10338 +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
10339 +CONFIG_DEFAULT_SECURITY_APPARMOR=y
10340 +# CONFIG_DEFAULT_SECURITY_DAC is not set
10341 +CONFIG_LSM="lockdown,yama,integrity,apparmor"
10344 +# Kernel hardening options
10348 +# Memory initialization
10350 +CONFIG_INIT_STACK_NONE=y
10351 +CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
10352 +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
10353 +# end of Memory initialization
10354 +# end of Kernel hardening options
10355 +# end of Security options
10357 +CONFIG_XOR_BLOCKS=m
10358 +CONFIG_ASYNC_CORE=m
10359 +CONFIG_ASYNC_MEMCPY=m
10360 +CONFIG_ASYNC_XOR=m
10361 +CONFIG_ASYNC_PQ=m
10362 +CONFIG_ASYNC_RAID6_RECOV=m
10363 +CONFIG_CRYPTO=y
10366 +# Crypto core or helper
10368 +CONFIG_CRYPTO_ALGAPI=y
10369 +CONFIG_CRYPTO_ALGAPI2=y
10370 +CONFIG_CRYPTO_AEAD=y
10371 +CONFIG_CRYPTO_AEAD2=y
10372 +CONFIG_CRYPTO_SKCIPHER=y
10373 +CONFIG_CRYPTO_SKCIPHER2=y
10374 +CONFIG_CRYPTO_HASH=y
10375 +CONFIG_CRYPTO_HASH2=y
10376 +CONFIG_CRYPTO_RNG=y
10377 +CONFIG_CRYPTO_RNG2=y
10378 +CONFIG_CRYPTO_RNG_DEFAULT=y
10379 +CONFIG_CRYPTO_AKCIPHER2=y
10380 +CONFIG_CRYPTO_AKCIPHER=y
10381 +CONFIG_CRYPTO_KPP2=y
10382 +CONFIG_CRYPTO_KPP=y
10383 +CONFIG_CRYPTO_ACOMP2=y
10384 +CONFIG_CRYPTO_MANAGER=y
10385 +CONFIG_CRYPTO_MANAGER2=y
10386 +CONFIG_CRYPTO_USER=m
10387 +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
10388 +CONFIG_CRYPTO_GF128MUL=y
10389 +CONFIG_CRYPTO_NULL=y
10390 +CONFIG_CRYPTO_NULL2=y
10391 +CONFIG_CRYPTO_PCRYPT=m
10392 +CONFIG_CRYPTO_CRYPTD=m
10393 +CONFIG_CRYPTO_AUTHENC=m
10394 +CONFIG_CRYPTO_TEST=m
10395 +CONFIG_CRYPTO_SIMD=m
10396 +CONFIG_CRYPTO_ENGINE=m
10399 +# Public-key cryptography
10401 +CONFIG_CRYPTO_RSA=y
10402 +CONFIG_CRYPTO_DH=y
10403 +CONFIG_CRYPTO_ECC=m
10404 +CONFIG_CRYPTO_ECDH=m
10405 +CONFIG_CRYPTO_ECRDSA=m
10406 +CONFIG_CRYPTO_SM2=m
10407 +CONFIG_CRYPTO_CURVE25519=m
10408 +CONFIG_CRYPTO_CURVE25519_X86=m
10411 +# Authenticated Encryption with Associated Data
10413 +CONFIG_CRYPTO_CCM=m
10414 +CONFIG_CRYPTO_GCM=y
10415 +CONFIG_CRYPTO_CHACHA20POLY1305=m
10416 +CONFIG_CRYPTO_AEGIS128=m
10417 +CONFIG_CRYPTO_AEGIS128_AESNI_SSE2=m
10418 +CONFIG_CRYPTO_SEQIV=y
10419 +CONFIG_CRYPTO_ECHAINIV=m
10422 +# Block modes
10424 +CONFIG_CRYPTO_CBC=y
10425 +CONFIG_CRYPTO_CFB=m
10426 +CONFIG_CRYPTO_CTR=y
10427 +CONFIG_CRYPTO_CTS=y
10428 +CONFIG_CRYPTO_ECB=y
10429 +CONFIG_CRYPTO_LRW=m
10430 +CONFIG_CRYPTO_OFB=m
10431 +CONFIG_CRYPTO_PCBC=m
10432 +CONFIG_CRYPTO_XTS=y
10433 +CONFIG_CRYPTO_KEYWRAP=m
10434 +CONFIG_CRYPTO_NHPOLY1305=m
10435 +CONFIG_CRYPTO_NHPOLY1305_SSE2=m
10436 +CONFIG_CRYPTO_NHPOLY1305_AVX2=m
10437 +CONFIG_CRYPTO_ADIANTUM=m
10438 +CONFIG_CRYPTO_ESSIV=m
10441 +# Hash modes
10443 +CONFIG_CRYPTO_CMAC=m
10444 +CONFIG_CRYPTO_HMAC=y
10445 +CONFIG_CRYPTO_XCBC=m
10446 +CONFIG_CRYPTO_VMAC=m
10449 +# Digest
10451 +CONFIG_CRYPTO_CRC32C=y
10452 +CONFIG_CRYPTO_CRC32C_INTEL=y
10453 +CONFIG_CRYPTO_CRC32=m
10454 +CONFIG_CRYPTO_CRC32_PCLMUL=m
10455 +CONFIG_CRYPTO_XXHASH=m
10456 +CONFIG_CRYPTO_BLAKE2B=m
10457 +CONFIG_CRYPTO_BLAKE2S=m
10458 +CONFIG_CRYPTO_BLAKE2S_X86=m
10459 +CONFIG_CRYPTO_CRCT10DIF=y
10460 +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
10461 +CONFIG_CRYPTO_GHASH=y
10462 +CONFIG_CRYPTO_POLY1305=m
10463 +CONFIG_CRYPTO_POLY1305_X86_64=m
10464 +CONFIG_CRYPTO_MD4=m
10465 +CONFIG_CRYPTO_MD5=y
10466 +CONFIG_CRYPTO_MICHAEL_MIC=m
10467 +CONFIG_CRYPTO_RMD160=m
10468 +CONFIG_CRYPTO_SHA1=y
10469 +CONFIG_CRYPTO_SHA1_SSSE3=m
10470 +CONFIG_CRYPTO_SHA256_SSSE3=m
10471 +CONFIG_CRYPTO_SHA512_SSSE3=m
10472 +CONFIG_CRYPTO_SHA256=y
10473 +CONFIG_CRYPTO_SHA512=y
10474 +CONFIG_CRYPTO_SHA3=m
10475 +CONFIG_CRYPTO_SM3=m
10476 +CONFIG_CRYPTO_STREEBOG=m
10477 +CONFIG_CRYPTO_WP512=m
10478 +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
10481 +# Ciphers
10483 +CONFIG_CRYPTO_AES=y
10484 +CONFIG_CRYPTO_AES_TI=m
10485 +CONFIG_CRYPTO_AES_NI_INTEL=m
10486 +CONFIG_CRYPTO_BLOWFISH=m
10487 +CONFIG_CRYPTO_BLOWFISH_COMMON=m
10488 +CONFIG_CRYPTO_BLOWFISH_X86_64=m
10489 +CONFIG_CRYPTO_CAMELLIA=m
10490 +CONFIG_CRYPTO_CAMELLIA_X86_64=m
10491 +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
10492 +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
10493 +CONFIG_CRYPTO_CAST_COMMON=m
10494 +CONFIG_CRYPTO_CAST5=m
10495 +CONFIG_CRYPTO_CAST5_AVX_X86_64=m
10496 +CONFIG_CRYPTO_CAST6=m
10497 +CONFIG_CRYPTO_CAST6_AVX_X86_64=m
10498 +CONFIG_CRYPTO_DES=m
10499 +CONFIG_CRYPTO_DES3_EDE_X86_64=m
10500 +CONFIG_CRYPTO_FCRYPT=m
10501 +CONFIG_CRYPTO_CHACHA20=m
10502 +CONFIG_CRYPTO_CHACHA20_X86_64=m
10503 +CONFIG_CRYPTO_SERPENT=m
10504 +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
10505 +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
10506 +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
10507 +CONFIG_CRYPTO_SM4=m
10508 +CONFIG_CRYPTO_TWOFISH=m
10509 +CONFIG_CRYPTO_TWOFISH_COMMON=m
10510 +CONFIG_CRYPTO_TWOFISH_X86_64=m
10511 +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
10512 +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
10515 +# Compression
10517 +CONFIG_CRYPTO_DEFLATE=y
10518 +CONFIG_CRYPTO_LZO=y
10519 +CONFIG_CRYPTO_842=m
10520 +CONFIG_CRYPTO_LZ4=y
10521 +CONFIG_CRYPTO_LZ4HC=m
10522 +CONFIG_CRYPTO_ZSTD=y
10525 +# Random Number Generation
10527 +CONFIG_CRYPTO_ANSI_CPRNG=m
10528 +CONFIG_CRYPTO_DRBG_MENU=y
10529 +CONFIG_CRYPTO_DRBG_HMAC=y
10530 +CONFIG_CRYPTO_DRBG_HASH=y
10531 +CONFIG_CRYPTO_DRBG_CTR=y
10532 +CONFIG_CRYPTO_DRBG=y
10533 +CONFIG_CRYPTO_JITTERENTROPY=y
10534 +CONFIG_CRYPTO_USER_API=m
10535 +CONFIG_CRYPTO_USER_API_HASH=m
10536 +CONFIG_CRYPTO_USER_API_SKCIPHER=m
10537 +CONFIG_CRYPTO_USER_API_RNG=m
10538 +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set
10539 +CONFIG_CRYPTO_USER_API_AEAD=m
10540 +# CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE is not set
10541 +CONFIG_CRYPTO_STATS=y
10542 +CONFIG_CRYPTO_HASH_INFO=y
10545 +# Crypto library routines
10547 +CONFIG_CRYPTO_LIB_AES=y
10548 +CONFIG_CRYPTO_LIB_ARC4=m
10549 +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=m
10550 +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=m
10551 +CONFIG_CRYPTO_LIB_BLAKE2S=m
10552 +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m
10553 +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m
10554 +CONFIG_CRYPTO_LIB_CHACHA=m
10555 +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m
10556 +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m
10557 +CONFIG_CRYPTO_LIB_CURVE25519=m
10558 +CONFIG_CRYPTO_LIB_DES=m
10559 +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11
10560 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m
10561 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m
10562 +CONFIG_CRYPTO_LIB_POLY1305=m
10563 +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
10564 +CONFIG_CRYPTO_LIB_SHA256=y
10565 +CONFIG_CRYPTO_HW=y
10566 +CONFIG_CRYPTO_DEV_PADLOCK=y
10567 +CONFIG_CRYPTO_DEV_PADLOCK_AES=m
10568 +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
10569 +CONFIG_CRYPTO_DEV_ATMEL_I2C=m
10570 +CONFIG_CRYPTO_DEV_ATMEL_ECC=m
10571 +CONFIG_CRYPTO_DEV_ATMEL_SHA204A=m
10572 +CONFIG_CRYPTO_DEV_CCP=y
10573 +CONFIG_CRYPTO_DEV_CCP_DD=m
10574 +CONFIG_CRYPTO_DEV_SP_CCP=y
10575 +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
10576 +CONFIG_CRYPTO_DEV_SP_PSP=y
10577 +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set
10578 +CONFIG_CRYPTO_DEV_QAT=m
10579 +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
10580 +CONFIG_CRYPTO_DEV_QAT_C3XXX=m
10581 +CONFIG_CRYPTO_DEV_QAT_C62X=m
10582 +CONFIG_CRYPTO_DEV_QAT_4XXX=m
10583 +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
10584 +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
10585 +CONFIG_CRYPTO_DEV_QAT_C62XVF=m
10586 +CONFIG_CRYPTO_DEV_NITROX=m
10587 +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
10588 +CONFIG_CRYPTO_DEV_CHELSIO=m
10589 +CONFIG_CRYPTO_DEV_VIRTIO=m
10590 +CONFIG_CRYPTO_DEV_SAFEXCEL=m
10591 +CONFIG_CRYPTO_DEV_AMLOGIC_GXL=m
10592 +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG is not set
10593 +CONFIG_ASYMMETRIC_KEY_TYPE=y
10594 +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
10595 +CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE=m
10596 +CONFIG_X509_CERTIFICATE_PARSER=y
10597 +CONFIG_PKCS8_PRIVATE_KEY_PARSER=m
10598 +CONFIG_TPM_KEY_PARSER=m
10599 +CONFIG_PKCS7_MESSAGE_PARSER=y
10600 +CONFIG_PKCS7_TEST_KEY=m
10601 +CONFIG_SIGNED_PE_FILE_VERIFICATION=y
10604 +# Certificates for signature checking
10606 +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
10607 +CONFIG_SYSTEM_TRUSTED_KEYRING=y
10608 +CONFIG_SYSTEM_TRUSTED_KEYS=""
10609 +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y
10610 +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=4096
10611 +CONFIG_SECONDARY_TRUSTED_KEYRING=y
10612 +CONFIG_SYSTEM_BLACKLIST_KEYRING=y
10613 +CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
10614 +# end of Certificates for signature checking
10617 +# Library routines
10619 +CONFIG_RAID6_PQ=m
10620 +CONFIG_RAID6_PQ_BENCHMARK=y
10621 +CONFIG_LINEAR_RANGES=y
10622 +CONFIG_PACKING=y
10623 +CONFIG_BITREVERSE=y
10624 +CONFIG_GENERIC_STRNCPY_FROM_USER=y
10625 +CONFIG_GENERIC_STRNLEN_USER=y
10626 +CONFIG_GENERIC_NET_UTILS=y
10627 +CONFIG_GENERIC_FIND_FIRST_BIT=y
10628 +CONFIG_CORDIC=m
10629 +# CONFIG_PRIME_NUMBERS is not set
10630 +CONFIG_RATIONAL=y
10631 +CONFIG_GENERIC_PCI_IOMAP=y
10632 +CONFIG_GENERIC_IOMAP=y
10633 +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
10634 +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
10635 +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y
10636 +CONFIG_CRC_CCITT=y
10637 +CONFIG_CRC16=y
10638 +CONFIG_CRC_T10DIF=y
10639 +CONFIG_CRC_ITU_T=m
10640 +CONFIG_CRC32=y
10641 +# CONFIG_CRC32_SELFTEST is not set
10642 +CONFIG_CRC32_SLICEBY8=y
10643 +# CONFIG_CRC32_SLICEBY4 is not set
10644 +# CONFIG_CRC32_SARWATE is not set
10645 +# CONFIG_CRC32_BIT is not set
10646 +CONFIG_CRC64=m
10647 +CONFIG_CRC4=m
10648 +CONFIG_CRC7=m
10649 +CONFIG_LIBCRC32C=m
10650 +CONFIG_CRC8=m
10651 +CONFIG_XXHASH=y
10652 +# CONFIG_RANDOM32_SELFTEST is not set
10653 +CONFIG_842_COMPRESS=m
10654 +CONFIG_842_DECOMPRESS=m
10655 +CONFIG_ZLIB_INFLATE=y
10656 +CONFIG_ZLIB_DEFLATE=y
10657 +CONFIG_LZO_COMPRESS=y
10658 +CONFIG_LZO_DECOMPRESS=y
10659 +CONFIG_LZ4_COMPRESS=y
10660 +CONFIG_LZ4HC_COMPRESS=y
10661 +CONFIG_LZ4_DECOMPRESS=y
10662 +CONFIG_ZSTD_COMPRESS=y
10663 +CONFIG_ZSTD_DECOMPRESS=y
10664 +CONFIG_XZ_DEC=y
10665 +CONFIG_XZ_DEC_X86=y
10666 +CONFIG_XZ_DEC_POWERPC=y
10667 +CONFIG_XZ_DEC_IA64=y
10668 +CONFIG_XZ_DEC_ARM=y
10669 +CONFIG_XZ_DEC_ARMTHUMB=y
10670 +CONFIG_XZ_DEC_SPARC=y
10671 +CONFIG_XZ_DEC_BCJ=y
10672 +CONFIG_XZ_DEC_TEST=m
10673 +CONFIG_DECOMPRESS_GZIP=y
10674 +CONFIG_DECOMPRESS_BZIP2=y
10675 +CONFIG_DECOMPRESS_LZMA=y
10676 +CONFIG_DECOMPRESS_XZ=y
10677 +CONFIG_DECOMPRESS_LZO=y
10678 +CONFIG_DECOMPRESS_LZ4=y
10679 +CONFIG_DECOMPRESS_ZSTD=y
10680 +CONFIG_GENERIC_ALLOCATOR=y
10681 +CONFIG_REED_SOLOMON=m
10682 +CONFIG_REED_SOLOMON_ENC8=y
10683 +CONFIG_REED_SOLOMON_DEC8=y
10684 +CONFIG_REED_SOLOMON_DEC16=y
10685 +CONFIG_BCH=m
10686 +CONFIG_TEXTSEARCH=y
10687 +CONFIG_TEXTSEARCH_KMP=m
10688 +CONFIG_TEXTSEARCH_BM=m
10689 +CONFIG_TEXTSEARCH_FSM=m
10690 +CONFIG_BTREE=y
10691 +CONFIG_INTERVAL_TREE=y
10692 +CONFIG_XARRAY_MULTI=y
10693 +CONFIG_ASSOCIATIVE_ARRAY=y
10694 +CONFIG_HAS_IOMEM=y
10695 +CONFIG_HAS_IOPORT_MAP=y
10696 +CONFIG_HAS_DMA=y
10697 +CONFIG_DMA_OPS=y
10698 +CONFIG_NEED_SG_DMA_LENGTH=y
10699 +CONFIG_NEED_DMA_MAP_STATE=y
10700 +CONFIG_ARCH_DMA_ADDR_T_64BIT=y
10701 +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y
10702 +CONFIG_SWIOTLB=y
10703 +CONFIG_DMA_COHERENT_POOL=y
10704 +# CONFIG_DMA_API_DEBUG is not set
10705 +# CONFIG_DMA_MAP_BENCHMARK is not set
10706 +CONFIG_SGL_ALLOC=y
10707 +CONFIG_IOMMU_HELPER=y
10708 +CONFIG_CHECK_SIGNATURE=y
10709 +CONFIG_CPU_RMAP=y
10710 +CONFIG_DQL=y
10711 +CONFIG_GLOB=y
10712 +# CONFIG_GLOB_SELFTEST is not set
10713 +CONFIG_NLATTR=y
10714 +CONFIG_LRU_CACHE=m
10715 +CONFIG_CLZ_TAB=y
10716 +CONFIG_IRQ_POLL=y
10717 +CONFIG_MPILIB=y
10718 +CONFIG_SIGNATURE=y
10719 +CONFIG_DIMLIB=y
10720 +CONFIG_OID_REGISTRY=y
10721 +CONFIG_UCS2_STRING=y
10722 +CONFIG_HAVE_GENERIC_VDSO=y
10723 +CONFIG_GENERIC_GETTIMEOFDAY=y
10724 +CONFIG_GENERIC_VDSO_TIME_NS=y
10725 +CONFIG_FONT_SUPPORT=y
10726 +CONFIG_FONTS=y
10727 +CONFIG_FONT_8x8=y
10728 +CONFIG_FONT_8x16=y
10729 +# CONFIG_FONT_6x11 is not set
10730 +# CONFIG_FONT_7x14 is not set
10731 +# CONFIG_FONT_PEARL_8x8 is not set
10732 +CONFIG_FONT_ACORN_8x8=y
10733 +# CONFIG_FONT_MINI_4x6 is not set
10734 +CONFIG_FONT_6x10=y
10735 +# CONFIG_FONT_10x18 is not set
10736 +# CONFIG_FONT_SUN8x16 is not set
10737 +# CONFIG_FONT_SUN12x22 is not set
10738 +CONFIG_FONT_TER16x32=y
10739 +# CONFIG_FONT_6x8 is not set
10740 +CONFIG_SG_POOL=y
10741 +CONFIG_ARCH_HAS_PMEM_API=y
10742 +CONFIG_MEMREGION=y
10743 +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
10744 +CONFIG_ARCH_HAS_COPY_MC=y
10745 +CONFIG_ARCH_STACKWALK=y
10746 +CONFIG_SBITMAP=y
10747 +CONFIG_PARMAN=m
10748 +CONFIG_OBJAGG=m
10749 +# CONFIG_STRING_SELFTEST is not set
10750 +# end of Library routines
10752 +CONFIG_PLDMFW=y
10755 +# Kernel hacking
10759 +# printk and dmesg options
10761 +CONFIG_PRINTK_TIME=y
10762 +# CONFIG_PRINTK_CALLER is not set
10763 +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
10764 +CONFIG_CONSOLE_LOGLEVEL_QUIET=3
10765 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
10766 +CONFIG_BOOT_PRINTK_DELAY=y
10767 +CONFIG_DYNAMIC_DEBUG=y
10768 +CONFIG_DYNAMIC_DEBUG_CORE=y
10769 +# CONFIG_SYMBOLIC_ERRNAME is not set
10770 +# CONFIG_DEBUG_BUGVERBOSE is not set
10771 +# end of printk and dmesg options
10774 +# Compile-time checks and compiler options
10776 +# CONFIG_DEBUG_INFO is not set
10777 +CONFIG_FRAME_WARN=1024
10778 +# CONFIG_STRIP_ASM_SYMS is not set
10779 +# CONFIG_READABLE_ASM is not set
10780 +# CONFIG_HEADERS_INSTALL is not set
10781 +# CONFIG_DEBUG_SECTION_MISMATCH is not set
10782 +CONFIG_SECTION_MISMATCH_WARN_ONLY=y
10783 +# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B is not set
10784 +CONFIG_STACK_VALIDATION=y
10785 +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
10786 +# end of Compile-time checks and compiler options
10789 +# Generic Kernel Debugging Instruments
10791 +CONFIG_MAGIC_SYSRQ=y
10792 +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x01b6
10793 +CONFIG_MAGIC_SYSRQ_SERIAL=y
10794 +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE=""
10795 +CONFIG_DEBUG_FS=y
10796 +CONFIG_DEBUG_FS_ALLOW_ALL=y
10797 +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set
10798 +# CONFIG_DEBUG_FS_ALLOW_NONE is not set
10799 +CONFIG_HAVE_ARCH_KGDB=y
10800 +CONFIG_KGDB=y
10801 +CONFIG_KGDB_HONOUR_BLOCKLIST=y
10802 +CONFIG_KGDB_SERIAL_CONSOLE=y
10803 +# CONFIG_KGDB_TESTS is not set
10804 +CONFIG_KGDB_LOW_LEVEL_TRAP=y
10805 +CONFIG_KGDB_KDB=y
10806 +CONFIG_KDB_DEFAULT_ENABLE=0x1
10807 +CONFIG_KDB_KEYBOARD=y
10808 +CONFIG_KDB_CONTINUE_CATASTROPHIC=0
10809 +CONFIG_ARCH_HAS_EARLY_DEBUG=y
10810 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
10811 +# CONFIG_UBSAN is not set
10812 +CONFIG_HAVE_ARCH_KCSAN=y
10813 +CONFIG_HAVE_KCSAN_COMPILER=y
10814 +# CONFIG_KCSAN is not set
10815 +# end of Generic Kernel Debugging Instruments
10817 +CONFIG_DEBUG_KERNEL=y
10818 +CONFIG_DEBUG_MISC=y
10821 +# Memory Debugging
10823 +# CONFIG_PAGE_EXTENSION is not set
10824 +# CONFIG_DEBUG_PAGEALLOC is not set
10825 +# CONFIG_PAGE_OWNER is not set
10826 +CONFIG_PAGE_POISONING=y
10827 +# CONFIG_DEBUG_RODATA_TEST is not set
10828 +CONFIG_ARCH_HAS_DEBUG_WX=y
10829 +CONFIG_DEBUG_WX=y
10830 +CONFIG_GENERIC_PTDUMP=y
10831 +CONFIG_PTDUMP_CORE=y
10832 +# CONFIG_PTDUMP_DEBUGFS is not set
10833 +# CONFIG_DEBUG_OBJECTS is not set
10834 +# CONFIG_SLUB_DEBUG_ON is not set
10835 +# CONFIG_SLUB_STATS is not set
10836 +CONFIG_HAVE_DEBUG_KMEMLEAK=y
10837 +# CONFIG_DEBUG_KMEMLEAK is not set
10838 +# CONFIG_DEBUG_STACK_USAGE is not set
10839 +CONFIG_SCHED_STACK_END_CHECK=y
10840 +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y
10841 +# CONFIG_DEBUG_VM is not set
10842 +# CONFIG_DEBUG_VM_PGTABLE is not set
10843 +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
10844 +# CONFIG_DEBUG_VIRTUAL is not set
10845 +# CONFIG_DEBUG_MEMORY_INIT is not set
10846 +CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
10847 +# CONFIG_DEBUG_PER_CPU_MAPS is not set
10848 +CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y
10849 +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set
10850 +CONFIG_HAVE_ARCH_KASAN=y
10851 +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y
10852 +CONFIG_CC_HAS_KASAN_GENERIC=y
10853 +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
10854 +# CONFIG_KASAN is not set
10855 +CONFIG_HAVE_ARCH_KFENCE=y
10856 +CONFIG_KFENCE=y
10857 +CONFIG_KFENCE_STATIC_KEYS=y
10858 +CONFIG_KFENCE_SAMPLE_INTERVAL=0
10859 +CONFIG_KFENCE_NUM_OBJECTS=255
10860 +CONFIG_KFENCE_STRESS_TEST_FAULTS=0
10861 +# end of Memory Debugging
10863 +# CONFIG_DEBUG_SHIRQ is not set
10866 +# Debug Oops, Lockups and Hangs
10868 +# CONFIG_PANIC_ON_OOPS is not set
10869 +CONFIG_PANIC_ON_OOPS_VALUE=0
10870 +CONFIG_PANIC_TIMEOUT=0
10871 +CONFIG_LOCKUP_DETECTOR=y
10872 +CONFIG_SOFTLOCKUP_DETECTOR=y
10873 +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
10874 +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
10875 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y
10876 +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
10877 +CONFIG_HARDLOCKUP_DETECTOR=y
10878 +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
10879 +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
10880 +CONFIG_DETECT_HUNG_TASK=y
10881 +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
10882 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
10883 +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
10884 +# CONFIG_WQ_WATCHDOG is not set
10885 +# CONFIG_TEST_LOCKUP is not set
10886 +# end of Debug Oops, Lockups and Hangs
10889 +# Scheduler Debugging
10891 +CONFIG_SCHED_DEBUG=y
10892 +CONFIG_SCHED_INFO=y
10893 +CONFIG_SCHEDSTATS=y
10894 +# end of Scheduler Debugging
10896 +# CONFIG_DEBUG_TIMEKEEPING is not set
10897 +# CONFIG_DEBUG_PREEMPT is not set
10900 +# Lock Debugging (spinlocks, mutexes, etc...)
10902 +CONFIG_LOCK_DEBUGGING_SUPPORT=y
10903 +# CONFIG_PROVE_LOCKING is not set
10904 +# CONFIG_LOCK_STAT is not set
10905 +# CONFIG_DEBUG_RT_MUTEXES is not set
10906 +# CONFIG_DEBUG_SPINLOCK is not set
10907 +# CONFIG_DEBUG_MUTEXES is not set
10908 +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
10909 +# CONFIG_DEBUG_RWSEMS is not set
10910 +# CONFIG_DEBUG_LOCK_ALLOC is not set
10911 +# CONFIG_DEBUG_ATOMIC_SLEEP is not set
10912 +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
10913 +# CONFIG_LOCK_TORTURE_TEST is not set
10914 +# CONFIG_WW_MUTEX_SELFTEST is not set
10915 +# CONFIG_SCF_TORTURE_TEST is not set
10916 +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set
10917 +# end of Lock Debugging (spinlocks, mutexes, etc...)
10919 +# CONFIG_DEBUG_IRQFLAGS is not set
10920 +CONFIG_STACKTRACE=y
10921 +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
10922 +# CONFIG_DEBUG_KOBJECT is not set
10925 +# Debug kernel data structures
10927 +# CONFIG_DEBUG_LIST is not set
10928 +# CONFIG_DEBUG_PLIST is not set
10929 +# CONFIG_DEBUG_SG is not set
10930 +# CONFIG_DEBUG_NOTIFIERS is not set
10931 +# CONFIG_BUG_ON_DATA_CORRUPTION is not set
10932 +# end of Debug kernel data structures
10934 +# CONFIG_DEBUG_CREDENTIALS is not set
10937 +# RCU Debugging
10939 +# CONFIG_RCU_SCALE_TEST is not set
10940 +# CONFIG_RCU_TORTURE_TEST is not set
10941 +# CONFIG_RCU_REF_SCALE_TEST is not set
10942 +CONFIG_RCU_CPU_STALL_TIMEOUT=60
10943 +# CONFIG_RCU_TRACE is not set
10944 +# CONFIG_RCU_EQS_DEBUG is not set
10945 +# CONFIG_RCU_STRICT_GRACE_PERIOD is not set
10946 +# end of RCU Debugging
10948 +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
10949 +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
10950 +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
10951 +CONFIG_LATENCYTOP=y
10952 +CONFIG_USER_STACKTRACE_SUPPORT=y
10953 +CONFIG_HAVE_FUNCTION_TRACER=y
10954 +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
10955 +CONFIG_HAVE_DYNAMIC_FTRACE=y
10956 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
10957 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
10958 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
10959 +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
10960 +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
10961 +CONFIG_HAVE_FENTRY=y
10962 +CONFIG_HAVE_OBJTOOL_MCOUNT=y
10963 +CONFIG_HAVE_C_RECORDMCOUNT=y
10964 +CONFIG_TRACING_SUPPORT=y
10965 +# CONFIG_FTRACE is not set
10966 +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
10967 +CONFIG_SAMPLES=y
10968 +# CONFIG_SAMPLE_AUXDISPLAY is not set
10969 +# CONFIG_SAMPLE_KOBJECT is not set
10970 +# CONFIG_SAMPLE_KPROBES is not set
10971 +# CONFIG_SAMPLE_HW_BREAKPOINT is not set
10972 +# CONFIG_SAMPLE_KFIFO is not set
10973 +# CONFIG_SAMPLE_KDB is not set
10974 +# CONFIG_SAMPLE_RPMSG_CLIENT is not set
10975 +# CONFIG_SAMPLE_CONFIGFS is not set
10976 +# CONFIG_SAMPLE_VFIO_MDEV_MTTY is not set
10977 +# CONFIG_SAMPLE_VFIO_MDEV_MDPY is not set
10978 +# CONFIG_SAMPLE_VFIO_MDEV_MDPY_FB is not set
10979 +# CONFIG_SAMPLE_VFIO_MDEV_MBOCHS is not set
10980 +# CONFIG_SAMPLE_WATCHDOG is not set
10981 +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
10982 +CONFIG_STRICT_DEVMEM=y
10983 +# CONFIG_IO_STRICT_DEVMEM is not set
10986 +# x86 Debugging
10988 +CONFIG_TRACE_IRQFLAGS_SUPPORT=y
10989 +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
10990 +CONFIG_EARLY_PRINTK_USB=y
10991 +# CONFIG_X86_VERBOSE_BOOTUP is not set
10992 +CONFIG_EARLY_PRINTK=y
10993 +CONFIG_EARLY_PRINTK_DBGP=y
10994 +CONFIG_EARLY_PRINTK_USB_XDBC=y
10995 +# CONFIG_EFI_PGT_DUMP is not set
10996 +# CONFIG_DEBUG_TLBFLUSH is not set
10997 +# CONFIG_IOMMU_DEBUG is not set
10998 +CONFIG_HAVE_MMIOTRACE_SUPPORT=y
10999 +# CONFIG_X86_DECODER_SELFTEST is not set
11000 +# CONFIG_IO_DELAY_0X80 is not set
11001 +CONFIG_IO_DELAY_0XED=y
11002 +# CONFIG_IO_DELAY_UDELAY is not set
11003 +# CONFIG_IO_DELAY_NONE is not set
11004 +# CONFIG_DEBUG_BOOT_PARAMS is not set
11005 +# CONFIG_CPA_DEBUG is not set
11006 +# CONFIG_DEBUG_ENTRY is not set
11007 +# CONFIG_DEBUG_NMI_SELFTEST is not set
11008 +CONFIG_X86_DEBUG_FPU=y
11009 +CONFIG_PUNIT_ATOM_DEBUG=m
11010 +CONFIG_UNWINDER_ORC=y
11011 +# CONFIG_UNWINDER_FRAME_POINTER is not set
11012 +# CONFIG_UNWINDER_GUESS is not set
11013 +# end of x86 Debugging
11016 +# Kernel Testing and Coverage
11018 +# CONFIG_KUNIT is not set
11019 +CONFIG_NOTIFIER_ERROR_INJECTION=m
11020 +CONFIG_PM_NOTIFIER_ERROR_INJECT=m
11021 +# CONFIG_NETDEV_NOTIFIER_ERROR_INJECT is not set
11022 +CONFIG_FUNCTION_ERROR_INJECTION=y
11023 +# CONFIG_FAULT_INJECTION is not set
11024 +CONFIG_ARCH_HAS_KCOV=y
11025 +CONFIG_CC_HAS_SANCOV_TRACE_PC=y
11026 +# CONFIG_KCOV is not set
11027 +CONFIG_RUNTIME_TESTING_MENU=y
11028 +# CONFIG_LKDTM is not set
11029 +# CONFIG_TEST_LIST_SORT is not set
11030 +# CONFIG_TEST_MIN_HEAP is not set
11031 +# CONFIG_TEST_SORT is not set
11032 +# CONFIG_KPROBES_SANITY_TEST is not set
11033 +# CONFIG_BACKTRACE_SELF_TEST is not set
11034 +# CONFIG_RBTREE_TEST is not set
11035 +# CONFIG_REED_SOLOMON_TEST is not set
11036 +# CONFIG_INTERVAL_TREE_TEST is not set
11037 +# CONFIG_PERCPU_TEST is not set
11038 +# CONFIG_ATOMIC64_SELFTEST is not set
11039 +# CONFIG_ASYNC_RAID6_TEST is not set
11040 +# CONFIG_TEST_HEXDUMP is not set
11041 +# CONFIG_TEST_STRING_HELPERS is not set
11042 +# CONFIG_TEST_STRSCPY is not set
11043 +# CONFIG_TEST_KSTRTOX is not set
11044 +# CONFIG_TEST_PRINTF is not set
11045 +# CONFIG_TEST_BITMAP is not set
11046 +# CONFIG_TEST_UUID is not set
11047 +# CONFIG_TEST_XARRAY is not set
11048 +# CONFIG_TEST_OVERFLOW is not set
11049 +# CONFIG_TEST_RHASHTABLE is not set
11050 +# CONFIG_TEST_HASH is not set
11051 +# CONFIG_TEST_IDA is not set
11052 +# CONFIG_TEST_PARMAN is not set
11053 +# CONFIG_TEST_LKM is not set
11054 +# CONFIG_TEST_BITOPS is not set
11055 +# CONFIG_TEST_VMALLOC is not set
11056 +# CONFIG_TEST_USER_COPY is not set
11057 +CONFIG_TEST_BPF=m
11058 +CONFIG_TEST_BLACKHOLE_DEV=m
11059 +# CONFIG_FIND_BIT_BENCHMARK is not set
11060 +# CONFIG_TEST_FIRMWARE is not set
11061 +# CONFIG_TEST_SYSCTL is not set
11062 +# CONFIG_TEST_UDELAY is not set
11063 +# CONFIG_TEST_STATIC_KEYS is not set
11064 +# CONFIG_TEST_KMOD is not set
11065 +# CONFIG_TEST_MEMCAT_P is not set
11066 +# CONFIG_TEST_OBJAGG is not set
11067 +# CONFIG_TEST_STACKINIT is not set
11068 +# CONFIG_TEST_MEMINIT is not set
11069 +# CONFIG_TEST_HMM is not set
11070 +# CONFIG_TEST_FREE_PAGES is not set
11071 +# CONFIG_TEST_FPU is not set
11072 +CONFIG_MEMTEST=y
11073 +# CONFIG_HYPERV_TESTING is not set
11074 +# end of Kernel Testing and Coverage
11075 +# end of Kernel hacking
11076 diff --git a/.gitignore b/.gitignore
11077 index 3af66272d6f1..127012c1f717 100644
11078 --- a/.gitignore
11079 +++ b/.gitignore
11080 @@ -57,6 +57,7 @@ modules.order
11081  /tags
11082  /TAGS
11083  /linux
11084 +/modules-only.symvers
11085  /vmlinux
11086  /vmlinux.32
11087  /vmlinux.symvers
11088 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
11089 index 04545725f187..e38e2c55b2fa 100644
11090 --- a/Documentation/admin-guide/kernel-parameters.txt
11091 +++ b/Documentation/admin-guide/kernel-parameters.txt
11092 @@ -358,6 +358,10 @@
11093         autoconf=       [IPV6]
11094                         See Documentation/networking/ipv6.rst.
11096 +       autogroup=      [KNL] Enable or disable scheduler automatic task group
11097 +                       creation.
11098 +                       Format: <bool>
11100         show_lapic=     [APIC,X86] Advanced Programmable Interrupt Controller
11101                         Limit apic dumping. The parameter defines the maximal
11102                         number of local apics being dumped. Also it is possible
11103 @@ -1869,13 +1873,6 @@
11104                         bypassed by not enabling DMAR with this option. In
11105                         this case, gfx device will use physical address for
11106                         DMA.
11107 -               forcedac [X86-64]
11108 -                       With this option iommu will not optimize to look
11109 -                       for io virtual address below 32-bit forcing dual
11110 -                       address cycle on pci bus for cards supporting greater
11111 -                       than 32-bit addressing. The default is to look
11112 -                       for translation below 32-bit and if not available
11113 -                       then look in the higher range.
11114                 strict [Default Off]
11115                         With this option on every unmap_single operation will
11116                         result in a hardware IOTLB flush operation as opposed
11117 @@ -1964,6 +1961,14 @@
11118                 nobypass        [PPC/POWERNV]
11119                         Disable IOMMU bypass, using IOMMU for PCI devices.
11121 +       iommu.forcedac= [ARM64, X86] Control IOVA allocation for PCI devices.
11122 +                       Format: { "0" | "1" }
11123 +                       0 - Try to allocate a 32-bit DMA address first, before
11124 +                         falling back to the full range if needed.
11125 +                       1 - Allocate directly from the full usable range,
11126 +                         forcing Dual Address Cycle for PCI cards supporting
11127 +                         greater than 32-bit addressing.
11129         iommu.strict=   [ARM64] Configure TLB invalidation behaviour
11130                         Format: { "0" | "1" }
11131                         0 - Lazy mode.
11132 @@ -3196,8 +3201,6 @@
11133         noapic          [SMP,APIC] Tells the kernel to not make use of any
11134                         IOAPICs that may be present in the system.
11136 -       noautogroup     Disable scheduler automatic task group creation.
11138         nobats          [PPC] Do not use BATs for mapping kernel lowmem
11139                         on "Classic" PPC cores.
11141 @@ -3660,6 +3663,15 @@
11142                 nomsi           [MSI] If the PCI_MSI kernel config parameter is
11143                                 enabled, this kernel boot option can be used to
11144                                 disable the use of MSI interrupts system-wide.
11145 +               pcie_acs_override=
11146 +                                       [PCIE] Override missing PCIe ACS support for:
11147 +                               downstream
11148 +                                       All downstream ports - full ACS capabilities
11149 +                               multifunction
11150 +                                       All multifunction devices - multifunction ACS subset
11151 +                               id:nnnn:nnnn
11152 +                                       Specific device - full ACS capabilities
11153 +                                       Specified as vid:did (vendor/device ID) in hex
11154                 noioapicquirk   [APIC] Disable all boot interrupt quirks.
11155                                 Safety option to keep boot IRQs enabled. This
11156                                 should never be necessary.
11157 diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
11158 index 1d56a6b73a4e..4d55ff02310c 100644
11159 --- a/Documentation/admin-guide/sysctl/kernel.rst
11160 +++ b/Documentation/admin-guide/sysctl/kernel.rst
11161 @@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the
11162  requirements for EAS but you do not want to use it, change
11163  this value to 0.
11165 +sched_interactivity_factor (CacULE scheduler only)
11166 +==================================================
11167 +Sets the value *m* for interactivity score calculations. See
11168 +Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
11170  sched_schedstats
11171  ================
11172 diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
11173 index 586cd4b86428..cf4a90d7a058 100644
11174 --- a/Documentation/admin-guide/sysctl/vm.rst
11175 +++ b/Documentation/admin-guide/sysctl/vm.rst
11176 @@ -26,6 +26,8 @@ Currently, these files are in /proc/sys/vm:
11178  - admin_reserve_kbytes
11179  - block_dump
11180 +- clean_low_kbytes
11181 +- clean_min_kbytes
11182  - compact_memory
11183  - compaction_proactiveness
11184  - compact_unevictable_allowed
11185 @@ -113,6 +115,41 @@ block_dump enables block I/O debugging when set to a nonzero value. More
11186  information on block I/O debugging is in Documentation/admin-guide/laptops/laptop-mode.rst.
11189 +clean_low_kbytes
11190 +=====================
11192 +This knob provides *best-effort* protection of clean file pages. The clean file
11193 +pages on the current node won't be reclaimed under memory pressure when their
11194 +amount is below vm.clean_low_kbytes, *unless* an out-of-memory condition is
11195 +imminent, there is no free swap space, or vm.swappiness=0.
11197 +Protection of clean file pages may be used to prevent thrashing and
11198 +reduce I/O under low-memory conditions.
11200 +Setting it to a high value may result in an early eviction of anonymous pages
11201 +into swap space, as the kernel attempts to hold the protected amount of clean file
11202 +pages in memory.
11204 +The default value is defined by CONFIG_CLEAN_LOW_KBYTES.
11207 +clean_min_kbytes
11208 +=====================
11210 +This knob provides *hard* protection of clean file pages. The clean file pages
11211 +on the current node won't be reclaimed under memory pressure when their amount
11212 +is below vm.clean_min_kbytes.
11214 +Hard protection of clean file pages may be used to avoid high latency and
11215 +prevent livelock in near-OOM conditions.
11217 +Setting it to a high value may result in an early out-of-memory condition due to
11218 +the inability to reclaim the protected amount of clean file pages when other
11219 +types of pages cannot be reclaimed.
11221 +The default value is defined by CONFIG_CLEAN_MIN_KBYTES.
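
As a hypothetical illustration, not part of this patch: like any other vm
sysctl, both knobs can be set at runtime by writing to the corresponding
/proc/sys/vm files. The threshold values below are made-up examples::

    #include <stdio.h>

    /* Write a value to a sysctl file; returns 0 on success. */
    static int write_sysctl(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* Best-effort protection: try to keep ~256 MiB of clean file pages. */
            write_sysctl("/proc/sys/vm/clean_low_kbytes", "262144");
            /* Hard protection: never reclaim the last ~128 MiB. */
            write_sysctl("/proc/sys/vm/clean_min_kbytes", "131072");
            return 0;
    }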
11224  compact_memory
11225  ==============
11227 diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
11228 index fe7c4cbfe4ba..dd1a5ce5896c 100644
11229 --- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
11230 +++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
11231 @@ -193,23 +193,35 @@ required:
11232    - interrupts
11233    - clocks
11234    - power-domains
11235 -  - resets
11237 -if:
11238 -  properties:
11239 -    compatible:
11240 -      contains:
11241 -        enum:
11242 -          - renesas,vin-r8a7778
11243 -          - renesas,vin-r8a7779
11244 -          - renesas,rcar-gen2-vin
11245 -then:
11246 -  required:
11247 -    - port
11248 -else:
11249 -  required:
11250 -    - renesas,id
11251 -    - ports
11253 +allOf:
11254 +  - if:
11255 +      not:
11256 +        properties:
11257 +          compatible:
11258 +            contains:
11259 +              enum:
11260 +                - renesas,vin-r8a7778
11261 +                - renesas,vin-r8a7779
11262 +    then:
11263 +      required:
11264 +        - resets
11266 +  - if:
11267 +      properties:
11268 +        compatible:
11269 +          contains:
11270 +            enum:
11271 +              - renesas,vin-r8a7778
11272 +              - renesas,vin-r8a7779
11273 +              - renesas,rcar-gen2-vin
11274 +    then:
11275 +      required:
11276 +        - port
11277 +    else:
11278 +      required:
11279 +        - renesas,id
11280 +        - ports
11282  additionalProperties: false
11284 diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11285 index 4a2bcc0158e2..8fdfbc763d70 100644
11286 --- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11287 +++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11288 @@ -17,6 +17,7 @@ allOf:
11289  properties:
11290    compatible:
11291      oneOf:
11292 +      - const: renesas,pcie-r8a7779       # R-Car H1
11293        - items:
11294            - enum:
11295                - renesas,pcie-r8a7742      # RZ/G1H
11296 @@ -74,7 +75,16 @@ required:
11297    - clocks
11298    - clock-names
11299    - power-domains
11300 -  - resets
11302 +if:
11303 +  not:
11304 +    properties:
11305 +      compatible:
11306 +        contains:
11307 +          const: renesas,pcie-r8a7779
11308 +then:
11309 +  required:
11310 +    - resets
11312  unevaluatedProperties: false
11314 diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11315 index 626447fee092..7808ec8bc712 100644
11316 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11317 +++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11318 @@ -25,11 +25,13 @@ properties:
11319        - qcom,msm8998-qmp-pcie-phy
11320        - qcom,msm8998-qmp-ufs-phy
11321        - qcom,msm8998-qmp-usb3-phy
11322 +      - qcom,sc7180-qmp-usb3-phy
11323        - qcom,sc8180x-qmp-ufs-phy
11324        - qcom,sc8180x-qmp-usb3-phy
11325        - qcom,sdm845-qhp-pcie-phy
11326        - qcom,sdm845-qmp-pcie-phy
11327        - qcom,sdm845-qmp-ufs-phy
11328 +      - qcom,sdm845-qmp-usb3-phy
11329        - qcom,sdm845-qmp-usb3-uni-phy
11330        - qcom,sm8150-qmp-ufs-phy
11331        - qcom,sm8150-qmp-usb3-phy
11332 diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11333 index 33974ad10afe..62c0179d1765 100644
11334 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11335 +++ b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11336 @@ -14,9 +14,7 @@ properties:
11337    compatible:
11338      enum:
11339        - qcom,sc7180-qmp-usb3-dp-phy
11340 -      - qcom,sc7180-qmp-usb3-phy
11341        - qcom,sdm845-qmp-usb3-dp-phy
11342 -      - qcom,sdm845-qmp-usb3-phy
11343    reg:
11344      items:
11345        - description: Address and length of PHY's USB serdes block.
11346 diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
11347 index f54cae9ff7b2..d3f87f2bfdc2 100644
11348 --- a/Documentation/devicetree/bindings/serial/8250.yaml
11349 +++ b/Documentation/devicetree/bindings/serial/8250.yaml
11350 @@ -93,11 +93,6 @@ properties:
11351                - mediatek,mt7622-btif
11352                - mediatek,mt7623-btif
11353            - const: mediatek,mtk-btif
11354 -      - items:
11355 -          - enum:
11356 -              - mediatek,mt7622-btif
11357 -              - mediatek,mt7623-btif
11358 -          - const: mediatek,mtk-btif
11359        - items:
11360            - const: mrvl,mmp-uart
11361            - const: intel,xscale-uart
11362 diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11363 index 8631678283f9..865be05083c3 100644
11364 --- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11365 +++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11366 @@ -80,7 +80,8 @@ required:
11367    - interrupts
11368    - clocks
11370 -additionalProperties: false
11371 +additionalProperties:
11372 +  type: object
11374  examples:
11375    - |
11376 diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11377 index b33a76eeac4e..f963204e0b16 100644
11378 --- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11379 +++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11380 @@ -28,14 +28,7 @@ properties:
11381        - renesas,r8a77980-thermal # R-Car V3H
11382        - renesas,r8a779a0-thermal # R-Car V3U
11384 -  reg:
11385 -    minItems: 2
11386 -    maxItems: 4
11387 -    items:
11388 -      - description: TSC1 registers
11389 -      - description: TSC2 registers
11390 -      - description: TSC3 registers
11391 -      - description: TSC4 registers
11392 +  reg: true
11394    interrupts:
11395      items:
11396 @@ -71,8 +64,25 @@ if:
11397            enum:
11398              - renesas,r8a779a0-thermal
11399  then:
11400 +  properties:
11401 +    reg:
11402 +      minItems: 2
11403 +      maxItems: 3
11404 +      items:
11405 +        - description: TSC1 registers
11406 +        - description: TSC2 registers
11407 +        - description: TSC3 registers
11408    required:
11409      - interrupts
11410 +else:
11411 +  properties:
11412 +    reg:
11413 +      items:
11414 +        - description: TSC0 registers
11415 +        - description: TSC1 registers
11416 +        - description: TSC2 registers
11417 +        - description: TSC3 registers
11418 +        - description: TSC4 registers
11420  additionalProperties: false
11422 @@ -111,3 +121,20 @@ examples:
11423                      };
11424              };
11425      };
11426 +  - |
11427 +    #include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
11428 +    #include <dt-bindings/interrupt-controller/arm-gic.h>
11429 +    #include <dt-bindings/power/r8a779a0-sysc.h>
11431 +    tsc_r8a779a0: thermal@e6190000 {
11432 +            compatible = "renesas,r8a779a0-thermal";
11433 +            reg = <0xe6190000 0x200>,
11434 +                  <0xe6198000 0x200>,
11435 +                  <0xe61a0000 0x200>,
11436 +                  <0xe61a8000 0x200>,
11437 +                  <0xe61b0000 0x200>;
11438 +            clocks = <&cpg CPG_MOD 919>;
11439 +            power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
11440 +            resets = <&cpg 919>;
11441 +            #thermal-sensor-cells = <1>;
11442 +    };
11443 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
11444 index e361fc95ca29..82e3eee7363b 100644
11445 --- a/Documentation/dontdiff
11446 +++ b/Documentation/dontdiff
11447 @@ -178,6 +178,7 @@ mktables
11448  mktree
11449  mkutf8data
11450  modpost
11451 +modules-only.symvers
11452  modules.builtin
11453  modules.builtin.modinfo
11454  modules.nsdeps
11455 diff --git a/Documentation/driver-api/xilinx/eemi.rst b/Documentation/driver-api/xilinx/eemi.rst
11456 index 9dcbc6f18d75..c1bc47b9000d 100644
11457 --- a/Documentation/driver-api/xilinx/eemi.rst
11458 +++ b/Documentation/driver-api/xilinx/eemi.rst
11459 @@ -16,35 +16,8 @@ components running across different processing clusters on a chip or
11460  device to communicate with a power management controller (PMC) on a
11461  device to issue or respond to power management requests.
11463 -EEMI ops is a structure containing all eemi APIs supported by Zynq MPSoC.
11464 -The zynqmp-firmware driver maintain all EEMI APIs in zynqmp_eemi_ops
11465 -structure. Any driver who want to communicate with PMC using EEMI APIs
11466 -can call zynqmp_pm_get_eemi_ops().
11468 -Example of EEMI ops::
11470 -       /* zynqmp-firmware driver maintain all EEMI APIs */
11471 -       struct zynqmp_eemi_ops {
11472 -               int (*get_api_version)(u32 *version);
11473 -               int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
11474 -       };
11476 -       static const struct zynqmp_eemi_ops eemi_ops = {
11477 -               .get_api_version = zynqmp_pm_get_api_version,
11478 -               .query_data = zynqmp_pm_query_data,
11479 -       };
11481 -Example of EEMI ops usage::
11483 -       static const struct zynqmp_eemi_ops *eemi_ops;
11484 -       u32 ret_payload[PAYLOAD_ARG_CNT];
11485 -       int ret;
11487 -       eemi_ops = zynqmp_pm_get_eemi_ops();
11488 -       if (IS_ERR(eemi_ops))
11489 -               return PTR_ERR(eemi_ops);
11491 -       ret = eemi_ops->query_data(qdata, ret_payload);
11492 +Any driver that wants to communicate with the PMC using EEMI APIs can call
11493 +the functions provided for each EEMI API.
11495  IOCTL
11496  ------
11497 diff --git a/Documentation/filesystems/ntfs3.rst b/Documentation/filesystems/ntfs3.rst
11498 new file mode 100644
11499 index 000000000000..ffe9ea0c1499
11500 --- /dev/null
11501 +++ b/Documentation/filesystems/ntfs3.rst
11502 @@ -0,0 +1,106 @@
11503 +.. SPDX-License-Identifier: GPL-2.0
11505 +=====
11506 +NTFS3
11507 +=====
11510 +Summary and Features
11511 +====================
11513 +NTFS3 is a fully functional NTFS read-write driver. The driver works with
11514 +NTFS versions up to 3.1, normal/compressed/sparse files,
11515 +and journal replaying. The file system type to use on mount is 'ntfs3'.
11517 +- This driver implements NTFS read/write support for normal, sparse and
11518 +  compressed files.
11519 +- Supports native journal replaying;
11520 +- Supports extended attributes
11521 +       Predefined extended attributes:
11522 +       - 'system.ntfs_security' gets/sets security
11523 +                       descriptor (SECURITY_DESCRIPTOR_RELATIVE)
11524 +       - 'system.ntfs_attrib' gets/sets ntfs file/dir attributes.
11525 +               Note: applied to empty files, this allows switching the type
11526 +               between sparse(0x200), compressed(0x800) and normal;
11527 +- Supports NFS export of mounted NTFS volumes.
11529 +Mount Options
11530 +=============
11532 +The list below describes mount options supported by the NTFS3 driver in
11533 +addition to the generic ones.
11535 +===============================================================================
11537 +nls=name               This option informs the driver how to interpret path
11538 +                       strings and translate them to Unicode and back. If
11539 +                       this option is not set, the default codepage will be
11540 +                       used (CONFIG_NLS_DEFAULT).
11541 +                       Examples:
11542 +                               'nls=utf8'
11544 +uid=
11545 +gid=
11546 +umask=                 Controls the default permissions for files/directories created
11547 +                       after the NTFS volume is mounted.
11549 +fmask=
11550 +dmask=                 Instead of specifying umask which applies both to
11551 +                       files and directories, fmask applies only to files and
11552 +                       dmask only to directories.
11554 +nohidden               Files with the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN)
11555 +                       attribute will not be shown under Linux.
11557 +sys_immutable          Files with the Windows-specific SYSTEM
11558 +                       (FILE_ATTRIBUTE_SYSTEM) attribute will be marked as system
11559 +                       immutable files.
11561 +discard                        Enable support for the TRIM command for improved performance
11562 +                       on delete operations; recommended for use with
11563 +                       solid-state drives (SSDs).
11565 +force                  Forces the driver to mount partitions even if the 'dirty'
11566 +                       flag (volume dirty) is set. Not recommended for use.
11568 +sparse                 Create new files as "sparse".
11570 +showmeta               Use this parameter to show all meta-files (System Files) on
11571 +                       a mounted NTFS partition.
11572 +                       By default, all meta-files are hidden.
11574 +prealloc               Aggressively preallocate space for files when file size is
11575 +                       increasing on writes. Decreases fragmentation in case of
11576 +                       parallel write operations to different files.
11578 +no_acs_rules           "No access rules" mount option sets access rights for
11579 +                       files/folders to 777 and owner/group to root. This mount
11580 +                       option overrides all other permissions:
11581 +                       - permission changes for files/folders will be reported
11582 +                               as successful, but they will remain 777;
11583 +                       - owner/group changes will be reported as successful, but
11584 +                               they will stay root
11586 +acl                    Support POSIX ACLs (Access Control Lists). Effective only
11587 +                       if supported by the kernel. Not to be confused with
11588 +                       NTFS ACLs.
11590 +noatime                        Files and directories do not update their last access
11591 +                       time attribute when a partition is mounted with this option.
11592 +                       This can speed up file system operation.
11594 +===============================================================================
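
As an illustrative sketch only (the device and mount point below are
placeholders, not taken from this document), mounting with several of the
options above via mount(2) could look like::

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* 'ntfs3' is the documented file system type; the option string
             * combines entries from the table above. */
            if (mount("/dev/sdb1", "/mnt/win", "ntfs3", 0,
                      "nls=utf8,uid=1000,gid=1000,discard")) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }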
11596 +ToDo list
11597 +=========
11599 +- Full journaling support (currently journal replaying is supported) over JBD.
11602 +References
11603 +==========
11604 +https://www.paragon-software.com/home/ntfs-linux-professional/
11605 +       - Commercial version of the NTFS driver for Linux.
11607 +almaz.alexandrovich@paragon-software.com
11608 +       - Direct e-mail address for feedback and requests on the NTFS3 implementation.
11609 diff --git a/Documentation/locking/futex2.rst b/Documentation/locking/futex2.rst
11610 new file mode 100644
11611 index 000000000000..3ab49f0e741c
11612 --- /dev/null
11613 +++ b/Documentation/locking/futex2.rst
11614 @@ -0,0 +1,198 @@
11615 +.. SPDX-License-Identifier: GPL-2.0
11617 +======
11618 +futex2
11619 +======
11621 +:Author: André Almeida <andrealmeid@collabora.com>
11623 +futex, or fast user mutex, is a set of syscalls to allow userspace to create
11624 +performant synchronization mechanisms, such as mutexes, semaphores and
11625 +condition variables in userspace. C standard libraries, like glibc, use it
11626 +as a means to implement higher-level interfaces like pthreads.
11628 +The interface
11629 +=============
11631 +uAPI functions
11632 +--------------
11634 +.. kernel-doc:: kernel/futex2.c
11635 +   :identifiers: sys_futex_wait sys_futex_wake sys_futex_waitv sys_futex_requeue
11637 +uAPI structures
11638 +---------------
11640 +.. kernel-doc:: include/uapi/linux/futex.h
11642 +The ``flag`` argument
11643 +---------------------
11645 +The flag is used to specify the size of the futex word
11646 +(FUTEX_[8, 16, 32]). It's mandatory to define one, since there's no
11647 +default size.
11649 +By default, the timeout uses a monotonic clock, but a realtime clock can be
11650 +selected by using the FUTEX_REALTIME_CLOCK flag.
11652 +By default, futexes are of the private type, which means that the user address
11653 +will be accessed only by threads that share the same memory region. This allows
11654 +for some internal optimizations, so they are faster. However, if the address
11655 +needs to be shared with different processes (like using ``mmap()`` or ``shm()``),
11656 +the futex needs to be defined as shared, by setting the FUTEX_SHARED_FLAG.
11658 +By default, the operation has no NUMA-awareness, meaning that the user can't
11659 +choose the memory node where the kernel side futex data will be stored. The
11660 +user can choose the node where it wants to operate by setting the
11661 +FUTEX_NUMA_FLAG and using the following structure (where X can be 8, 16, or
11662 +32)::
11664 + struct futexX_numa {
11665 +         __uX value;
11666 +         __sX hint;
11667 + };
11669 +This structure should be passed as the ``void *uaddr`` argument of futex
11670 +functions. The address of the structure will be waited on/woken on, and the
11671 +``value`` will be compared to ``val`` as usual. The ``hint`` member is used to
11672 +define which node the futex will use. When waiting, the futex will be
11673 +registered on a kernel-side table stored on that node; when waking, the futex
11674 +will be searched for on that given table. That means that there's no redundancy
11675 +between tables, and the wrong ``hint`` value will lead to undesired behavior.
11676 +Userspace is responsible for dealing with node migration issues that may
11677 +occur. ``hint`` can range from [0, MAX_NUMA_NODES), for specifying a node, or
11678 +-1, to use the same node the current process is using.
11680 +When not using FUTEX_NUMA_FLAG on a NUMA system, the futex will be stored in a
11681 +global table allocated on the first node.
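
A hypothetical userspace sketch of the NUMA interface described above; the
``futex_wait()`` wrapper is assumed to exist (the new syscall has no libc
wrapper), while the structure and flag names are taken from this document::

    #include <linux/types.h>

    struct futex32_numa {
            __u32 value;
            __s32 hint;
    };

    /* hint == -1: use the node the current process is running on. */
    static struct futex32_numa f = { .value = 0, .hint = -1 };

    void wait_on_numa_futex(void)
    {
            /* Assumed wrapper for the new syscall: blocks if f.value == 0 at
             * the time of the call, until woken; kernel-side futex data lives
             * on the NUMA node selected by f.hint. */
            futex_wait(&f, 0, FUTEX_32 | FUTEX_NUMA_FLAG, NULL);
    }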
11683 +The ``timo`` argument
11684 +---------------------
11686 +As per the Y2038 work done in the kernel, new interfaces shouldn't add timeout
11687 +options known to be buggy. Given that, ``timo`` should be a 64-bit timeout on
11688 +all platforms, using an absolute timeout value.
11690 +Implementation
11691 +==============
11693 +The internal implementation follows a similar design to the original futex.
11694 +Given that we want to replicate the same external behavior of current futex,
11695 +this should be somewhat expected.
11697 +Waiting
11698 +-------
11700 +All wait operations are treated as if you want to wait on N
11701 +futexes, so the path for futex_wait and futex_waitv is basically the same.
11702 +For both syscalls, the first step is to prepare an internal list for the list
11703 +of futexes to wait for (using struct futexv_head). For futex_wait() calls, this
11704 +list will have a single object.
11706 +We have a hash table, where waiters register themselves before sleeping. Then
11707 +the wake function checks this table looking for waiters at uaddr.  The hash
11708 +bucket to be used is determined by a struct futex_key, that stores information
11709 +to uniquely identify an address from a given process. Given the huge address
11710 +space, there'll be hash collisions, so we store information to be used later
11711 +for collision handling.
11713 +First, for every futex we want to wait on, we check if (``*uaddr == val``).
11714 +This check is done holding the bucket lock, so we are correctly serialized with
11715 +any futex_wake() calls. If any waiter fails the check above, we dequeue all
11716 +futexes. The check (``*uaddr == val``) can fail for two reasons:
11718 +- The values are different, and we return -EAGAIN. However, if while
11719 +  dequeueing we found that some futexes were awakened, we prioritize this
11720 +  and return success.
11722 +- When trying to access the user address, we do so with page faults
11723 +  disabled because we are holding a bucket's spin lock (and can't sleep
11724 +  while holding a spin lock). If there's an error, it might be a page
11725 +  fault, or an invalid address. We release the lock, dequeue everyone
11726 +  (because it's illegal to sleep while there are futexes enqueued, we
11727 +  could lose wakeups) and try again with page faults enabled. If we
11728 +  succeed, this means that the address is valid, but we need to do
11729 +  all the work again. For serialization reasons, we need to have the
11730 +  spin lock when getting the user value. Additionally, for shared
11731 +  futexes, we also need to recalculate the hash, since the underlying
11732 +  mapping mechanisms could have changed while dealing with the page fault.
11733 +  If, even with page fault enabled, we can't access the address, it
11734 +  means it's an invalid user address, and we return -EFAULT. For this
11735 +  case, we prioritize the error, even if some futexes were awakened.
11737 +If the check is OK, the futex is enqueued on a linked list in our bucket, and
11738 +we proceed to the next one. If all waiters succeed, we put the thread to sleep
11739 +until a futex_wake() call, timeout expires or we get a signal. After waking up,
11740 +we dequeue everyone, and check if some futex was awakened. This dequeue is done
11741 +by iteratively walking each element of the struct futex_head list.
11743 +All enqueuing/dequeuing operations require holding the bucket lock, to avoid
11744 +racing while modifying the list.
11746 +Waking
11747 +------
11749 +We get the bucket that's storing the waiters at uaddr, and wake the required
11750 +number of waiters, checking for hash collision.
11752 +There's an optimization that makes futex_wake() not take the bucket lock if
11753 +there's no one to be woken on that bucket. It checks an atomic counter that each
11754 +bucket has; if it reads 0, then the syscall exits. In order for this to work, the
11755 +waiter thread increases it before taking the lock, so the wake thread will
11756 +correctly see that there's someone waiting and will continue the path to take
11757 +the bucket lock. To get the correct serialization, the waiter issues a memory
11758 +barrier after increasing the bucket counter and the waker issues a memory
11759 +barrier before checking it.
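
A rough sketch of that counter/barrier pairing, with illustrative names rather
than the actual implementation::

    /* Waiter side, before sleeping: */
    atomic_inc(&bucket->waiters);
    smp_mb();       /* pairs with the barrier on the waker side */
    spin_lock(&bucket->lock);
    /* ... enqueue, then check *uaddr == val ... */

    /* Waker side, in futex_wake(): */
    smp_mb();       /* pairs with the barrier on the waiter side */
    if (!atomic_read(&bucket->waiters))
            return 0;       /* nobody waiting: skip taking the lock */
    spin_lock(&bucket->lock);
    /* ... walk the list and wake up to nr_wake waiters ... */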
11761 +Requeuing
11762 +---------
11764 +The requeue path first checks each struct futex_requeue and its flags.
11765 +Then, it will compare the expected value with the one at uaddr1::uaddr.
11766 +Following the same serialization explained at Waking_, we increase the atomic
11767 +counter for the bucket of uaddr2 before taking the lock. We need to have both
11768 +bucket locks at the same time so we don't race with other futex operations. To
11769 +ensure the locks are taken in the same order for all threads (and thus avoiding
11770 +deadlocks), every requeue operation takes the "smaller" bucket first, when
11771 +comparing both addresses.
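+
+A sketch of that lock ordering, with a hypothetical bucket type (``swap()``
+and ``spin_lock_nested()`` are the usual kernel helpers)::
+
+	static void futex_double_lock(struct futex_bucket *b1,
+				      struct futex_bucket *b2)
+	{
+		if (b1 > b2)		/* "smaller" bucket is locked first */
+			swap(b1, b2);
+		spin_lock(&b1->lock);
+		if (b1 != b2)
+			spin_lock_nested(&b2->lock, SINGLE_DEPTH_NESTING);
+	}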
11773 +If the compare with user value succeeds, we proceed by waking ``nr_wake``
11774 +futexes, and then requeuing ``nr_requeue`` from bucket of uaddr1 to the uaddr2.
11775 +This consists of a simple list deletion/addition and replacing the old futex key
11776 +with the new one.
11778 +Futex keys
11779 +----------
11781 +There are two types of futexes: private and shared ones. Private futexes are
11782 +meant to be used by threads that share the same memory space; they are easier
11783 +to identify uniquely and thus allow some performance optimizations. The
11784 +elements for identifying one are: the start address of the page where the
11785 +address is, the address offset within the page and the current->mm pointer.
11787 +Now, for uniquely identifying a shared futex:
11789 +- If the page containing the user address is an anonymous page, we can
11790 +  just use the same data used for private futexes (the start address of
11791 +  the page, the address offset within the page and the current->mm
11792 +  pointer); that will be enough for uniquely identifying such a futex. We
11793 +  also set one bit in the key to differentiate it from a private futex
11794 +  used on the same address (mixing shared and private calls does not
11795 +  work).
11797 +- If the page is file-backed, current->mm may not be the same one for
11798 +  every user of this futex, so we need to use other data: the
11799 +  page->index, a UUID for the struct inode and the offset within the
11800 +  page.
11802 +Note that members of futex_key don't have any particular meaning after they
11803 +are part of the struct - they are just bytes to identify a futex.  Given that,
11804 +we don't need to use a particular name or type that matches the original data;
11805 +we only need to care about the bitsize of each component and make both private
11806 +and shared fit in the same memory space.
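+
+Purely as an illustration of that idea (the field names and sizes here are
+hypothetical, not the actual futex2 layout), a key could look like::
+
+	struct futex_key {
+		u64 id;		/* page start address, or the inode UUID */
+		u64 extra;	/* current->mm pointer, or page->index   */
+		u32 offset;	/* offset within the page; one bit marks */
+				/* the key as shared rather than private */
+	};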
11808 +Source code documentation
11809 +=========================
11811 +.. kernel-doc:: kernel/futex2.c
11812 +   :no-identifiers: sys_futex_wait sys_futex_wake sys_futex_waitv sys_futex_requeue
11813 diff --git a/Documentation/locking/index.rst b/Documentation/locking/index.rst
11814 index 7003bd5aeff4..9bf03c7fa1ec 100644
11815 --- a/Documentation/locking/index.rst
11816 +++ b/Documentation/locking/index.rst
11817 @@ -24,6 +24,7 @@ locking
11818      percpu-rw-semaphore
11819      robust-futexes
11820      robust-futex-ABI
11821 +    futex2
11823  .. only::  subproject and html
11825 diff --git a/Documentation/powerpc/syscall64-abi.rst b/Documentation/powerpc/syscall64-abi.rst
11826 index dabee3729e5a..56490c4c0c07 100644
11827 --- a/Documentation/powerpc/syscall64-abi.rst
11828 +++ b/Documentation/powerpc/syscall64-abi.rst
11829 @@ -109,6 +109,16 @@ auxiliary vector.
11831  scv 0 syscalls will always behave as PPC_FEATURE2_HTM_NOSC.
11833 +ptrace
11834 +------
11835 +When ptracing system calls (PTRACE_SYSCALL), the pt_regs.trap value contains
11836 +the system call type that can be used to distinguish between sc and scv 0
11837 +system calls, and the different register conventions can be accounted for.
11839 +If the value of (pt_regs.trap & 0xfff0) is 0xc00, the system call was
11840 +performed with the sc instruction; if it is 0x3000, the system call was
11841 +performed with the scv 0 instruction.
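+
+For instance, a tracer that has fetched the tracee's registers (e.g. with
+PTRACE_GETREGS) could distinguish the two entry types like this - a sketch
+based only on the values documented above::
+
+	unsigned long trap = regs.trap & 0xfff0;
+
+	if (trap == 0x3000)
+		scv_entry = true;	/* scv 0 register conventions apply */
+	else if (trap == 0xc00)
+		scv_entry = false;	/* sc register conventions apply */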
11843  vsyscall
11844  ========
11846 diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst
11847 new file mode 100644
11848 index 000000000000..82b0847c468a
11849 --- /dev/null
11850 +++ b/Documentation/scheduler/sched-CacULE.rst
11851 @@ -0,0 +1,76 @@
11852 +======================================
11853 +The CacULE Scheduler by Hamad Al Marri.
11854 +======================================
11856 +1.  Overview
11857 +=============
11859 +The CacULE CPU scheduler is based on an interactivity score mechanism.
11860 +The interactivity score is inspired by the ULE scheduler (FreeBSD
11861 +scheduler).
11863 +1.1 About CacULE Scheduler
11864 +--------------------------
11866 +  - Each CPU has its own runqueue.
11868 +  - NORMAL runqueue is a linked list of sched_entities (instead of RB-Tree).
11870 +  - RT and other runqueues are just the same as the CFS's.
11872 +  - Woken-up tasks preempt the currently running task if their interactivity
11873 +    score is higher.
11876 +1.2. Complexity
11877 +----------------
11879 +The complexity of enqueueing and dequeueing a task is O(1).
11881 +The complexity of picking the next task is O(n), where n is the number of
11882 +tasks in a runqueue (each CPU has its own runqueue).
11884 +Note: O(n) sounds scary, but usually, on a machine with 4 CPUs used for
11885 +desktop or mobile jobs, the maximum number of runnable tasks is unlikely to
11886 +exceed 10 (at pick-next time) - idle tasks are excluded, since they are
11887 +dequeued when sleeping and enqueued when they wake up.
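+
+As an illustration of the O(n) pick-next walk (the types and helpers below are
+hypothetical, not the CacULE source)::
+
+	struct cacule_node *best = NULL, *n;
+
+	list_for_each_entry(n, &rq->head, node)	/* n runnable tasks */
+		if (!best || interactivity_score(n) > interactivity_score(best))
+			best = n;	/* the better interactivity score wins */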
11890 +2. The CacULE Interactivity Score
11891 +=======================================================
11893 +The interactivity score is inspired by the ULE scheduler (FreeBSD scheduler).
11894 +For more information see: https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
11895 +CacULE doesn't replace CFS with ULE; it only changes CFS' pick-next-task
11896 +mechanism to ULE's interactivity score mechanism for picking the next task.
11899 +2.3 sched_interactivity_factor
11900 +==============================
11901 +Sets the value *m* for interactivity score calculations. See Figure 1 in
11902 +https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
11903 +The default value in CacULE is 10, which means that the Maximum Interactive
11904 +Score is 20 (since m = Maximum Interactive Score / 2).
11905 +You can tune sched_interactivity_factor with the sysctl command::
11907 +       sysctl kernel.sched_interactivity_factor=50
11909 +This command changes the sched_interactivity_factor from 10 to 50.
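+
+For reference, a sketch of the score's shape following the ULE scheme the
+document cites (this mirrors Figure 1 of the paper, not CacULE's exact code;
+``m`` is sched_interactivity_factor)::
+
+	/* score falls in [0, m) when the task sleeps more than it runs,
+	 * and in [m, 2*m] when it runs more than it sleeps */
+	static u64 interactivity_score(u64 sleep, u64 run, u64 m)
+	{
+		if (sleep > run)
+			return div64_u64(m * run, sleep);
+		if (run > sleep)
+			return 2 * m - div64_u64(m * sleep, run);
+		return m;
+	}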
11912 +3. Scheduling policies
11913 +=======================
11915 +CacULE, same as CFS, implements three scheduling policies:
11917 +  - SCHED_NORMAL (traditionally called SCHED_OTHER): The scheduling
11918 +    policy that is used for regular tasks.
11920 +  - SCHED_BATCH: Does not preempt nearly as often as regular tasks
11921 +    would, thereby allowing tasks to run longer and make better use of
11922 +    caches but at the cost of interactivity. This is well suited for
11923 +    batch jobs.
11925 +  - SCHED_IDLE: This is even weaker than nice 19, but it's not a true
11926 +    idle timer scheduler, in order to avoid getting into priority
11927 +    inversion problems which would deadlock the machine.
11928 diff --git a/Documentation/userspace-api/media/v4l/subdev-formats.rst b/Documentation/userspace-api/media/v4l/subdev-formats.rst
11929 index 7f16cbe46e5c..e6a9faa81197 100644
11930 --- a/Documentation/userspace-api/media/v4l/subdev-formats.rst
11931 +++ b/Documentation/userspace-api/media/v4l/subdev-formats.rst
11932 @@ -1567,8 +1567,8 @@ The following tables list existing packed RGB formats.
11933        - MEDIA_BUS_FMT_RGB101010_1X30
11934        - 0x1018
11935        -
11936 -      - 0
11937 -      - 0
11938 +      -
11939 +      -
11940        - r\ :sub:`9`
11941        - r\ :sub:`8`
11942        - r\ :sub:`7`
11943 diff --git a/Documentation/vm/index.rst b/Documentation/vm/index.rst
11944 index eff5fbd492d0..c353b3f55924 100644
11945 --- a/Documentation/vm/index.rst
11946 +++ b/Documentation/vm/index.rst
11947 @@ -17,6 +17,7 @@ various features of the Linux memory management
11949     swap_numa
11950     zswap
11951 +   multigen_lru
11953  Kernel developers MM documentation
11954  ==================================
11955 diff --git a/Documentation/vm/multigen_lru.rst b/Documentation/vm/multigen_lru.rst
11956 new file mode 100644
11957 index 000000000000..cf772aeca317
11958 --- /dev/null
11959 +++ b/Documentation/vm/multigen_lru.rst
11960 @@ -0,0 +1,192 @@
11961 +=====================
11962 +Multigenerational LRU
11963 +=====================
11965 +Quick Start
11966 +===========
11967 +Build Options
11968 +-------------
11969 +:Required: Set ``CONFIG_LRU_GEN=y``.
11971 +:Optional: Change ``CONFIG_NR_LRU_GENS`` to a number ``X`` to support
11972 + a maximum of ``X`` generations.
11974 +:Optional: Change ``CONFIG_TIERS_PER_GEN`` to a number ``Y`` to support
11975 + a maximum of ``Y`` tiers per generation.
11977 +:Optional: Set ``CONFIG_LRU_GEN_ENABLED=y`` to turn the feature on by
11978 + default.
11980 +Runtime Options
11981 +---------------
11982 +:Required: Write ``1`` to ``/sys/kernel/mm/lru_gen/enable`` if the
11983 + feature was not turned on by default.
11985 +:Optional: Change ``/sys/kernel/mm/lru_gen/spread`` to a number ``N``
11986 + to spread pages out across ``N+1`` generations. ``N`` should be less
11987 + than ``X``. Larger values make the background aging more aggressive.
11989 +:Optional: Read ``/sys/kernel/debug/lru_gen`` to verify the feature.
11990 + This file has the following output:
11994 +  memcg  memcg_id  memcg_path
11995 +    node  node_id
11996 +      min_gen  birth_time  anon_size  file_size
11997 +      ...
11998 +      max_gen  birth_time  anon_size  file_size
12000 +Given a memcg and a node, ``min_gen`` is the oldest generation
12001 +(number) and ``max_gen`` is the youngest. Birth time is in
12002 +milliseconds. The sizes of anon and file types are in pages.
12004 +Recipes
12005 +-------
12006 +:Android on ARMv8.1+: ``X=4``, ``N=0``
12008 +:Android on pre-ARMv8.1 CPUs: Not recommended due to the lack of
12009 + ``ARM64_HW_AFDBM``
12011 +:Laptops running Chrome on x86_64: ``X=7``, ``N=2``
12013 +:Working set estimation: Write ``+ memcg_id node_id gen [swappiness]``
12014 + to ``/sys/kernel/debug/lru_gen`` to account referenced pages to
12015 + generation ``max_gen`` and create the next generation ``max_gen+1``.
12016 + ``gen`` should be equal to ``max_gen``. A swap file and a non-zero
12017 + ``swappiness`` are required to scan anon type. If swapping is not
12018 + desired, set ``vm.swappiness`` to ``0``.
12020 +:Proactive reclaim: Write ``- memcg_id node_id gen [swappiness]
12021 + [nr_to_reclaim]`` to ``/sys/kernel/debug/lru_gen`` to evict
12022 + generations less than or equal to ``gen``. ``gen`` should be less
12023 + than ``max_gen-1`` as ``max_gen`` and ``max_gen-1`` are active
12024 + generations and therefore protected from the eviction. Use
12025 + ``nr_to_reclaim`` to limit the number of pages to be evicted.
12026 + Multiple command lines are supported, and so is concatenation with
12027 + delimiters ``,`` and ``;`` (see the example below).
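+
+For example (the memcg/node ids and generation numbers below are made up; read
+``/sys/kernel/debug/lru_gen`` first to learn the real ``max_gen``)::
+
+  # age memcg 1 on node 0, with gen equal to the current max_gen (say 620)
+  echo '+ 1 0 620' > /sys/kernel/debug/lru_gen
+  # evict generations <= 618 with swappiness 10, at most 4096 pages
+  echo '- 1 0 618 10 4096' > /sys/kernel/debug/lru_gen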
12029 +Framework
12030 +=========
12031 +For each ``lruvec``, evictable pages are divided into multiple
12032 +generations. The youngest generation number is stored in ``max_seq``
12033 +for both anon and file types as they are aged on an equal footing. The
12034 +oldest generation numbers are stored in ``min_seq[2]`` separately for
12035 +anon and file types as clean file pages can be evicted regardless of
12036 +swap and write-back constraints. Generation numbers are truncated into
12037 +``order_base_2(CONFIG_NR_LRU_GENS+1)`` bits in order to fit into
12038 +``page->flags``. The sliding window technique is used to prevent
12039 +truncated generation numbers from overlapping. Each truncated
12040 +generation number is an index to an array of per-type and per-zone
12041 +lists. Evictable pages are added to the per-zone lists indexed by
12042 +``max_seq`` or ``min_seq[2]`` (modulo ``CONFIG_NR_LRU_GENS``),
12043 +depending on whether they are being faulted in.
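+
+Under those assumptions, the index computation amounts to something like (the
+helper name is illustrative)::
+
+	/* map a full (untruncated) sequence number to a list index */
+	static inline int lru_gen_from_seq(unsigned long seq)
+	{
+		return seq % CONFIG_NR_LRU_GENS;
+	}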
12045 +Each generation is then divided into multiple tiers. Tiers represent
12046 +levels of usage from file descriptors only. Pages accessed N times via
12047 +file descriptors belong to tier order_base_2(N). In contrast to moving
12048 +across generations which requires the lru lock, moving across tiers
12049 +only involves an atomic operation on ``page->flags`` and therefore has
12050 +a negligible cost.
12052 +The workflow comprises two conceptually independent functions: the
12053 +aging and the eviction.
12055 +Aging
12056 +-----
12057 +The aging produces young generations. Given an ``lruvec``, the aging
12058 +scans page tables for referenced pages of this ``lruvec``. Upon
12059 +finding one, the aging updates its generation number to ``max_seq``.
12060 +After each round of scan, the aging increments ``max_seq``.
12062 +The aging maintains either a system-wide ``mm_struct`` list or
12063 +per-memcg ``mm_struct`` lists, and it only scans page tables of
12064 +processes that have been scheduled since the last scan. Since scans
12065 +are differential with respect to referenced pages, the cost is roughly
12066 +proportional to their number.
12068 +The aging is due when both of ``min_seq[2]`` reach ``max_seq-1``,
12069 +assuming both anon and file types are reclaimable.
12071 +Eviction
12072 +--------
12073 +The eviction consumes old generations. Given an ``lruvec``, the
12074 +eviction scans the pages on the per-zone lists indexed by either of
12075 +``min_seq[2]``. It first tries to select a type based on the values of
12076 +``min_seq[2]``. When anon and file types are both available from the
12077 +same generation, it selects the one that has a lower refault rate.
12079 +During a scan, the eviction sorts pages according to their generation
12080 +numbers, if the aging has found them referenced.  It also moves pages
12081 +from the tiers that have higher refault rates than tier 0 to the next
12082 +generation.
12084 +When it finds all the per-zone lists of a selected type are empty, the
12085 +eviction increments ``min_seq[2]`` indexed by this selected type.
12087 +Rationale
12088 +=========
12089 +Limitations of Current Implementation
12090 +-------------------------------------
12091 +Notion of Active/Inactive
12092 +~~~~~~~~~~~~~~~~~~~~~~~~~
12093 +For servers equipped with hundreds of gigabytes of memory, the
12094 +granularity of the active/inactive is too coarse to be useful for job
12095 +scheduling. False active/inactive rates are relatively high, and thus
12096 +the assumed savings may not materialize.
12098 +For phones and laptops, executable pages are frequently evicted
12099 +despite the fact that there are many less recently used anon pages.
12100 +Major faults on executable pages cause ``janks`` (slow UI renderings)
12101 +and negatively impact user experience.
12103 +For ``lruvec``\s from different memcgs or nodes, comparisons are
12104 +impossible due to the lack of a common frame of reference.
12106 +Incremental Scans via ``rmap``
12107 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12108 +Each incremental scan picks up at where the last scan left off and
12109 +stops after it has found a handful of unreferenced pages. For
12110 +workloads using a large amount of anon memory, incremental scans lose
12111 +the advantage under sustained memory pressure due to high ratios of
12112 +the number of scanned pages to the number of reclaimed pages. On top
12113 +of that, the ``rmap`` has poor memory locality due to its complex data
12114 +structures. The combined effects typically result in a high amount of
12115 +CPU usage in the reclaim path.
12117 +Benefits of Multigenerational LRU
12118 +---------------------------------
12119 +Notion of Generation Numbers
12120 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12121 +The notion of generation numbers introduces a quantitative approach to
12122 +memory overcommit. A larger number of pages can be spread out across
12123 +configurable generations, and thus they have relatively low false
12124 +active/inactive rates. Each generation includes all pages that have
12125 +been referenced since the last generation.
12127 +Given an ``lruvec``, scans and the selections between anon and file
12128 +types are all based on generation numbers, which are simple and yet
12129 +effective. For different ``lruvec``\s, comparisons are still possible
12130 +based on birth times of generations.
12132 +Differential Scans via Page Tables
12133 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12134 +Each differential scan discovers all pages that have been referenced
12135 +since the last scan. Specifically, it walks the ``mm_struct`` list
12136 +associated with an ``lruvec`` to scan page tables of processes that
12137 +have been scheduled since the last scan. The cost of each differential
12138 +scan is roughly proportional to the number of referenced pages it
12139 +discovers. Unless address spaces are extremely sparse, page tables
12140 +usually have better memory locality than the ``rmap``. The end result
12141 +is generally a significant reduction in CPU usage, for workloads
12142 +using a large amount of anon memory.
12144 +To-do List
12145 +==========
12146 +KVM Optimization
12147 +----------------
12148 +Support shadow page table scanning.
12150 +NUMA Optimization
12151 +-----------------
12152 +Support NUMA policies and per-node RSS counters.
12153 diff --git a/MAINTAINERS b/MAINTAINERS
12154 index 9450e052f1b1..b7a2162d159a 100644
12155 --- a/MAINTAINERS
12156 +++ b/MAINTAINERS
12157 @@ -7377,7 +7377,7 @@ F:        Documentation/locking/*futex*
12158  F:     include/asm-generic/futex.h
12159  F:     include/linux/futex.h
12160  F:     include/uapi/linux/futex.h
12161 -F:     kernel/futex.c
12162 +F:     kernel/futex*
12163  F:     tools/perf/bench/futex*
12164  F:     tools/testing/selftests/futex/
12166 @@ -12775,6 +12775,13 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs.git
12167  F:     Documentation/filesystems/ntfs.rst
12168  F:     fs/ntfs/
12170 +NTFS3 FILESYSTEM
12171 +M:     Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
12172 +S:     Supported
12173 +W:     http://www.paragon-software.com/
12174 +F:     Documentation/filesystems/ntfs3.rst
12175 +F:     fs/ntfs3/
12177  NUBUS SUBSYSTEM
12178  M:     Finn Thain <fthain@telegraphics.com.au>
12179  L:     linux-m68k@lists.linux-m68k.org
12180 @@ -19912,6 +19919,18 @@ F:     Documentation/vm/zsmalloc.rst
12181  F:     include/linux/zsmalloc.h
12182  F:     mm/zsmalloc.c
12184 +ZSTD
12185 +M:     Nick Terrell <terrelln@fb.com>
12186 +S:     Maintained
12187 +B:     https://github.com/facebook/zstd/issues
12188 +T:     git git://github.com/terrelln/linux.git
12189 +F:     include/linux/zstd*
12190 +F:     lib/zstd/
12191 +F:     lib/decompress_unzstd.c
12192 +F:     crypto/zstd.c
12193 +N:     zstd
12194 +K:     zstd
12196  ZSWAP COMPRESSED SWAP CACHING
12197  M:     Seth Jennings <sjenning@redhat.com>
12198  M:     Dan Streetman <ddstreet@ieee.org>
12199 diff --git a/Makefile b/Makefile
12200 index 3a10a8e08b6d..ef62c826e868 100644
12201 --- a/Makefile
12202 +++ b/Makefile
12203 @@ -1,7 +1,7 @@
12204  # SPDX-License-Identifier: GPL-2.0
12205  VERSION = 5
12206  PATCHLEVEL = 12
12207 -SUBLEVEL = 0
12208 +SUBLEVEL = 8
12209  EXTRAVERSION =
12210  NAME = Frozen Wasteland
12212 @@ -775,16 +775,16 @@ KBUILD_CFLAGS += -Wno-gnu
12213  KBUILD_CFLAGS += -mno-global-merge
12214  else
12216 -# These warnings generated too much noise in a regular build.
12217 -# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
12218 -KBUILD_CFLAGS += -Wno-unused-but-set-variable
12220  # Warn about unmarked fall-throughs in switch statement.
12221  # Disabled for clang while comment to attribute conversion happens and
12222  # https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
12223  KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
12224  endif
12226 +# These warnings generated too much noise in a regular build.
12227 +# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
12228 +KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
12230  KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
12231  ifdef CONFIG_FRAME_POINTER
12232  KBUILD_CFLAGS  += -fno-omit-frame-pointer -fno-optimize-sibling-calls
12233 @@ -1066,8 +1066,8 @@ endif # INSTALL_MOD_STRIP
12234  export mod_strip_cmd
12236  # CONFIG_MODULE_COMPRESS, if defined, will cause module to be compressed
12237 -# after they are installed in agreement with CONFIG_MODULE_COMPRESS_GZIP
12238 -# or CONFIG_MODULE_COMPRESS_XZ.
12239 +# after they are installed in agreement with CONFIG_MODULE_COMPRESS_GZIP,
12240 +# CONFIG_MODULE_COMPRESS_XZ, or CONFIG_MODULE_COMPRESS_ZSTD.
12242  mod_compress_cmd = true
12243  ifdef CONFIG_MODULE_COMPRESS
12244 @@ -1077,6 +1077,9 @@ ifdef CONFIG_MODULE_COMPRESS
12245    ifdef CONFIG_MODULE_COMPRESS_XZ
12246      mod_compress_cmd = $(XZ) --lzma2=dict=2MiB -f
12247    endif # CONFIG_MODULE_COMPRESS_XZ
12248 +  ifdef CONFIG_MODULE_COMPRESS_ZSTD
12249 +    mod_compress_cmd = $(ZSTD) -T0 --rm -f -q
12250 +  endif # CONFIG_MODULE_COMPRESS_ZSTD
12251  endif # CONFIG_MODULE_COMPRESS
12252  export mod_compress_cmd
12254 @@ -1513,7 +1516,7 @@ endif # CONFIG_MODULES
12255  # make distclean Remove editor backup files, patch leftover files and the like
12257  # Directories & files removed with 'make clean'
12258 -CLEAN_FILES += include/ksym vmlinux.symvers \
12259 +CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
12260                modules.builtin modules.builtin.modinfo modules.nsdeps \
12261                compile_commands.json .thinlto-cache
12263 diff --git a/arch/Kconfig b/arch/Kconfig
12264 index ecfd3520b676..cbd7f66734ee 100644
12265 --- a/arch/Kconfig
12266 +++ b/arch/Kconfig
12267 @@ -782,6 +782,15 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
12268  config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
12269         bool
12271 +config HAVE_ARCH_PARENT_PMD_YOUNG
12272 +       bool
12273 +       depends on PGTABLE_LEVELS > 2
12274 +       help
12275 +         Architectures that select this are able to set the accessed bit on
12276 +         non-leaf PMD entries in addition to leaf PTE entries where pages are
12277 +         mapped. For them, page table walkers that clear the accessed bit may
12278 +         stop at non-leaf PMD entries when they do not see the accessed bit.
12280  config HAVE_ARCH_HUGE_VMAP
12281         bool
12283 diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
12284 index ad9b7fe4dba3..4a9d33372fe2 100644
12285 --- a/arch/arc/include/asm/page.h
12286 +++ b/arch/arc/include/asm/page.h
12287 @@ -7,6 +7,18 @@
12289  #include <uapi/asm/page.h>
12291 +#ifdef CONFIG_ARC_HAS_PAE40
12293 +#define MAX_POSSIBLE_PHYSMEM_BITS      40
12294 +#define PAGE_MASK_PHYS                 (0xff00000000ull | PAGE_MASK)
12296 +#else /* CONFIG_ARC_HAS_PAE40 */
12298 +#define MAX_POSSIBLE_PHYSMEM_BITS      32
12299 +#define PAGE_MASK_PHYS                 PAGE_MASK
12301 +#endif /* CONFIG_ARC_HAS_PAE40 */
12303  #ifndef __ASSEMBLY__
12305  #define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
12306 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
12307 index 163641726a2b..5878846f00cf 100644
12308 --- a/arch/arc/include/asm/pgtable.h
12309 +++ b/arch/arc/include/asm/pgtable.h
12310 @@ -107,8 +107,8 @@
12311  #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
12313  /* Set of bits not changed in pte_modify */
12314 -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
12316 +#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
12317 +                                                          _PAGE_SPECIAL)
12318  /* More Abbrevaited helpers */
12319  #define PAGE_U_NONE     __pgprot(___DEF)
12320  #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
12321 @@ -132,13 +132,7 @@
12322  #define PTE_BITS_IN_PD0                (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
12323  #define PTE_BITS_RWX           (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
12325 -#ifdef CONFIG_ARC_HAS_PAE40
12326 -#define PTE_BITS_NON_RWX_IN_PD1        (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
12327 -#define MAX_POSSIBLE_PHYSMEM_BITS 40
12328 -#else
12329 -#define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK | _PAGE_CACHEABLE)
12330 -#define MAX_POSSIBLE_PHYSMEM_BITS 32
12331 -#endif
12332 +#define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
12334  /**************************************************************************
12335   * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
12336 diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
12337 index 2a97e2718a21..2a4ad619abfb 100644
12338 --- a/arch/arc/include/uapi/asm/page.h
12339 +++ b/arch/arc/include/uapi/asm/page.h
12340 @@ -33,5 +33,4 @@
12342  #define PAGE_MASK      (~(PAGE_SIZE-1))
12345  #endif /* _UAPI__ASM_ARC_PAGE_H */
12346 diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
12347 index 1743506081da..2cb8dfe866b6 100644
12348 --- a/arch/arc/kernel/entry.S
12349 +++ b/arch/arc/kernel/entry.S
12350 @@ -177,7 +177,7 @@ tracesys:
12352         ; Do the Sys Call as we normally would.
12353         ; Validate the Sys Call number
12354 -       cmp     r8,  NR_syscalls
12355 +       cmp     r8,  NR_syscalls - 1
12356         mov.hi  r0, -ENOSYS
12357         bhi     tracesys_exit
12359 @@ -255,7 +255,7 @@ ENTRY(EV_Trap)
12360         ;============ Normal syscall case
12362         ; syscall num shd not exceed the total system calls avail
12363 -       cmp     r8,  NR_syscalls
12364 +       cmp     r8,  NR_syscalls - 1
12365         mov.hi  r0, -ENOSYS
12366         bhi     .Lret_from_system_call
12368 diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
12369 index ce07e697916c..1bcc6985b9a0 100644
12370 --- a/arch/arc/mm/init.c
12371 +++ b/arch/arc/mm/init.c
12372 @@ -157,7 +157,16 @@ void __init setup_arch_memory(void)
12373         min_high_pfn = PFN_DOWN(high_mem_start);
12374         max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
12376 -       max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
12377 +       /*
12378 +        * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
12379 +        * For HIGHMEM without PAE max_high_pfn should be less than
12380 +        * min_low_pfn to guarantee that these two regions don't overlap.
12381 +        * For PAE case highmem is greater than lowmem, so it is natural
12382 +        * to use max_high_pfn.
12383 +        *
12384 +        * In both cases, holes should be handled by pfn_valid().
12385 +        */
12386 +       max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
12388         high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
12390 diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
12391 index fac4adc90204..95c649fbc95a 100644
12392 --- a/arch/arc/mm/ioremap.c
12393 +++ b/arch/arc/mm/ioremap.c
12394 @@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
12395  void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
12396                            unsigned long flags)
12398 +       unsigned int off;
12399         unsigned long vaddr;
12400         struct vm_struct *area;
12401 -       phys_addr_t off, end;
12402 +       phys_addr_t end;
12403         pgprot_t prot = __pgprot(flags);
12405         /* Don't allow wraparound, zero size */
12406 @@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
12408         /* Mappings have to be page-aligned */
12409         off = paddr & ~PAGE_MASK;
12410 -       paddr &= PAGE_MASK;
12411 +       paddr &= PAGE_MASK_PHYS;
12412         size = PAGE_ALIGN(end + 1) - paddr;
12414         /*
12415 diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
12416 index 9bb3c24f3677..9c7c68247289 100644
12417 --- a/arch/arc/mm/tlb.c
12418 +++ b/arch/arc/mm/tlb.c
12419 @@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
12420                       pte_t *ptep)
12422         unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
12423 -       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
12424 +       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
12425         struct page *page = pfn_to_page(pte_pfn(*ptep));
12427         create_tlb(vma, vaddr, ptep);
12428 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
12429 index fd94e27ba4fa..c1f804768621 100644
12430 --- a/arch/arm/boot/compressed/Makefile
12431 +++ b/arch/arm/boot/compressed/Makefile
12432 @@ -118,8 +118,8 @@ asflags-y := -DZIMAGE
12434  # Supply kernel BSS size to the decompressor via a linker symbol.
12435  KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
12436 -               sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
12437 -                      -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
12438 +               sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
12439 +                      -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
12440  LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
12441  # Supply ZRELADDR to the decompressor via a linker symbol.
12442  ifneq ($(CONFIG_AUTO_ZRELADDR),y)
12443 diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12444 index 6c9804d2f3b4..6df1ce545061 100644
12445 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12446 +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12447 @@ -713,9 +713,9 @@ &i2c7 {
12448         multi-master;
12449         status = "okay";
12451 -       si7021-a20@20 {
12452 +       si7021-a20@40 {
12453                 compatible = "silabs,si7020";
12454 -               reg = <0x20>;
12455 +               reg = <0x40>;
12456         };
12458         tmp275@48 {
12459 diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
12460 index 775ceb3acb6c..edca66c232c1 100644
12461 --- a/arch/arm/boot/dts/at91-sam9x60ek.dts
12462 +++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
12463 @@ -8,6 +8,7 @@
12464   */
12465  /dts-v1/;
12466  #include "sam9x60.dtsi"
12467 +#include <dt-bindings/input/input.h>
12469  / {
12470         model = "Microchip SAM9X60-EK";
12471 @@ -84,7 +85,7 @@ gpio_keys {
12472                 sw1 {
12473                         label = "SW1";
12474                         gpios = <&pioD 18 GPIO_ACTIVE_LOW>;
12475 -                       linux,code=<0x104>;
12476 +                       linux,code=<KEY_PROG1>;
12477                         wakeup-source;
12478                 };
12479         };
12480 diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12481 index 84e1180f3e89..a9e6fee55a2a 100644
12482 --- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12483 +++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12484 @@ -11,6 +11,7 @@
12485  #include "at91-sama5d27_som1.dtsi"
12486  #include <dt-bindings/mfd/atmel-flexcom.h>
12487  #include <dt-bindings/gpio/gpio.h>
12488 +#include <dt-bindings/input/input.h>
12490  / {
12491         model = "Atmel SAMA5D27 SOM1 EK";
12492 @@ -466,7 +467,7 @@ gpio_keys {
12493                 pb4 {
12494                         label = "USER";
12495                         gpios = <&pioA PIN_PA29 GPIO_ACTIVE_LOW>;
12496 -                       linux,code = <0x104>;
12497 +                       linux,code = <KEY_PROG1>;
12498                         wakeup-source;
12499                 };
12500         };
12501 diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12502 index 180a08765cb8..ff83967fd008 100644
12503 --- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12504 +++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12505 @@ -8,6 +8,7 @@
12506   */
12507  /dts-v1/;
12508  #include "at91-sama5d27_wlsom1.dtsi"
12509 +#include <dt-bindings/input/input.h>
12511  / {
12512         model = "Microchip SAMA5D27 WLSOM1 EK";
12513 @@ -35,7 +36,7 @@ gpio_keys {
12514                 sw4 {
12515                         label = "USER BUTTON";
12516                         gpios = <&pioA PIN_PB2 GPIO_ACTIVE_LOW>;
12517 -                       linux,code = <0x104>;
12518 +                       linux,code = <KEY_PROG1>;
12519                         wakeup-source;
12520                 };
12521         };
12522 diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
12523 index 46722a163184..bd64721fa23c 100644
12524 --- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
12525 +++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
12526 @@ -12,6 +12,7 @@
12527  #include "sama5d2.dtsi"
12528  #include "sama5d2-pinfunc.h"
12529  #include <dt-bindings/gpio/gpio.h>
12530 +#include <dt-bindings/input/input.h>
12531  #include <dt-bindings/mfd/atmel-flexcom.h>
12533  / {
12534 @@ -51,7 +52,7 @@ gpio_keys {
12535                 sw4 {
12536                         label = "USER_PB1";
12537                         gpios = <&pioA PIN_PD0 GPIO_ACTIVE_LOW>;
12538 -                       linux,code = <0x104>;
12539 +                       linux,code = <KEY_PROG1>;
12540                         wakeup-source;
12541                 };
12542         };
12543 diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12544 index 8de57d164acd..dfd150eb0fd8 100644
12545 --- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12546 +++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12547 @@ -11,6 +11,7 @@
12548  #include "sama5d2-pinfunc.h"
12549  #include <dt-bindings/mfd/atmel-flexcom.h>
12550  #include <dt-bindings/gpio/gpio.h>
12551 +#include <dt-bindings/input/input.h>
12552  #include <dt-bindings/pinctrl/at91.h>
12554  / {
12555 @@ -402,7 +403,7 @@ gpio_keys {
12556                 bp1 {
12557                         label = "PB_USER";
12558                         gpios = <&pioA PIN_PA10 GPIO_ACTIVE_LOW>;
12559 -                       linux,code = <0x104>;
12560 +                       linux,code = <KEY_PROG1>;
12561                         wakeup-source;
12562                 };
12563         };
12564 diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12565 index 4e7cf21f124c..509c732a0d8b 100644
12566 --- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12567 +++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12568 @@ -10,6 +10,7 @@
12569  #include "sama5d2-pinfunc.h"
12570  #include <dt-bindings/mfd/atmel-flexcom.h>
12571  #include <dt-bindings/gpio/gpio.h>
12572 +#include <dt-bindings/input/input.h>
12573  #include <dt-bindings/regulator/active-semi,8945a-regulator.h>
12575  / {
12576 @@ -712,7 +713,7 @@ gpio_keys {
12577                 bp1 {
12578                         label = "PB_USER";
12579                         gpios = <&pioA PIN_PB9 GPIO_ACTIVE_LOW>;
12580 -                       linux,code = <0x104>;
12581 +                       linux,code = <KEY_PROG1>;
12582                         wakeup-source;
12583                 };
12584         };
12585 diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12586 index 5179258f9247..9c55a921263b 100644
12587 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12588 +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12589 @@ -7,6 +7,7 @@
12590   */
12591  /dts-v1/;
12592  #include "sama5d36.dtsi"
12593 +#include <dt-bindings/input/input.h>
12595  / {
12596         model = "SAMA5D3 Xplained";
12597 @@ -354,7 +355,7 @@ gpio_keys {
12598                 bp3 {
12599                         label = "PB_USER";
12600                         gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
12601 -                       linux,code = <0x104>;
12602 +                       linux,code = <KEY_PROG1>;
12603                         wakeup-source;
12604                 };
12605         };
12606 diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
12607 index d3446e42b598..ce96345d28a3 100644
12608 --- a/arch/arm/boot/dts/at91sam9260ek.dts
12609 +++ b/arch/arm/boot/dts/at91sam9260ek.dts
12610 @@ -7,6 +7,7 @@
12611   */
12612  /dts-v1/;
12613  #include "at91sam9260.dtsi"
12614 +#include <dt-bindings/input/input.h>
12616  / {
12617         model = "Atmel at91sam9260ek";
12618 @@ -156,7 +157,7 @@ btn3 {
12619                 btn4 {
12620                         label = "Button 4";
12621                         gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
12622 -                       linux,code = <0x104>;
12623 +                       linux,code = <KEY_PROG1>;
12624                         wakeup-source;
12625                 };
12626         };
12627 diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12628 index 6e6e672c0b86..87bb39060e8b 100644
12629 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12630 +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12631 @@ -5,6 +5,7 @@
12632   * Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
12633   */
12634  #include "at91sam9g20.dtsi"
12635 +#include <dt-bindings/input/input.h>
12637  / {
12639 @@ -234,7 +235,7 @@ btn3 {
12640                 btn4 {
12641                         label = "Button 4";
12642                         gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
12643 -                       linux,code = <0x104>;
12644 +                       linux,code = <KEY_PROG1>;
12645                         wakeup-source;
12646                 };
12647         };
12648 diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12649 index 6a96655d8626..8ed403767540 100644
12650 --- a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12651 +++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12652 @@ -21,8 +21,8 @@ chosen {
12654         memory@0 {
12655                 device_type = "memory";
12656 -               reg = <0x00000000 0x08000000
12657 -                      0x88000000 0x08000000>;
12658 +               reg = <0x00000000 0x08000000>,
12659 +                     <0x88000000 0x08000000>;
12660         };
12662         leds {
12663 diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12664 index 3b0029e61b4c..667b118ba4ee 100644
12665 --- a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12666 +++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12667 @@ -21,8 +21,8 @@ chosen {
12669         memory@0 {
12670                 device_type = "memory";
12671 -               reg = <0x00000000 0x08000000
12672 -                      0x88000000 0x08000000>;
12673 +               reg = <0x00000000 0x08000000>,
12674 +                     <0x88000000 0x08000000>;
12675         };
12677         leds {
12678 diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12679 index 90f57bad6b24..ff31ce45831a 100644
12680 --- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12681 +++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12682 @@ -21,8 +21,8 @@ chosen {
12684         memory@0 {
12685                 device_type = "memory";
12686 -               reg = <0x00000000 0x08000000
12687 -                      0x88000000 0x18000000>;
12688 +               reg = <0x00000000 0x08000000>,
12689 +                     <0x88000000 0x18000000>;
12690         };
12692         spi {
12693 diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12694 index fed75e6ab58c..61c7b137607e 100644
12695 --- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12696 +++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12697 @@ -22,8 +22,8 @@ chosen {
12699         memory {
12700                 device_type = "memory";
12701 -               reg = <0x00000000 0x08000000
12702 -                      0x88000000 0x08000000>;
12703 +               reg = <0x00000000 0x08000000>,
12704 +                     <0x88000000 0x08000000>;
12705         };
12707         leds {
12708 diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12709 index 79542e18915c..4c60eda296d9 100644
12710 --- a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12711 +++ b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12712 @@ -21,8 +21,8 @@ chosen {
12714         memory@0 {
12715                 device_type = "memory";
12716 -               reg = <0x00000000 0x08000000
12717 -                      0x88000000 0x08000000>;
12718 +               reg = <0x00000000 0x08000000>,
12719 +                     <0x88000000 0x08000000>;
12720         };
12722         leds {
12723 diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12724 index 51c64f0b2560..9ca6d1b2590d 100644
12725 --- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12726 +++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12727 @@ -21,8 +21,8 @@ chosen {
12729         memory@0 {
12730                 device_type = "memory";
12731 -               reg = <0x00000000 0x08000000
12732 -                      0x88000000 0x08000000>;
12733 +               reg = <0x00000000 0x08000000>,
12734 +                     <0x88000000 0x08000000>;
12735         };
12737         leds {
12738 diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12739 index c29950b43a95..0e273c598732 100644
12740 --- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12741 +++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12742 @@ -21,8 +21,8 @@ chosen {
12744         memory@0 {
12745                 device_type = "memory";
12746 -               reg = <0x00000000 0x08000000
12747 -                      0x88000000 0x08000000>;
12748 +               reg = <0x00000000 0x08000000>,
12749 +                     <0x88000000 0x08000000>;
12750         };
12752         leds {
12753 diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12754 index 2f2d2b0a6893..d857751ec507 100644
12755 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12756 +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12757 @@ -21,8 +21,8 @@ chosen {
12759         memory@0 {
12760                 device_type = "memory";
12761 -               reg = <0x00000000 0x08000000
12762 -                      0x88000000 0x08000000>;
12763 +               reg = <0x00000000 0x08000000>,
12764 +                     <0x88000000 0x08000000>;
12765         };
12767         spi {
12768 diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12769 index 0e349e39f608..8b1a05a0f1a1 100644
12770 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12771 +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12772 @@ -21,8 +21,8 @@ chosen {
12774         memory@0 {
12775                 device_type = "memory";
12776 -               reg = <0x00000000 0x08000000
12777 -                      0x88000000 0x08000000>;
12778 +               reg = <0x00000000 0x08000000>,
12779 +                     <0x88000000 0x08000000>;
12780         };
12782         spi {
12783 diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12784 index 8f1e565c3db4..6c6bb7b17d27 100644
12785 --- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12786 +++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12787 @@ -21,8 +21,8 @@ chosen {
12789         memory {
12790                 device_type = "memory";
12791 -               reg = <0x00000000 0x08000000
12792 -                      0x88000000 0x08000000>;
12793 +               reg = <0x00000000 0x08000000>,
12794 +                     <0x88000000 0x08000000>;
12795         };
12797         leds {
12798 diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12799 index ce888b1835d1..d29e7f80ea6a 100644
12800 --- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12801 +++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12802 @@ -21,8 +21,8 @@ chosen {
12804         memory {
12805                 device_type = "memory";
12806 -               reg = <0x00000000 0x08000000
12807 -                      0x88000000 0x18000000>;
12808 +               reg = <0x00000000 0x08000000>,
12809 +                     <0x88000000 0x18000000>;
12810         };
12812         leds {
12813 diff --git a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12814 index ed8619b54d69..38fbefdf2e4e 100644
12815 --- a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12816 +++ b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12817 @@ -18,8 +18,8 @@ chosen {
12819         memory {
12820                 device_type = "memory";
12821 -               reg = <0x00000000 0x08000000
12822 -                      0x88000000 0x08000000>;
12823 +               reg = <0x00000000 0x08000000>,
12824 +                     <0x88000000 0x08000000>;
12825         };
12827         gpio-keys {
12828 diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12829 index 1f87993eae1d..7989a53597d4 100644
12830 --- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12831 +++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12832 @@ -21,8 +21,8 @@ chosen {
12834         memory {
12835                 device_type = "memory";
12836 -               reg = <0x00000000 0x08000000
12837 -                      0x88000000 0x08000000>;
12838 +               reg = <0x00000000 0x08000000>,
12839 +                     <0x88000000 0x08000000>;
12840         };
12842         leds {
12843 diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12844 index 6c6199a53d09..87b655be674c 100644
12845 --- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12846 +++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12847 @@ -32,8 +32,8 @@ chosen {
12849         memory {
12850                 device_type = "memory";
12851 -               reg = <0x00000000 0x08000000
12852 -                      0x88000000 0x08000000>;
12853 +               reg = <0x00000000 0x08000000>,
12854 +                     <0x88000000 0x08000000>;
12855         };
12857         leds {
12858 diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12859 index 911c65fbf251..e635a15041dd 100644
12860 --- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12861 +++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12862 @@ -21,8 +21,8 @@ chosen {
12864         memory@0 {
12865                 device_type = "memory";
12866 -               reg = <0x00000000 0x08000000
12867 -                      0x88000000 0x08000000>;
12868 +               reg = <0x00000000 0x08000000>,
12869 +                     <0x88000000 0x08000000>;
12870         };
12872         nand: nand@18028000 {
12873 diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12874 index 3725f2b0d60b..4b24b25389b5 100644
12875 --- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12876 +++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12877 @@ -18,8 +18,8 @@ chosen {
12879         memory@0 {
12880                 device_type = "memory";
12881 -               reg = <0x00000000 0x08000000
12882 -                      0x88000000 0x08000000>;
12883 +               reg = <0x00000000 0x08000000>,
12884 +                     <0x88000000 0x08000000>;
12885         };
12887         gpio-keys {
12888 diff --git a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12889 index 50f7cd08cfbb..a6dc99955e19 100644
12890 --- a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12891 +++ b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12892 @@ -18,8 +18,8 @@ chosen {
12894         memory@0 {
12895                 device_type = "memory";
12896 -               reg = <0x00000000 0x08000000
12897 -                      0x88000000 0x18000000>;
12898 +               reg = <0x00000000 0x08000000>,
12899 +                     <0x88000000 0x18000000>;
12900         };
12902         leds {
12903 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12904 index bcc420f85b56..ff98837bc0db 100644
12905 --- a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12906 +++ b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12907 @@ -18,8 +18,8 @@ chosen {
12909         memory@0 {
12910                 device_type = "memory";
12911 -               reg = <0x00000000 0x08000000
12912 -                      0x88000000 0x18000000>;
12913 +               reg = <0x00000000 0x08000000>,
12914 +                     <0x88000000 0x18000000>;
12915         };
12917         leds {
12918 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12919 index 4f8d777ae18d..452b8d0ab180 100644
12920 --- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12921 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12922 @@ -18,8 +18,8 @@ chosen {
12924         memory {
12925                 device_type = "memory";
12926 -               reg = <0x00000000 0x08000000
12927 -                      0x88000000 0x18000000>;
12928 +               reg = <0x00000000 0x08000000>,
12929 +                     <0x88000000 0x18000000>;
12930         };
12932         leds {
12933 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12934 index e17e9a17fb00..b76bfe6efcd4 100644
12935 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12936 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12937 @@ -18,8 +18,8 @@ chosen {
12939         memory@0 {
12940                 device_type = "memory";
12941 -               reg = <0x00000000 0x08000000
12942 -                      0x88000000 0x08000000>;
12943 +               reg = <0x00000000 0x08000000>,
12944 +                     <0x88000000 0x08000000>;
12945         };
12947         leds {
12948 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12949 index 60cc87ecc7ec..32d5a50578ec 100644
12950 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12951 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12952 @@ -18,8 +18,8 @@ chosen {
12954         memory@0 {
12955                 device_type = "memory";
12956 -               reg = <0x00000000 0x08000000
12957 -                      0x88000000 0x18000000>;
12958 +               reg = <0x00000000 0x08000000>,
12959 +                     <0x88000000 0x18000000>;
12960         };
12962         leds {
12963 diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12964 index f42a1703f4ab..42097a4c2659 100644
12965 --- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12966 +++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12967 @@ -18,8 +18,8 @@ chosen {
12969         memory@0 {
12970                 device_type = "memory";
12971 -               reg = <0x00000000 0x08000000
12972 -                      0x88000000 0x18000000>;
12973 +               reg = <0x00000000 0x08000000>,
12974 +                     <0x88000000 0x18000000>;
12975         };
12977         leds {
12978 diff --git a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12979 index ac3a4483dcb3..a2566ad4619c 100644
12980 --- a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12981 +++ b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12982 @@ -15,8 +15,8 @@ / {
12984         memory@0 {
12985                 device_type = "memory";
12986 -               reg = <0x00000000 0x08000000
12987 -                      0x88000000 0x18000000>;
12988 +               reg = <0x00000000 0x08000000>,
12989 +                     <0x88000000 0x18000000>;
12990         };
12992         gpio-keys {
12993 diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
12994 index 3bf90d9e3335..a294a02f2d23 100644
12995 --- a/arch/arm/boot/dts/dra7-l4.dtsi
12996 +++ b/arch/arm/boot/dts/dra7-l4.dtsi
12997 @@ -1168,7 +1168,7 @@ timer2: timer@0 {
12998                         };
12999                 };
13001 -               target-module@34000 {                   /* 0x48034000, ap 7 46.0 */
13002 +               timer3_target: target-module@34000 {    /* 0x48034000, ap 7 46.0 */
13003                         compatible = "ti,sysc-omap4-timer", "ti,sysc";
13004                         reg = <0x34000 0x4>,
13005                               <0x34010 0x4>;
13006 @@ -1195,7 +1195,7 @@ timer3: timer@0 {
13007                         };
13008                 };
13010 -               target-module@36000 {                   /* 0x48036000, ap 9 4e.0 */
13011 +               timer4_target: target-module@36000 {    /* 0x48036000, ap 9 4e.0 */
13012                         compatible = "ti,sysc-omap4-timer", "ti,sysc";
13013                         reg = <0x36000 0x4>,
13014                               <0x36010 0x4>;
13015 diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
13016 index ce1194744f84..53d68786a61f 100644
13017 --- a/arch/arm/boot/dts/dra7.dtsi
13018 +++ b/arch/arm/boot/dts/dra7.dtsi
13019 @@ -46,6 +46,7 @@ aliases {
13021         timer {
13022                 compatible = "arm,armv7-timer";
13023 +               status = "disabled";    /* See ARM architected timer wrap erratum i940 */
13024                 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
13025                              <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
13026                              <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
13027 @@ -1241,3 +1242,22 @@ timer@0 {
13028                 assigned-clock-parents = <&sys_32k_ck>;
13029         };
13030  };
13032 +/* Local timers, see ARM architected timer wrap erratum i940 */
13033 +&timer3_target {
13034 +       ti,no-reset-on-init;
13035 +       ti,no-idle;
13036 +       timer@0 {
13037 +               assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>;
13038 +               assigned-clock-parents = <&timer_sys_clk_div>;
13039 +       };
13042 +&timer4_target {
13043 +       ti,no-reset-on-init;
13044 +       ti,no-idle;
13045 +       timer@0 {
13046 +               assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>;
13047 +               assigned-clock-parents = <&timer_sys_clk_div>;
13048 +       };
13050 diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
13051 index 304a8ee2364c..d98c78207aaf 100644
13052 --- a/arch/arm/boot/dts/exynos4210-i9100.dts
13053 +++ b/arch/arm/boot/dts/exynos4210-i9100.dts
13054 @@ -136,7 +136,7 @@ battery@36 {
13055                         compatible = "maxim,max17042";
13057                         interrupt-parent = <&gpx2>;
13058 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
13059 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
13061                         pinctrl-0 = <&max17042_fuel_irq>;
13062                         pinctrl-names = "default";
13063 diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
13064 index 111c32bae02c..fc77c1bfd844 100644
13065 --- a/arch/arm/boot/dts/exynos4412-midas.dtsi
13066 +++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
13067 @@ -173,7 +173,7 @@ i2c_max77693: i2c-gpio-1 {
13068                 pmic@66 {
13069                         compatible = "maxim,max77693";
13070                         interrupt-parent = <&gpx1>;
13071 -                       interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
13072 +                       interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
13073                         pinctrl-names = "default";
13074                         pinctrl-0 = <&max77693_irq>;
13075                         reg = <0x66>;
13076 @@ -221,7 +221,7 @@ i2c_max77693_fuel: i2c-gpio-3 {
13077                 fuel-gauge@36 {
13078                         compatible = "maxim,max17047";
13079                         interrupt-parent = <&gpx2>;
13080 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
13081 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
13082                         pinctrl-names = "default";
13083                         pinctrl-0 = <&max77693_fuel_irq>;
13084                         reg = <0x36>;
13085 @@ -665,7 +665,7 @@ &i2c_7 {
13086         max77686: pmic@9 {
13087                 compatible = "maxim,max77686";
13088                 interrupt-parent = <&gpx0>;
13089 -               interrupts = <7 IRQ_TYPE_NONE>;
13090 +               interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
13091                 pinctrl-0 = <&max77686_irq>;
13092                 pinctrl-names = "default";
13093                 reg = <0x09>;
13094 diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
13095 index 2b20d9095d9f..eebe6a3952ce 100644
13096 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
13097 +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
13098 @@ -278,7 +278,7 @@ usb3503: usb-hub@8 {
13099         max77686: pmic@9 {
13100                 compatible = "maxim,max77686";
13101                 interrupt-parent = <&gpx3>;
13102 -               interrupts = <2 IRQ_TYPE_NONE>;
13103 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
13104                 pinctrl-names = "default";
13105                 pinctrl-0 = <&max77686_irq>;
13106                 reg = <0x09>;
13107 diff --git a/arch/arm/boot/dts/exynos4412-p4note.dtsi b/arch/arm/boot/dts/exynos4412-p4note.dtsi
13108 index b2f9d5448a18..9e750890edb8 100644
13109 --- a/arch/arm/boot/dts/exynos4412-p4note.dtsi
13110 +++ b/arch/arm/boot/dts/exynos4412-p4note.dtsi
13111 @@ -146,7 +146,7 @@ fuel-gauge@36 {
13112                         pinctrl-0 = <&fuel_alert_irq>;
13113                         pinctrl-names = "default";
13114                         interrupt-parent = <&gpx2>;
13115 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
13116 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
13117                         maxim,rsns-microohm = <10000>;
13118                         maxim,over-heat-temp = <600>;
13119                         maxim,over-volt = <4300>;
13120 @@ -322,7 +322,7 @@ &i2c_7 {
13121         max77686: pmic@9 {
13122                 compatible = "maxim,max77686";
13123                 interrupt-parent = <&gpx0>;
13124 -               interrupts = <7 IRQ_TYPE_NONE>;
13125 +               interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
13126                 pinctrl-0 = <&max77686_irq>;
13127                 pinctrl-names = "default";
13128                 reg = <0x09>;
13129 diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
13130 index 8b5a79a8720c..39bbe18145cf 100644
13131 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
13132 +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
13133 @@ -134,7 +134,7 @@ max77686: pmic@9 {
13134                 compatible = "maxim,max77686";
13135                 reg = <0x09>;
13136                 interrupt-parent = <&gpx3>;
13137 -               interrupts = <2 IRQ_TYPE_NONE>;
13138 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
13139                 pinctrl-names = "default";
13140                 pinctrl-0 = <&max77686_irq>;
13141                 #clock-cells = <1>;
13142 diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13143 index 6635f6184051..2335c4687349 100644
13144 --- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13145 +++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13146 @@ -292,7 +292,7 @@ &i2c_0 {
13147         max77686: pmic@9 {
13148                 compatible = "maxim,max77686";
13149                 interrupt-parent = <&gpx3>;
13150 -               interrupts = <2 IRQ_TYPE_NONE>;
13151 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
13152                 pinctrl-names = "default";
13153                 pinctrl-0 = <&max77686_irq>;
13154                 wakeup-source;
13155 diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13156 index 0cda654371ae..56ee02ceba7d 100644
13157 --- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13158 +++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13159 @@ -575,7 +575,7 @@ fuelgauge: max17048@36 {
13160                         maxim,rcomp = /bits/ 8 <0x4d>;
13162                         interrupt-parent = <&msmgpio>;
13163 -                       interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
13164 +                       interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
13166                         pinctrl-names = "default";
13167                         pinctrl-0 = <&fuelgauge_pin>;
13168 diff --git a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13169 index a0f7f461f48c..2dadb836c5fe 100644
13170 --- a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13171 +++ b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13172 @@ -717,7 +717,7 @@ fuelgauge@36 {
13173                         maxim,rcomp = /bits/ 8 <0x56>;
13175                         interrupt-parent = <&pma8084_gpios>;
13176 -                       interrupts = <21 IRQ_TYPE_EDGE_FALLING>;
13177 +                       interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
13179                         pinctrl-names = "default";
13180                         pinctrl-0 = <&fuelgauge_pin>;
13181 diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
13182 index 09a152b91557..1d6f0c5d02e9 100644
13183 --- a/arch/arm/boot/dts/r8a7790-lager.dts
13184 +++ b/arch/arm/boot/dts/r8a7790-lager.dts
13185 @@ -53,6 +53,9 @@ aliases {
13186                 i2c11 = &i2cexio1;
13187                 i2c12 = &i2chdmi;
13188                 i2c13 = &i2cpwr;
13189 +               mmc0 = &mmcif1;
13190 +               mmc1 = &sdhi0;
13191 +               mmc2 = &sdhi2;
13192         };
13194         chosen {
13195 diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
13196 index f603cba5441f..6af1727b8269 100644
13197 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts
13198 +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
13199 @@ -53,6 +53,9 @@ aliases {
13200                 i2c12 = &i2cexio1;
13201                 i2c13 = &i2chdmi;
13202                 i2c14 = &i2cexio4;
13203 +               mmc0 = &sdhi0;
13204 +               mmc1 = &sdhi1;
13205 +               mmc2 = &sdhi2;
13206         };
13208         chosen {
13209 diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
13210 index c6d563fb7ec7..bf51e29c793a 100644
13211 --- a/arch/arm/boot/dts/r8a7791-porter.dts
13212 +++ b/arch/arm/boot/dts/r8a7791-porter.dts
13213 @@ -28,6 +28,8 @@ aliases {
13214                 serial0 = &scif0;
13215                 i2c9 = &gpioi2c2;
13216                 i2c10 = &i2chdmi;
13217 +               mmc0 = &sdhi0;
13218 +               mmc1 = &sdhi2;
13219         };
13221         chosen {
13222 diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
13223 index abf487e8fe0f..2b59a0491350 100644
13224 --- a/arch/arm/boot/dts/r8a7793-gose.dts
13225 +++ b/arch/arm/boot/dts/r8a7793-gose.dts
13226 @@ -49,6 +49,9 @@ aliases {
13227                 i2c10 = &gpioi2c4;
13228                 i2c11 = &i2chdmi;
13229                 i2c12 = &i2cexio4;
13230 +               mmc0 = &sdhi0;
13231 +               mmc1 = &sdhi1;
13232 +               mmc2 = &sdhi2;
13233         };
13235         chosen {
13236 diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
13237 index 3f1cc5bbf329..32025986b3b9 100644
13238 --- a/arch/arm/boot/dts/r8a7794-alt.dts
13239 +++ b/arch/arm/boot/dts/r8a7794-alt.dts
13240 @@ -19,6 +19,9 @@ aliases {
13241                 i2c10 = &gpioi2c4;
13242                 i2c11 = &i2chdmi;
13243                 i2c12 = &i2cexio4;
13244 +               mmc0 = &mmcif0;
13245 +               mmc1 = &sdhi0;
13246 +               mmc2 = &sdhi1;
13247         };
13249         chosen {
13250 diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
13251 index 677596f6c9c9..af066ee5e275 100644
13252 --- a/arch/arm/boot/dts/r8a7794-silk.dts
13253 +++ b/arch/arm/boot/dts/r8a7794-silk.dts
13254 @@ -31,6 +31,8 @@ aliases {
13255                 serial0 = &scif2;
13256                 i2c9 = &gpioi2c1;
13257                 i2c10 = &i2chdmi;
13258 +               mmc0 = &mmcif0;
13259 +               mmc1 = &sdhi1;
13260         };
13262         chosen {
13263 diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13264 index ca064359dd30..b47d8300e536 100644
13265 --- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13266 +++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13267 @@ -115,7 +115,7 @@ &fg {
13268         compatible = "maxim,max77836-battery";
13270         interrupt-parent = <&gph3>;
13271 -       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
13272 +       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
13274         pinctrl-names = "default";
13275         pinctrl-0 = <&fg_irq>;
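All of the interrupt-type changes above follow one rule: these Maxim PMIC and fuel-gauge ALERT outputs are open-drain lines that stay pulled low until the alert condition is acknowledged, so the correct trigger is level-low. An edge-falling trigger can miss an alert that was already asserted before the handler armed, and IRQ_TYPE_NONE simply left the trigger unspecified. The resulting pattern, sketched with illustrative labels:

	fuel-gauge@36 {
		compatible = "maxim,max17042";
		reg = <0x36>;
		interrupt-parent = <&gpx2>;
		/* level-triggered: keeps firing as long as ALRT stays low */
		interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
	};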
13276 diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13277 index cb3677f0a1cb..b580397ede83 100644
13278 --- a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13279 +++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13280 @@ -8,37 +8,43 @@
13281  / {
13282         soc {
13283                 i2c@80128000 {
13284 -                       /* Marked:
13285 -                        * 129
13286 -                        * M35
13287 -                        * L3GD20
13288 -                        */
13289 -                       l3gd20@6a {
13290 -                               /* Gyroscope */
13291 -                               compatible = "st,l3gd20";
13292 -                               status = "disabled";
13293 +                       accelerometer@19 {
13294 +                               compatible = "st,lsm303dlhc-accel";
13295                                 st,drdy-int-pin = <1>;
13296 -                               drive-open-drain;
13297 -                               reg = <0x6a>; // 0x6a or 0x6b
13298 +                               reg = <0x19>;
13299                                 vdd-supply = <&ab8500_ldo_aux1_reg>;
13300                                 vddio-supply = <&db8500_vsmps2_reg>;
13301 +                               interrupt-parent = <&gpio2>;
13302 +                               interrupts = <18 IRQ_TYPE_EDGE_RISING>,
13303 +                                            <19 IRQ_TYPE_EDGE_RISING>;
13304 +                               pinctrl-names = "default";
13305 +                               pinctrl-0 = <&accel_tvk_mode>;
13306                         };
13307 -                       /*
13308 -                        * Marked:
13309 -                        * 2122
13310 -                        * C3H
13311 -                        * DQEEE
13312 -                        * LIS3DH?
13313 -                        */
13314 -                       lis3dh@18 {
13315 -                               /* Accelerometer */
13316 -                               compatible = "st,lis3dh-accel";
13317 +                       magnetometer@1e {
13318 +                               compatible = "st,lsm303dlm-magn";
13319                                 st,drdy-int-pin = <1>;
13320 -                               reg = <0x18>;
13321 +                               reg = <0x1e>;
13322                                 vdd-supply = <&ab8500_ldo_aux1_reg>;
13323                                 vddio-supply = <&db8500_vsmps2_reg>;
13324 +                               // This interrupt is not properly working with the driver
13325 +                               // interrupt-parent = <&gpio1>;
13326 +                               // interrupts = <0 IRQ_TYPE_EDGE_RISING>;
13327                                 pinctrl-names = "default";
13328 -                               pinctrl-0 = <&accel_tvk_mode>;
13329 +                               pinctrl-0 = <&magn_tvk_mode>;
13330 +                       };
13331 +                       gyroscope@68 {
13332 +                               /* Gyroscope */
13333 +                               compatible = "st,l3g4200d-gyro";
13334 +                               reg = <0x68>;
13335 +                               vdd-supply = <&ab8500_ldo_aux1_reg>;
13336 +                               vddio-supply = <&db8500_vsmps2_reg>;
13337 +                       };
13338 +                       pressure@5c {
13339 +                               /* Barometer/pressure sensor */
13340 +                               compatible = "st,lps001wp-press";
13341 +                               reg = <0x5c>;
13342 +                               vdd-supply = <&ab8500_ldo_aux1_reg>;
13343 +                               vddio-supply = <&db8500_vsmps2_reg>;
13344                         };
13345                 };
13347 @@ -54,5 +60,26 @@ panel {
13348                                 };
13349                         };
13350                 };
13352 +               pinctrl {
13353 +                       accelerometer {
13354 +                               accel_tvk_mode: accel_tvk {
13355 +                                       /* Accelerometer interrupt lines 1 & 2 */
13356 +                                       tvk_cfg {
13357 +                                               pins = "GPIO82_C1", "GPIO83_D3";
13358 +                                               ste,config = <&gpio_in_pd>;
13359 +                                       };
13360 +                               };
13361 +                       };
13362 +                       magnetometer {
13363 +                               magn_tvk_mode: magn_tvk {
13364 +                                       /* GPIO 32 used for DRDY, pull this down */
13365 +                                       tvk_cfg {
13366 +                                               pins = "GPIO32_V2";
13367 +                                               ste,config = <&gpio_in_pd>;
13368 +                                       };
13369 +                               };
13370 +                       };
13371 +               };
13372         };
13373  };
13374 diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13375 index 7b4249ed1983..060baa8b7e9d 100644
13376 --- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13377 +++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13378 @@ -1891,10 +1891,15 @@ pins2 {
13379         usart2_idle_pins_c: usart2-idle-2 {
13380                 pins1 {
13381                         pinmux = <STM32_PINMUX('D', 5, ANALOG)>, /* USART2_TX */
13382 -                                <STM32_PINMUX('D', 4, ANALOG)>, /* USART2_RTS */
13383                                  <STM32_PINMUX('D', 3, ANALOG)>; /* USART2_CTS_NSS */
13384                 };
13385                 pins2 {
13386 +                       pinmux = <STM32_PINMUX('D', 4, AF7)>; /* USART2_RTS */
13387 +                       bias-disable;
13388 +                       drive-push-pull;
13389 +                       slew-rate = <3>;
13390 +               };
13391 +               pins3 {
13392                         pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */
13393                         bias-disable;
13394                 };
13395 @@ -1940,10 +1945,15 @@ pins2 {
13396         usart3_idle_pins_b: usart3-idle-1 {
13397                 pins1 {
13398                         pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
13399 -                                <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
13400                                  <STM32_PINMUX('I', 10, ANALOG)>; /* USART3_CTS_NSS */
13401                 };
13402                 pins2 {
13403 +                       pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
13404 +                       bias-disable;
13405 +                       drive-push-pull;
13406 +                       slew-rate = <0>;
13407 +               };
13408 +               pins3 {
13409                         pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
13410                         bias-disable;
13411                 };
13412 @@ -1976,10 +1986,15 @@ pins2 {
13413         usart3_idle_pins_c: usart3-idle-2 {
13414                 pins1 {
13415                         pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
13416 -                                <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
13417                                  <STM32_PINMUX('B', 13, ANALOG)>; /* USART3_CTS_NSS */
13418                 };
13419                 pins2 {
13420 +                       pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
13421 +                       bias-disable;
13422 +                       drive-push-pull;
13423 +                       slew-rate = <0>;
13424 +               };
13425 +               pins3 {
13426                         pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
13427                         bias-disable;
13428                 };
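All three hunks apply the same fix: in the UART "idle" pinctrl state, TX and CTS are parked as ANALOG to save power, but RTS must keep its alternate function (with its drive and slew settings) or hardware flow control glitches while the port is idle. RTS therefore moves out of the shared ANALOG group into its own pins2 entry, and RX becomes pins3. The split, sketched with hypothetical pin assignments:

	usartX_idle_pins: usartX-idle {
		pins1 {
			/* parked while idle */
			pinmux = <STM32_PINMUX('A', 0, ANALOG)>; /* TX */
		};
		pins2 {
			/* kept functional while idle */
			pinmux = <STM32_PINMUX('A', 1, AF7)>; /* RTS */
			bias-disable;
			drive-push-pull;
		};
	};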
13429 diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13430 index d3b99535d755..f9c0f6884cc1 100644
13431 --- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13432 +++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13433 @@ -448,7 +448,7 @@ touchscreen@4c {
13435                         reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_LOW>;
13437 -                       avdd-supply = <&vdd_3v3_sys>;
13438 +                       vdda-supply = <&vdd_3v3_sys>;
13439                         vdd-supply  = <&vdd_3v3_sys>;
13440                 };
13442 diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
13443 index b0b15c97306b..e81e5937a60a 100644
13444 --- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
13445 +++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
13446 @@ -583,7 +583,7 @@ eth: ethernet@65000000 {
13447                         clocks = <&sys_clk 6>;
13448                         reset-names = "ether";
13449                         resets = <&sys_rst 6>;
13450 -                       phy-mode = "rgmii";
13451 +                       phy-mode = "rgmii-id";
13452                         local-mac-address = [00 00 00 00 00 00];
13453                         socionext,syscon-phy-mode = <&soc_glue 0>;
13455 diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S
13456 index bed897e9a181..86345751bbf3 100644
13457 --- a/arch/arm/crypto/blake2s-core.S
13458 +++ b/arch/arm/crypto/blake2s-core.S
13459 @@ -8,6 +8,7 @@
13460   */
13462  #include <linux/linkage.h>
13463 +#include <asm/assembler.h>
13465         // Registers used to hold message words temporarily.  There aren't
13466         // enough ARM registers to hold the whole message block, so we have to
13467 @@ -38,6 +39,23 @@
13468  #endif
13469  .endm
13471 +.macro _le32_bswap     a, tmp
13472 +#ifdef __ARMEB__
13473 +       rev_l           \a, \tmp
13474 +#endif
13475 +.endm
13477 +.macro _le32_bswap_8x  a, b, c, d, e, f, g, h,  tmp
13478 +       _le32_bswap     \a, \tmp
13479 +       _le32_bswap     \b, \tmp
13480 +       _le32_bswap     \c, \tmp
13481 +       _le32_bswap     \d, \tmp
13482 +       _le32_bswap     \e, \tmp
13483 +       _le32_bswap     \f, \tmp
13484 +       _le32_bswap     \g, \tmp
13485 +       _le32_bswap     \h, \tmp
13486 +.endm
13488  // Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals.
13489  // (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two
13490  // columns/diagonals.  s0-s1 are the word offsets to the message words the first
13491 @@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch)
13492         tst             r1, #3
13493         bne             .Lcopy_block_misaligned
13494         ldmia           r1!, {r2-r9}
13495 +       _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
13496         stmia           r12!, {r2-r9}
13497         ldmia           r1!, {r2-r9}
13498 +       _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
13499         stmia           r12, {r2-r9}
13500  .Lcopy_block_done:
13501         str             r1, [sp, #68]           // Update message pointer
13502 @@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch)
13503  1:
13504  #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13505         ldr             r3, [r1], #4
13506 +       _le32_bswap     r3, r4
13507  #else
13508         ldrb            r3, [r1, #0]
13509         ldrb            r4, [r1, #1]
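The new _le32_bswap macros only do work on big-endian builds (__ARMEB__): BLAKE2s consumes its message as little-endian 32-bit words, so words loaded with ldmia/ldr on a big-endian CPU must be byte-reversed (via the rev_l helper from asm/assembler.h) before use. A C sketch of the equivalent load, assuming nothing beyond the standard headers:

	#include <stdint.h>

	/* Read a 32-bit little-endian message word regardless of host
	 * endianness -- what _le32_bswap achieves after an ldmia/ldr. */
	static inline uint32_t load_le32(const uint8_t *p)
	{
		return (uint32_t)p[0]        | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}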
13510 diff --git a/arch/arm/crypto/curve25519-core.S b/arch/arm/crypto/curve25519-core.S
13511 index be18af52e7dc..b697fa5d059a 100644
13512 --- a/arch/arm/crypto/curve25519-core.S
13513 +++ b/arch/arm/crypto/curve25519-core.S
13514 @@ -10,8 +10,8 @@
13515  #include <linux/linkage.h>
13517  .text
13518 -.fpu neon
13519  .arch armv7-a
13520 +.fpu neon
13521  .align 4
13523  ENTRY(curve25519_neon)
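The swap is load-bearing: the .arch directive resets the assembler's FPU selection to that architecture's default, so when .fpu neon comes first it is silently discarded, and toolchains whose default FPU lacks NEON then reject the NEON instructions that follow. Hence:

	.arch armv7-a   @ select the architecture first (this resets the FPU)
	.fpu neon       @ then opt in to NEON on top of it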
13524 diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
13525 index 3023c1acfa19..c31bd8f7c092 100644
13526 --- a/arch/arm/crypto/poly1305-glue.c
13527 +++ b/arch/arm/crypto/poly1305-glue.c
13528 @@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
13530  static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
13532 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
13533 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
13535         poly1305_init_arm(&dctx->h, key);
13536         dctx->s[0] = get_unaligned_le32(key + 16);
13537 diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
13538 index be8050b0c3df..70993af22d80 100644
13539 --- a/arch/arm/kernel/asm-offsets.c
13540 +++ b/arch/arm/kernel/asm-offsets.c
13541 @@ -24,6 +24,7 @@
13542  #include <asm/vdso_datapage.h>
13543  #include <asm/hardware/cache-l2x0.h>
13544  #include <linux/kbuild.h>
13545 +#include <linux/arm-smccc.h>
13546  #include "signal.h"
13548  /*
13549 @@ -148,6 +149,8 @@ int main(void)
13550    DEFINE(SLEEP_SAVE_SP_PHYS,   offsetof(struct sleep_save_sp, save_ptr_stash_phys));
13551    DEFINE(SLEEP_SAVE_SP_VIRT,   offsetof(struct sleep_save_sp, save_ptr_stash));
13552  #endif
13553 +  DEFINE(ARM_SMCCC_QUIRK_ID_OFFS,      offsetof(struct arm_smccc_quirk, id));
13554 +  DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS,   offsetof(struct arm_smccc_quirk, state));
13555    BLANK();
13556    DEFINE(DMA_BIDIRECTIONAL,    DMA_BIDIRECTIONAL);
13557    DEFINE(DMA_TO_DEVICE,                DMA_TO_DEVICE);
13558 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
13559 index 08660ae9dcbc..b1423fb130ea 100644
13560 --- a/arch/arm/kernel/hw_breakpoint.c
13561 +++ b/arch/arm/kernel/hw_breakpoint.c
13562 @@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
13563                         info->trigger = addr;
13564                         pr_debug("breakpoint fired: address = 0x%x\n", addr);
13565                         perf_bp_event(bp, regs);
13566 -                       if (!bp->overflow_handler)
13567 +                       if (is_default_overflow_handler(bp))
13568                                 enable_single_step(bp, addr);
13569                         goto unlock;
13570                 }
13571 diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
13572 index 00664c78faca..931df62a7831 100644
13573 --- a/arch/arm/kernel/smccc-call.S
13574 +++ b/arch/arm/kernel/smccc-call.S
13575 @@ -3,7 +3,9 @@
13576   * Copyright (c) 2015, Linaro Limited
13577   */
13578  #include <linux/linkage.h>
13579 +#include <linux/arm-smccc.h>
13581 +#include <asm/asm-offsets.h>
13582  #include <asm/opcodes-sec.h>
13583  #include <asm/opcodes-virt.h>
13584  #include <asm/unwind.h>
13585 @@ -27,7 +29,14 @@ UNWIND(      .fnstart)
13586  UNWIND(        .save   {r4-r7})
13587         ldm     r12, {r4-r7}
13588         \instr
13589 -       pop     {r4-r7}
13590 +       ldr     r4, [sp, #36]
13591 +       cmp     r4, #0
13592 +       beq     1f                      // No quirk structure
13593 +       ldr     r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS]
13594 +       cmp     r5, #ARM_SMCCC_QUIRK_QCOM_A6
13595 +       bne     1f                      // No quirk present
13596 +       str     r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS]
13597 +1:     pop     {r4-r7}
13598         ldr     r12, [sp, #(4 * 4)]
13599         stm     r12, {r0-r3}
13600         bx      lr
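This restores quirk handling to the 32-bit SMCCC stub: after the SMC/HVC instruction, the stub checks the quirk pointer the caller passed on the stack and, for ARM_SMCCC_QUIRK_QCOM_A6, writes the firmware-clobbered r6 state back into the quirk structure. A C-side sketch of how a caller supplies the quirk, using the prototype declared in linux/arm-smccc.h (the function ID is a placeholder):

	#include <linux/arm-smccc.h>

	static void example_smc_call(void)
	{
		struct arm_smccc_res res;
		struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };

		/* 0x42 is an illustrative function ID; the stub saves the
		 * clobbered register state into quirk.state */
		__arm_smccc_smc(0x42, 0, 0, 0, 0, 0, 0, 0, &res, &quirk);
	}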
13601 diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
13602 index 24bd20564be7..43f0a3ebf390 100644
13603 --- a/arch/arm/kernel/suspend.c
13604 +++ b/arch/arm/kernel/suspend.c
13605 @@ -1,4 +1,5 @@
13606  // SPDX-License-Identifier: GPL-2.0
13607 +#include <linux/ftrace.h>
13608  #include <linux/init.h>
13609  #include <linux/slab.h>
13610  #include <linux/mm_types.h>
13611 @@ -25,6 +26,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13612         if (!idmap_pgd)
13613                 return -EINVAL;
13615 +       /*
13616 +        * Function graph tracer state gets inconsistent when the kernel
13617 +        * calls functions that never return (aka suspend finishers) hence
13618 +        * disable graph tracing during their execution.
13619 +        */
13620 +       pause_graph_tracing();
13622         /*
13623          * Provide a temporary page table with an identity mapping for
13624          * the MMU-enable code, required for resuming.  On successful
13625 @@ -32,6 +40,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13626          * back to the correct page tables.
13627          */
13628         ret = __cpu_suspend(arg, fn, __mpidr);
13630 +       unpause_graph_tracing();
13632         if (ret == 0) {
13633                 cpu_switch_mm(mm->pgd, mm);
13634                 local_flush_bp_all();
13635 @@ -45,7 +56,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13636  int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13638         u32 __mpidr = cpu_logical_map(smp_processor_id());
13639 -       return __cpu_suspend(arg, fn, __mpidr);
13640 +       int ret;
13642 +       pause_graph_tracing();
13643 +       ret = __cpu_suspend(arg, fn, __mpidr);
13644 +       unpause_graph_tracing();
13646 +       return ret;
13648  #define        idmap_pgd       NULL
13649  #endif
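The rationale in the comment applies to both variants: the function graph tracer records each traced call on a per-task return stack and patches the return address, and a suspend finisher enters that machinery but resumes through cpu_resume rather than returning, leaving the stack inconsistent. The same bracketing works for any such path; a sketch with a hypothetical finisher:

	#include <linux/ftrace.h>

	/* finisher() may exit "sideways" (e.g. resume elsewhere) instead
	 * of returning through the traced call chain, so graph tracing is
	 * paused around it. */
	static int call_finisher_untracked(int (*finisher)(unsigned long),
					   unsigned long arg)
	{
		int ret;

		pause_graph_tracing();
		ret = finisher(arg);
		unpause_graph_tracing();
		return ret;
	}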
13650 diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
13651 index dcc1191291a2..24a700535747 100644
13652 --- a/arch/arm/tools/syscall.tbl
13653 +++ b/arch/arm/tools/syscall.tbl
13654 @@ -456,3 +456,7 @@
13655  440    common  process_madvise                 sys_process_madvise
13656  441    common  epoll_pwait2                    sys_epoll_pwait2
13657  442    common  mount_setattr                   sys_mount_setattr
13658 +443    common  futex_wait                      sys_futex_wait
13659 +444    common  futex_wake                      sys_futex_wake
13660 +445    common  futex_waitv                     sys_futex_waitv
13661 +446    common  futex_requeue                   sys_futex_requeue
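These four entries assign ARM syscall numbers 443-446 to the out-of-tree futex2 calls carried by this patch set. Glibc has no wrappers for them, so userspace invokes them through syscall(2) by number; the sketch below assumes the futex2 series' argument order (uaddr, value, flags, timeout), which is an assumption here and not an upstream ABI:

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <stdint.h>
	#include <stddef.h>

	#define NR_futex_wait 443   /* from the table above */

	/* Block until *uaddr no longer equals val (futex2 semantics assumed). */
	static long futex2_wait(uint32_t *uaddr, uint32_t val, unsigned int flags)
	{
		return syscall(NR_futex_wait, uaddr, val, flags, NULL);
	}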
13662 diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13663 index 6e4ad66ff536..8d5d368dbe90 100644
13664 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13665 +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13666 @@ -65,6 +65,7 @@ port@3 {
13667         port@7 {
13668                 label = "sw";
13669                 reg = <7>;
13670 +               phy-mode = "rgmii";
13672                 fixed-link {
13673                         speed = <1000>;
13674 diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13675 index 9354077f74cd..9e799328c6db 100644
13676 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13677 +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13678 @@ -131,7 +131,7 @@ usb@d000 {
13679                         status = "disabled";
13680                 };
13682 -               ethernet-switch@80000 {
13683 +               bus@80000 {
13684                         compatible = "simple-bus";
13685                         #size-cells = <1>;
13686                         #address-cells = <1>;
13687 diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13688 index 0d38327043f8..cd3c3edd48fa 100644
13689 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13690 +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13691 @@ -28,6 +28,10 @@ &bq25895 {
13692         ti,termination-current = <144000>;  /* uA */
13693  };
13695 +&buck3_reg {
13696 +       regulator-always-on;
13697 +};
13699  &proximity {
13700         proximity-near-level = <25>;
13701  };
13702 diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13703 index 7a2df148c6a3..456dcd4a7793 100644
13704 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13705 +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13706 @@ -156,7 +156,8 @@ uart1: serial@12200 {
13707                         };
13709                         nb_periph_clk: nb-periph-clk@13000 {
13710 -                               compatible = "marvell,armada-3700-periph-clock-nb";
13711 +                               compatible = "marvell,armada-3700-periph-clock-nb",
13712 +                                            "syscon";
13713                                 reg = <0x13000 0x100>;
13714                                 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
13715                                 <&tbg 3>, <&xtalclk>;
13716 diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13717 index 6dffada2e66b..28aa634c9780 100644
13718 --- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13719 +++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13720 @@ -294,7 +294,7 @@ &pwm0 {
13722  &pwrap {
13723         /* Only MT8173 E1 needs USB power domain */
13724 -       power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
13725 +       power-domains = <&spm MT8173_POWER_DOMAIN_USB>;
13727         pmic: mt6397 {
13728                 compatible = "mediatek,mt6397";
13729 diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13730 index 7fa870e4386a..ecb37a7e6870 100644
13731 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13732 +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13733 @@ -1235,7 +1235,7 @@ dsi1: dsi@1401c000 {
13734                                  <&mmsys CLK_MM_DSI1_DIGITAL>,
13735                                  <&mipi_tx1>;
13736                         clock-names = "engine", "digital", "hs";
13737 -                       phy = <&mipi_tx1>;
13738 +                       phys = <&mipi_tx1>;
13739                         phy-names = "dphy";
13740                         status = "disabled";
13741                 };
13742 diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13743 index 80519a145f13..16f4b1fc0fb9 100644
13744 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13745 +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13746 @@ -983,6 +983,9 @@ mmsys: syscon@14000000 {
13747                         compatible = "mediatek,mt8183-mmsys", "syscon";
13748                         reg = <0 0x14000000 0 0x1000>;
13749                         #clock-cells = <1>;
13750 +                       mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>,
13751 +                                <&gce 1 CMDQ_THR_PRIO_HIGHEST>;
13752 +                       mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
13753                 };
13755                 ovl0: ovl@14008000 {
13756 @@ -1058,6 +1061,7 @@ ccorr0: ccorr@1400f000 {
13757                         interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_LOW>;
13758                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13759                         clocks = <&mmsys CLK_MM_DISP_CCORR0>;
13760 +                       mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xf000 0x1000>;
13761                 };
13763                 aal0: aal@14010000 {
13764 @@ -1067,6 +1071,7 @@ aal0: aal@14010000 {
13765                         interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_LOW>;
13766                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13767                         clocks = <&mmsys CLK_MM_DISP_AAL0>;
13768 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0 0x1000>;
13769                 };
13771                 gamma0: gamma@14011000 {
13772 @@ -1075,6 +1080,7 @@ gamma0: gamma@14011000 {
13773                         interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_LOW>;
13774                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13775                         clocks = <&mmsys CLK_MM_DISP_GAMMA0>;
13776 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x1000 0x1000>;
13777                 };
13779                 dither0: dither@14012000 {
13780 @@ -1083,6 +1089,7 @@ dither0: dither@14012000 {
13781                         interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_LOW>;
13782                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13783                         clocks = <&mmsys CLK_MM_DISP_DITHER0>;
13784 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x2000 0x1000>;
13785                 };
13787                 dsi0: dsi@14014000 {
13788 diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13789 index 63fd70086bb8..9f27e7ed5e22 100644
13790 --- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13791 +++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13792 @@ -56,7 +56,7 @@ &i2c0 {
13793         tca6416: gpio@20 {
13794                 compatible = "ti,tca6416";
13795                 reg = <0x20>;
13796 -               reset-gpios = <&pio 65 GPIO_ACTIVE_HIGH>;
13797 +               reset-gpios = <&pio 65 GPIO_ACTIVE_LOW>;
13798                 pinctrl-names = "default";
13799                 pinctrl-0 = <&tca6416_pins>;
13801 diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13802 index 07c8b2c926c0..b8f7cf5cbdab 100644
13803 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13804 +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13805 @@ -22,9 +22,11 @@ charger-thermal {
13806                         thermal-sensors = <&pm6150_adc_tm 1>;
13808                         trips {
13809 -                               temperature = <125000>;
13810 -                               hysteresis = <1000>;
13811 -                               type = "critical";
13812 +                               charger-crit {
13813 +                                       temperature = <125000>;
13814 +                                       hysteresis = <1000>;
13815 +                                       type = "critical";
13816 +                               };
13817                         };
13818                 };
13819         };
13820 @@ -768,17 +770,17 @@ &sdhc_2 {
13821  };
13823  &spi0 {
13824 -       pinctrl-0 = <&qup_spi0_cs_gpio>;
13825 +       pinctrl-0 = <&qup_spi0_cs_gpio_init_high>, <&qup_spi0_cs_gpio>;
13826         cs-gpios = <&tlmm 37 GPIO_ACTIVE_LOW>;
13827  };
13829  &spi6 {
13830 -       pinctrl-0 = <&qup_spi6_cs_gpio>;
13831 +       pinctrl-0 = <&qup_spi6_cs_gpio_init_high>, <&qup_spi6_cs_gpio>;
13832         cs-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>;
13833  };
13835  ap_spi_fp: &spi10 {
13836 -       pinctrl-0 = <&qup_spi10_cs_gpio>;
13837 +       pinctrl-0 = <&qup_spi10_cs_gpio_init_high>, <&qup_spi10_cs_gpio>;
13838         cs-gpios = <&tlmm 89 GPIO_ACTIVE_LOW>;
13840         cros_ec_fp: ec@0 {
13841 @@ -1339,6 +1341,27 @@ pinconf {
13842                 };
13843         };
13845 +       qup_spi0_cs_gpio_init_high: qup-spi0-cs-gpio-init-high {
13846 +               pinconf {
13847 +                       pins = "gpio37";
13848 +                       output-high;
13849 +               };
13850 +       };
13852 +       qup_spi6_cs_gpio_init_high: qup-spi6-cs-gpio-init-high {
13853 +               pinconf {
13854 +                       pins = "gpio62";
13855 +                       output-high;
13856 +               };
13857 +       };
13859 +       qup_spi10_cs_gpio_init_high: qup-spi10-cs-gpio-init-high {
13860 +               pinconf {
13861 +                       pins = "gpio89";
13862 +                       output-high;
13863 +               };
13864 +       };
13866         qup_uart3_sleep: qup-uart3-sleep {
13867                 pinmux {
13868                         pins = "gpio38", "gpio39",
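The trick in the spi0/spi6/spi10 changes above is that the phandles in a pinctrl-0 list are applied in order: the new *-init-high entry first drives the chip-select pad high (deasserted), and only then does the ordinary cs-gpio config hand the pad over as a GPIO, so the line cannot glitch low and fake a chip select during probe. The ordering idiom, with illustrative labels:

	&spiN {
		/* applied left to right: park CS high, then configure it */
		pinctrl-0 = <&spiN_cs_init_high>, <&spiN_cs_gpio>;
		cs-gpios = <&tlmm 37 GPIO_ACTIVE_LOW>;
	};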
13869 diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13870 index c4ac6f5dc008..96d36b38f269 100644
13871 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13872 +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13873 @@ -1015,7 +1015,7 @@ swm: swm@c85 {
13874                 left_spkr: wsa8810-left{
13875                         compatible = "sdw10217201000";
13876                         reg = <0 1>;
13877 -                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
13878 +                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
13879                         #thermal-sensor-cells = <0>;
13880                         sound-name-prefix = "SpkrLeft";
13881                         #sound-dai-cells = <0>;
13882 @@ -1023,7 +1023,7 @@ left_spkr: wsa8810-left{
13884                 right_spkr: wsa8810-right{
13885                         compatible = "sdw10217201000";
13886 -                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
13887 +                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
13888                         reg = <0 2>;
13889                         #thermal-sensor-cells = <0>;
13890                         sound-name-prefix = "SpkrRight";
13891 diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
13892 index 454f794af547..6a2ed02d383d 100644
13893 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
13894 +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
13895 @@ -2382,7 +2382,7 @@ tlmm: pinctrl@3400000 {
13896                         #gpio-cells = <2>;
13897                         interrupt-controller;
13898                         #interrupt-cells = <2>;
13899 -                       gpio-ranges = <&tlmm 0 0 150>;
13900 +                       gpio-ranges = <&tlmm 0 0 151>;
13901                         wakeup-parent = <&pdc_intc>;
13903                         cci0_default: cci0-default {
13904 diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
13905 index e5bb17bc2f46..778613d3410b 100644
13906 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
13907 +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
13908 @@ -914,7 +914,7 @@ tlmm: pinctrl@3100000 {
13909                               <0x0 0x03D00000 0x0 0x300000>;
13910                         reg-names = "west", "east", "north", "south";
13911                         interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
13912 -                       gpio-ranges = <&tlmm 0 0 175>;
13913 +                       gpio-ranges = <&tlmm 0 0 176>;
13914                         gpio-controller;
13915                         #gpio-cells = <2>;
13916                         interrupt-controller;
13917 diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
13918 index 947e1accae3a..46a6c18cea91 100644
13919 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
13920 +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
13921 @@ -279,7 +279,7 @@ mmcx_reg: mmcx-reg {
13923         pmu {
13924                 compatible = "arm,armv8-pmuv3";
13925 -               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
13926 +               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
13927         };
13929         psci {
13930 @@ -2327,10 +2327,9 @@ mdss: mdss@ae00000 {
13931                         reg = <0 0x0ae00000 0 0x1000>;
13932                         reg-names = "mdss";
13934 -                       interconnects = <&gem_noc MASTER_AMPSS_M0 &config_noc SLAVE_DISPLAY_CFG>,
13935 -                                       <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
13936 +                       interconnects = <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
13937                                         <&mmss_noc MASTER_MDP_PORT1 &mc_virt SLAVE_EBI_CH0>;
13938 -                       interconnect-names = "notused", "mdp0-mem", "mdp1-mem";
13939 +                       interconnect-names = "mdp0-mem", "mdp1-mem";
13941                         power-domains = <&dispcc MDSS_GDSC>;
13943 @@ -2580,7 +2579,7 @@ opp-358000000 {
13945                 dispcc: clock-controller@af00000 {
13946                         compatible = "qcom,sm8250-dispcc";
13947 -                       reg = <0 0x0af00000 0 0x20000>;
13948 +                       reg = <0 0x0af00000 0 0x10000>;
13949                         mmcx-supply = <&mmcx_reg>;
13950                         clocks = <&rpmhcc RPMH_CXO_CLK>,
13951                                  <&dsi0_phy 0>,
13952 @@ -2588,28 +2587,14 @@ dispcc: clock-controller@af00000 {
13953                                  <&dsi1_phy 0>,
13954                                  <&dsi1_phy 1>,
13955                                  <0>,
13956 -                                <0>,
13957 -                                <0>,
13958 -                                <0>,
13959 -                                <0>,
13960 -                                <0>,
13961 -                                <0>,
13962 -                                <0>,
13963 -                                <&sleep_clk>;
13964 +                                <0>;
13965                         clock-names = "bi_tcxo",
13966                                       "dsi0_phy_pll_out_byteclk",
13967                                       "dsi0_phy_pll_out_dsiclk",
13968                                       "dsi1_phy_pll_out_byteclk",
13969                                       "dsi1_phy_pll_out_dsiclk",
13970 -                                     "dp_link_clk_divsel_ten",
13971 -                                     "dp_vco_divided_clk_src_mux",
13972 -                                     "dptx1_phy_pll_link_clk",
13973 -                                     "dptx1_phy_pll_vco_div_clk",
13974 -                                     "dptx2_phy_pll_link_clk",
13975 -                                     "dptx2_phy_pll_vco_div_clk",
13976 -                                     "edp_phy_pll_link_clk",
13977 -                                     "edp_phy_pll_vco_div_clk",
13978 -                                     "sleep_clk";
13979 +                                     "dp_phy_pll_link_clk",
13980 +                                     "dp_phy_pll_vco_div_clk";
13981                         #clock-cells = <1>;
13982                         #reset-cells = <1>;
13983                         #power-domain-cells = <1>;
13984 @@ -2689,7 +2674,7 @@ tlmm: pinctrl@f100000 {
13985                         #gpio-cells = <2>;
13986                         interrupt-controller;
13987                         #interrupt-cells = <2>;
13988 -                       gpio-ranges = <&tlmm 0 0 180>;
13989 +                       gpio-ranges = <&tlmm 0 0 181>;
13990                         wakeup-parent = <&pdc>;
13992                         pri_mi2s_active: pri-mi2s-active {
13993 @@ -3754,7 +3739,7 @@ timer {
13994                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
13995                              <GIC_PPI 11
13996                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
13997 -                            <GIC_PPI 12
13998 +                            <GIC_PPI 10
13999                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
14000         };
14002 diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
14003 index 5ef460458f5c..e2fca420e518 100644
14004 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
14005 +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
14006 @@ -153,7 +153,7 @@ memory@80000000 {
14008         pmu {
14009                 compatible = "arm,armv8-pmuv3";
14010 -               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
14011 +               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
14012         };
14014         psci {
14015 @@ -382,7 +382,7 @@ tlmm: pinctrl@f100000 {
14016                         #gpio-cells = <2>;
14017                         interrupt-controller;
14018                         #interrupt-cells = <2>;
14019 -                       gpio-ranges = <&tlmm 0 0 203>;
14020 +                       gpio-ranges = <&tlmm 0 0 204>;
14022                         qup_uart3_default_state: qup-uart3-default-state {
14023                                 rx {
14024 diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
14025 index 7a3da9b06f67..0c7e6f790590 100644
14026 --- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
14027 +++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
14028 @@ -12,6 +12,9 @@ / {
14029         aliases {
14030                 serial0 = &scif2;
14031                 serial1 = &hscif0;
14032 +               mmc0 = &sdhi3;
14033 +               mmc1 = &sdhi0;
14034 +               mmc2 = &sdhi2;
14035         };
14037         chosen {
14038 diff --git a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
14039 index 501cb05da228..3cf2e076940f 100644
14040 --- a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
14041 +++ b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
14042 @@ -21,6 +21,9 @@ aliases {
14043                 serial4 = &hscif2;
14044                 serial5 = &scif5;
14045                 ethernet0 = &avb;
14046 +               mmc0 = &sdhi3;
14047 +               mmc1 = &sdhi0;
14048 +               mmc2 = &sdhi2;
14049         };
14051         chosen {
14052 diff --git a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
14053 index 71763f4402a7..3c0d59def8ee 100644
14054 --- a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
14055 +++ b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
14056 @@ -22,6 +22,9 @@ aliases {
14057                 serial5 = &scif5;
14058                 serial6 = &scif4;
14059                 ethernet0 = &avb;
14060 +               mmc0 = &sdhi3;
14061 +               mmc1 = &sdhi0;
14062 +               mmc2 = &sdhi2;
14063         };
14065         chosen {
14066 diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
14067 index ea87cb5a459c..33257c6440b2 100644
14068 --- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
14069 +++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
14070 @@ -17,6 +17,8 @@ / {
14071         aliases {
14072                 serial0 = &scif2;
14073                 serial1 = &hscif2;
14074 +               mmc0 = &sdhi0;
14075 +               mmc1 = &sdhi3;
14076         };
14078         chosen {
14079 diff --git a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
14080 index 273f062f2909..7b6649a3ded0 100644
14081 --- a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
14082 +++ b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
14083 @@ -22,6 +22,9 @@ aliases {
14084                 serial5 = &scif5;
14085                 serial6 = &scif4;
14086                 ethernet0 = &avb;
14087 +               mmc0 = &sdhi3;
14088 +               mmc1 = &sdhi0;
14089 +               mmc2 = &sdhi2;
14090         };
14092         chosen {
14093 diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
14094 index ec7ca72399ec..1ffa4a995a7a 100644
14095 --- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
14096 +++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
14097 @@ -992,8 +992,8 @@ port@1 {
14099                                         reg = <1>;
14101 -                                       vin4csi41: endpoint@2 {
14102 -                                               reg = <2>;
14103 +                                       vin4csi41: endpoint@3 {
14104 +                                               reg = <3>;
14105                                                 remote-endpoint = <&csi41vin4>;
14106                                         };
14107                                 };
14108 @@ -1020,8 +1020,8 @@ port@1 {
14110                                         reg = <1>;
14112 -                                       vin5csi41: endpoint@2 {
14113 -                                               reg = <2>;
14114 +                                       vin5csi41: endpoint@3 {
14115 +                                               reg = <3>;
14116                                                 remote-endpoint = <&csi41vin5>;
14117                                         };
14118                                 };
14119 @@ -1048,8 +1048,8 @@ port@1 {
14121                                         reg = <1>;
14123 -                                       vin6csi41: endpoint@2 {
14124 -                                               reg = <2>;
14125 +                                       vin6csi41: endpoint@3 {
14126 +                                               reg = <3>;
14127                                                 remote-endpoint = <&csi41vin6>;
14128                                         };
14129                                 };
14130 @@ -1076,8 +1076,8 @@ port@1 {
14132                                         reg = <1>;
14134 -                                       vin7csi41: endpoint@2 {
14135 -                                               reg = <2>;
14136 +                                       vin7csi41: endpoint@3 {
14137 +                                               reg = <3>;
14138                                                 remote-endpoint = <&csi41vin7>;
14139                                         };
14140                                 };
14141 diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
14142 index f74f8b9993f1..6d6cdc4c324b 100644
14143 --- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
14144 +++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
14145 @@ -16,6 +16,9 @@ / {
14146         aliases {
14147                 serial0 = &scif2;
14148                 ethernet0 = &avb;
14149 +               mmc0 = &sdhi3;
14150 +               mmc1 = &sdhi0;
14151 +               mmc2 = &sdhi1;
14152         };
14154         chosen {
14155 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
14156 index fa284a7260d6..e202e8aa6941 100644
14157 --- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
14158 +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
14159 @@ -12,6 +12,14 @@ / {
14160         model = "Renesas Falcon CPU board";
14161         compatible = "renesas,falcon-cpu", "renesas,r8a779a0";
14163 +       aliases {
14164 +               serial0 = &scif0;
14165 +       };
14167 +       chosen {
14168 +               stdout-path = "serial0:115200n8";
14169 +       };
14171         memory@48000000 {
14172                 device_type = "memory";
14173                 /* first 128MB is reserved for secure area. */
14174 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
14175 index 5617b81dd7dc..273857ae38f3 100644
14176 --- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
14177 +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
14178 @@ -14,11 +14,6 @@ / {
14180         aliases {
14181                 ethernet0 = &avb0;
14182 -               serial0 = &scif0;
14183 -       };
14185 -       chosen {
14186 -               stdout-path = "serial0:115200n8";
14187         };
14188  };
14190 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
14191 index dfd6ae8b564f..86ac48e2c849 100644
14192 --- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
14193 +++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
14194 @@ -60,10 +60,7 @@ extalr_clk: extalr {
14196         pmu_a76 {
14197                 compatible = "arm,cortex-a76-pmu";
14198 -               interrupts-extended = <&gic GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
14199 -                                     <&gic GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
14200 -                                     <&gic GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
14201 -                                     <&gic GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
14202 +               interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
14203         };
14205         /* External SCIF clock - to be overridden by boards that provide it */
14206 diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
14207 index c22bb38994e8..15bb1eeb6601 100644
14208 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
14209 +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
14210 @@ -36,6 +36,9 @@ aliases {
14211                 serial0 = &scif2;
14212                 serial1 = &hscif1;
14213                 ethernet0 = &avb;
14214 +               mmc0 = &sdhi2;
14215 +               mmc1 = &sdhi0;
14216 +               mmc2 = &sdhi3;
14217         };
14219         chosen {
14220 diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14221 index e9ed2597f1c2..61bd4df09df0 100644
14222 --- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14223 +++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14224 @@ -16,6 +16,7 @@ / {
14225         aliases {
14226                 serial1 = &hscif0;
14227                 serial2 = &scif1;
14228 +               mmc2 = &sdhi3;
14229         };
14231         clksndsel: clksndsel {
14232 diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
14233 index a04eae55dd6c..3d88e95c65a5 100644
14234 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
14235 +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
14236 @@ -23,6 +23,8 @@ / {
14237         aliases {
14238                 serial0 = &scif2;
14239                 ethernet0 = &avb;
14240 +               mmc0 = &sdhi2;
14241 +               mmc1 = &sdhi0;
14242         };
14244         chosen {
14245 diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14246 index a87b8a678719..8f2c1c1e2c64 100644
14247 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14248 +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14249 @@ -734,7 +734,7 @@ eth: ethernet@65000000 {
14250                         clocks = <&sys_clk 6>;
14251                         reset-names = "ether";
14252                         resets = <&sys_rst 6>;
14253 -                       phy-mode = "rgmii";
14254 +                       phy-mode = "rgmii-id";
14255                         local-mac-address = [00 00 00 00 00 00];
14256                         socionext,syscon-phy-mode = <&soc_glue 0>;
14258 diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14259 index 0e52dadf54b3..be97da132258 100644
14260 --- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14261 +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14262 @@ -564,7 +564,7 @@ eth0: ethernet@65000000 {
14263                         clocks = <&sys_clk 6>;
14264                         reset-names = "ether";
14265                         resets = <&sys_rst 6>;
14266 -                       phy-mode = "rgmii";
14267 +                       phy-mode = "rgmii-id";
14268                         local-mac-address = [00 00 00 00 00 00];
14269                         socionext,syscon-phy-mode = <&soc_glue 0>;
14271 @@ -585,7 +585,7 @@ eth1: ethernet@65200000 {
14272                         clocks = <&sys_clk 7>;
14273                         reset-names = "ether";
14274                         resets = <&sys_rst 7>;
14275 -                       phy-mode = "rgmii";
14276 +                       phy-mode = "rgmii-id";
14277                         local-mac-address = [00 00 00 00 00 00];
14278                         socionext,syscon-phy-mode = <&soc_glue 1>;
14280 diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14281 index 8c84dafb7125..f1e7da3dfa27 100644
14282 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14283 +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14284 @@ -1042,13 +1042,16 @@ main_sdhci0: mmc@4f80000 {
14285                 assigned-clocks = <&k3_clks 91 1>;
14286                 assigned-clock-parents = <&k3_clks 91 2>;
14287                 bus-width = <8>;
14288 -               mmc-hs400-1_8v;
14289 +               mmc-hs200-1_8v;
14290                 mmc-ddr-1_8v;
14291                 ti,otap-del-sel-legacy = <0xf>;
14292                 ti,otap-del-sel-mmc-hs = <0xf>;
14293                 ti,otap-del-sel-ddr52 = <0x5>;
14294                 ti,otap-del-sel-hs200 = <0x6>;
14295                 ti,otap-del-sel-hs400 = <0x0>;
14296 +               ti,itap-del-sel-legacy = <0x10>;
14297 +               ti,itap-del-sel-mmc-hs = <0xa>;
14298 +               ti,itap-del-sel-ddr52 = <0x3>;
14299                 ti,trm-icp = <0x8>;
14300                 ti,strobe-sel = <0x77>;
14301                 dma-coherent;
14302 @@ -1069,9 +1072,15 @@ main_sdhci1: mmc@4fb0000 {
14303                 ti,otap-del-sel-sdr25 = <0xf>;
14304                 ti,otap-del-sel-sdr50 = <0xc>;
14305                 ti,otap-del-sel-ddr50 = <0xc>;
14306 +               ti,itap-del-sel-legacy = <0x0>;
14307 +               ti,itap-del-sel-sd-hs = <0x0>;
14308 +               ti,itap-del-sel-sdr12 = <0x0>;
14309 +               ti,itap-del-sel-sdr25 = <0x0>;
14310 +               ti,itap-del-sel-ddr50 = <0x2>;
14311                 ti,trm-icp = <0x8>;
14312                 ti,clkbuf-sel = <0x7>;
14313                 dma-coherent;
14314 +               sdhci-caps-mask = <0x2 0x0>;
14315         };
14317         main_sdhci2: mmc@4f98000 {
14318 @@ -1089,9 +1098,15 @@ main_sdhci2: mmc@4f98000 {
14319                 ti,otap-del-sel-sdr25 = <0xf>;
14320                 ti,otap-del-sel-sdr50 = <0xc>;
14321                 ti,otap-del-sel-ddr50 = <0xc>;
14322 +               ti,itap-del-sel-legacy = <0x0>;
14323 +               ti,itap-del-sel-sd-hs = <0x0>;
14324 +               ti,itap-del-sel-sdr12 = <0x0>;
14325 +               ti,itap-del-sel-sdr25 = <0x0>;
14326 +               ti,itap-del-sel-ddr50 = <0x2>;
14327                 ti,trm-icp = <0x8>;
14328                 ti,clkbuf-sel = <0x7>;
14329                 dma-coherent;
14330 +               sdhci-caps-mask = <0x2 0x0>;
14331         };
14333         usbss0: cdns-usb@4104000 {
14334 diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
14335 index bbdb54702aa7..247011356d11 100644
14336 --- a/arch/arm64/crypto/aes-modes.S
14337 +++ b/arch/arm64/crypto/aes-modes.S
14338 @@ -359,6 +359,7 @@ ST5(        mov             v4.16b, vctr.16b                )
14339         ins             vctr.d[0], x8
14341         /* apply carry to N counter blocks for N := x12 */
14342 +       cbz             x12, 2f
14343         adr             x16, 1f
14344         sub             x16, x16, x12, lsl #3
14345         br              x16
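The added cbz guards the computed jump: when x12 (the number of counter blocks still needing the carry) is zero, deriving a branch target from it would land in the wrong place, so the code now bails out to label 2 straight away. A minimal C sketch of the invariant, assuming a simplified counter layout (the kernel keeps big-endian 128-bit counters; this model only shows the zero-count no-op):

#include <stdint.h>

/* Sketch: apply a pending +1 carry to the low word of n remaining
 * CTR blocks; n == 0 must do nothing -- the case "cbz x12, 2f" adds. */
static void ctr_apply_carry(uint64_t ctr_lo[], unsigned int n)
{
        for (unsigned int i = 0; i < n; i++)
                ctr_lo[i] += 1;
}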
14346 diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
14347 index 683de671741a..9c3d86e397bf 100644
14348 --- a/arch/arm64/crypto/poly1305-glue.c
14349 +++ b/arch/arm64/crypto/poly1305-glue.c
14350 @@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
14352  static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
14354 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
14355 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
14357         poly1305_init_arm64(&dctx->h, key);
14358         dctx->s[0] = get_unaligned_le32(key + 16);
14359 diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
14360 index 1c26d7baa67f..cfdde3a56805 100644
14361 --- a/arch/arm64/include/asm/daifflags.h
14362 +++ b/arch/arm64/include/asm/daifflags.h
14363 @@ -131,6 +131,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
14364         if (interrupts_enabled(regs))
14365                 trace_hardirqs_on();
14367 +       if (system_uses_irq_prio_masking())
14368 +               gic_write_pmr(regs->pmr_save);
14370         /*
14371          * We can't use local_daif_restore(regs->pstate) here as
14372          * system_has_prio_mask_debugging() won't restore the I bit if it can
14373 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
14374 index 3d10e6527f7d..858c2fcfc043 100644
14375 --- a/arch/arm64/include/asm/kvm_host.h
14376 +++ b/arch/arm64/include/asm/kvm_host.h
14377 @@ -713,6 +713,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
14378  static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
14380  void kvm_arm_init_debug(void);
14381 +void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
14382  void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
14383  void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
14384  void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
14385 diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
14386 index 949788f5ba40..727bfc3be99b 100644
14387 --- a/arch/arm64/include/asm/unistd.h
14388 +++ b/arch/arm64/include/asm/unistd.h
14389 @@ -38,7 +38,7 @@
14390  #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE + 5)
14391  #define __ARM_NR_COMPAT_END            (__ARM_NR_COMPAT_BASE + 0x800)
14393 -#define __NR_compat_syscalls           443
14394 +#define __NR_compat_syscalls           447
14395  #endif
14397  #define __ARCH_WANT_SYS_CLONE
14398 diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
14399 index 3d874f624056..e5015a2b9c94 100644
14400 --- a/arch/arm64/include/asm/unistd32.h
14401 +++ b/arch/arm64/include/asm/unistd32.h
14402 @@ -893,6 +893,14 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
14403  __SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
14404  #define __NR_mount_setattr 442
14405  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
14406 +#define __NR_futex_wait 443
14407 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
14408 +#define __NR_futex_wake 444
14409 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
14410 +#define __NR_futex_waitv 445
14411 +__SYSCALL(__NR_futex_waitv, compat_sys_futex_waitv)
14412 +#define __NR_futex_requeue 446
14413 +__SYSCALL(__NR_futex_requeue, compat_sys_futex_requeue)
14415  /*
14416   * Please add new compat syscalls above this comment and update
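With the requeue define corrected above, the compat table gains four entries (443-446) and __NR_compat_syscalls moves to 447, one past the last number. A hedged userspace sketch of driving one of the new calls by number; the number comes from the table, but the argument convention is an assumption carried over from the futex2 patch series, not something this hunk defines:

#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#define NR_FUTEX_WAKE_COMPAT 444     /* from the table above */

/* Hypothetical wrapper: wake up to nr waiters on a 32-bit futex word. */
static long futex2_wake(uint32_t *uaddr, unsigned int nr, unsigned int flags)
{
        return syscall(NR_FUTEX_WAKE_COMPAT, uaddr, nr, flags);
}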
14417 diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
14418 index 9d3588450473..117412bae915 100644
14419 --- a/arch/arm64/kernel/entry-common.c
14420 +++ b/arch/arm64/kernel/entry-common.c
14421 @@ -226,14 +226,6 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
14423         unsigned long far = read_sysreg(far_el1);
14425 -       /*
14426 -        * The CPU masked interrupts, and we are leaving them masked during
14427 -        * do_debug_exception(). Update PMR as if we had called
14428 -        * local_daif_mask().
14429 -        */
14430 -       if (system_uses_irq_prio_masking())
14431 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14433         arm64_enter_el1_dbg(regs);
14434         if (!cortex_a76_erratum_1463225_debug_handler(regs))
14435                 do_debug_exception(far, esr, regs);
14436 @@ -398,9 +390,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
14437         /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
14438         unsigned long far = read_sysreg(far_el1);
14440 -       if (system_uses_irq_prio_masking())
14441 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14443         enter_from_user_mode();
14444         do_debug_exception(far, esr, regs);
14445         local_daif_restore(DAIF_PROCCTX_NOIRQ);
14446 @@ -408,9 +397,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
14448  static void noinstr el0_svc(struct pt_regs *regs)
14450 -       if (system_uses_irq_prio_masking())
14451 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14453         enter_from_user_mode();
14454         cortex_a76_erratum_1463225_svc_handler();
14455         do_el0_svc(regs);
14456 @@ -486,9 +472,6 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
14458  static void noinstr el0_svc_compat(struct pt_regs *regs)
14460 -       if (system_uses_irq_prio_masking())
14461 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14463         enter_from_user_mode();
14464         cortex_a76_erratum_1463225_svc_handler();
14465         do_el0_svc_compat(regs);
14466 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
14467 index 6acfc5e6b5e0..e03fba3ae2a0 100644
14468 --- a/arch/arm64/kernel/entry.S
14469 +++ b/arch/arm64/kernel/entry.S
14470 @@ -263,16 +263,16 @@ alternative_else_nop_endif
14471         stp     lr, x21, [sp, #S_LR]
14473         /*
14474 -        * For exceptions from EL0, terminate the callchain here.
14475 +        * For exceptions from EL0, create a terminal frame record.
14476          * For exceptions from EL1, create a synthetic frame record so the
14477          * interrupted code shows up in the backtrace.
14478          */
14479         .if \el == 0
14480 -       mov     x29, xzr
14481 +       stp     xzr, xzr, [sp, #S_STACKFRAME]
14482         .else
14483         stp     x29, x22, [sp, #S_STACKFRAME]
14484 -       add     x29, sp, #S_STACKFRAME
14485         .endif
14486 +       add     x29, sp, #S_STACKFRAME
14488  #ifdef CONFIG_ARM64_SW_TTBR0_PAN
14489  alternative_if_not ARM64_HAS_PAN
14490 @@ -292,6 +292,8 @@ alternative_else_nop_endif
14491  alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14492         mrs_s   x20, SYS_ICC_PMR_EL1
14493         str     x20, [sp, #S_PMR_SAVE]
14494 +       mov     x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
14495 +       msr_s   SYS_ICC_PMR_EL1, x20
14496  alternative_else_nop_endif
14498         /* Re-enable tag checking (TCO set on exception entry) */
14499 @@ -493,8 +495,8 @@ tsk .req    x28             // current thread_info
14500  /*
14501   * Interrupt handling.
14502   */
14503 -       .macro  irq_handler
14504 -       ldr_l   x1, handle_arch_irq
14505 +       .macro  irq_handler, handler:req
14506 +       ldr_l   x1, \handler
14507         mov     x0, sp
14508         irq_stack_entry
14509         blr     x1
14510 @@ -524,13 +526,41 @@ alternative_endif
14511  #endif
14512         .endm
14514 -       .macro  gic_prio_irq_setup, pmr:req, tmp:req
14515 -#ifdef CONFIG_ARM64_PSEUDO_NMI
14516 -       alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14517 -       orr     \tmp, \pmr, #GIC_PRIO_PSR_I_SET
14518 -       msr_s   SYS_ICC_PMR_EL1, \tmp
14519 -       alternative_else_nop_endif
14520 +       .macro el1_interrupt_handler, handler:req
14521 +       enable_da_f
14523 +       mov     x0, sp
14524 +       bl      enter_el1_irq_or_nmi
14526 +       irq_handler     \handler
14528 +#ifdef CONFIG_PREEMPTION
14529 +       ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
14530 +alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14531 +       /*
14532 +        * DA_F were cleared at start of handling. If anything is set in DAIF,
14533 +        * we come back from an NMI, so skip preemption
14534 +        */
14535 +       mrs     x0, daif
14536 +       orr     x24, x24, x0
14537 +alternative_else_nop_endif
14538 +       cbnz    x24, 1f                         // preempt count != 0 || NMI return path
14539 +       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
14541  #endif
14543 +       mov     x0, sp
14544 +       bl      exit_el1_irq_or_nmi
14545 +       .endm
14547 +       .macro el0_interrupt_handler, handler:req
14548 +       user_exit_irqoff
14549 +       enable_da_f
14551 +       tbz     x22, #55, 1f
14552 +       bl      do_el0_irq_bp_hardening
14554 +       irq_handler     \handler
14555         .endm
14557         .text
14558 @@ -662,32 +692,7 @@ SYM_CODE_END(el1_sync)
14559         .align  6
14560  SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
14561         kernel_entry 1
14562 -       gic_prio_irq_setup pmr=x20, tmp=x1
14563 -       enable_da_f
14565 -       mov     x0, sp
14566 -       bl      enter_el1_irq_or_nmi
14568 -       irq_handler
14570 -#ifdef CONFIG_PREEMPTION
14571 -       ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
14572 -alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14573 -       /*
14574 -        * DA_F were cleared at start of handling. If anything is set in DAIF,
14575 -        * we come back from an NMI, so skip preemption
14576 -        */
14577 -       mrs     x0, daif
14578 -       orr     x24, x24, x0
14579 -alternative_else_nop_endif
14580 -       cbnz    x24, 1f                         // preempt count != 0 || NMI return path
14581 -       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
14583 -#endif
14585 -       mov     x0, sp
14586 -       bl      exit_el1_irq_or_nmi
14588 +       el1_interrupt_handler handle_arch_irq
14589         kernel_exit 1
14590  SYM_CODE_END(el1_irq)
14592 @@ -727,22 +732,13 @@ SYM_CODE_END(el0_error_compat)
14593  SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
14594         kernel_entry 0
14595  el0_irq_naked:
14596 -       gic_prio_irq_setup pmr=x20, tmp=x0
14597 -       user_exit_irqoff
14598 -       enable_da_f
14600 -       tbz     x22, #55, 1f
14601 -       bl      do_el0_irq_bp_hardening
14603 -       irq_handler
14605 +       el0_interrupt_handler handle_arch_irq
14606         b       ret_to_user
14607  SYM_CODE_END(el0_irq)
14609  SYM_CODE_START_LOCAL(el1_error)
14610         kernel_entry 1
14611         mrs     x1, esr_el1
14612 -       gic_prio_kentry_setup tmp=x2
14613         enable_dbg
14614         mov     x0, sp
14615         bl      do_serror
14616 @@ -753,7 +749,6 @@ SYM_CODE_START_LOCAL(el0_error)
14617         kernel_entry 0
14618  el0_error_naked:
14619         mrs     x25, esr_el1
14620 -       gic_prio_kentry_setup tmp=x2
14621         user_exit_irqoff
14622         enable_dbg
14623         mov     x0, sp
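Taken together, the daifflags.h, entry-common.c and entry.S hunks centralize pseudo-NMI priority masking: kernel_entry now saves ICC_PMR_EL1 into pt_regs and raises the mask once, local_daif_inherit() restores it, and the scattered gic_prio_irq_setup / gic_prio_kentry_setup calls disappear. A compilable sketch of that save/raise/restore shape, with stand-in values and accessors:

#include <stdint.h>

#define GIC_PRIO_IRQON     0xe0          /* stand-in, not the kernel value */
#define GIC_PRIO_PSR_I_SET (1u << 4)     /* stand-in, not the kernel value */

static uint64_t icc_pmr;                          /* models ICC_PMR_EL1 */
static uint64_t read_pmr(void)    { return icc_pmr; }
static void write_pmr(uint64_t v) { icc_pmr = v; }

struct regs { uint64_t pmr_save; };

/* What kernel_entry now does once, for every exception: */
static void entry_save_and_mask(struct regs *r)
{
        r->pmr_save = read_pmr();
        write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}

/* What local_daif_inherit() now undoes on the way back: */
static void exit_restore(struct regs *r)
{
        write_pmr(r->pmr_save);
}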
14624 diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
14625 index d55bdfb7789c..7032a5f9e624 100644
14626 --- a/arch/arm64/kernel/stacktrace.c
14627 +++ b/arch/arm64/kernel/stacktrace.c
14628 @@ -44,10 +44,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
14629         unsigned long fp = frame->fp;
14630         struct stack_info info;
14632 -       /* Terminal record; nothing to unwind */
14633 -       if (!fp)
14634 -               return -ENOENT;
14636         if (fp & 0xf)
14637                 return -EINVAL;
14639 @@ -108,6 +104,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
14641         frame->pc = ptrauth_strip_insn_pac(frame->pc);
14643 +       /*
14644 +        * This is a terminal record, so we have finished unwinding.
14645 +        */
14646 +       if (!frame->fp && !frame->pc)
14647 +               return -ENOENT;
14649         return 0;
14651  NOKPROBE_SYMBOL(unwind_frame);
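This pairs with the entry.S change above: EL0 exceptions now store an all-zero frame record instead of zeroing x29, so the unwinder can stop on an explicit fp == 0 && pc == 0 terminal record after following the frame chain. A simplified sketch of one unwind step (no stack-bounds checks or PAC stripping, both of which the real code performs):

/* Sketch: one unwind step over AArch64 frame records {fp, lr}. */
struct frame { unsigned long fp, pc; };

static int unwind_step(struct frame *f)
{
        unsigned long *rec = (unsigned long *)f->fp;

        if (f->fp & 0xf)
                return -1;              /* frame records are 16-byte aligned */

        f->pc = rec[1];                 /* saved lr */
        f->fp = rec[0];                 /* previous frame pointer */

        if (!f->fp && !f->pc)
                return -2;              /* terminal record: unwind complete */
        return 0;
}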
14652 diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
14653 index 61dbb4c838ef..a5e61e09ea92 100644
14654 --- a/arch/arm64/kernel/vdso/vdso.lds.S
14655 +++ b/arch/arm64/kernel/vdso/vdso.lds.S
14656 @@ -31,6 +31,13 @@ SECTIONS
14657         .gnu.version_d  : { *(.gnu.version_d) }
14658         .gnu.version_r  : { *(.gnu.version_r) }
14660 +       /*
14661 +        * Discard .note.gnu.property sections which are unused and have
14662 +        * different alignment requirement from vDSO note sections.
14663 +        */
14664 +       /DISCARD/       : {
14665 +               *(.note.GNU-stack .note.gnu.property)
14666 +       }
14667         .note           : { *(.note.*) }                :text   :note
14669         . = ALIGN(16);
14670 @@ -48,7 +55,6 @@ SECTIONS
14671         PROVIDE(end = .);
14673         /DISCARD/       : {
14674 -               *(.note.GNU-stack)
14675                 *(.data .data.* .gnu.linkonce.d.* .sdata*)
14676                 *(.bss .sbss .dynbss .dynsbss)
14677                 *(.eh_frame .eh_frame_hdr)
14678 diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
14679 index 7f06ba76698d..84b5f79c9eab 100644
14680 --- a/arch/arm64/kvm/arm.c
14681 +++ b/arch/arm64/kvm/arm.c
14682 @@ -580,6 +580,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
14684         vcpu->arch.has_run_once = true;
14686 +       kvm_arm_vcpu_init_debug(vcpu);
14688         if (likely(irqchip_in_kernel(kvm))) {
14689                 /*
14690                  * Map the VGIC hardware resources before running a vcpu the
14691 @@ -1808,8 +1810,10 @@ static int init_hyp_mode(void)
14692         if (is_protected_kvm_enabled()) {
14693                 init_cpu_logical_map();
14695 -               if (!init_psci_relay())
14696 +               if (!init_psci_relay()) {
14697 +                       err = -ENODEV;
14698                         goto out_err;
14699 +               }
14700         }
14702         return 0;
14703 diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
14704 index dbc890511631..2484b2cca74b 100644
14705 --- a/arch/arm64/kvm/debug.c
14706 +++ b/arch/arm64/kvm/debug.c
14707 @@ -68,6 +68,64 @@ void kvm_arm_init_debug(void)
14708         __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
14711 +/**
14712 + * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
14713 + *
14714 + * @vcpu:      the vcpu pointer
14715 + *
14716 + * This ensures we will trap access to:
14717 + *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
14718 + *  - Debug ROM Address (MDCR_EL2_TDRA)
14719 + *  - OS related registers (MDCR_EL2_TDOSA)
14720 + *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
14721 + *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
14722 + */
14723 +static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
14725 +       /*
14726 +        * This also clears MDCR_EL2_E2PB_MASK to disable guest access
14727 +        * to the profiling buffer.
14728 +        */
14729 +       vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
14730 +       vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
14731 +                               MDCR_EL2_TPMS |
14732 +                               MDCR_EL2_TTRF |
14733 +                               MDCR_EL2_TPMCR |
14734 +                               MDCR_EL2_TDRA |
14735 +                               MDCR_EL2_TDOSA);
14737 +       /* Is the VM being debugged by userspace? */
14738 +       if (vcpu->guest_debug)
14739 +               /* Route all software debug exceptions to EL2 */
14740 +               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
14742 +       /*
14743 +        * Trap debug register access when one of the following is true:
14744 +        *  - Userspace is using the hardware to debug the guest
14745 +        *  (KVM_GUESTDBG_USE_HW is set).
14746 +        *  - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
14747 +        */
14748 +       if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
14749 +           !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
14750 +               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
14752 +       trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
14755 +/**
14756 + * kvm_arm_vcpu_init_debug - setup vcpu debug traps
14757 + *
14758 + * @vcpu:      the vcpu pointer
14759 + *
14760 + * Set vcpu initial mdcr_el2 value.
14761 + */
14762 +void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
14764 +       preempt_disable();
14765 +       kvm_arm_setup_mdcr_el2(vcpu);
14766 +       preempt_enable();
14769  /**
14770   * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
14771   */
14772 @@ -83,13 +141,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
14773   * @vcpu:      the vcpu pointer
14774   *
14775   * This is called before each entry into the hypervisor to setup any
14776 - * debug related registers. Currently this just ensures we will trap
14777 - * access to:
14778 - *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
14779 - *  - Debug ROM Address (MDCR_EL2_TDRA)
14780 - *  - OS related registers (MDCR_EL2_TDOSA)
14781 - *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
14782 - *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
14783 + * debug related registers.
14784   *
14785   * Additionally, KVM only traps guest accesses to the debug registers if
14786   * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
14787 @@ -101,28 +153,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
14789  void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14791 -       bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
14792         unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
14794         trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
14796 -       /*
14797 -        * This also clears MDCR_EL2_E2PB_MASK to disable guest access
14798 -        * to the profiling buffer.
14799 -        */
14800 -       vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
14801 -       vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
14802 -                               MDCR_EL2_TPMS |
14803 -                               MDCR_EL2_TTRF |
14804 -                               MDCR_EL2_TPMCR |
14805 -                               MDCR_EL2_TDRA |
14806 -                               MDCR_EL2_TDOSA);
14807 +       kvm_arm_setup_mdcr_el2(vcpu);
14809         /* Is Guest debugging in effect? */
14810         if (vcpu->guest_debug) {
14811 -               /* Route all software debug exceptions to EL2 */
14812 -               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
14814                 /* Save guest debug state */
14815                 save_guest_debug_regs(vcpu);
14817 @@ -176,7 +214,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14819                         vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
14820                         vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
14821 -                       trap_debug = true;
14823                         trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
14824                                                 &vcpu->arch.debug_ptr->dbg_bcr[0],
14825 @@ -191,10 +228,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14826         BUG_ON(!vcpu->guest_debug &&
14827                 vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
14829 -       /* Trap debug register access */
14830 -       if (trap_debug)
14831 -               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
14833         /* If KDE or MDE are set, perform a full save/restore cycle. */
14834         if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
14835                 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
14836 @@ -203,7 +236,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14837         if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
14838                 write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
14840 -       trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
14841         trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
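The refactor moves the MDCR_EL2 computation into kvm_arm_setup_mdcr_el2() so the same value can be established once at first vcpu run (kvm_arm_vcpu_init_debug()) and re-derived before each entry. A condensed sketch of the trap-bit composition, using the bit names from the hunk:

/* Sketch: how the vcpu's MDCR_EL2 is composed. HPMN bits are inherited
 * from the host; everything else traps the guest out to EL2. */
static u64 compute_mdcr_el2(u64 host_mdcr, bool guest_debug,
                            bool debug_dirty, bool use_hw_debug)
{
        u64 mdcr = host_mdcr & MDCR_EL2_HPMN_MASK;

        mdcr |= MDCR_EL2_TPM | MDCR_EL2_TPMS | MDCR_EL2_TTRF |
                MDCR_EL2_TPMCR | MDCR_EL2_TDRA | MDCR_EL2_TDOSA;

        if (guest_debug)                        /* userspace is debugging */
                mdcr |= MDCR_EL2_TDE;           /* route debug to EL2 */

        if (use_hw_debug || !debug_dirty)       /* guest not using dbg regs */
                mdcr |= MDCR_EL2_TDA;           /* trap debug reg access */

        return mdcr;
}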
14844 diff --git a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14845 index ead02c6a7628..6bc88a756cb7 100644
14846 --- a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14847 +++ b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14848 @@ -50,6 +50,18 @@
14849  #ifndef R_AARCH64_ABS64
14850  #define R_AARCH64_ABS64                        257
14851  #endif
14852 +#ifndef R_AARCH64_PREL64
14853 +#define R_AARCH64_PREL64               260
14854 +#endif
14855 +#ifndef R_AARCH64_PREL32
14856 +#define R_AARCH64_PREL32               261
14857 +#endif
14858 +#ifndef R_AARCH64_PREL16
14859 +#define R_AARCH64_PREL16               262
14860 +#endif
14861 +#ifndef R_AARCH64_PLT32
14862 +#define R_AARCH64_PLT32                        314
14863 +#endif
14864  #ifndef R_AARCH64_LD_PREL_LO19
14865  #define R_AARCH64_LD_PREL_LO19         273
14866  #endif
14867 @@ -371,6 +383,12 @@ static void emit_rela_section(Elf64_Shdr *sh_rela)
14868                 case R_AARCH64_ABS64:
14869                         emit_rela_abs64(rela, sh_orig_name);
14870                         break;
14871 +               /* Allow position-relative data relocations. */
14872 +               case R_AARCH64_PREL64:
14873 +               case R_AARCH64_PREL32:
14874 +               case R_AARCH64_PREL16:
14875 +               case R_AARCH64_PLT32:
14876 +                       break;
14877                 /* Allow relocations to generate PC-relative addressing. */
14878                 case R_AARCH64_LD_PREL_LO19:
14879                 case R_AARCH64_ADR_PREL_LO21:
14880 diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
14881 index bd354cd45d28..4b5acd84b8c8 100644
14882 --- a/arch/arm64/kvm/reset.c
14883 +++ b/arch/arm64/kvm/reset.c
14884 @@ -242,6 +242,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
14886         /* Reset core registers */
14887         memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
14888 +       memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
14889 +       vcpu->arch.ctxt.spsr_abt = 0;
14890 +       vcpu->arch.ctxt.spsr_und = 0;
14891 +       vcpu->arch.ctxt.spsr_irq = 0;
14892 +       vcpu->arch.ctxt.spsr_fiq = 0;
14893         vcpu_gp_regs(vcpu)->pstate = pstate;
14895         /* Reset system registers */
14896 diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
14897 index 44419679f91a..7740995de982 100644
14898 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
14899 +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
14900 @@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
14901                         r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
14902                         goto out;
14903                 }
14904 -               rdreg = list_first_entry(&vgic->rd_regions,
14905 -                                        struct vgic_redist_region, list);
14906 +               rdreg = list_first_entry_or_null(&vgic->rd_regions,
14907 +                                                struct vgic_redist_region, list);
14908                 if (!rdreg)
14909                         addr_ptr = &undef_value;
14910                 else
14911 @@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
14912                 u64 addr;
14913                 unsigned long type = (unsigned long)attr->attr;
14915 +               if (copy_from_user(&addr, uaddr, sizeof(addr)))
14916 +                       return -EFAULT;
14918                 r = kvm_vgic_addr(dev->kvm, type, &addr, false);
14919                 if (r)
14920                         return (r == -ENODEV) ? -ENXIO : r;
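Two independent fixes here: the redistributor-region list may legitimately be empty when userspace reads the attribute before any region is set, and the read path must copy the user's payload in before kvm_vgic_addr() consults it, since the index a VGICv3 redistributor read encodes lives in *addr. The resulting get-side pattern, sketched with a hypothetical handler name:

/* Sketch: a KVM device-attr "get" must round-trip the payload. */
static int vgic_attr_get_addr(struct kvm *kvm, u64 __user *uaddr,
                              unsigned long type)
{
        u64 addr;
        int r;

        if (copy_from_user(&addr, uaddr, sizeof(addr)))
                return -EFAULT;                 /* fix added above */

        r = kvm_vgic_addr(kvm, type, &addr, false);
        if (r)
                return (r == -ENODEV) ? -ENXIO : r;

        if (copy_to_user(uaddr, &addr, sizeof(addr)))
                return -EFAULT;
        return 0;
}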
14921 diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
14922 index ac485163a4a7..6d44c028d1c9 100644
14923 --- a/arch/arm64/mm/flush.c
14924 +++ b/arch/arm64/mm/flush.c
14925 @@ -55,8 +55,10 @@ void __sync_icache_dcache(pte_t pte)
14927         struct page *page = pte_page(pte);
14929 -       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
14930 +       if (!test_bit(PG_dcache_clean, &page->flags)) {
14931                 sync_icache_aliases(page_address(page), page_size(page));
14932 +               set_bit(PG_dcache_clean, &page->flags);
14933 +       }
14935  EXPORT_SYMBOL_GPL(__sync_icache_dcache);
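Splitting test_and_set_bit() avoids publishing PG_dcache_clean before the maintenance has run: with the combined operation, a concurrent mapper could observe the bit set, skip its own sync, and execute stale instructions. The fixed ordering, restated:

/* Sketch of the ordering rule: complete the cache maintenance first,
 * then set the flag, so "clean" is never visible before it is true. */
static void sync_page_sketch(struct page *page)
{
        if (!test_bit(PG_dcache_clean, &page->flags)) {
                sync_icache_aliases(page_address(page), page_size(page));
                set_bit(PG_dcache_clean, &page->flags); /* publish last */
        }
}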
14937 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
14938 index c967bfd30d2b..b183216a591c 100644
14939 --- a/arch/arm64/mm/proc.S
14940 +++ b/arch/arm64/mm/proc.S
14941 @@ -444,6 +444,18 @@ SYM_FUNC_START(__cpu_setup)
14942         mov     x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
14943         msr_s   SYS_GCR_EL1, x10
14945 +       /*
14946 +        * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
14947 +        * RGSR_EL1.SEED must be non-zero for IRG to produce
14948 +        * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
14949 +        * must initialize it.
14950 +        */
14951 +       mrs     x10, CNTVCT_EL0
14952 +       ands    x10, x10, #SYS_RGSR_EL1_SEED_MASK
14953 +       csinc   x10, x10, xzr, ne
14954 +       lsl     x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
14955 +       msr_s   SYS_RGSR_EL1, x10
14957         /* clear any pending tag check faults in TFSR*_EL1 */
14958         msr_s   SYS_TFSR_EL1, xzr
14959         msr_s   SYS_TFSRE0_EL1, xzr
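The ands/csinc pair guarantees a non-zero RGSR_EL1 seed even when the counter's masked bits happen to be zero. In C, with stand-in values for the mask and shift constants:

#include <stdint.h>

#define RGSR_SEED_MASK  0xffffu   /* stand-in for SYS_RGSR_EL1_SEED_MASK */
#define RGSR_SEED_SHIFT 8         /* stand-in for SYS_RGSR_EL1_SEED_SHIFT */

/* Sketch: derive a non-zero IRG seed from the virtual counter value. */
static uint64_t rgsr_seed(uint64_t cntvct)
{
        uint64_t seed = cntvct & RGSR_SEED_MASK;

        if (seed == 0)            /* csinc: substitute 1 when ands hit 0 */
                seed = 1;
        return seed << RGSR_SEED_SHIFT;
}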
14960 diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
14961 index 5a29652e6def..7271b9c5fc76 100644
14962 --- a/arch/ia64/include/asm/module.h
14963 +++ b/arch/ia64/include/asm/module.h
14964 @@ -14,16 +14,20 @@
14965  struct elf64_shdr;                     /* forward declration */
14967  struct mod_arch_specific {
14968 +       /* Used only at module load time. */
14969         struct elf64_shdr *core_plt;    /* core PLT section */
14970         struct elf64_shdr *init_plt;    /* init PLT section */
14971         struct elf64_shdr *got;         /* global offset table */
14972         struct elf64_shdr *opd;         /* official procedure descriptors */
14973         struct elf64_shdr *unwind;      /* unwind-table section */
14974         unsigned long gp;               /* global-pointer for module */
14975 +       unsigned int next_got_entry;    /* index of next available got entry */
14977 +       /* Used at module run and cleanup time. */
14978         void *core_unw_table;           /* core unwind-table cookie returned by unwinder */
14979         void *init_unw_table;           /* init unwind-table cookie returned by unwinder */
14980 -       unsigned int next_got_entry;    /* index of next available got entry */
14981 +       void *opd_addr;                 /* symbolize uses .opd to get to actual function */
14982 +       unsigned long opd_size;
14983  };
14985  #define ARCH_SHF_SMALL SHF_IA_64_SHORT
14986 diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
14987 index a5636524af76..e2af6b172200 100644
14988 --- a/arch/ia64/kernel/acpi.c
14989 +++ b/arch/ia64/kernel/acpi.c
14990 @@ -446,7 +446,8 @@ void __init acpi_numa_fixup(void)
14991         if (srat_num_cpus == 0) {
14992                 node_set_online(0);
14993                 node_cpuid[0].phys_id = hard_smp_processor_id();
14994 -               return;
14995 +               slit_distance(0, 0) = LOCAL_DISTANCE;
14996 +               goto out;
14997         }
14999         /*
15000 @@ -489,7 +490,7 @@ void __init acpi_numa_fixup(void)
15001                         for (j = 0; j < MAX_NUMNODES; j++)
15002                                 slit_distance(i, j) = i == j ?
15003                                         LOCAL_DISTANCE : REMOTE_DISTANCE;
15004 -               return;
15005 +               goto out;
15006         }
15008         memset(numa_slit, -1, sizeof(numa_slit));
15009 @@ -514,6 +515,8 @@ void __init acpi_numa_fixup(void)
15010                 printk("\n");
15011         }
15012  #endif
15013 +out:
15014 +       node_possible_map = node_online_map;
15016  #endif                         /* CONFIG_ACPI_NUMA */
15018 diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
15019 index c5fe21de46a8..31149e41f9be 100644
15020 --- a/arch/ia64/kernel/efi.c
15021 +++ b/arch/ia64/kernel/efi.c
15022 @@ -415,10 +415,10 @@ efi_get_pal_addr (void)
15023                 mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
15025                 printk(KERN_INFO "CPU %d: mapping PAL code "
15026 -                       "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
15027 -                       smp_processor_id(), md->phys_addr,
15028 -                       md->phys_addr + efi_md_size(md),
15029 -                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
15030 +                       "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
15031 +                       smp_processor_id(), md->phys_addr,
15032 +                       md->phys_addr + efi_md_size(md),
15033 +                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
15034  #endif
15035                 return __va(md->phys_addr);
15036         }
15037 @@ -560,6 +560,7 @@ efi_init (void)
15038         {
15039                 efi_memory_desc_t *md;
15040                 void *p;
15041 +               unsigned int i;
15043                 for (i = 0, p = efi_map_start; p < efi_map_end;
15044                      ++i, p += efi_desc_size)
15045 @@ -586,7 +587,7 @@ efi_init (void)
15046                         }
15048                         printk("mem%02d: %s "
15049 -                              "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
15050 +                              "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
15051                                i, efi_md_typeattr_format(buf, sizeof(buf), md),
15052                                md->phys_addr,
15053                                md->phys_addr + efi_md_size(md), size, unit);
15054 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
15055 index 00a496cb346f..2cba53c1da82 100644
15056 --- a/arch/ia64/kernel/module.c
15057 +++ b/arch/ia64/kernel/module.c
15058 @@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
15059  int
15060  module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
15062 +       struct mod_arch_specific *mas = &mod->arch;
15064         DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
15065 -       if (mod->arch.unwind)
15066 +       if (mas->unwind)
15067                 register_unwind_table(mod);
15069 +       /*
15070 +        * ".opd" was already relocated to the final destination. Store
15071 +        * it's address for use in symbolizer.
15072 +        * its address for use in the symbolizer.
15073 +       mas->opd_addr = (void *)mas->opd->sh_addr;
15074 +       mas->opd_size = mas->opd->sh_size;
15076 +       /*
15077 +        * Module relocation was already done at this point. Section
15078 +        * headers are about to be deleted. Wipe out load-time context.
15079 +        */
15080 +       mas->core_plt = NULL;
15081 +       mas->init_plt = NULL;
15082 +       mas->got = NULL;
15083 +       mas->opd = NULL;
15084 +       mas->unwind = NULL;
15085 +       mas->gp = 0;
15086 +       mas->next_got_entry = 0;
15088         return 0;
15091 @@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
15093  void *dereference_module_function_descriptor(struct module *mod, void *ptr)
15095 -       Elf64_Shdr *opd = mod->arch.opd;
15096 +       struct mod_arch_specific *mas = &mod->arch;
15098 -       if (ptr < (void *)opd->sh_addr ||
15099 -                       ptr >= (void *)(opd->sh_addr + opd->sh_size))
15100 +       if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
15101                 return ptr;
15103         return dereference_function_descriptor(ptr);
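Since section headers are freed once the module is loaded, module_finalize() now caches only the .opd range it still needs and wipes the rest of the load-time state; the descriptor lookup then tests against the cached range instead of a dangling Elf64_Shdr. A reduced sketch of that lookup, assuming the ia64 convention that a function descriptor's first word is the entry address:

/* Sketch: resolve a pointer through .opd using the cached range. */
struct mod_arch_sketch {
        void *opd_addr;           /* cached at module_finalize() time */
        unsigned long opd_size;
};

static void *deref_descriptor(struct mod_arch_sketch *mas, void *ptr)
{
        if (ptr < mas->opd_addr ||
            (char *)ptr >= (char *)mas->opd_addr + mas->opd_size)
                return ptr;                   /* not inside .opd */
        return *(void **)ptr;                 /* first word: entry point */
}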
15104 diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h
15105 index 257b29184af9..e28eb1c0e0bf 100644
15106 --- a/arch/m68k/include/asm/mvme147hw.h
15107 +++ b/arch/m68k/include/asm/mvme147hw.h
15108 @@ -66,6 +66,9 @@ struct pcc_regs {
15109  #define PCC_INT_ENAB           0x08
15111  #define PCC_TIMER_INT_CLR      0x80
15113 +#define PCC_TIMER_TIC_EN       0x01
15114 +#define PCC_TIMER_COC_EN       0x02
15115  #define PCC_TIMER_CLR_OVF      0x04
15117  #define PCC_LEVEL_ABORT                0x07
15118 diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
15119 index 1c235d8f53f3..f55bdcb8e4f1 100644
15120 --- a/arch/m68k/kernel/sys_m68k.c
15121 +++ b/arch/m68k/kernel/sys_m68k.c
15122 @@ -388,6 +388,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
15123                 ret = -EPERM;
15124                 if (!capable(CAP_SYS_ADMIN))
15125                         goto out;
15127 +               mmap_read_lock(current->mm);
15128         } else {
15129                 struct vm_area_struct *vma;
15131 diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
15132 index cfdc7f912e14..e1e90c49a496 100644
15133 --- a/arch/m68k/mvme147/config.c
15134 +++ b/arch/m68k/mvme147/config.c
15135 @@ -114,8 +114,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
15136         unsigned long flags;
15138         local_irq_save(flags);
15139 -       m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
15140 -       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
15141 +       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
15142 +                            PCC_TIMER_TIC_EN;
15143 +       m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
15144 +                                PCC_LEVEL_TIMER1;
15145         clk_total += PCC_TIMER_CYCLES;
15146         legacy_timer_tick(1);
15147         local_irq_restore(flags);
15148 @@ -133,10 +135,10 @@ void mvme147_sched_init (void)
15149         /* Init the clock with a value */
15150         /* The clock counter increments until 0xFFFF then reloads */
15151         m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
15152 -       m147_pcc->t1_cntrl = 0x0;       /* clear timer */
15153 -       m147_pcc->t1_cntrl = 0x3;       /* start timer */
15154 -       m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;  /* clear pending ints */
15155 -       m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
15156 +       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
15157 +                            PCC_TIMER_TIC_EN;
15158 +       m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
15159 +                                PCC_LEVEL_TIMER1;
15161         clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
15163 diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
15164 index 30357fe4ba6c..b59593c7cfb9 100644
15165 --- a/arch/m68k/mvme16x/config.c
15166 +++ b/arch/m68k/mvme16x/config.c
15167 @@ -366,6 +366,7 @@ static u32 clk_total;
15168  #define PCCTOVR1_COC_EN      0x02
15169  #define PCCTOVR1_OVR_CLR     0x04
15171 +#define PCCTIC1_INT_LEVEL    6
15172  #define PCCTIC1_INT_CLR      0x08
15173  #define PCCTIC1_INT_EN       0x10
15175 @@ -374,8 +375,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
15176         unsigned long flags;
15178         local_irq_save(flags);
15179 -       out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
15180 -       out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
15181 +       out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
15182 +       out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
15183         clk_total += PCC_TIMER_CYCLES;
15184         legacy_timer_tick(1);
15185         local_irq_restore(flags);
15186 @@ -389,14 +390,15 @@ void mvme16x_sched_init(void)
15187      int irq;
15189      /* Using PCCchip2 or MC2 chip tick timer 1 */
15190 -    out_be32(PCCTCNT1, 0);
15191 -    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
15192 -    out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
15193 -    out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
15194      if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
15195                      NULL))
15196         panic ("Couldn't register timer int");
15198 +    out_be32(PCCTCNT1, 0);
15199 +    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
15200 +    out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
15201 +    out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
15203      clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
15205      if (brdno == 0x0162 || brdno == 0x172)
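Both MVME timer fixes follow the same two rules: acknowledge and re-enable in one write, because writing the control register with only the clear flag wipes the TIC/COC enable bits and stops the timer, and register the interrupt handler before the timer is started so the first tick cannot fire unhandled. The init-order rule, sketched with a hypothetical programming helper:

/* Sketch: claim the IRQ first, then program and start the timer. */
static void timer_sched_init_sketch(void)
{
        if (request_irq(TIMER_IRQ, timer_int, IRQF_TIMER, "timer", NULL))
                panic("Couldn't register timer int");

        program_and_start_tick_timer();   /* hypothetical helper */
}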
15206 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
15207 index d89efba3d8a4..e89d63cd92d1 100644
15208 --- a/arch/mips/Kconfig
15209 +++ b/arch/mips/Kconfig
15210 @@ -6,6 +6,7 @@ config MIPS
15211         select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
15212         select ARCH_HAS_FORTIFY_SOURCE
15213         select ARCH_HAS_KCOV
15214 +       select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
15215         select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
15216         select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
15217         select ARCH_HAS_UBSAN_SANITIZE_ALL
15218 diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
15219 index 69cbef472377..d4b2b430dad0 100644
15220 --- a/arch/mips/boot/dts/brcm/bcm3368.dtsi
15221 +++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
15222 @@ -59,7 +59,7 @@ clkctl: clock-controller@fff8c004 {
15224                 periph_cntl: syscon@fff8c008 {
15225                         compatible = "syscon";
15226 -                       reg = <0xfff8c000 0x4>;
15227 +                       reg = <0xfff8c008 0x4>;
15228                         native-endian;
15229                 };
15231 diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
15232 index e0021ff9f144..940594436872 100644
15233 --- a/arch/mips/boot/dts/brcm/bcm63268.dtsi
15234 +++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
15235 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15237                 periph_cntl: syscon@10000008 {
15238                         compatible = "syscon";
15239 -                       reg = <0x10000000 0xc>;
15240 +                       reg = <0x10000008 0x4>;
15241                         native-endian;
15242                 };
15244 diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
15245 index 9d93e7f5e6fc..d79c88c2fc9c 100644
15246 --- a/arch/mips/boot/dts/brcm/bcm6358.dtsi
15247 +++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
15248 @@ -59,7 +59,7 @@ clkctl: clock-controller@fffe0004 {
15250                 periph_cntl: syscon@fffe0008 {
15251                         compatible = "syscon";
15252 -                       reg = <0xfffe0000 0x4>;
15253 +                       reg = <0xfffe0008 0x4>;
15254                         native-endian;
15255                 };
15257 diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
15258 index eb10341b75ba..8a21cb761ffd 100644
15259 --- a/arch/mips/boot/dts/brcm/bcm6362.dtsi
15260 +++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
15261 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15263                 periph_cntl: syscon@10000008 {
15264                         compatible = "syscon";
15265 -                       reg = <0x10000000 0xc>;
15266 +                       reg = <0x10000008 0x4>;
15267                         native-endian;
15268                 };
15270 diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
15271 index 52c19f40b9cc..8e87867ebc04 100644
15272 --- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
15273 +++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
15274 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15276                 periph_cntl: syscon@100000008 {
15277                         compatible = "syscon";
15278 -                       reg = <0x10000000 0xc>;
15279 +                       reg = <0x10000008 0x4>;
15280                         native-endian;
15281                 };
15283 diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
15284 index fc881b46d911..bc6110fb98e0 100644
15285 --- a/arch/mips/crypto/poly1305-glue.c
15286 +++ b/arch/mips/crypto/poly1305-glue.c
15287 @@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
15288  asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
15289  asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
15291 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
15292 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
15294         poly1305_init_mips(&dctx->h, key);
15295         dctx->s[0] = get_unaligned_le32(key + 16);
15296 diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S
15297 index a7f51f97b910..c45ad2759421 100644
15298 --- a/arch/mips/generic/board-boston.its.S
15299 +++ b/arch/mips/generic/board-boston.its.S
15300 @@ -1,22 +1,22 @@
15301  / {
15302         images {
15303 -               fdt@boston {
15304 +               fdt-boston {
15305                         description = "img,boston Device Tree";
15306                         data = /incbin/("boot/dts/img/boston.dtb");
15307                         type = "flat_dt";
15308                         arch = "mips";
15309                         compression = "none";
15310 -                       hash@0 {
15311 +                       hash {
15312                                 algo = "sha1";
15313                         };
15314                 };
15315         };
15317         configurations {
15318 -               conf@boston {
15319 +               conf-boston {
15320                         description = "Boston Linux kernel";
15321 -                       kernel = "kernel@0";
15322 -                       fdt = "fdt@boston";
15323 +                       kernel = "kernel";
15324 +                       fdt = "fdt-boston";
15325                 };
15326         };
15327  };
15328 diff --git a/arch/mips/generic/board-jaguar2.its.S b/arch/mips/generic/board-jaguar2.its.S
15329 index fb0e589eeff7..c2b8d479b26c 100644
15330 --- a/arch/mips/generic/board-jaguar2.its.S
15331 +++ b/arch/mips/generic/board-jaguar2.its.S
15332 @@ -1,23 +1,23 @@
15333  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15334  / {
15335         images {
15336 -               fdt@jaguar2_pcb110 {
15337 +               fdt-jaguar2_pcb110 {
15338                         description = "MSCC Jaguar2 PCB110 Device Tree";
15339                         data = /incbin/("boot/dts/mscc/jaguar2_pcb110.dtb");
15340                         type = "flat_dt";
15341                         arch = "mips";
15342                         compression = "none";
15343 -                       hash@0 {
15344 +                       hash {
15345                                 algo = "sha1";
15346                         };
15347                 };
15348 -               fdt@jaguar2_pcb111 {
15349 +               fdt-jaguar2_pcb111 {
15350                         description = "MSCC Jaguar2 PCB111 Device Tree";
15351                         data = /incbin/("boot/dts/mscc/jaguar2_pcb111.dtb");
15352                         type = "flat_dt";
15353                         arch = "mips";
15354                         compression = "none";
15355 -                       hash@0 {
15356 +                       hash {
15357                                 algo = "sha1";
15358                         };
15359                 };
15360 @@ -26,14 +26,14 @@
15361         configurations {
15362                 pcb110 {
15363                         description = "Jaguar2 Linux kernel";
15364 -                       kernel = "kernel@0";
15365 -                       fdt = "fdt@jaguar2_pcb110";
15366 +                       kernel = "kernel";
15367 +                       fdt = "fdt-jaguar2_pcb110";
15368                         ramdisk = "ramdisk";
15369                 };
15370                 pcb111 {
15371                         description = "Jaguar2 Linux kernel";
15372 -                       kernel = "kernel@0";
15373 -                       fdt = "fdt@jaguar2_pcb111";
15374 +                       kernel = "kernel";
15375 +                       fdt = "fdt-jaguar2_pcb111";
15376                         ramdisk = "ramdisk";
15377                 };
15378         };
15379 diff --git a/arch/mips/generic/board-luton.its.S b/arch/mips/generic/board-luton.its.S
15380 index 39a543f62f25..bd9837c9af97 100644
15381 --- a/arch/mips/generic/board-luton.its.S
15382 +++ b/arch/mips/generic/board-luton.its.S
15383 @@ -1,13 +1,13 @@
15384  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15385  / {
15386         images {
15387 -               fdt@luton_pcb091 {
15388 +               fdt-luton_pcb091 {
15389                         description = "MSCC Luton PCB091 Device Tree";
15390                         data = /incbin/("boot/dts/mscc/luton_pcb091.dtb");
15391                         type = "flat_dt";
15392                         arch = "mips";
15393                         compression = "none";
15394 -                       hash@0 {
15395 +                       hash {
15396                                 algo = "sha1";
15397                         };
15398                 };
15399 @@ -16,8 +16,8 @@
15400         configurations {
15401                 pcb091 {
15402                         description = "Luton Linux kernel";
15403 -                       kernel = "kernel@0";
15404 -                       fdt = "fdt@luton_pcb091";
15405 +                       kernel = "kernel";
15406 +                       fdt = "fdt-luton_pcb091";
15407                 };
15408         };
15409  };
15410 diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S
15411 index e4cb4f95a8cc..0a2e8f7a8526 100644
15412 --- a/arch/mips/generic/board-ni169445.its.S
15413 +++ b/arch/mips/generic/board-ni169445.its.S
15414 @@ -1,22 +1,22 @@
15415  / {
15416         images {
15417 -               fdt@ni169445 {
15418 +               fdt-ni169445 {
15419                         description = "NI 169445 device tree";
15420                         data = /incbin/("boot/dts/ni/169445.dtb");
15421                         type = "flat_dt";
15422                         arch = "mips";
15423                         compression = "none";
15424 -                       hash@0 {
15425 +                       hash {
15426                                 algo = "sha1";
15427                         };
15428                 };
15429         };
15431         configurations {
15432 -               conf@ni169445 {
15433 +               conf-ni169445 {
15434                         description = "NI 169445 Linux Kernel";
15435 -                       kernel = "kernel@0";
15436 -                       fdt = "fdt@ni169445";
15437 +                       kernel = "kernel";
15438 +                       fdt = "fdt-ni169445";
15439                 };
15440         };
15441  };
15442 diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S
15443 index 3da23988149a..8c7e3a1b68d3 100644
15444 --- a/arch/mips/generic/board-ocelot.its.S
15445 +++ b/arch/mips/generic/board-ocelot.its.S
15446 @@ -1,40 +1,40 @@
15447  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15448  / {
15449         images {
15450 -               fdt@ocelot_pcb123 {
15451 +               fdt-ocelot_pcb123 {
15452                         description = "MSCC Ocelot PCB123 Device Tree";
15453                         data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
15454                         type = "flat_dt";
15455                         arch = "mips";
15456                         compression = "none";
15457 -                       hash@0 {
15458 +                       hash {
15459                                 algo = "sha1";
15460                         };
15461                 };
15463 -               fdt@ocelot_pcb120 {
15464 +               fdt-ocelot_pcb120 {
15465                         description = "MSCC Ocelot PCB120 Device Tree";
15466                         data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
15467                         type = "flat_dt";
15468                         arch = "mips";
15469                         compression = "none";
15470 -                       hash@0 {
15471 +                       hash {
15472                                 algo = "sha1";
15473                         };
15474                 };
15475         };
15477         configurations {
15478 -               conf@ocelot_pcb123 {
15479 +               conf-ocelot_pcb123 {
15480                         description = "Ocelot Linux kernel";
15481 -                       kernel = "kernel@0";
15482 -                       fdt = "fdt@ocelot_pcb123";
15483 +                       kernel = "kernel";
15484 +                       fdt = "fdt-ocelot_pcb123";
15485                 };
15487 -               conf@ocelot_pcb120 {
15488 +               conf-ocelot_pcb120 {
15489                         description = "Ocelot Linux kernel";
15490 -                       kernel = "kernel@0";
15491 -                       fdt = "fdt@ocelot_pcb120";
15492 +                       kernel = "kernel";
15493 +                       fdt = "fdt-ocelot_pcb120";
15494                 };
15495         };
15496  };
15497 diff --git a/arch/mips/generic/board-serval.its.S b/arch/mips/generic/board-serval.its.S
15498 index 4ea4fc9d757f..dde833efe980 100644
15499 --- a/arch/mips/generic/board-serval.its.S
15500 +++ b/arch/mips/generic/board-serval.its.S
15501 @@ -1,13 +1,13 @@
15502  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15503  / {
15504         images {
15505 -               fdt@serval_pcb105 {
15506 +               fdt-serval_pcb105 {
15507                         description = "MSCC Serval PCB105 Device Tree";
15508                         data = /incbin/("boot/dts/mscc/serval_pcb105.dtb");
15509                         type = "flat_dt";
15510                         arch = "mips";
15511                         compression = "none";
15512 -                       hash@0 {
15513 +                       hash {
15514                                 algo = "sha1";
15515                         };
15516                 };
15517 @@ -16,8 +16,8 @@
15518         configurations {
15519                 pcb105 {
15520                         description = "Serval Linux kernel";
15521 -                       kernel = "kernel@0";
15522 -                       fdt = "fdt@serval_pcb105";
15523 +                       kernel = "kernel";
15524 +                       fdt = "fdt-serval_pcb105";
15525                         ramdisk = "ramdisk";
15526                 };
15527         };
15528 diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S
15529 index a2e773d3f14f..08c1e900eb4e 100644
15530 --- a/arch/mips/generic/board-xilfpga.its.S
15531 +++ b/arch/mips/generic/board-xilfpga.its.S
15532 @@ -1,22 +1,22 @@
15533  / {
15534         images {
15535 -               fdt@xilfpga {
15536 +               fdt-xilfpga {
15537                         description = "MIPSfpga (xilfpga) Device Tree";
15538                         data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb");
15539                         type = "flat_dt";
15540                         arch = "mips";
15541                         compression = "none";
15542 -                       hash@0 {
15543 +                       hash {
15544                                 algo = "sha1";
15545                         };
15546                 };
15547         };
15549         configurations {
15550 -               conf@xilfpga {
15551 +               conf-xilfpga {
15552                         description = "MIPSfpga Linux kernel";
15553 -                       kernel = "kernel@0";
15554 -                       fdt = "fdt@xilfpga";
15555 +                       kernel = "kernel";
15556 +                       fdt = "fdt-xilfpga";
15557                 };
15558         };
15559  };
15560 diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
15561 index 1a08438fd893..3e254676540f 100644
15562 --- a/arch/mips/generic/vmlinux.its.S
15563 +++ b/arch/mips/generic/vmlinux.its.S
15564 @@ -6,7 +6,7 @@
15565         #address-cells = <ADDR_CELLS>;
15567         images {
15568 -               kernel@0 {
15569 +               kernel {
15570                         description = KERNEL_NAME;
15571                         data = /incbin/(VMLINUX_BINARY);
15572                         type = "kernel";
15573 @@ -15,18 +15,18 @@
15574                         compression = VMLINUX_COMPRESSION;
15575                         load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
15576                         entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
15577 -                       hash@0 {
15578 +                       hash {
15579                                 algo = "sha1";
15580                         };
15581                 };
15582         };
15584         configurations {
15585 -               default = "conf@default";
15586 +               default = "conf-default";
15588 -               conf@default {
15589 +               conf-default {
15590                         description = "Generic Linux kernel";
15591 -                       kernel = "kernel@0";
15592 +                       kernel = "kernel";
15593                 };
15594         };
15595  };
15596 diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
15597 index 86f2323ebe6b..ca83ada7015f 100644
15598 --- a/arch/mips/include/asm/asmmacro.h
15599 +++ b/arch/mips/include/asm/asmmacro.h
15600 @@ -44,8 +44,7 @@
15601         .endm
15602  #endif
15604 -#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
15605 -    defined(CONFIG_CPU_MIPSR6)
15606 +#ifdef CONFIG_CPU_HAS_DIEI
15607         .macro  local_irq_enable reg=t0
15608         ei
15609         irq_enable_hazard
15610 diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h
15611 index dc5ea5736440..ceece76fc971 100644
15612 --- a/arch/mips/include/asm/div64.h
15613 +++ b/arch/mips/include/asm/div64.h
15614 @@ -1,5 +1,5 @@
15615  /*
15616 - * Copyright (C) 2000, 2004  Maciej W. Rozycki
15617 + * Copyright (C) 2000, 2004, 2021  Maciej W. Rozycki
15618   * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
15619   *
15620   * This file is subject to the terms and conditions of the GNU General Public
15621 @@ -9,25 +9,18 @@
15622  #ifndef __ASM_DIV64_H
15623  #define __ASM_DIV64_H
15625 -#include <asm-generic/div64.h>
15627 -#if BITS_PER_LONG == 64
15628 +#include <asm/bitsperlong.h>
15630 -#include <linux/types.h>
15631 +#if BITS_PER_LONG == 32
15633  /*
15634   * No traps on overflows for any of these...
15635   */
15637 -#define __div64_32(n, base)                                            \
15638 -({                                                                     \
15639 +#define do_div64_32(res, high, low, base) ({                           \
15640         unsigned long __cf, __tmp, __tmp2, __i;                         \
15641         unsigned long __quot32, __mod32;                                \
15642 -       unsigned long __high, __low;                                    \
15643 -       unsigned long long __n;                                         \
15644                                                                         \
15645 -       __high = *__n >> 32;                                            \
15646 -       __low = __n;                                                    \
15647         __asm__(                                                        \
15648         "       .set    push                                    \n"     \
15649         "       .set    noat                                    \n"     \
15650 @@ -51,18 +44,48 @@
15651         "       subu    %0, %0, %z6                             \n"     \
15652         "       addiu   %2, %2, 1                               \n"     \
15653         "3:                                                     \n"     \
15654 -       "       bnez    %4, 0b\n\t"                                     \
15655 -       "        srl    %5, %1, 0x1f\n\t"                               \
15656 +       "       bnez    %4, 0b                                  \n"     \
15657 +       "        srl    %5, %1, 0x1f                            \n"     \
15658         "       .set    pop"                                            \
15659         : "=&r" (__mod32), "=&r" (__tmp),                               \
15660           "=&r" (__quot32), "=&r" (__cf),                               \
15661           "=&r" (__i), "=&r" (__tmp2)                                   \
15662 -       : "Jr" (base), "0" (__high), "1" (__low));                      \
15663 +       : "Jr" (base), "0" (high), "1" (low));                          \
15664                                                                         \
15665 -       (__n) = __quot32;                                               \
15666 +       (res) = __quot32;                                               \
15667         __mod32;                                                        \
15668  })
15670 -#endif /* BITS_PER_LONG == 64 */
15671 +#define __div64_32(n, base) ({                                         \
15672 +       unsigned long __upper, __low, __high, __radix;                  \
15673 +       unsigned long long __quot;                                      \
15674 +       unsigned long long __div;                                       \
15675 +       unsigned long __mod;                                            \
15676 +                                                                       \
15677 +       __div = (*n);                                                   \
15678 +       __radix = (base);                                               \
15679 +                                                                       \
15680 +       __high = __div >> 32;                                           \
15681 +       __low = __div;                                                  \
15682 +                                                                       \
15683 +       if (__high < __radix) {                                         \
15684 +               __upper = __high;                                       \
15685 +               __high = 0;                                             \
15686 +       } else {                                                        \
15687 +               __upper = __high % __radix;                             \
15688 +               __high /= __radix;                                      \
15689 +       }                                                               \
15690 +                                                                       \
15691 +       __mod = do_div64_32(__low, __upper, __low, __radix);            \
15692 +                                                                       \
15693 +       __quot = __high;                                                \
15694 +       __quot = __quot << 32 | __low;                                  \
15695 +       (*n) = __quot;                                                  \
15696 +       __mod;                                                          \
15699 +#endif /* BITS_PER_LONG == 32 */
15701 +#include <asm-generic/div64.h>
15703  #endif /* __ASM_DIV64_H */
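
[Note on the div64 rework above] The rewritten __div64_32() splits a 64-bit dividend into a high-word division plus one division of (remainder:low), where the asm helper do_div64_32() produces the low 32 bits of the quotient. A standalone C sketch of the same quotient reconstruction, with the asm helper replaced by a plain division for illustration (all names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Same splitting as __div64_32(): the quotient goes back into *n and the
 * 32-bit remainder is returned. */
static uint32_t div64_32_demo(uint64_t *n, uint32_t base)
{
        uint32_t high = (uint32_t)(*n >> 32), low = (uint32_t)*n;
        uint32_t upper, quot_lo, mod;
        uint64_t rest;

        if (high < base) {              /* high word divides to zero */
                upper = high;
                high = 0;
        } else {
                upper = high % base;    /* carry into the second division */
                high = high / base;
        }

        /* stands in for the do_div64_32() asm: (upper:low) / base;
         * upper < base guarantees this quotient fits in 32 bits */
        rest = ((uint64_t)upper << 32) | low;
        quot_lo = (uint32_t)(rest / base);
        mod = (uint32_t)(rest % base);

        *n = ((uint64_t)high << 32) | quot_lo;
        return mod;
}

int main(void)
{
        uint64_t n = 0x0123456789abcdefULL;
        uint32_t r = div64_32_demo(&n, 1000000007u);

        printf("quot=%llu rem=%u\n", (unsigned long long)n, r);
        return 0;
}
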
15704 diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
15705 index 2203e2d0ae2a..44a45f3fa4b0 100644
15706 --- a/arch/mips/include/asm/vdso/gettimeofday.h
15707 +++ b/arch/mips/include/asm/vdso/gettimeofday.h
15708 @@ -20,6 +20,12 @@
15710  #define VDSO_HAS_CLOCK_GETRES          1
15712 +#if MIPS_ISA_REV < 6
15713 +#define VDSO_SYSCALL_CLOBBERS "hi", "lo",
15714 +#else
15715 +#define VDSO_SYSCALL_CLOBBERS
15716 +#endif
15718  static __always_inline long gettimeofday_fallback(
15719                                 struct __kernel_old_timeval *_tv,
15720                                 struct timezone *_tz)
15721 @@ -35,7 +41,9 @@ static __always_inline long gettimeofday_fallback(
15722         : "=r" (ret), "=r" (error)
15723         : "r" (tv), "r" (tz), "r" (nr)
15724         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15725 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15726 +         "$14", "$15", "$24", "$25",
15727 +         VDSO_SYSCALL_CLOBBERS
15728 +         "memory");
15730         return error ? -ret : ret;
15732 @@ -59,7 +67,9 @@ static __always_inline long clock_gettime_fallback(
15733         : "=r" (ret), "=r" (error)
15734         : "r" (clkid), "r" (ts), "r" (nr)
15735         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15736 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15737 +         "$14", "$15", "$24", "$25",
15738 +         VDSO_SYSCALL_CLOBBERS
15739 +         "memory");
15741         return error ? -ret : ret;
15743 @@ -83,7 +93,9 @@ static __always_inline int clock_getres_fallback(
15744         : "=r" (ret), "=r" (error)
15745         : "r" (clkid), "r" (ts), "r" (nr)
15746         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15747 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15748 +         "$14", "$15", "$24", "$25",
15749 +         VDSO_SYSCALL_CLOBBERS
15750 +         "memory");
15752         return error ? -ret : ret;
15754 @@ -105,7 +117,9 @@ static __always_inline long clock_gettime32_fallback(
15755         : "=r" (ret), "=r" (error)
15756         : "r" (clkid), "r" (ts), "r" (nr)
15757         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15758 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15759 +         "$14", "$15", "$24", "$25",
15760 +         VDSO_SYSCALL_CLOBBERS
15761 +         "memory");
15763         return error ? -ret : ret;
15765 @@ -125,7 +139,9 @@ static __always_inline int clock_getres32_fallback(
15766         : "=r" (ret), "=r" (error)
15767         : "r" (clkid), "r" (ts), "r" (nr)
15768         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15769 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15770 +         "$14", "$15", "$24", "$25",
15771 +         VDSO_SYSCALL_CLOBBERS
15772 +         "memory");
15774         return error ? -ret : ret;
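
[Note on VDSO_SYSCALL_CLOBBERS above] The trick works because a macro that expands to either nothing or to string literals each followed by a trailing comma can be spliced into the middle of an asm clobber list, so "hi"/"lo" are only named on ISA revisions where those registers exist. A compilable toy version of the pattern (names invented for illustration):

#if defined(DEMO_HAS_HILO)              /* e.g. pre-R6 MIPS */
#define DEMO_EXTRA_CLOBBERS "memory",   /* note the trailing comma */
#else
#define DEMO_EXTRA_CLOBBERS
#endif

static inline long demo_syscallish(long x)
{
        /* clobber list expands to either : "memory", "cc" or just : "cc" */
        __asm__ __volatile__("" : "+r"(x) : : DEMO_EXTRA_CLOBBERS "cc");
        return x;
}
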
15776 diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
15777 index b71892064f27..0ef240adefb5 100644
15778 --- a/arch/mips/kernel/cpu-probe.c
15779 +++ b/arch/mips/kernel/cpu-probe.c
15780 @@ -1752,7 +1752,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15781                         set_isa(c, MIPS_CPU_ISA_M64R2);
15782                         break;
15783                 }
15784 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15785                 c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
15786                                 MIPS_ASE_LOONGSON_EXT2);
15787                 break;
15788 @@ -1782,7 +1781,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15789                  * register, we correct it here.
15790                  */
15791                 c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
15792 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15793                 c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
15794                         MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
15795                 c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
15796 @@ -1793,7 +1791,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15797                 set_elf_platform(cpu, "loongson3a");
15798                 set_isa(c, MIPS_CPU_ISA_M64R2);
15799                 decode_cpucfg(c);
15800 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15801                 break;
15802         default:
15803                 panic("Unknown Loongson Processor ID!");
15804 diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
15805 index cfa788bca871..1c664b23c0f9 100644
15806 --- a/arch/mips/loongson64/init.c
15807 +++ b/arch/mips/loongson64/init.c
15808 @@ -126,7 +126,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
15809                 return -ENOMEM;
15811         range->fwnode = fwnode;
15812 -       range->size = size;
15813 +       range->size = size = round_up(size, PAGE_SIZE);
15814         range->hw_start = hw_start;
15815         range->flags = LOGIC_PIO_CPU_MMIO;
15817 diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
15818 index 39052de915f3..3a909194284a 100644
15819 --- a/arch/mips/pci/pci-legacy.c
15820 +++ b/arch/mips/pci/pci-legacy.c
15821 @@ -166,8 +166,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
15822                         res = hose->mem_resource;
15823                         break;
15824                 }
15825 -               if (res != NULL)
15826 -                       of_pci_range_to_resource(&range, node, res);
15827 +               if (res != NULL) {
15828 +                       res->name = node->full_name;
15829 +                       res->flags = range.flags;
15830 +                       res->start = range.cpu_addr;
15831 +                       res->end = range.cpu_addr + range.size - 1;
15832 +                       res->parent = res->child = res->sibling = NULL;
15833 +               }
15834         }
15837 diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
15838 index d36061603752..e032932348d6 100644
15839 --- a/arch/mips/pci/pci-mt7620.c
15840 +++ b/arch/mips/pci/pci-mt7620.c
15841 @@ -30,6 +30,7 @@
15842  #define RALINK_GPIOMODE                        0x60
15844  #define PPLL_CFG1                      0x9c
15845 +#define PPLL_LD                                BIT(23)
15847  #define PPLL_DRV                       0xa0
15848  #define PDRV_SW_SET                    BIT(31)
15849 @@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev)
15850         rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
15851         mdelay(100);
15853 -       if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
15854 -               dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
15855 +       if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
15856 +               dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
15857                 reset_control_assert(rstpcie0);
15858                 rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
15859                 return -1;
15860 diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
15861 index e1f12e398136..f1538d2be89e 100644
15862 --- a/arch/mips/pci/pci-rt2880.c
15863 +++ b/arch/mips/pci/pci-rt2880.c
15864 @@ -180,7 +180,6 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
15866  int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15868 -       u16 cmd;
15869         int irq = -1;
15871         if (dev->bus->number != 0)
15872 @@ -188,8 +187,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15874         switch (PCI_SLOT(dev->devfn)) {
15875         case 0x00:
15876 -               rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
15877 -               (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
15878                 break;
15879         case 0x11:
15880                 irq = RT288X_CPU_IRQ_PCI;
15881 @@ -201,16 +198,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15882                 break;
15883         }
15885 -       pci_write_config_byte((struct pci_dev *) dev,
15886 -               PCI_CACHE_LINE_SIZE, 0x14);
15887 -       pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
15888 -       pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
15889 -       cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
15890 -               PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
15891 -               PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
15892 -       pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
15893 -       pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
15894 -                             dev->irq);
15895         return irq;
15898 @@ -251,6 +238,30 @@ static int rt288x_pci_probe(struct platform_device *pdev)
15900  int pcibios_plat_dev_init(struct pci_dev *dev)
15902 +       static bool slot0_init;
15904 +       /*
15905 +        * Nobody seems to initialize slot 0, but this platform requires it, so
15906 +        * do it once when some other slot is being enabled. The PCI subsystem
15907 +        * should configure other slots properly, so no need to do anything
15908 +        * special for those.
15909 +        */
15910 +       if (!slot0_init && dev->bus->number == 0) {
15911 +               u16 cmd;
15912 +               u32 bar0;
15914 +               slot0_init = true;
15916 +               pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
15917 +                                          0x08000000);
15918 +               pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
15919 +                                         &bar0);
15921 +               pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
15922 +               cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
15923 +               pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
15924 +       }
15926         return 0;
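
[Note on the rt2880 slot-0 init above] The hunk relies on the pci_bus_*_config accessors taking an explicit devfn, which lets slot 0 be programmed even though it never gets a struct pci_dev of its own. A minimal module-context sketch of that usage (the helper name is made up):

#include <linux/pci.h>

/* Hypothetical helper: enable bus mastering and memory decoding on the
 * device at slot 0 from any other device's init path on the same bus. */
static void demo_enable_slot0(struct pci_bus *bus)
{
        u16 cmd;

        pci_bus_read_config_word(bus, PCI_DEVFN(0, 0), PCI_COMMAND, &cmd);
        cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
        pci_bus_write_config_word(bus, PCI_DEVFN(0, 0), PCI_COMMAND, cmd);
}
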
15929 diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
15930 index 2416a9f91533..c6f9e7b9f7cb 100644
15931 --- a/arch/openrisc/kernel/setup.c
15932 +++ b/arch/openrisc/kernel/setup.c
15933 @@ -278,6 +278,8 @@ void calibrate_delay(void)
15934         pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
15935                 loops_per_jiffy / (500000 / HZ),
15936                 (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
15938 +       of_node_put(cpu);
15941  void __init setup_arch(char **cmdline_p)
15942 diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
15943 index bf9b2310fc93..f3fa02b8838a 100644
15944 --- a/arch/openrisc/mm/init.c
15945 +++ b/arch/openrisc/mm/init.c
15946 @@ -75,7 +75,6 @@ static void __init map_ram(void)
15947         /* These mark extents of read-only kernel pages...
15948          * ...from vmlinux.lds.S
15949          */
15950 -       struct memblock_region *region;
15952         v = PAGE_OFFSET;
15954 @@ -121,7 +120,7 @@ static void __init map_ram(void)
15955                 }
15957                 printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
15958 -                      region->base, region->base + region->size);
15959 +                      start, end);
15960         }
15963 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
15964 index 386ae12d8523..57c0ab71d51e 100644
15965 --- a/arch/powerpc/Kconfig
15966 +++ b/arch/powerpc/Kconfig
15967 @@ -224,7 +224,7 @@ config PPC
15968         select HAVE_LIVEPATCH                   if HAVE_DYNAMIC_FTRACE_WITH_REGS
15969         select HAVE_MOD_ARCH_SPECIFIC
15970         select HAVE_NMI                         if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
15971 -       select HAVE_HARDLOCKUP_DETECTOR_ARCH    if (PPC64 && PPC_BOOK3S)
15972 +       select HAVE_HARDLOCKUP_DETECTOR_ARCH    if PPC64 && PPC_BOOK3S && SMP
15973         select HAVE_OPTPROBES                   if PPC64
15974         select HAVE_PERF_EVENTS
15975         select HAVE_PERF_EVENTS_NMI             if PPC64
15976 diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
15977 index ae084357994e..6342f9da4545 100644
15978 --- a/arch/powerpc/Kconfig.debug
15979 +++ b/arch/powerpc/Kconfig.debug
15980 @@ -353,6 +353,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR
15981  config FAIL_IOMMU
15982         bool "Fault-injection capability for IOMMU"
15983         depends on FAULT_INJECTION
15984 +       depends on PCI || IBMVIO
15985         help
15986           Provide fault-injection capability for IOMMU. Each device can
15987           be selectively enabled via the fail_iommu property.
15988 diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
15989 index 058601efbc8a..b703330459b8 100644
15990 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
15991 +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
15992 @@ -7,6 +7,7 @@
15993  #ifndef __ASSEMBLY__
15994  #include <linux/mmdebug.h>
15995  #include <linux/bug.h>
15996 +#include <linux/sizes.h>
15997  #endif
15999  /*
16000 @@ -323,7 +324,8 @@ extern unsigned long pci_io_base;
16001  #define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
16002  #define IOREMAP_BASE   (PHB_IO_END)
16003  #define IOREMAP_START  (ioremap_bot)
16004 -#define IOREMAP_END    (KERN_IO_END)
16005 +#define IOREMAP_END    (KERN_IO_END - FIXADDR_SIZE)
16006 +#define FIXADDR_SIZE   SZ_32M
16008  /* Advertise special mapping type for AGP */
16009  #define HAVE_PAGE_AGP
16010 diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
16011 index c7813dc628fc..59cab558e2f0 100644
16012 --- a/arch/powerpc/include/asm/book3s/64/radix.h
16013 +++ b/arch/powerpc/include/asm/book3s/64/radix.h
16014 @@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
16015          * from ptesync, it should probably go into update_mmu_cache, rather
16016          * than set_pte_at (which is used to set ptes unrelated to faults).
16017          *
16018 -        * Spurious faults to vmalloc region are not tolerated, so there is
16019 -        * a ptesync in flush_cache_vmap.
16020 +        * Spurious faults from the kernel memory are not tolerated, so there
16021 +        * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
16022 +        * the pte update sequence from ISA Book III 6.10 Translation Table
16023 +        * Update Synchronization Requirements.
16024          */
16027 diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
16028 index 8d03c16a3663..947b5b9c4424 100644
16029 --- a/arch/powerpc/include/asm/fixmap.h
16030 +++ b/arch/powerpc/include/asm/fixmap.h
16031 @@ -23,12 +23,17 @@
16032  #include <asm/kmap_size.h>
16033  #endif
16035 +#ifdef CONFIG_PPC64
16036 +#define FIXADDR_TOP    (IOREMAP_END + FIXADDR_SIZE)
16037 +#else
16038 +#define FIXADDR_SIZE   0
16039  #ifdef CONFIG_KASAN
16040  #include <asm/kasan.h>
16041  #define FIXADDR_TOP    (KASAN_SHADOW_START - PAGE_SIZE)
16042  #else
16043  #define FIXADDR_TOP    ((unsigned long)(-PAGE_SIZE))
16044  #endif
16045 +#endif
16047  /*
16048   * Here we define all the compile-time 'special' virtual
16049 @@ -50,6 +55,7 @@
16050   */
16051  enum fixed_addresses {
16052         FIX_HOLE,
16053 +#ifdef CONFIG_PPC32
16054         /* reserve the top 128K for early debugging purposes */
16055         FIX_EARLY_DEBUG_TOP = FIX_HOLE,
16056         FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
16057 @@ -72,6 +78,7 @@ enum fixed_addresses {
16058                        FIX_IMMR_SIZE,
16059  #endif
16060         /* FIX_PCIE_MCFG, */
16061 +#endif /* CONFIG_PPC32 */
16062         __end_of_permanent_fixed_addresses,
16064  #define NR_FIX_BTMAPS          (SZ_256K / PAGE_SIZE)
16065 @@ -98,6 +105,8 @@ enum fixed_addresses {
16066  static inline void __set_fixmap(enum fixed_addresses idx,
16067                                 phys_addr_t phys, pgprot_t flags)
16069 +       BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
16071         if (__builtin_constant_p(idx))
16072                 BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
16073         else if (WARN_ON(idx >= __end_of_fixed_addresses))
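
[Note on the PPC64 fixmap carve-out above] The arithmetic, with an illustrative (not the real) ceiling value: taking FIXADDR_SIZE out of the ioremap range means FIXADDR_TOP = IOREMAP_END + FIXADDR_SIZE lands exactly at the old top of the I/O region:

#include <stdio.h>

int main(void)
{
        unsigned long kern_io_end  = 0xc008000000000000UL; /* hypothetical */
        unsigned long fixaddr_size = 32UL << 20;           /* SZ_32M */
        unsigned long ioremap_end  = kern_io_end - fixaddr_size;
        unsigned long fixaddr_top  = ioremap_end + fixaddr_size;

        printf("ioremap ends at %#lx, fixmap spans %#lx-%#lx\n",
               ioremap_end, ioremap_end, fixaddr_top);
        return 0;
}
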
16074 diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
16075 index ed6086d57b22..0c92b01a3c3c 100644
16076 --- a/arch/powerpc/include/asm/hvcall.h
16077 +++ b/arch/powerpc/include/asm/hvcall.h
16078 @@ -446,6 +446,9 @@
16079   */
16080  long plpar_hcall_norets(unsigned long opcode, ...);
16082 +/* Variant which does not do hcall tracing */
16083 +long plpar_hcall_norets_notrace(unsigned long opcode, ...);
16085  /**
16086   * plpar_hcall: - Make a pseries hypervisor call
16087   * @opcode: The hypervisor call to make.
16088 diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
16089 index e8d09a841373..31ed5356590a 100644
16090 --- a/arch/powerpc/include/asm/interrupt.h
16091 +++ b/arch/powerpc/include/asm/interrupt.h
16092 @@ -138,6 +138,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
16093         local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
16094         local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
16096 +       if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
16097 +                               regs->nip < (unsigned long)__end_interrupts) {
16098 +               // Kernel code running below __end_interrupts is
16099 +               // implicitly soft-masked.
16100 +               regs->softe = IRQS_ALL_DISABLED;
16101 +       }
16103         /* Don't do any per-CPU operations until interrupt state is fixed */
16104  #endif
16105         /* Allow DEC and PMI to be traced when they are soft-NMI */
16106 diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
16107 index 652ce85f9410..4bc45d3ed8b0 100644
16108 --- a/arch/powerpc/include/asm/mmu_context.h
16109 +++ b/arch/powerpc/include/asm/mmu_context.h
16110 @@ -263,7 +263,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
16111  static inline void arch_unmap(struct mm_struct *mm,
16112                               unsigned long start, unsigned long end)
16114 -       unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
16115 +       unsigned long vdso_base = (unsigned long)mm->context.vdso;
16117         if (start <= vdso_base && vdso_base < end)
16118                 mm->context.vdso = NULL;
16119 diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
16120 index 6cb8aa357191..57cd3892bfe0 100644
16121 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h
16122 +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
16123 @@ -6,6 +6,8 @@
16124   * the ppc64 non-hashed page table.
16125   */
16127 +#include <linux/sizes.h>
16129  #include <asm/nohash/64/pgtable-4k.h>
16130  #include <asm/barrier.h>
16131  #include <asm/asm-const.h>
16132 @@ -54,7 +56,8 @@
16133  #define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
16134  #define IOREMAP_BASE   (PHB_IO_END)
16135  #define IOREMAP_START  (ioremap_bot)
16136 -#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE)
16137 +#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
16138 +#define FIXADDR_SIZE   SZ_32M
16141  /*
16142 diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
16143 index 5d1726bb28e7..bcb7b5f917be 100644
16144 --- a/arch/powerpc/include/asm/paravirt.h
16145 +++ b/arch/powerpc/include/asm/paravirt.h
16146 @@ -28,19 +28,35 @@ static inline u32 yield_count_of(int cpu)
16147         return be32_to_cpu(yield_count);
16150 +/*
16151 + * Spinlock code confers and prods, so don't trace the hcalls because the
16152 + * tracing code takes spinlocks which can cause recursion deadlocks.
16153 + *
16154 + * These calls are made while the lock is not held: the lock slowpath yields if
16155 + * it can not acquire the lock, and unlock slow path might prod if a waiter has
16156 + * yielded. So this may not be a problem for simple spin locks because the
16157 + * tracing does not technically recurse on the lock, but we avoid it anyway.
16158 + *
16159 + * However the queued spin lock contended path is more strictly ordered: the
16160 + * H_CONFER hcall is made after the task has queued itself on the lock, so then
16161 + * recursing on that lock will cause the task to then queue up again behind the
16162 + * first instance (or worse: queued spinlocks use tricks that assume a context
16163 + * never waits on more than one spinlock, so such recursion may cause random
16164 + * corruption in the lock code).
16165 + */
16166  static inline void yield_to_preempted(int cpu, u32 yield_count)
16168 -       plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
16169 +       plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
16172  static inline void prod_cpu(int cpu)
16174 -       plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
16175 +       plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
16178  static inline void yield_to_any(void)
16180 -       plpar_hcall_norets(H_CONFER, -1, 0);
16181 +       plpar_hcall_norets_notrace(H_CONFER, -1, 0);
16183  #else
16184  static inline bool is_shared_processor(void)
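
[Note on plpar_hcall_norets_notrace() above] The switch follows the usual "notrace twin" pattern: one entry point wrapped in tracepoints for general use, and a raw one for callers that sit underneath the tracer's own locking. A self-contained toy model of the split (all names hypothetical):

#include <stdio.h>

static void trace_entry(unsigned long op) { printf("enter %lu\n", op); }
static void trace_exit(unsigned long op, long r) { printf("exit %lu -> %ld\n", op, r); }

/* raw variant: safe from inside the lock slowpath the tracer may take */
static long hcall_raw(unsigned long opcode)
{
        return 0;       /* placeholder for the real low-level call */
}

/* traced variant: the hooks may themselves take spinlocks, so lock
 * slowpaths must call hcall_raw() instead to avoid re-entering the lock */
static long hcall_traced(unsigned long opcode)
{
        long ret;

        trace_entry(opcode);
        ret = hcall_raw(opcode);
        trace_exit(opcode, ret);
        return ret;
}

int main(void)
{
        return (int)hcall_traced(42);
}
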
16185 diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
16186 index 1499e928ea6a..5d8d397e928a 100644
16187 --- a/arch/powerpc/include/asm/ptrace.h
16188 +++ b/arch/powerpc/include/asm/ptrace.h
16189 @@ -19,6 +19,7 @@
16190  #ifndef _ASM_POWERPC_PTRACE_H
16191  #define _ASM_POWERPC_PTRACE_H
16193 +#include <linux/err.h>
16194  #include <uapi/asm/ptrace.h>
16195  #include <asm/asm-const.h>
16197 @@ -152,25 +153,6 @@ extern unsigned long profile_pc(struct pt_regs *regs);
16198  long do_syscall_trace_enter(struct pt_regs *regs);
16199  void do_syscall_trace_leave(struct pt_regs *regs);
16201 -#define kernel_stack_pointer(regs) ((regs)->gpr[1])
16202 -static inline int is_syscall_success(struct pt_regs *regs)
16204 -       return !(regs->ccr & 0x10000000);
16207 -static inline long regs_return_value(struct pt_regs *regs)
16209 -       if (is_syscall_success(regs))
16210 -               return regs->gpr[3];
16211 -       else
16212 -               return -regs->gpr[3];
16215 -static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
16217 -       regs->gpr[3] = rc;
16220  #ifdef __powerpc64__
16221  #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
16222  #else
16223 @@ -252,6 +234,31 @@ static inline void set_trap_norestart(struct pt_regs *regs)
16224         regs->trap |= 0x10;
16227 +#define kernel_stack_pointer(regs) ((regs)->gpr[1])
16228 +static inline int is_syscall_success(struct pt_regs *regs)
16230 +       if (trap_is_scv(regs))
16231 +               return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
16232 +       else
16233 +               return !(regs->ccr & 0x10000000);
16236 +static inline long regs_return_value(struct pt_regs *regs)
16238 +       if (trap_is_scv(regs))
16239 +               return regs->gpr[3];
16241 +       if (is_syscall_success(regs))
16242 +               return regs->gpr[3];
16243 +       else
16244 +               return -regs->gpr[3];
16247 +static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
16249 +       regs->gpr[3] = rc;
16252  #define arch_has_single_step() (1)
16253  #define arch_has_block_step()  (true)
16254  #define ARCH_HAS_USER_SINGLE_STEP_REPORT
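
[Note on the scv-aware helpers above] They must now decode two return conventions: the old sc path flags failure via CR0.SO with a positive errno in r3, while scv returns a negative errno directly, so failure is any value in the top-4095 window that IS_ERR_VALUE() tests. A userspace illustration of the two decodings (the constant mirrors the kernel's MAX_ERRNO; the rest is invented):

#include <stdio.h>

#define DEMO_MAX_ERRNO 4095UL
#define demo_is_err_value(x) ((unsigned long)(x) >= -DEMO_MAX_ERRNO)

int main(void)
{
        long scv_ret = -13;     /* scv convention: -EACCES */
        long sc_r3   = 13;      /* sc convention: EACCES, CR0.SO set */
        int  sc_so   = 1;

        printf("scv: error=%d value=%ld\n", demo_is_err_value(scv_ret) != 0, scv_ret);
        printf("sc:  error=%d value=%ld\n", sc_so, sc_so ? -sc_r3 : sc_r3);
        return 0;
}
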
16255 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
16256 index da103e92c112..37d0b8c76a59 100644
16257 --- a/arch/powerpc/include/asm/reg.h
16258 +++ b/arch/powerpc/include/asm/reg.h
16259 @@ -441,6 +441,7 @@
16260  #define   LPCR_VRMA_LP1                ASM_CONST(0x0000800000000000)
16261  #define   LPCR_RMLS            0x1C000000      /* Implementation dependent RMO limit sel */
16262  #define   LPCR_RMLS_SH         26
16263 +#define   LPCR_HAIL            ASM_CONST(0x0000000004000000)   /* HV AIL (ISAv3.1) */
16264  #define   LPCR_ILE             ASM_CONST(0x0000000002000000)   /* !HV irqs set MSR:LE */
16265  #define   LPCR_AIL             ASM_CONST(0x0000000001800000)   /* Alternate interrupt location */
16266  #define   LPCR_AIL_0           ASM_CONST(0x0000000000000000)   /* MMU off exception offset 0x0 */
16267 diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
16268 index 7a13bc20f0a0..47081a9e13ca 100644
16269 --- a/arch/powerpc/include/asm/smp.h
16270 +++ b/arch/powerpc/include/asm/smp.h
16271 @@ -121,6 +121,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
16272         return per_cpu(cpu_sibling_map, cpu);
16275 +static inline struct cpumask *cpu_core_mask(int cpu)
16277 +       return per_cpu(cpu_core_map, cpu);
16280  static inline struct cpumask *cpu_l2_cache_mask(int cpu)
16282         return per_cpu(cpu_l2_cache_map, cpu);
16283 diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
16284 index fd1b518eed17..ba0f88f3a30d 100644
16285 --- a/arch/powerpc/include/asm/syscall.h
16286 +++ b/arch/powerpc/include/asm/syscall.h
16287 @@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
16288  static inline long syscall_get_error(struct task_struct *task,
16289                                      struct pt_regs *regs)
16291 -       /*
16292 -        * If the system call failed,
16293 -        * regs->gpr[3] contains a positive ERRORCODE.
16294 -        */
16295 -       return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
16296 +       if (trap_is_scv(regs)) {
16297 +               unsigned long error = regs->gpr[3];
16299 +               return IS_ERR_VALUE(error) ? error : 0;
16300 +       } else {
16301 +               /*
16302 +                * If the system call failed,
16303 +                * regs->gpr[3] contains a positive ERRORCODE.
16304 +                */
16305 +               return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
16306 +       }
16309  static inline long syscall_get_return_value(struct task_struct *task,
16310 @@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
16311                                             struct pt_regs *regs,
16312                                             int error, long val)
16314 -       /*
16315 -        * In the general case it's not obvious that we must deal with CCR
16316 -        * here, as the syscall exit path will also do that for us. However
16317 -        * there are some places, eg. the signal code, which check ccr to
16318 -        * decide if the value in r3 is actually an error.
16319 -        */
16320 -       if (error) {
16321 -               regs->ccr |= 0x10000000L;
16322 -               regs->gpr[3] = error;
16323 +       if (trap_is_scv(regs)) {
16324 +               regs->gpr[3] = (long) error ?: val;
16325         } else {
16326 -               regs->ccr &= ~0x10000000L;
16327 -               regs->gpr[3] = val;
16328 +               /*
16329 +                * In the general case it's not obvious that we must deal with
16330 +                * CCR here, as the syscall exit path will also do that for us.
16331 +                * However there are some places, eg. the signal code, which
16332 +                * check ccr to decide if the value in r3 is actually an error.
16333 +                */
16334 +               if (error) {
16335 +                       regs->ccr |= 0x10000000L;
16336 +                       regs->gpr[3] = error;
16337 +               } else {
16338 +                       regs->ccr &= ~0x10000000L;
16339 +                       regs->gpr[3] = val;
16340 +               }
16341         }
16344 diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
16345 index cc79856896a1..4ba87de32be0 100644
16346 --- a/arch/powerpc/include/uapi/asm/errno.h
16347 +++ b/arch/powerpc/include/uapi/asm/errno.h
16348 @@ -2,6 +2,7 @@
16349  #ifndef _ASM_POWERPC_ERRNO_H
16350  #define _ASM_POWERPC_ERRNO_H
16352 +#undef EDEADLOCK
16353  #include <asm-generic/errno.h>
16355  #undef EDEADLOCK
16356 diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
16357 index cd60bc1c8701..7040e430a124 100644
16358 --- a/arch/powerpc/kernel/eeh.c
16359 +++ b/arch/powerpc/kernel/eeh.c
16360 @@ -362,14 +362,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
16361         pa = pte_pfn(*ptep);
16363         /* On radix we can do hugepage mappings for io, so handle that */
16364 -       if (hugepage_shift) {
16365 -               pa <<= hugepage_shift;
16366 -               pa |= token & ((1ul << hugepage_shift) - 1);
16367 -       } else {
16368 -               pa <<= PAGE_SHIFT;
16369 -               pa |= token & (PAGE_SIZE - 1);
16370 -       }
16371 +       if (!hugepage_shift)
16372 +               hugepage_shift = PAGE_SHIFT;
16374 +       pa <<= PAGE_SHIFT;
16375 +       pa |= token & ((1ul << hugepage_shift) - 1);
16376         return pa;
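
[Note on the eeh_token_to_phys() math above] Worked numbers (values invented): whether or not the mapping is huge, the pfn is shifted by PAGE_SHIFT; only the offset mask kept from the token widens to the mapping size.

#include <stdio.h>

int main(void)
{
        unsigned long page_shift = 12, hugepage_shift = 21; /* 4K page, 2M hugepage */
        unsigned long pfn = 0x12200;    /* first pfn of a 2M-aligned mapping */
        unsigned long token = 0xc000000000123456UL;

        unsigned long pa = (pfn << page_shift) |
                           (token & ((1UL << hugepage_shift) - 1));

        printf("pa = %#lx\n", pa);      /* 0x12200000 | 0x123456 */
        return 0;
}
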
16379 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
16380 index 8482739d42f3..eddf362caedc 100644
16381 --- a/arch/powerpc/kernel/fadump.c
16382 +++ b/arch/powerpc/kernel/fadump.c
16383 @@ -292,7 +292,7 @@ static void fadump_show_config(void)
16384   * that is required for a kernel to boot successfully.
16385   *
16386   */
16387 -static inline u64 fadump_calculate_reserve_size(void)
16388 +static __init u64 fadump_calculate_reserve_size(void)
16390         u64 base, size, bootmem_min;
16391         int ret;
16392 diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
16393 index 5d4706c14572..cf8ca08295bf 100644
16394 --- a/arch/powerpc/kernel/head_32.h
16395 +++ b/arch/powerpc/kernel/head_32.h
16396 @@ -261,11 +261,7 @@
16397         lis     r1, emergency_ctx@ha
16398  #endif
16399         lwz     r1, emergency_ctx@l(r1)
16400 -       cmpwi   cr1, r1, 0
16401 -       bne     cr1, 1f
16402 -       lis     r1, init_thread_union@ha
16403 -       addi    r1, r1, init_thread_union@l
16404 -1:     addi    r1, r1, THREAD_SIZE - INT_FRAME_SIZE
16405 +       addi    r1, r1, THREAD_SIZE - INT_FRAME_SIZE
16406         EXCEPTION_PROLOG_2
16407         SAVE_NVGPRS(r11)
16408         addi    r3, r1, STACK_FRAME_OVERHEAD
16409 diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
16410 index c475a229a42a..352346e14a08 100644
16411 --- a/arch/powerpc/kernel/interrupt.c
16412 +++ b/arch/powerpc/kernel/interrupt.c
16413 @@ -34,11 +34,11 @@ notrace long system_call_exception(long r3, long r4, long r5,
16414         if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
16415                 BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
16417 +       trace_hardirqs_off(); /* finish reconciling */
16419         CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
16420         user_exit_irqoff();
16422 -       trace_hardirqs_off(); /* finish reconciling */
16424         if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
16425                 BUG_ON(!(regs->msr & MSR_RI));
16426         BUG_ON(!(regs->msr & MSR_PR));
16427 diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
16428 index c00214a4355c..4023f91defa6 100644
16429 --- a/arch/powerpc/kernel/iommu.c
16430 +++ b/arch/powerpc/kernel/iommu.c
16431 @@ -1096,7 +1096,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
16433         spin_lock_irqsave(&tbl->large_pool.lock, flags);
16434         for (i = 0; i < tbl->nr_pools; i++)
16435 -               spin_lock(&tbl->pools[i].lock);
16436 +               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
16438         iommu_table_release_pages(tbl);
16440 @@ -1124,7 +1124,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
16442         spin_lock_irqsave(&tbl->large_pool.lock, flags);
16443         for (i = 0; i < tbl->nr_pools; i++)
16444 -               spin_lock(&tbl->pools[i].lock);
16445 +               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
16447         memset(tbl->it_map, 0, sz);
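
[Note on spin_lock_nest_lock() above] It is the lockdep annotation for taking many locks of one class under a serialising outer lock; without it, lockdep would flag the second pool lock as a possible recursive deadlock. A module-context sketch of the pattern (structure and names are hypothetical):

#include <linux/spinlock.h>

#define DEMO_NR_POOLS 4

struct demo_table {
        spinlock_t big_lock;
        spinlock_t pool_lock[DEMO_NR_POOLS];
};

static void demo_freeze_all(struct demo_table *t)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&t->big_lock, flags);
        for (i = 0; i < DEMO_NR_POOLS; i++)
                /* same lock class, but provably serialised by big_lock */
                spin_lock_nest_lock(&t->pool_lock[i], &t->big_lock);

        /* ... all pools quiesced here ... */

        for (i = DEMO_NR_POOLS - 1; i >= 0; i--)
                spin_unlock(&t->pool_lock[i]);
        spin_unlock_irqrestore(&t->big_lock, flags);
}
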
16449 diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
16450 index 9a4797d1d40d..a8b2d6bfc1ca 100644
16451 --- a/arch/powerpc/kernel/prom.c
16452 +++ b/arch/powerpc/kernel/prom.c
16453 @@ -267,7 +267,7 @@ static struct feature_property {
16454  };
16456  #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
16457 -static inline void identical_pvr_fixup(unsigned long node)
16458 +static __init void identical_pvr_fixup(unsigned long node)
16460         unsigned int pvr;
16461         const char *model = of_get_flat_dt_prop(node, "model", NULL);
16462 diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
16463 index 8ba49a6bf515..d7c1f92152af 100644
16464 --- a/arch/powerpc/kernel/setup_32.c
16465 +++ b/arch/powerpc/kernel/setup_32.c
16466 @@ -164,7 +164,7 @@ void __init irqstack_early_init(void)
16469  #ifdef CONFIG_VMAP_STACK
16470 -void *emergency_ctx[NR_CPUS] __ro_after_init;
16471 +void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};
16473  void __init emergency_stack_init(void)
16475 diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
16476 index 560ed8b975e7..c914fe8a2c67 100644
16477 --- a/arch/powerpc/kernel/setup_64.c
16478 +++ b/arch/powerpc/kernel/setup_64.c
16479 @@ -232,10 +232,23 @@ static void cpu_ready_for_interrupts(void)
16480          * If we are not in hypervisor mode the job is done once for
16481          * the whole partition in configure_exceptions().
16482          */
16483 -       if (cpu_has_feature(CPU_FTR_HVMODE) &&
16484 -           cpu_has_feature(CPU_FTR_ARCH_207S)) {
16485 +       if (cpu_has_feature(CPU_FTR_HVMODE)) {
16486                 unsigned long lpcr = mfspr(SPRN_LPCR);
16487 -               mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
16488 +               unsigned long new_lpcr = lpcr;
16490 +               if (cpu_has_feature(CPU_FTR_ARCH_31)) {
16491 +                       /* P10 DD1 does not have HAIL */
16492 +                       if (pvr_version_is(PVR_POWER10) &&
16493 +                                       (mfspr(SPRN_PVR) & 0xf00) == 0x100)
16494 +                               new_lpcr |= LPCR_AIL_3;
16495 +                       else
16496 +                               new_lpcr |= LPCR_HAIL;
16497 +               } else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
16498 +                       new_lpcr |= LPCR_AIL_3;
16499 +               }
16501 +               if (new_lpcr != lpcr)
16502 +                       mtspr(SPRN_LPCR, new_lpcr);
16503         }
16505         /*
16506 @@ -356,11 +369,11 @@ void __init early_setup(unsigned long dt_ptr)
16507         apply_feature_fixups();
16508         setup_feature_keys();
16510 -       early_ioremap_setup();
16512         /* Initialize the hash table or TLB handling */
16513         early_init_mmu();
16515 +       early_ioremap_setup();
16517         /*
16518          * After firmware and early platform setup code has set things up,
16519          * we note the SPR values for configurable control/performance
16520 diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
16521 index 5a4d59a1070d..c2473e20f5f5 100644
16522 --- a/arch/powerpc/kernel/smp.c
16523 +++ b/arch/powerpc/kernel/smp.c
16524 @@ -1057,17 +1057,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
16525                                 local_memory_node(numa_cpu_lookup_table[cpu]));
16526                 }
16527  #endif
16528 -               /*
16529 -                * cpu_core_map is now more updated and exists only since
16530 -                * its been exported for long. It only will have a snapshot
16531 -                * of cpu_cpu_mask.
16532 -                */
16533 -               cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
16534         }
16536         /* Init the cpumasks so the boot CPU is related to itself */
16537         cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
16538         cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
16539 +       cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
16541         if (has_coregroup_support())
16542                 cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
16543 @@ -1408,6 +1403,9 @@ static void remove_cpu_from_masks(int cpu)
16544                         set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
16545         }
16547 +       for_each_cpu(i, cpu_core_mask(cpu))
16548 +               set_cpus_unrelated(cpu, i, cpu_core_mask);
16550         if (has_coregroup_support()) {
16551                 for_each_cpu(i, cpu_coregroup_mask(cpu))
16552                         set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
16553 @@ -1468,8 +1466,11 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
16555  static void add_cpu_to_masks(int cpu)
16557 +       struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
16558         int first_thread = cpu_first_thread_sibling(cpu);
16559 +       int chip_id = cpu_to_chip_id(cpu);
16560         cpumask_var_t mask;
16561 +       bool ret;
16562         int i;
16564         /*
16565 @@ -1485,12 +1486,36 @@ static void add_cpu_to_masks(int cpu)
16566         add_cpu_to_smallcore_masks(cpu);
16568         /* In CPU-hotplug path, hence use GFP_ATOMIC */
16569 -       alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
16570 +       ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
16571         update_mask_by_l2(cpu, &mask);
16573         if (has_coregroup_support())
16574                 update_coregroup_mask(cpu, &mask);
16576 +       if (chip_id == -1 || !ret) {
16577 +               cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
16578 +               goto out;
16579 +       }
16581 +       if (shared_caches)
16582 +               submask_fn = cpu_l2_cache_mask;
16584 +       /* Update core_mask with all the CPUs that are part of submask */
16585 +       or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
16587 +       /* Skip all CPUs already part of current CPU core mask */
16588 +       cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
16590 +       for_each_cpu(i, mask) {
16591 +               if (chip_id == cpu_to_chip_id(i)) {
16592 +                       or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
16593 +                       cpumask_andnot(mask, mask, submask_fn(i));
16594 +               } else {
16595 +                       cpumask_andnot(mask, mask, cpu_core_mask(i));
16596 +               }
16597 +       }
16599 +out:
16600         free_cpumask_var(mask);
16603 @@ -1521,6 +1546,9 @@ void start_secondary(void *unused)
16605         vdso_getcpu_init();
16606  #endif
16607 +       set_numa_node(numa_cpu_lookup_table[cpu]);
16608 +       set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
16610         /* Update topology CPU masks */
16611         add_cpu_to_masks(cpu);
16613 @@ -1539,9 +1567,6 @@ void start_secondary(void *unused)
16614                         shared_caches = true;
16615         }
16617 -       set_numa_node(numa_cpu_lookup_table[cpu]);
16618 -       set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
16620         smp_wmb();
16621         notify_cpu_starting(cpu);
16622         set_cpu_online(cpu, true);
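
[Note on the add_cpu_to_masks() rework above] The loop builds the core mask chip by chip: CPUs whose group matches are unioned in wholesale and every examined group is dropped from the work set, so no CPU is visited twice. A much-simplified single-bitmask model of that walk (userspace sketch, invented data; the real code unions whole submasks rather than single CPUs):

#include <stdio.h>

#define DEMO_NCPUS 8

static const int chip_of[DEMO_NCPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

int main(void)
{
        int cpu = 2;                            /* CPU joining the masks */
        unsigned int core_mask = 1u << cpu;
        unsigned int todo = 0xffu & ~core_mask; /* online CPUs left to check */
        int i;

        for (i = 0; i < DEMO_NCPUS; i++) {
                if (!(todo & (1u << i)))
                        continue;
                if (chip_of[i] == chip_of[cpu])
                        core_mask |= 1u << i;   /* same chip: related */
                todo &= ~(1u << i);             /* never revisit */
        }

        printf("core mask for cpu %d: %#x\n", cpu, core_mask); /* 0x0f */
        return 0;
}
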
16623 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
16624 index e839a906fdf2..b14907209822 100644
16625 --- a/arch/powerpc/kernel/vdso.c
16626 +++ b/arch/powerpc/kernel/vdso.c
16627 @@ -55,10 +55,10 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
16629         unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
16631 -       if (new_size != text_size + PAGE_SIZE)
16632 +       if (new_size != text_size)
16633                 return -EINVAL;
16635 -       current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE;
16636 +       current->mm->context.vdso = (void __user *)new_vma->vm_start;
16638         return 0;
16640 @@ -73,6 +73,10 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str
16641         return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
16644 +static struct vm_special_mapping vvar_spec __ro_after_init = {
16645 +       .name = "[vvar]",
16648  static struct vm_special_mapping vdso32_spec __ro_after_init = {
16649         .name = "[vdso]",
16650         .mremap = vdso32_mremap,
16651 @@ -89,11 +93,11 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = {
16652   */
16653  static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
16655 -       struct mm_struct *mm = current->mm;
16656 +       unsigned long vdso_size, vdso_base, mappings_size;
16657         struct vm_special_mapping *vdso_spec;
16658 +       unsigned long vvar_size = PAGE_SIZE;
16659 +       struct mm_struct *mm = current->mm;
16660         struct vm_area_struct *vma;
16661 -       unsigned long vdso_size;
16662 -       unsigned long vdso_base;
16664         if (is_32bit_task()) {
16665                 vdso_spec = &vdso32_spec;
16666 @@ -110,8 +114,8 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16667                 vdso_base = 0;
16668         }
16670 -       /* Add a page to the vdso size for the data page */
16671 -       vdso_size += PAGE_SIZE;
16672 +       mappings_size = vdso_size + vvar_size;
16673 +       mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
16675         /*
16676          * pick a base address for the vDSO in process space. We try to put it
16677 @@ -119,9 +123,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16678          * and end up putting it elsewhere.
16679          * Add enough to the size so that the result can be aligned.
16680          */
16681 -       vdso_base = get_unmapped_area(NULL, vdso_base,
16682 -                                     vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
16683 -                                     0, 0);
16684 +       vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
16685         if (IS_ERR_VALUE(vdso_base))
16686                 return vdso_base;
16688 @@ -133,7 +135,13 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16689          * install_special_mapping or the perf counter mmap tracking code
16690          * will fail to recognise it as a vDSO.
16691          */
16692 -       mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE;
16693 +       mm->context.vdso = (void __user *)vdso_base + vvar_size;
16695 +       vma = _install_special_mapping(mm, vdso_base, vvar_size,
16696 +                                      VM_READ | VM_MAYREAD | VM_IO |
16697 +                                      VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
16698 +       if (IS_ERR(vma))
16699 +               return PTR_ERR(vma);
16701         /*
16702          * our vma flags don't have VM_WRITE so by default, the process isn't
16703 @@ -145,9 +153,12 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16704          * It's fine to use that for setting breakpoints in the vDSO code
16705          * pages though.
16706          */
16707 -       vma = _install_special_mapping(mm, vdso_base, vdso_size,
16708 +       vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
16709                                        VM_READ | VM_EXEC | VM_MAYREAD |
16710                                        VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
16711 +       if (IS_ERR(vma))
16712 +               do_munmap(mm, vdso_base, vvar_size, NULL);
16714         return PTR_ERR_OR_ZERO(vma);
16717 @@ -249,11 +260,22 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
16718         if (!pagelist)
16719                 panic("%s: Cannot allocate page list for VDSO", __func__);
16721 -       pagelist[0] = virt_to_page(vdso_data);
16723         for (i = 0; i < pages; i++)
16724 -               pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE);
16725 +               pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
16727 +       return pagelist;
16730 +static struct page ** __init vvar_setup_pages(void)
16732 +       struct page **pagelist;
16734 +       /* .pages is NULL-terminated */
16735 +       pagelist = kcalloc(2, sizeof(struct page *), GFP_KERNEL);
16736 +       if (!pagelist)
16737 +               panic("%s: Cannot allocate page list for VVAR", __func__);
16739 +       pagelist[0] = virt_to_page(vdso_data);
16740         return pagelist;
16743 @@ -295,6 +317,8 @@ static int __init vdso_init(void)
16744         if (IS_ENABLED(CONFIG_PPC64))
16745                 vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
16747 +       vvar_spec.pages = vvar_setup_pages();
16749         smp_wmb();
16751         return 0;
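
[Note on the vvar/vdso split above] After the restructuring, the process layout has a one-page [vvar] data mapping installed at the chosen base and the vDSO text immediately above it, which is also where context.vdso now points, removing the old off-by-one-page bookkeeping. A small arithmetic sketch with made-up addresses:

#include <stdio.h>

int main(void)
{
        unsigned long page = 4096;
        unsigned long vdso_text_pages = 2;      /* hypothetical */
        unsigned long base = 0x7fff80000000UL;  /* as if from get_unmapped_area() */

        unsigned long vvar_start = base;
        unsigned long vdso_start = base + page; /* == mm->context.vdso */
        unsigned long vdso_end = vdso_start + vdso_text_pages * page;

        printf("[vvar] %#lx-%#lx\n", vvar_start, vdso_start);
        printf("[vdso] %#lx-%#lx\n", vdso_start, vdso_end);
        return 0;
}
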
16752 diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
16753 index 02b9e4d0dc40..a8a7cb71086b 100644
16754 --- a/arch/powerpc/kexec/file_load_64.c
16755 +++ b/arch/powerpc/kexec/file_load_64.c
16756 @@ -960,6 +960,93 @@ unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
16757         return fdt_size;
16760 +/**
16760 + * add_node_props - Reads node properties from device node structure and adds
16762 + *                  them to fdt.
16763 + * @fdt:            Flattened device tree of the kernel
16764 + * @node_offset:    offset of the node to add a property at
16765 + * @dn:             device node pointer
16766 + *
16767 + * Returns 0 on success, negative errno on error.
16768 + */
16769 +static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
16771 +       int ret = 0;
16772 +       struct property *pp;
16774 +       if (!dn)
16775 +               return -EINVAL;
16777 +       for_each_property_of_node(dn, pp) {
16778 +               ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
16779 +               if (ret < 0) {
16780 +                       pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
16781 +                       return ret;
16782 +               }
16783 +       }
16784 +       return ret;
16787 +/**
16788 + * update_cpus_node - Update cpus node of flattened device tree using of_root
16789 + *                    device node.
16790 + * @fdt:              Flattened device tree of the kernel.
16791 + *
16792 + * Returns 0 on success, negative errno on error.
16793 + */
16794 +static int update_cpus_node(void *fdt)
16796 +       struct device_node *cpus_node, *dn;
16797 +       int cpus_offset, cpus_subnode_offset, ret = 0;
16799 +       cpus_offset = fdt_path_offset(fdt, "/cpus");
16800 +       if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
16801 +               pr_err("Malformed device tree: error reading /cpus node: %s\n",
16802 +                      fdt_strerror(cpus_offset));
16803 +               return cpus_offset;
16804 +       }
16806 +       if (cpus_offset > 0) {
16807 +               ret = fdt_del_node(fdt, cpus_offset);
16808 +               if (ret < 0) {
16809 +                       pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
16810 +                       return -EINVAL;
16811 +               }
16812 +       }
16814 +       /* Add cpus node to fdt */
16815 +       cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
16816 +       if (cpus_offset < 0) {
16817 +               pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
16818 +               return -EINVAL;
16819 +       }
16821 +       /* Add cpus node properties */
16822 +       cpus_node = of_find_node_by_path("/cpus");
16823 +       ret = add_node_props(fdt, cpus_offset, cpus_node);
16824 +       of_node_put(cpus_node);
16825 +       if (ret < 0)
16826 +               return ret;
16828 +       /* Loop through all subnodes of cpus and add them to fdt */
16829 +       for_each_node_by_type(dn, "cpu") {
16830 +               cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
16831 +               if (cpus_subnode_offset < 0) {
16832 +                       pr_err("Unable to add %s subnode: %s\n", dn->full_name,
16833 +                              fdt_strerror(cpus_subnode_offset));
16834 +                       ret = cpus_subnode_offset;
16835 +                       goto out;
16836 +               }
16838 +               ret = add_node_props(fdt, cpus_subnode_offset, dn);
16839 +               if (ret < 0)
16840 +                       goto out;
16841 +       }
16842 +out:
16843 +       of_node_put(dn);
16844 +       return ret;
16847  /**
16848   * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel
16849   *                       being loaded.
16850 @@ -1020,6 +1107,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
16851                 }
16852         }
16854 +       /* Update cpus node information to account for hotplug CPUs. */
16855 +       ret = update_cpus_node(fdt);
16856 +       if (ret < 0)
16857 +               goto out;
16859         /* Update memory reserve map */
16860         ret = get_reserved_memory_ranges(&rmem);
16861         if (ret)
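
[Note on update_cpus_node() above] It leans on the standard libfdt idiom: locate a node with fdt_path_offset(), drop it with fdt_del_node(), re-create it with fdt_add_subnode(), then copy properties in with fdt_setprop(). A standalone sketch against an empty tree (error handling trimmed; values invented):

#include <libfdt.h>
#include <stdio.h>

int main(void)
{
        static char blob[4096];
        int off;

        fdt_create_empty_tree(blob, sizeof(blob));

        off = fdt_add_subnode(blob, 0, "cpus");         /* offset 0 is "/" */
        fdt_setprop_u32(blob, off, "#address-cells", 1);

        /* stale copy? delete and rebuild, as the kexec code does */
        fdt_del_node(blob, fdt_path_offset(blob, "/cpus"));
        off = fdt_add_subnode(blob, 0, "cpus");

        printf("fresh /cpus at offset %d\n", off);
        return 0;
}
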
16862 diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
16863 index e452158a18d7..c3e31fef0be1 100644
16864 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c
16865 +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
16866 @@ -8,6 +8,7 @@
16867   */
16869  #include <linux/kvm_host.h>
16870 +#include <linux/pkeys.h>
16872  #include <asm/kvm_ppc.h>
16873  #include <asm/kvm_book3s.h>
16874 @@ -133,6 +134,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
16875         else
16876                 kvmppc_mmu_flush_icache(pfn);
16878 +       rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
16879         rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
16881         /*
16882 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
16883 index 13bad6bf4c95..208a053c9adf 100644
16884 --- a/arch/powerpc/kvm/book3s_hv.c
16885 +++ b/arch/powerpc/kvm/book3s_hv.c
16886 @@ -3728,7 +3728,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
16887         vcpu->arch.dec_expires = dec + tb;
16888         vcpu->cpu = -1;
16889         vcpu->arch.thread_cpu = -1;
16890 +       /* Save guest CTRL register, set runlatch to 1 */
16891         vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
16892 +       if (!(vcpu->arch.ctrl & 1))
16893 +               mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
16895         vcpu->arch.iamr = mfspr(SPRN_IAMR);
16896         vcpu->arch.pspb = mfspr(SPRN_PSPB);
16897 diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
16898 index d4efc182662a..248f7c9e36fc 100644
16899 --- a/arch/powerpc/lib/Makefile
16900 +++ b/arch/powerpc/lib/Makefile
16901 @@ -5,6 +5,9 @@
16903  ccflags-$(CONFIG_PPC64)        := $(NO_MINIMAL_TOC)
16905 +CFLAGS_code-patching.o += -fno-stack-protector
16906 +CFLAGS_feature-fixups.o += -fno-stack-protector
16908  CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
16909  CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
16911 diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
16912 index 1fd31b4b0e13..0aefa6a4a259 100644
16913 --- a/arch/powerpc/lib/feature-fixups.c
16914 +++ b/arch/powerpc/lib/feature-fixups.c
16915 @@ -14,6 +14,7 @@
16916  #include <linux/string.h>
16917  #include <linux/init.h>
16918  #include <linux/sched/mm.h>
16919 +#include <linux/stop_machine.h>
16920  #include <asm/cputable.h>
16921  #include <asm/code-patching.h>
16922  #include <asm/page.h>
16923 @@ -227,11 +228,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
16924                                                            : "unknown");
16927 +static int __do_stf_barrier_fixups(void *data)
16929 +       enum stf_barrier_type *types = data;
16931 +       do_stf_entry_barrier_fixups(*types);
16932 +       do_stf_exit_barrier_fixups(*types);
16934 +       return 0;
16937  void do_stf_barrier_fixups(enum stf_barrier_type types)
16939 -       do_stf_entry_barrier_fixups(types);
16940 -       do_stf_exit_barrier_fixups(types);
16941 +       /*
16942 +        * The call to the fallback entry flush, and the fallback/sync-ori exit
16943 +        * flush cannot be safely patched in/out while other CPUs are executing
16944 +        * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
16945 +        * spin in the stop machine core with interrupts hard disabled.
16946 +        */
16947 +       stop_machine(__do_stf_barrier_fixups, &types, NULL);
16950  void do_uaccess_flush_fixups(enum l1d_flush_type types)
16951 @@ -284,8 +299,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
16952                                                 : "unknown");
16955 -void do_entry_flush_fixups(enum l1d_flush_type types)
16956 +static int __do_entry_flush_fixups(void *data)
16958 +       enum l1d_flush_type types = *(enum l1d_flush_type *)data;
16959         unsigned int instrs[3], *dest;
16960         long *start, *end;
16961         int i;
16962 @@ -354,6 +370,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
16963                                                         : "ori type" :
16964                 (types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
16965                                                 : "unknown");
16967 +       return 0;
16970 +void do_entry_flush_fixups(enum l1d_flush_type types)
16972 +       /*
16973 +        * The call to the fallback flush cannot be safely patched in/out while
16974 +        * other CPUs are executing it. So call __do_entry_flush_fixups() on one
16975 +        * CPU while all other CPUs spin in the stop machine core with interrupts
16976 +        * hard disabled.
16977 +        */
16978 +       stop_machine(__do_entry_flush_fixups, &types, NULL);
16981  void do_rfi_flush_fixups(enum l1d_flush_type types)
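Both fixups above follow the same shape: move the actual patching into a helper with the stop_machine() callback signature, then invoke it so every other CPU spins with interrupts hard-disabled while a single CPU rewrites the instructions. A minimal sketch of that pattern with invented names (not part of this patch):

#include <linux/stop_machine.h>

enum patch_type { PATCH_NONE, PATCH_FALLBACK };

/* Runs on exactly one CPU; all others are held in the stop-machine core. */
static int __patch_everything(void *data)
{
	enum patch_type *types = data;

	/* ...rewrite kernel text here; no other CPU can execute it now... */
	(void)*types;
	return 0;
}

static void patch_everything(enum patch_type types)
{
	/* NULL cpumask lets stop_machine() pick any CPU for the callback. */
	stop_machine(__patch_everything, &types, NULL);
}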
16982 diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
16983 index 567e0c6b3978..03819c259f0a 100644
16984 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c
16985 +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
16986 @@ -428,12 +428,14 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
16988  void hash__mark_rodata_ro(void)
16990 -       unsigned long start, end;
16991 +       unsigned long start, end, pp;
16993         start = (unsigned long)_stext;
16994         end = (unsigned long)__init_begin;
16996 -       WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
16997 +       pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);
16999 +       WARN_ON(!hash__change_memory_range(start, end, pp));
17002  void hash__mark_initmem_nx(void)
17003 diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
17004 index 581b20a2feaf..12de1906e97b 100644
17005 --- a/arch/powerpc/mm/book3s64/hash_utils.c
17006 +++ b/arch/powerpc/mm/book3s64/hash_utils.c
17007 @@ -338,7 +338,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
17008  int htab_remove_mapping(unsigned long vstart, unsigned long vend,
17009                       int psize, int ssize)
17011 -       unsigned long vaddr;
17012 +       unsigned long vaddr, time_limit;
17013         unsigned int step, shift;
17014         int rc;
17015         int ret = 0;
17016 @@ -351,8 +351,19 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
17018         /* Unmap the full range specified */
17019         vaddr = ALIGN_DOWN(vstart, step);
17020 +       time_limit = jiffies + HZ;
17022         for (;vaddr < vend; vaddr += step) {
17023                 rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
17025 +               /*
17026 +                * For a large number of mappings, introduce a cond_resched()
17027 +                * to prevent softlockup warnings.
17028 +                */
17029 +               if (time_after(jiffies, time_limit)) {
17030 +                       cond_resched();
17031 +                       time_limit = jiffies + HZ;
17032 +               }
17033                 if (rc == -ENOENT) {
17034                         ret = -ENOENT;
17035                         continue;
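The time_after()/cond_resched() pair added above is a standard throttle for long teardown loops: check the clock each iteration and yield roughly once per second so the soft-lockup watchdog stays quiet. A hedged, generic sketch of the idiom (the loop body here is invented):

#include <linux/jiffies.h>
#include <linux/sched.h>

static void remove_many_mappings(unsigned long nr)
{
	unsigned long i, time_limit = jiffies + HZ;

	for (i = 0; i < nr; i++) {
		/* ...one potentially slow unit of work per iteration... */

		/* Roughly once per second, give other tasks a chance to run. */
		if (time_after(jiffies, time_limit)) {
			cond_resched();
			time_limit = jiffies + HZ;
		}
	}
}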
17036 @@ -1545,10 +1556,10 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
17037         if (user_mode(regs) || (region_id == USER_REGION_ID))
17038                 access &= ~_PAGE_PRIVILEGED;
17040 -       if (regs->trap == 0x400)
17041 +       if (TRAP(regs) == 0x400)
17042                 access |= _PAGE_EXEC;
17044 -       err = hash_page_mm(mm, ea, access, regs->trap, flags);
17045 +       err = hash_page_mm(mm, ea, access, TRAP(regs), flags);
17046         if (unlikely(err < 0)) {
17047                 // failed to insert a hash PTE due to a hypervisor error
17048                 if (user_mode(regs)) {
17049 diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
17050 index 98f0b243c1ab..39d488a212a0 100644
17051 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
17052 +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
17053 @@ -108,7 +108,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
17055  set_the_pte:
17056         set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
17057 -       smp_wmb();
17058 +       asm volatile("ptesync": : :"memory");
17059         return 0;
17062 @@ -168,7 +168,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
17064  set_the_pte:
17065         set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
17066 -       smp_wmb();
17067 +       asm volatile("ptesync": : :"memory");
17068         return 0;
17071 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
17072 index 4e8ce6d85232..7a59a5c9aa5d 100644
17073 --- a/arch/powerpc/mm/mem.c
17074 +++ b/arch/powerpc/mm/mem.c
17075 @@ -54,7 +54,6 @@
17077  #include <mm/mmu_decl.h>
17079 -static DEFINE_MUTEX(linear_mapping_mutex);
17080  unsigned long long memory_limit;
17081  bool init_mem_is_free;
17083 @@ -72,6 +71,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
17084  EXPORT_SYMBOL(phys_mem_access_prot);
17086  #ifdef CONFIG_MEMORY_HOTPLUG
17087 +static DEFINE_MUTEX(linear_mapping_mutex);
17089  #ifdef CONFIG_NUMA
17090  int memory_add_physaddr_to_nid(u64 start)
17091 diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
17092 index e4f577da33d8..8b5eeb6fb2fb 100644
17093 --- a/arch/powerpc/perf/isa207-common.c
17094 +++ b/arch/powerpc/perf/isa207-common.c
17095 @@ -447,8 +447,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp,
17096          * EBB events are pinned & exclusive, so this should never actually
17097          * hit, but we leave it as a fallback in case.
17098          */
17099 -       mask  |= CNST_EBB_VAL(ebb);
17100 -       value |= CNST_EBB_MASK;
17101 +       mask  |= CNST_EBB_MASK;
17102 +       value |= CNST_EBB_VAL(ebb);
17104         *maskp = mask;
17105         *valp = value;
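The swap above matters because constraint words are consumed as a mask/value pair: the mask selects which bits of a candidate configuration must match, and the value carries the bits expected under that mask, roughly as in this hedged sketch (helper name invented):

#include <stdbool.h>

/* A candidate satisfies the constraint iff it matches value under mask. */
static bool constraint_matches(unsigned long candidate,
			       unsigned long mask, unsigned long value)
{
	return (candidate & mask) == value;
}

With the two accumulators transposed, the EBB bit pattern ended up in the wrong word, so the fallback path could never match correctly.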
17106 diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h
17107 index e45dafe818ed..93be7197d250 100644
17108 --- a/arch/powerpc/perf/power10-events-list.h
17109 +++ b/arch/powerpc/perf/power10-events-list.h
17110 @@ -75,5 +75,5 @@ EVENT(PM_RUN_INST_CMPL_ALT,                   0x00002);
17111   *     thresh end (TE)
17112   */
17114 -EVENT(MEM_LOADS,                               0x34340401e0);
17115 -EVENT(MEM_STORES,                              0x343c0401e0);
17116 +EVENT(MEM_LOADS,                               0x35340401e0);
17117 +EVENT(MEM_STORES,                              0x353c0401e0);
17118 diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
17119 index 11475c58ea43..afee8b1515a8 100644
17120 --- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
17121 +++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
17122 @@ -181,7 +181,7 @@ sram_code:
17123    udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
17124         mullw   r12, r12, r11
17125         mftb    r13     /* start */
17126 -       addi    r12, r13, r12 /* end */
17127 +       add     r12, r13, r12 /* end */
17128      1:
17129         mftb    r13     /* current */
17130         cmp     cr0, r13, r12
17131 diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
17132 index 019669eb21d2..4ab7c3ef5826 100644
17133 --- a/arch/powerpc/platforms/powernv/memtrace.c
17134 +++ b/arch/powerpc/platforms/powernv/memtrace.c
17135 @@ -88,8 +88,8 @@ static void memtrace_clear_range(unsigned long start_pfn,
17136          * Before we go ahead and use this range as a cache-inhibited range,
17137          * flush the cache.
17138          */
17139 -       flush_dcache_range_chunked(PFN_PHYS(start_pfn),
17140 -                                  PFN_PHYS(start_pfn + nr_pages),
17141 +       flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
17142 +                                  (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
17143                                    FLUSH_CHUNK_SIZE);
17146 diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
17147 index 12cbffd3c2e3..325f3b220f36 100644
17148 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
17149 +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
17150 @@ -47,9 +47,6 @@ static void rtas_stop_self(void)
17152         BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
17154 -       printk("cpu %u (hwid %u) Ready to die...\n",
17155 -              smp_processor_id(), hard_smp_processor_id());
17157         rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
17159         panic("Alas, I survived.\n");
17160 diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
17161 index 2136e42833af..8a2b8d64265b 100644
17162 --- a/arch/powerpc/platforms/pseries/hvCall.S
17163 +++ b/arch/powerpc/platforms/pseries/hvCall.S
17164 @@ -102,6 +102,16 @@ END_FTR_SECTION(0, 1);                                             \
17165  #define HCALL_BRANCH(LABEL)
17166  #endif
17168 +_GLOBAL_TOC(plpar_hcall_norets_notrace)
17169 +       HMT_MEDIUM
17171 +       mfcr    r0
17172 +       stw     r0,8(r1)
17173 +       HVSC                            /* invoke the hypervisor */
17174 +       lwz     r0,8(r1)
17175 +       mtcrf   0xff,r0
17176 +       blr                             /* return r3 = status */
17178  _GLOBAL_TOC(plpar_hcall_norets)
17179         HMT_MEDIUM
17181 diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
17182 index 9fc5217f0c8e..836cbbe0ecc5 100644
17183 --- a/arch/powerpc/platforms/pseries/iommu.c
17184 +++ b/arch/powerpc/platforms/pseries/iommu.c
17185 @@ -1229,7 +1229,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
17186         if (pmem_present) {
17187                 if (query.largest_available_block >=
17188                     (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
17189 -                       len = MAX_PHYSMEM_BITS - page_shift;
17190 +                       len = MAX_PHYSMEM_BITS;
17191                 else
17192                         dev_info(&dev->dev, "Skipping ibm,pmemory");
17193         }
17194 diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
17195 index 3805519a6469..d4aa6a46e1fa 100644
17196 --- a/arch/powerpc/platforms/pseries/lpar.c
17197 +++ b/arch/powerpc/platforms/pseries/lpar.c
17198 @@ -977,11 +977,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
17199         slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
17200         BUG_ON(slot == -1);
17202 -       flags = newpp & 7;
17203 +       flags = newpp & (HPTE_R_PP | HPTE_R_N);
17204         if (mmu_has_feature(MMU_FTR_KERNEL_RO))
17205                 /* Move pp0 into bit 8 (IBM 55) */
17206                 flags |= (newpp & HPTE_R_PP0) >> 55;
17208 +       flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);
17210         lpar_rc = plpar_pte_protect(flags, slot, 0);
17212         BUG_ON(lpar_rc != H_SUCCESS);
17213 @@ -1828,8 +1830,7 @@ void hcall_tracepoint_unregfunc(void)
17215  /*
17216   * Since the tracing code might execute hcalls, we need to guard against
17217 - * recursion. One example of this are spinlocks calling H_YIELD on
17218 - * shared processor partitions.
17219 + * recursion.
17220   */
17221  static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
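The per-CPU hcall_trace_depth counter kept above is the recursion guard the comment describes: the tracepoint machinery may itself issue hcalls, so tracing only proceeds when the current CPU's depth is zero. A simplified, hedged sketch of how such a guard is typically used (not the literal lpar.c code):

#include <linux/percpu.h>
#include <linux/irqflags.h>

static DEFINE_PER_CPU(unsigned int, trace_depth);

static void traced_hcall_entry(unsigned long opcode)
{
	unsigned int *depth;
	unsigned long flags;

	local_irq_save(flags);
	depth = this_cpu_ptr(&trace_depth);
	if (*depth)		/* re-entered from inside the tracer: bail out */
		goto out;

	(*depth)++;
	/* emit the tracepoint here; it may itself trigger hcalls */
	(*depth)--;
out:
	local_irq_restore(flags);
}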
17223 diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
17224 index f9ae17e8a0f4..a8f9140a24fa 100644
17225 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c
17226 +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
17227 @@ -50,6 +50,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
17228  int remove_phb_dynamic(struct pci_controller *phb)
17230         struct pci_bus *b = phb->bus;
17231 +       struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
17232         struct resource *res;
17233         int rc, i;
17235 @@ -76,7 +77,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
17236         /* Remove the PCI bus and unregister the bridge device from sysfs */
17237         phb->bus = NULL;
17238         pci_remove_bus(b);
17239 -       device_unregister(b->bridge);
17240 +       host_bridge->bus = NULL;
17241 +       device_unregister(&host_bridge->dev);
17243         /* Now release the IO resource */
17244         if (res->flags & IORESOURCE_IO)
17245 diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
17246 index 9cb4fc839fd5..429053d0402a 100644
17247 --- a/arch/powerpc/platforms/pseries/vio.c
17248 +++ b/arch/powerpc/platforms/pseries/vio.c
17249 @@ -1285,6 +1285,10 @@ static int vio_bus_remove(struct device *dev)
17250  int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
17251                           const char *mod_name)
17253 +       // vio_bus_type is only initialised for pseries
17254 +       if (!machine_is(pseries))
17255 +               return -ENODEV;
17257         pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
17259         /* fill in 'struct driver' fields */
17260 diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
17261 index 595310e056f4..31b657c37735 100644
17262 --- a/arch/powerpc/sysdev/xive/common.c
17263 +++ b/arch/powerpc/sysdev/xive/common.c
17264 @@ -253,17 +253,20 @@ notrace void xmon_xive_do_dump(int cpu)
17265         xmon_printf("\n");
17268 +static struct irq_data *xive_get_irq_data(u32 hw_irq)
17270 +       unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
17272 +       return irq ? irq_get_irq_data(irq) : NULL;
17275  int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
17277 -       struct irq_chip *chip = irq_data_get_irq_chip(d);
17278         int rc;
17279         u32 target;
17280         u8 prio;
17281         u32 lirq;
17283 -       if (!is_xive_irq(chip))
17284 -               return -EINVAL;
17286         rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
17287         if (rc) {
17288                 xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
17289 @@ -273,6 +276,9 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
17290         xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
17291                     hw_irq, target, prio, lirq);
17293 +       if (!d)
17294 +               d = xive_get_irq_data(hw_irq);
17296         if (d) {
17297                 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
17298                 u64 val = xive_esb_read(xd, XIVE_ESB_GET);
17299 @@ -1335,17 +1341,14 @@ static int xive_prepare_cpu(unsigned int cpu)
17301         xc = per_cpu(xive_cpu, cpu);
17302         if (!xc) {
17303 -               struct device_node *np;
17305                 xc = kzalloc_node(sizeof(struct xive_cpu),
17306                                   GFP_KERNEL, cpu_to_node(cpu));
17307                 if (!xc)
17308                         return -ENOMEM;
17309 -               np = of_get_cpu_node(cpu, NULL);
17310 -               if (np)
17311 -                       xc->chip_id = of_get_ibm_chip_id(np);
17312 -               of_node_put(np);
17313                 xc->hw_ipi = XIVE_BAD_IRQ;
17314 +               xc->chip_id = XIVE_INVALID_CHIP_ID;
17315 +               if (xive_ops->prepare_cpu)
17316 +                       xive_ops->prepare_cpu(cpu, xc);
17318                 per_cpu(xive_cpu, cpu) = xc;
17319         }
17320 @@ -1599,6 +1602,8 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
17321         u32 target;
17322         u8 prio;
17323         u32 lirq;
17324 +       struct xive_irq_data *xd;
17325 +       u64 val;
17327         if (!is_xive_irq(chip))
17328                 return;
17329 @@ -1612,17 +1617,14 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
17330         seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
17331                    hw_irq, target, prio, lirq);
17333 -       if (d) {
17334 -               struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
17335 -               u64 val = xive_esb_read(xd, XIVE_ESB_GET);
17337 -               seq_printf(m, "flags=%c%c%c PQ=%c%c",
17338 -                          xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
17339 -                          xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
17340 -                          xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
17341 -                          val & XIVE_ESB_VAL_P ? 'P' : '-',
17342 -                          val & XIVE_ESB_VAL_Q ? 'Q' : '-');
17343 -       }
17344 +       xd = irq_data_get_irq_handler_data(d);
17345 +       val = xive_esb_read(xd, XIVE_ESB_GET);
17346 +       seq_printf(m, "flags=%c%c%c PQ=%c%c",
17347 +                  xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
17348 +                  xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
17349 +                  xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
17350 +                  val & XIVE_ESB_VAL_P ? 'P' : '-',
17351 +                  val & XIVE_ESB_VAL_Q ? 'Q' : '-');
17352         seq_puts(m, "\n");
17355 diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
17356 index 05a800a3104e..57e3f1540435 100644
17357 --- a/arch/powerpc/sysdev/xive/native.c
17358 +++ b/arch/powerpc/sysdev/xive/native.c
17359 @@ -380,6 +380,11 @@ static void xive_native_update_pending(struct xive_cpu *xc)
17360         }
17363 +static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
17365 +       xc->chip_id = cpu_to_chip_id(cpu);
17368  static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
17370         s64 rc;
17371 @@ -462,6 +467,7 @@ static const struct xive_ops xive_native_ops = {
17372         .match                  = xive_native_match,
17373         .shutdown               = xive_native_shutdown,
17374         .update_pending         = xive_native_update_pending,
17375 +       .prepare_cpu            = xive_native_prepare_cpu,
17376         .setup_cpu              = xive_native_setup_cpu,
17377         .teardown_cpu           = xive_native_teardown_cpu,
17378         .sync_source            = xive_native_sync_source,
17379 diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
17380 index 9cf57c722faa..6478be19b4d3 100644
17381 --- a/arch/powerpc/sysdev/xive/xive-internal.h
17382 +++ b/arch/powerpc/sysdev/xive/xive-internal.h
17383 @@ -46,6 +46,7 @@ struct xive_ops {
17384                                   u32 *sw_irq);
17385         int     (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
17386         void    (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
17387 +       void    (*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc);
17388         void    (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
17389         void    (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
17390         bool    (*match)(struct device_node *np);
17391 diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
17392 index 4515a10c5d22..d9522fc35ca5 100644
17393 --- a/arch/riscv/Kconfig
17394 +++ b/arch/riscv/Kconfig
17395 @@ -227,7 +227,7 @@ config ARCH_RV64I
17396         bool "RV64I"
17397         select 64BIT
17398         select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
17399 -       select HAVE_DYNAMIC_FTRACE if MMU
17400 +       select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
17401         select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
17402         select HAVE_FTRACE_MCOUNT_RECORD
17403         select HAVE_FUNCTION_GRAPH_TRACER
17404 diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
17405 index 845002cc2e57..04dad3380041 100644
17406 --- a/arch/riscv/include/asm/ftrace.h
17407 +++ b/arch/riscv/include/asm/ftrace.h
17408 @@ -13,9 +13,19 @@
17409  #endif
17410  #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
17413 + * Clang prior to 13 had "mcount" instead of "_mcount":
17414 + * https://reviews.llvm.org/D98881
17415 + */
17416 +#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
17417 +#define MCOUNT_NAME _mcount
17418 +#else
17419 +#define MCOUNT_NAME mcount
17420 +#endif
17422  #define ARCH_SUPPORTS_FTRACE_OPS 1
17423  #ifndef __ASSEMBLY__
17424 -void _mcount(void);
17425 +void MCOUNT_NAME(void);
17426  static inline unsigned long ftrace_call_adjust(unsigned long addr)
17428         return addr;
17429 @@ -36,7 +46,7 @@ struct dyn_arch_ftrace {
17430   * both auipc and jalr at the same time.
17431   */
17433 -#define MCOUNT_ADDR            ((unsigned long)_mcount)
17434 +#define MCOUNT_ADDR            ((unsigned long)MCOUNT_NAME)
17435  #define JALR_SIGN_MASK         (0x00000800)
17436  #define JALR_OFFSET_MASK       (0x00000fff)
17437  #define AUIPC_OFFSET_MASK      (0xfffff000)
17438 diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
17439 index 8a5593ff9ff3..6d462681c9c0 100644
17440 --- a/arch/riscv/kernel/mcount.S
17441 +++ b/arch/riscv/kernel/mcount.S
17442 @@ -47,8 +47,8 @@
17444  ENTRY(ftrace_stub)
17445  #ifdef CONFIG_DYNAMIC_FTRACE
17446 -       .global _mcount
17447 -       .set    _mcount, ftrace_stub
17448 +       .global MCOUNT_NAME
17449 +       .set    MCOUNT_NAME, ftrace_stub
17450  #endif
17451         ret
17452  ENDPROC(ftrace_stub)
17453 @@ -78,7 +78,7 @@ ENDPROC(return_to_handler)
17454  #endif
17456  #ifndef CONFIG_DYNAMIC_FTRACE
17457 -ENTRY(_mcount)
17458 +ENTRY(MCOUNT_NAME)
17459         la      t4, ftrace_stub
17460  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
17461         la      t0, ftrace_graph_return
17462 @@ -124,6 +124,6 @@ do_trace:
17463         jalr    t5
17464         RESTORE_ABI_STATE
17465         ret
17466 -ENDPROC(_mcount)
17467 +ENDPROC(MCOUNT_NAME)
17468  #endif
17469 -EXPORT_SYMBOL(_mcount)
17470 +EXPORT_SYMBOL(MCOUNT_NAME)
17471 diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
17472 index 7e2c78e2ca6b..d71f7c49a721 100644
17473 --- a/arch/riscv/kernel/probes/kprobes.c
17474 +++ b/arch/riscv/kernel/probes/kprobes.c
17475 @@ -260,8 +260,10 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
17477                 if (kcb->kprobe_status == KPROBE_REENTER)
17478                         restore_previous_kprobe(kcb);
17479 -               else
17480 +               else {
17481 +                       kprobes_restore_local_irqflag(kcb, regs);
17482                         reset_current_kprobe();
17483 +               }
17485                 break;
17486         case KPROBE_HIT_ACTIVE:
17487 diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
17488 index ea028d9e0d24..d44567490d91 100644
17489 --- a/arch/riscv/kernel/smp.c
17490 +++ b/arch/riscv/kernel/smp.c
17491 @@ -54,7 +54,7 @@ int riscv_hartid_to_cpuid(int hartid)
17492                         return i;
17494         pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
17495 -       return i;
17496 +       return -ENOENT;
17499  void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
17500 diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
17501 index 71a315e73cbe..ca2b40dfd24b 100644
17502 --- a/arch/riscv/kernel/vdso/Makefile
17503 +++ b/arch/riscv/kernel/vdso/Makefile
17504 @@ -41,11 +41,10 @@ KASAN_SANITIZE := n
17505  $(obj)/vdso.o: $(obj)/vdso.so
17507  # link rule for the .so file, .lds has to be first
17508 -SYSCFLAGS_vdso.so.dbg = $(c_flags)
17509  $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
17510         $(call if_changed,vdsold)
17511 -SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
17512 -       -Wl,--build-id=sha1 -Wl,--hash-style=both
17513 +LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
17514 +       --build-id=sha1 --hash-style=both --eh-frame-hdr
17516  # We also create a special relocatable object that should mirror the symbol
17517  # table and layout of the linked DSO. With ld --just-symbols we can then
17518 @@ -60,13 +59,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
17520  # actual build commands
17521  # The DSO images are built using a special linker script
17522 -# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
17523  # Make sure only to export the intended __vdso_xxx symbol offsets.
17524  quiet_cmd_vdsold = VDSOLD  $@
17525 -      cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
17526 -                           -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
17527 -                   $(CROSS_COMPILE)objcopy \
17528 -                           $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
17529 +      cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \
17530 +                   $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
17531                     rm $@.tmp
17533  # Extracts symbol offsets from the VDSO, converting them into an assembly file
17534 diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
17535 index 7b947728d57e..56007c763902 100644
17536 --- a/arch/s390/crypto/arch_random.c
17537 +++ b/arch/s390/crypto/arch_random.c
17538 @@ -54,6 +54,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
17540  bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
17542 +       /* max hunk is ARCH_RNG_BUF_SIZE */
17543 +       if (nbytes > ARCH_RNG_BUF_SIZE)
17544 +               return false;
17546         /* lock rng buffer */
17547         if (!spin_trylock(&arch_rng_lock))
17548                 return false;
17549 diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
17550 index d9215c7106f0..8fc52679543d 100644
17551 --- a/arch/s390/include/asm/qdio.h
17552 +++ b/arch/s390/include/asm/qdio.h
17553 @@ -246,21 +246,8 @@ struct slsb {
17554         u8 val[QDIO_MAX_BUFFERS_PER_Q];
17555  } __attribute__ ((packed, aligned(256)));
17557 -/**
17558 - * struct qdio_outbuf_state - SBAL related asynchronous operation information
17559 - *   (for communication with upper layer programs)
17560 - *   (only required for use with completion queues)
17561 - * @user: pointer to upper layer program's state information related to SBAL
17562 - *        (stored in user1 data of QAOB)
17563 - */
17564 -struct qdio_outbuf_state {
17565 -       void *user;
17568 -#define CHSC_AC1_INITIATE_INPUTQ       0x80
17571  /* qdio adapter-characteristics-1 flag */
17572 +#define CHSC_AC1_INITIATE_INPUTQ       0x80
17573  #define AC1_SIGA_INPUT_NEEDED          0x40    /* process input queues */
17574  #define AC1_SIGA_OUTPUT_NEEDED         0x20    /* process output queues */
17575  #define AC1_SIGA_SYNC_NEEDED           0x10    /* ask hypervisor to sync */
17576 @@ -338,7 +325,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
17577   * @int_parm: interruption parameter
17578   * @input_sbal_addr_array:  per-queue array, each element points to 128 SBALs
17579   * @output_sbal_addr_array: per-queue array, each element points to 128 SBALs
17580 - * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
17581   */
17582  struct qdio_initialize {
17583         unsigned char q_format;
17584 @@ -357,7 +343,6 @@ struct qdio_initialize {
17585         unsigned long int_parm;
17586         struct qdio_buffer ***input_sbal_addr_array;
17587         struct qdio_buffer ***output_sbal_addr_array;
17588 -       struct qdio_outbuf_state *output_sbal_state_array;
17589  };
17591  #define QDIO_STATE_INACTIVE            0x00000002 /* after qdio_cleanup */
17592 @@ -378,9 +363,10 @@ extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
17593  extern int qdio_establish(struct ccw_device *cdev,
17594                           struct qdio_initialize *init_data);
17595  extern int qdio_activate(struct ccw_device *);
17596 +extern struct qaob *qdio_allocate_aob(void);
17597  extern void qdio_release_aob(struct qaob *);
17598 -extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
17599 -                  unsigned int);
17600 +extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
17601 +                  unsigned int bufnr, unsigned int count, struct qaob *aob);
17602  extern int qdio_start_irq(struct ccw_device *cdev);
17603  extern int qdio_stop_irq(struct ccw_device *cdev);
17604  extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
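After the prototype change above, completion-queue users allocate a QAOB per request and hand it to do_QDIO() directly, instead of pre-registering an output_sbal_state_array. A hedged sketch of a caller under the new signature (function name and error handling invented for illustration):

#include <asm/qdio.h>

static int send_one_buffer_async(struct ccw_device *cdev, int q_nr,
				 unsigned int bufnr)
{
	struct qaob *aob = qdio_allocate_aob();
	int rc;

	if (!aob)
		return -ENOMEM;

	/* One output buffer; completion is reported through the QAOB. */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, q_nr, bufnr, 1, aob);
	if (rc)
		qdio_release_aob(aob);
	return rc;
}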
17605 diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
17606 index a7eab7be4db0..5412efe328f8 100644
17607 --- a/arch/s390/kernel/dis.c
17608 +++ b/arch/s390/kernel/dis.c
17609 @@ -563,7 +563,7 @@ void show_code(struct pt_regs *regs)
17611  void print_fn_code(unsigned char *code, unsigned long len)
17613 -       char buffer[64], *ptr;
17614 +       char buffer[128], *ptr;
17615         int opsize, i;
17617         while (len) {
17618 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
17619 index 72134f9f6ff5..5aab59ad5688 100644
17620 --- a/arch/s390/kernel/setup.c
17621 +++ b/arch/s390/kernel/setup.c
17622 @@ -937,9 +937,9 @@ static int __init setup_hwcaps(void)
17623         if (MACHINE_HAS_VX) {
17624                 elf_hwcap |= HWCAP_S390_VXRS;
17625                 if (test_facility(134))
17626 -                       elf_hwcap |= HWCAP_S390_VXRS_EXT;
17627 -               if (test_facility(135))
17628                         elf_hwcap |= HWCAP_S390_VXRS_BCD;
17629 +               if (test_facility(135))
17630 +                       elf_hwcap |= HWCAP_S390_VXRS_EXT;
17631                 if (test_facility(148))
17632                         elf_hwcap |= HWCAP_S390_VXRS_EXT2;
17633                 if (test_facility(152))
17634 diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
17635 index 6d6b57059493..b9f85b2dc053 100644
17636 --- a/arch/s390/kvm/gaccess.c
17637 +++ b/arch/s390/kvm/gaccess.c
17638 @@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
17639   * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
17640   * @sg: pointer to the shadow guest address space structure
17641   * @saddr: faulting address in the shadow gmap
17642 - * @pgt: pointer to the page table address result
17643 + * @pgt: pointer to the beginning of the page table for the given address if
17644 + *      successful (return value 0), or to the first invalid DAT entry in
17645 + *      case of exceptions (return value > 0)
17646   * @fake: pgt references contiguous guest memory block, not a pgtable
17647   */
17648  static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17649 @@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17650                         rfte.val = ptr;
17651                         goto shadow_r2t;
17652                 }
17653 +               *pgt = ptr + vaddr.rfx * 8;
17654                 rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
17655                 if (rc)
17656                         return rc;
17657 @@ -1060,6 +1063,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17658                         rste.val = ptr;
17659                         goto shadow_r3t;
17660                 }
17661 +               *pgt = ptr + vaddr.rsx * 8;
17662                 rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
17663                 if (rc)
17664                         return rc;
17665 @@ -1087,6 +1091,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17666                         rtte.val = ptr;
17667                         goto shadow_sgt;
17668                 }
17669 +               *pgt = ptr + vaddr.rtx * 8;
17670                 rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
17671                 if (rc)
17672                         return rc;
17673 @@ -1123,6 +1128,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17674                         ste.val = ptr;
17675                         goto shadow_pgt;
17676                 }
17677 +               *pgt = ptr + vaddr.sx * 8;
17678                 rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
17679                 if (rc)
17680                         return rc;
17681 @@ -1157,6 +1163,8 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17682   * @vcpu: virtual cpu
17683   * @sg: pointer to the shadow guest address space structure
17684   * @saddr: faulting address in the shadow gmap
17685 + * @datptr: will contain the address of the faulting DAT table entry, or of
17686 + *         the valid leaf, plus some flags
17687   *
17688   * Returns: - 0 if the shadow fault was successfully resolved
17689   *         - > 0 (pgm exception code) on exceptions while faulting
17690 @@ -1165,11 +1173,11 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17691   *         - -ENOMEM if out of memory
17692   */
17693  int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
17694 -                         unsigned long saddr)
17695 +                         unsigned long saddr, unsigned long *datptr)
17697         union vaddress vaddr;
17698         union page_table_entry pte;
17699 -       unsigned long pgt;
17700 +       unsigned long pgt = 0;
17701         int dat_protection, fake;
17702         int rc;
17704 @@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
17705                 pte.val = pgt + vaddr.px * PAGE_SIZE;
17706                 goto shadow_page;
17707         }
17708 -       if (!rc)
17709 -               rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
17711 +       switch (rc) {
17712 +       case PGM_SEGMENT_TRANSLATION:
17713 +       case PGM_REGION_THIRD_TRANS:
17714 +       case PGM_REGION_SECOND_TRANS:
17715 +       case PGM_REGION_FIRST_TRANS:
17716 +               pgt |= PEI_NOT_PTE;
17717 +               break;
17718 +       case 0:
17719 +               pgt += vaddr.px * 8;
17720 +               rc = gmap_read_table(sg->parent, pgt, &pte.val);
17721 +       }
17722 +       if (datptr)
17723 +               *datptr = pgt | dat_protection * PEI_DAT_PROT;
17724         if (!rc && pte.i)
17725                 rc = PGM_PAGE_TRANSLATION;
17726         if (!rc && pte.z)
17727 diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
17728 index f4c51756c462..7c72a5e3449f 100644
17729 --- a/arch/s390/kvm/gaccess.h
17730 +++ b/arch/s390/kvm/gaccess.h
17731 @@ -18,17 +18,14 @@
17733  /**
17734   * kvm_s390_real_to_abs - convert guest real address to guest absolute address
17735 - * @vcpu - guest virtual cpu
17736 + * @prefix - guest prefix
17737   * @gra - guest real address
17738   *
17739   * Returns the guest absolute address that corresponds to the passed guest real
17740 - * address @gra of a virtual guest cpu by applying its prefix.
17741 + * address @gra by applying the given prefix.
17742   */
17743 -static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17744 -                                                unsigned long gra)
17745 +static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
17747 -       unsigned long prefix  = kvm_s390_get_prefix(vcpu);
17749         if (gra < 2 * PAGE_SIZE)
17750                 gra += prefix;
17751         else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
17752 @@ -36,6 +33,43 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17753         return gra;
17756 +/**
17757 + * kvm_s390_real_to_abs - convert guest real address to guest absolute address
17758 + * @vcpu - guest virtual cpu
17759 + * @gra - guest real address
17760 + *
17761 + * Returns the guest absolute address that corresponds to the passed guest real
17762 + * address @gra of a virtual guest cpu by applying its prefix.
17763 + */
17764 +static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17765 +                                                unsigned long gra)
17767 +       return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
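A quick worked example of the swap rule above, assuming a prefix of 0x20000: guest real 0x0 lands at absolute 0x20000, guest real 0x21000 lands back at absolute 0x1000, and addresses outside both 8 KiB windows pass through unchanged. The same arithmetic as a stand-alone, hedged sketch:

#include <assert.h>

#define PAGE_SIZE 4096UL

/* Mirrors _kvm_s390_real_to_abs(): swap the low 8 KiB with the prefix area. */
static unsigned long real_to_abs(unsigned long prefix, unsigned long gra)
{
	if (gra < 2 * PAGE_SIZE)
		gra += prefix;
	else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
		gra -= prefix;
	return gra;
}

int main(void)
{
	assert(real_to_abs(0x20000, 0x0)     == 0x20000);
	assert(real_to_abs(0x20000, 0x21000) == 0x1000);
	assert(real_to_abs(0x20000, 0x50000) == 0x50000);
	return 0;
}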
17770 +/**
17771 + * _kvm_s390_logical_to_effective - convert guest logical to effective address
17772 + * @psw: psw of the guest
17773 + * @ga: guest logical address
17774 + *
17775 + * Convert a guest logical address to an effective address by applying the
17776 + * rules of the addressing mode defined by bits 31 and 32 of the given PSW
17777 + * (extended/basic addressing mode).
17778 + *
17779 + * Depending on the addressing mode, the upper 40 bits (24 bit addressing
17780 + * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
17781 + * mode) of @ga will be zeroed and the remaining bits will be returned.
17782 + */
17783 +static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
17784 +                                                          unsigned long ga)
17786 +       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
17787 +               return ga;
17788 +       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
17789 +               return ga & ((1UL << 31) - 1);
17790 +       return ga & ((1UL << 24) - 1);
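The three returns above amount to masking the address with 64, 31, or 24 low bits; e.g. ga = 0x1234567890 keeps all bits in 64-bit mode, becomes 0x34567890 in 31-bit mode, and 0x567890 in 24-bit mode. A stand-alone sketch of those masks (hedged illustration, not kernel code):

#include <assert.h>

#define AMODE24_MASK ((1UL << 24) - 1)	/* 0x0000000000ffffff */
#define AMODE31_MASK ((1UL << 31) - 1)	/* 0x000000007fffffff */

int main(void)
{
	unsigned long ga = 0x1234567890UL;

	assert((ga & AMODE31_MASK) == 0x34567890UL);
	assert((ga & AMODE24_MASK) == 0x567890UL);
	return 0;
}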
17793  /**
17794   * kvm_s390_logical_to_effective - convert guest logical to effective address
17795   * @vcpu: guest virtual cpu
17796 @@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17797  static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
17798                                                           unsigned long ga)
17800 -       psw_t *psw = &vcpu->arch.sie_block->gpsw;
17802 -       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
17803 -               return ga;
17804 -       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
17805 -               return ga & ((1UL << 31) - 1);
17806 -       return ga & ((1UL << 24) - 1);
17807 +       return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
17810  /*
17811 @@ -359,7 +387,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
17812  int ipte_lock_held(struct kvm_vcpu *vcpu);
17813  int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
17815 +/* MVPG PEI indication bits */
17816 +#define PEI_DAT_PROT 2
17817 +#define PEI_NOT_PTE 4
17819  int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
17820 -                         unsigned long saddr);
17821 +                         unsigned long saddr, unsigned long *datptr);
17823  #endif /* __KVM_S390_GACCESS_H */
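Because DAT table entries are 8-byte aligned, the two PEI bits above can ride in the low bits of the address that kvm_s390_shadow_fault() stores through @datptr. A hedged sketch of unpacking such a value (decoder invented for illustration):

#define PEI_DAT_PROT 2
#define PEI_NOT_PTE  4

/* Split a packed datptr into the entry address and its PEI flags. */
static void decode_datptr(unsigned long datptr, unsigned long *entry,
			  int *dat_prot, int *not_pte)
{
	*entry    = datptr & ~7UL;		/* 8-byte aligned entry address */
	*dat_prot = !!(datptr & PEI_DAT_PROT);	/* DAT protection applied */
	*not_pte  = !!(datptr & PEI_NOT_PTE);	/* walk stopped above PTE level */
}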
17824 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
17825 index 2f09e9d7dc95..24ad447e648c 100644
17826 --- a/arch/s390/kvm/kvm-s390.c
17827 +++ b/arch/s390/kvm/kvm-s390.c
17828 @@ -4307,16 +4307,16 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
17829         kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
17830         kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
17831         if (MACHINE_HAS_GS) {
17832 +               preempt_disable();
17833                 __ctl_set_bit(2, 4);
17834                 if (vcpu->arch.gs_enabled)
17835                         save_gs_cb(current->thread.gs_cb);
17836 -               preempt_disable();
17837                 current->thread.gs_cb = vcpu->arch.host_gscb;
17838                 restore_gs_cb(vcpu->arch.host_gscb);
17839 -               preempt_enable();
17840                 if (!vcpu->arch.host_gscb)
17841                         __ctl_clear_bit(2, 4);
17842                 vcpu->arch.host_gscb = NULL;
17843 +               preempt_enable();
17844         }
17845         /* SIE will save etoken directly into SDNX and therefore kvm_run */
17847 diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
17848 index bd803e091918..4002a24bc43a 100644
17849 --- a/arch/s390/kvm/vsie.c
17850 +++ b/arch/s390/kvm/vsie.c
17851 @@ -417,11 +417,6 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17852                 memcpy((void *)((u64)scb_o + 0xc0),
17853                        (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
17854                 break;
17855 -       case ICPT_PARTEXEC:
17856 -               /* MVPG only */
17857 -               memcpy((void *)((u64)scb_o + 0xc0),
17858 -                      (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
17859 -               break;
17860         }
17862         if (scb_s->ihcpu != 0xffffU)
17863 @@ -620,10 +615,10 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17864         /* with mso/msl, the prefix lies at offset *mso* */
17865         prefix += scb_s->mso;
17867 -       rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
17868 +       rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
17869         if (!rc && (scb_s->ecb & ECB_TE))
17870                 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17871 -                                          prefix + PAGE_SIZE);
17872 +                                          prefix + PAGE_SIZE, NULL);
17873         /*
17874          * We don't have to mprotect, we will be called for all unshadows.
17875          * SIE will detect if protection applies and trigger a validity.
17876 @@ -914,7 +909,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17877                                     current->thread.gmap_addr, 1);
17879         rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17880 -                                  current->thread.gmap_addr);
17881 +                                  current->thread.gmap_addr, NULL);
17882         if (rc > 0) {
17883                 rc = inject_fault(vcpu, rc,
17884                                   current->thread.gmap_addr,
17885 @@ -936,7 +931,7 @@ static void handle_last_fault(struct kvm_vcpu *vcpu,
17887         if (vsie_page->fault_addr)
17888                 kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17889 -                                     vsie_page->fault_addr);
17890 +                                     vsie_page->fault_addr, NULL);
17891         vsie_page->fault_addr = 0;
17894 @@ -983,6 +978,98 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17895         return 0;
17899 + * Get a register for a nested guest.
17900 + * @vcpu: the vcpu of the guest
17901 + * @vsie_page: the vsie_page for the nested guest
17902 + * @reg: the register number; the upper 4 bits are ignored.
17903 + * Returns: the value of the register.
17904 + */
17905 +static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
17907 +       /* no need to validate the parameter and/or perform error handling */
17908 +       reg &= 0xf;
17909 +       switch (reg) {
17910 +       case 15:
17911 +               return vsie_page->scb_s.gg15;
17912 +       case 14:
17913 +               return vsie_page->scb_s.gg14;
17914 +       default:
17915 +               return vcpu->run->s.regs.gprs[reg];
17916 +       }
17919 +static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17921 +       struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
17922 +       unsigned long pei_dest, pei_src, src, dest, mask, prefix;
17923 +       u64 *pei_block = &vsie_page->scb_o->mcic;
17924 +       int edat, rc_dest, rc_src;
17925 +       union ctlreg0 cr0;
17927 +       cr0.val = vcpu->arch.sie_block->gcr[0];
17928 +       edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
17929 +       mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
17930 +       prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
17932 +       dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
17933 +       dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
17934 +       src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
17935 +       src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
17937 +       rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
17938 +       rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
17939 +       /*
17940 +        * Either everything went well, or something non-critical went wrong,
17941 +        * e.g. because of a race. In either case, simply retry.
17942 +        */
17943 +       if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
17944 +               retry_vsie_icpt(vsie_page);
17945 +               return -EAGAIN;
17946 +       }
17947 +       /* Something more serious went wrong, propagate the error */
17948 +       if (rc_dest < 0)
17949 +               return rc_dest;
17950 +       if (rc_src < 0)
17951 +               return rc_src;
17953 +       /* The only possible suppressing exception: just deliver it */
17954 +       if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
17955 +               clear_vsie_icpt(vsie_page);
17956 +               rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
17957 +               WARN_ON_ONCE(rc_dest);
17958 +               return 1;
17959 +       }
17961 +       /*
17962 +        * Forward the PEI intercept to the guest for page faults and, if EDAT
17963 +        * applies, also for segment and region table faults.
17964 +        */
17965 +       if (edat) {
17966 +               rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
17967 +               rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
17968 +       } else {
17969 +               rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
17970 +               rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
17971 +       }
17972 +       if (!rc_dest && !rc_src) {
17973 +               pei_block[0] = pei_dest;
17974 +               pei_block[1] = pei_src;
17975 +               return 1;
17976 +       }
17978 +       retry_vsie_icpt(vsie_page);
17980 +       /*
17981 +        * The host has EDAT and the guest does not, or it was an ASCE-type
17982 +        * exception. The host needs to inject the appropriate DAT interrupts
17983 +        * into the guest.
17984 +        */
17985 +       if (rc_dest)
17986 +               return inject_fault(vcpu, rc_dest, dest, 1);
17987 +       return inject_fault(vcpu, rc_src, src, 0);
17990  /*
17991   * Run the vsie on a shadow scb and a shadow gmap, without any further
17992   * sanity checks, handling SIE faults.
17993 @@ -1071,6 +1158,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17994                 if ((scb_s->ipa & 0xf000) != 0xf000)
17995                         scb_s->ipa += 0x1000;
17996                 break;
17997 +       case ICPT_PARTEXEC:
17998 +               if (scb_s->ipa == 0xb254)
17999 +                       rc = vsie_handle_mvpg(vcpu, vsie_page);
18000 +               break;
18001         }
18002         return rc;
18004 diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
18005 index f5beecdac693..e76b22157099 100644
18006 --- a/arch/sh/kernel/traps.c
18007 +++ b/arch/sh/kernel/traps.c
18008 @@ -180,7 +180,6 @@ static inline void arch_ftrace_nmi_exit(void) { }
18010  BUILD_TRAP_HANDLER(nmi)
18012 -       unsigned int cpu = smp_processor_id();
18013         TRAP_HANDLER_DECL;
18015         arch_ftrace_nmi_enter();
18016 diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
18017 index 315d368e63ad..1dfb2959c73b 100644
18018 --- a/arch/um/Kconfig.debug
18019 +++ b/arch/um/Kconfig.debug
18020 @@ -17,6 +17,7 @@ config GCOV
18021         bool "Enable gcov support"
18022         depends on DEBUG_INFO
18023         depends on !KCOV
18024 +       depends on !MODULES
18025         help
18026           This option allows developers to retrieve coverage data from a UML
18027           session.
18028 diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
18029 index 5aa882011e04..e698e0c7dbdc 100644
18030 --- a/arch/um/kernel/Makefile
18031 +++ b/arch/um/kernel/Makefile
18032 @@ -21,7 +21,6 @@ obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
18034  obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
18035  obj-$(CONFIG_GPROF)    += gprof_syms.o
18036 -obj-$(CONFIG_GCOV)     += gmon_syms.o
18037  obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
18038  obj-$(CONFIG_STACKTRACE) += stacktrace.o
18040 diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
18041 index dacbfabf66d8..2f2a8ce92f1e 100644
18042 --- a/arch/um/kernel/dyn.lds.S
18043 +++ b/arch/um/kernel/dyn.lds.S
18044 @@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH)
18045  ENTRY(_start)
18046  jiffies = jiffies_64;
18048 +VERSION {
18049 +  {
18050 +    local: *;
18051 +  };
18054  SECTIONS
18056    PROVIDE (__executable_start = START);
18057 diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c
18058 deleted file mode 100644
18059 index 9361a8eb9bf1..000000000000
18060 --- a/arch/um/kernel/gmon_syms.c
18061 +++ /dev/null
18062 @@ -1,16 +0,0 @@
18063 -// SPDX-License-Identifier: GPL-2.0
18065 - * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
18066 - */
18068 -#include <linux/module.h>
18070 -extern void __bb_init_func(void *)  __attribute__((weak));
18071 -EXPORT_SYMBOL(__bb_init_func);
18073 -extern void __gcov_init(void *)  __attribute__((weak));
18074 -EXPORT_SYMBOL(__gcov_init);
18075 -extern void __gcov_merge_add(void *, unsigned int)  __attribute__((weak));
18076 -EXPORT_SYMBOL(__gcov_merge_add);
18077 -extern void __gcov_exit(void)  __attribute__((weak));
18078 -EXPORT_SYMBOL(__gcov_exit);
18079 diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
18080 index 45d957d7004c..7a8e2b123e29 100644
18081 --- a/arch/um/kernel/uml.lds.S
18082 +++ b/arch/um/kernel/uml.lds.S
18083 @@ -7,6 +7,12 @@ OUTPUT_ARCH(ELF_ARCH)
18084  ENTRY(_start)
18085  jiffies = jiffies_64;
18087 +VERSION {
18088 +  {
18089 +    local: *;
18090 +  };
18093  SECTIONS
18095    /* This must contain the right address - not quite the default ELF one.*/
18096 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
18097 index 2792879d398e..ab2e8502c27c 100644
18098 --- a/arch/x86/Kconfig
18099 +++ b/arch/x86/Kconfig
18100 @@ -163,6 +163,7 @@ config X86
18101         select HAVE_ARCH_TRACEHOOK
18102         select HAVE_ARCH_TRANSPARENT_HUGEPAGE
18103         select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
18104 +       select HAVE_ARCH_PARENT_PMD_YOUNG       if X86_64
18105         select HAVE_ARCH_USERFAULTFD_WP         if X86_64 && USERFAULTFD
18106         select HAVE_ARCH_VMAP_STACK             if X86_64
18107         select HAVE_ARCH_WITHIN_STACK_FRAMES
18108 @@ -571,6 +572,7 @@ config X86_UV
18109         depends on X86_EXTENDED_PLATFORM
18110         depends on NUMA
18111         depends on EFI
18112 +       depends on KEXEC_CORE
18113         depends on X86_X2APIC
18114         depends on PCI
18115         help
18116 @@ -1406,7 +1408,7 @@ config HIGHMEM4G
18118  config HIGHMEM64G
18119         bool "64GB"
18120 -       depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
18121 +       depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
18122         select X86_PAE
18123         help
18124           Select this if you have a 32-bit processor and more than 4
18125 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
18126 index 814fe0d349b0..872b9cf598e3 100644
18127 --- a/arch/x86/Kconfig.cpu
18128 +++ b/arch/x86/Kconfig.cpu
18129 @@ -157,7 +157,7 @@ config MPENTIUM4
18132  config MK6
18133 -       bool "K6/K6-II/K6-III"
18134 +       bool "AMD K6/K6-II/K6-III"
18135         depends on X86_32
18136         help
18137           Select this for an AMD K6-family processor.  Enables use of
18138 @@ -165,7 +165,7 @@ config MK6
18139           flags to GCC.
18141  config MK7
18142 -       bool "Athlon/Duron/K7"
18143 +       bool "AMD Athlon/Duron/K7"
18144         depends on X86_32
18145         help
18146           Select this for an AMD Athlon K7-family processor.  Enables use of
18147 @@ -173,12 +173,98 @@ config MK7
18148           flags to GCC.
18150  config MK8
18151 -       bool "Opteron/Athlon64/Hammer/K8"
18152 +       bool "AMD Opteron/Athlon64/Hammer/K8"
18153         help
18154           Select this for an AMD Opteron or Athlon64 Hammer-family processor.
18155           Enables use of some extended instructions, and passes appropriate
18156           optimization flags to GCC.
18158 +config MK8SSE3
18159 +       bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
18160 +       help
18161 +         Select this for AMD Opteron or Athlon64 Hammer-family processors with SSE3.
18162 +         Enables use of some extended instructions, and passes appropriate
18163 +         optimization flags to GCC.
18165 +config MK10
18166 +       bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
18167 +       help
18168 +         Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
18169 +         Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
18170 +         Enables use of some extended instructions, and passes appropriate
18171 +         optimization flags to GCC.
18173 +config MBARCELONA
18174 +       bool "AMD Barcelona"
18175 +       help
18176 +         Select this for AMD Family 10h Barcelona processors.
18178 +         Enables -march=barcelona
18180 +config MBOBCAT
18181 +       bool "AMD Bobcat"
18182 +       help
18183 +         Select this for AMD Family 14h Bobcat processors.
18185 +         Enables -march=btver1
18187 +config MJAGUAR
18188 +       bool "AMD Jaguar"
18189 +       help
18190 +         Select this for AMD Family 16h Jaguar processors.
18192 +         Enables -march=btver2
18194 +config MBULLDOZER
18195 +       bool "AMD Bulldozer"
18196 +       help
18197 +         Select this for AMD Family 15h Bulldozer processors.
18199 +         Enables -march=bdver1
18201 +config MPILEDRIVER
18202 +       bool "AMD Piledriver"
18203 +       help
18204 +         Select this for AMD Family 15h Piledriver processors.
18206 +         Enables -march=bdver2
18208 +config MSTEAMROLLER
18209 +       bool "AMD Steamroller"
18210 +       help
18211 +         Select this for AMD Family 15h Steamroller processors.
18213 +         Enables -march=bdver3
18215 +config MEXCAVATOR
18216 +       bool "AMD Excavator"
18217 +       help
18218 +         Select this for AMD Family 15h Excavator processors.
18220 +         Enables -march=bdver4
18222 +config MZEN
18223 +       bool "AMD Zen"
18224 +       help
18225 +         Select this for AMD Family 17h Zen processors.
18227 +         Enables -march=znver1
18229 +config MZEN2
18230 +       bool "AMD Zen 2"
18231 +       help
18232 +         Select this for AMD Family 17h Zen 2 processors.
18234 +         Enables -march=znver2
18236 +config MZEN3
18237 +       bool "AMD Zen 3"
18238 +       depends on GCC_VERSION > 100300
18239 +       help
18240 +         Select this for AMD Family 19h Zen 3 processors.
18242 +         Enables -march=znver3
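The GCC_VERSION guards in the blocks above encode the compiler version as major * 10000 + minor * 100 + patchlevel, so the 100300 required for -march=znver3 means "newer than GCC 10.3.0" and the 110000 used further down means "newer than GCC 11.0.0". A minimal sketch of the same encoding computed from GCC's predefined macros (the runtime check is purely illustrative; Kconfig computes the value once at configuration time):

    #include <stdio.h>

    /* Mirrors Kconfig's GCC_VERSION: major * 10000 + minor * 100 + patchlevel. */
    #define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)

    int main(void)
    {
            printf("GCC_VERSION = %d\n", GCC_VERSION);
            if (GCC_VERSION > 100300)
                    printf("MZEN3 (-march=znver3) would be selectable\n");
            return 0;
    }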
18244  config MCRUSOE
18245         bool "Crusoe"
18246         depends on X86_32
18247 @@ -270,7 +356,7 @@ config MPSC
18248           in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
18250  config MCORE2
18251 -       bool "Core 2/newer Xeon"
18252 +       bool "Intel Core 2"
18253         help
18255           Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
18256 @@ -278,6 +364,8 @@ config MCORE2
18257           family in /proc/cpuinfo. Newer ones have 6 and older ones 15
18258           (not a typo)
18260 +         Enables -march=core2
18262  config MATOM
18263         bool "Intel Atom"
18264         help
18265 @@ -287,6 +375,182 @@ config MATOM
18266           accordingly optimized code. Use a recent GCC with specific Atom
18267           support in order to fully benefit from selecting this option.
18269 +config MNEHALEM
18270 +       bool "Intel Nehalem"
18271 +       select X86_P6_NOP
18272 +       help
18274 +         Select this for 1st Gen Core processors in the Nehalem family.
18276 +         Enables -march=nehalem
18278 +config MWESTMERE
18279 +       bool "Intel Westmere"
18280 +       select X86_P6_NOP
18281 +       help
18283 +         Select this for the Intel Westmere (formerly Nehalem-C) family.
18285 +         Enables -march=westmere
18287 +config MSILVERMONT
18288 +       bool "Intel Silvermont"
18289 +       select X86_P6_NOP
18290 +       help
18292 +         Select this for the Intel Silvermont platform.
18294 +         Enables -march=silvermont
18296 +config MGOLDMONT
18297 +       bool "Intel Goldmont"
18298 +       select X86_P6_NOP
18299 +       help
18301 +         Select this for the Intel Goldmont platform including Apollo Lake and Denverton.
18303 +         Enables -march=goldmont
18305 +config MGOLDMONTPLUS
18306 +       bool "Intel Goldmont Plus"
18307 +       select X86_P6_NOP
18308 +       help
18310 +         Select this for the Intel Goldmont Plus platform including Gemini Lake.
18312 +         Enables -march=goldmont-plus
18314 +config MSANDYBRIDGE
18315 +       bool "Intel Sandy Bridge"
18316 +       select X86_P6_NOP
18317 +       help
18319 +         Select this for 2nd Gen Core processors in the Sandy Bridge family.
18321 +         Enables -march=sandybridge
18323 +config MIVYBRIDGE
18324 +       bool "Intel Ivy Bridge"
18325 +       select X86_P6_NOP
18326 +       help
18328 +         Select this for 3rd Gen Core processors in the Ivy Bridge family.
18330 +         Enables -march=ivybridge
18332 +config MHASWELL
18333 +       bool "Intel Haswell"
18334 +       select X86_P6_NOP
18335 +       help
18337 +         Select this for 4th Gen Core processors in the Haswell family.
18339 +         Enables -march=haswell
18341 +config MBROADWELL
18342 +       bool "Intel Broadwell"
18343 +       select X86_P6_NOP
18344 +       help
18346 +         Select this for 5th Gen Core processors in the Broadwell family.
18348 +         Enables -march=broadwell
18350 +config MSKYLAKE
18351 +       bool "Intel Skylake"
18352 +       select X86_P6_NOP
18353 +       help
18355 +         Select this for 6th Gen Core processors in the Skylake family.
18357 +         Enables -march=skylake
18359 +config MSKYLAKEX
18360 +       bool "Intel Skylake X"
18361 +       select X86_P6_NOP
18362 +       help
18364 +         Select this for 6th Gen Core processors in the Skylake X family.
18366 +         Enables -march=skylake-avx512
18368 +config MCANNONLAKE
18369 +       bool "Intel Cannon Lake"
18370 +       select X86_P6_NOP
18371 +       help
18373 +         Select this for 8th Gen Core processors in the Cannon Lake family.
18375 +         Enables -march=cannonlake
18377 +config MICELAKE
18378 +       bool "Intel Ice Lake"
18379 +       select X86_P6_NOP
18380 +       help
18382 +         Select this for 10th Gen Core processors in the Ice Lake family.
18384 +         Enables -march=icelake-client
18386 +config MCASCADELAKE
18387 +       bool "Intel Cascade Lake"
18388 +       select X86_P6_NOP
18389 +       help
18391 +         Select this for Xeon processors in the Cascade Lake family.
18393 +         Enables -march=cascadelake
18395 +config MCOOPERLAKE
18396 +       bool "Intel Cooper Lake"
18397 +       depends on GCC_VERSION > 100100
18398 +       select X86_P6_NOP
18399 +       help
18401 +         Select this for Xeon processors in the Cooper Lake family.
18403 +         Enables -march=cooperlake
18405 +config MTIGERLAKE
18406 +       bool "Intel Tiger Lake"
18407 +       depends on GCC_VERSION > 100100
18408 +       select X86_P6_NOP
18409 +       help
18411 +         Select this for third-generation 10 nm process processors in the Tiger Lake family.
18413 +         Enables -march=tigerlake
18415 +config MSAPPHIRERAPIDS
18416 +       bool "Intel Sapphire Rapids"
18417 +       depends on GCC_VERSION > 110000
18418 +       select X86_P6_NOP
18419 +       help
18421 +         Select this for fourth-generation 10 nm process processors in the Sapphire Rapids family.
18423 +         Enables -march=sapphirerapids
18425 +config MROCKETLAKE
18426 +       bool "Intel Rocket Lake"
18427 +       depends on GCC_VERSION > 110000
18428 +       select X86_P6_NOP
18429 +       help
18431 +         Select this for eleventh-generation processors in the Rocket Lake family.
18433 +         Enables -march=rocketlake
18435 +config MALDERLAKE
18436 +       bool "Intel Alder Lake"
18437 +       depends on GCC_VERSION > 110000
18438 +       select X86_P6_NOP
18439 +       help
18441 +         Select this for twelfth-generation processors in the Alder Lake family.
18443 +         Enables -march=alderlake
18445  config GENERIC_CPU
18446         bool "Generic-x86-64"
18447         depends on X86_64
18448 @@ -294,6 +558,50 @@ config GENERIC_CPU
18449           Generic x86-64 CPU.
18450           Run equally well on all x86-64 CPUs.
18452 +config GENERIC_CPU2
18453 +       bool "Generic-x86-64-v2"
18454 +       depends on GCC_VERSION > 110000
18455 +       depends on X86_64
18456 +       help
18457 +         Generic x86-64 CPU.
18458 +         Run equally well on all x86-64 CPUs with at least x86-64-v2 support.
18460 +config GENERIC_CPU3
18461 +       bool "Generic-x86-64-v3"
18462 +       depends on GCC_VERSION > 110000
18463 +       depends on X86_64
18464 +       help
18465 +         Generic x86-64 CPU with v3 instructions.
18466 +         Run equally well on all x86-64 CPUs with at least x86-64-v3 support.
18468 +config GENERIC_CPU4
18469 +       bool "Generic-x86-64-v4"
18470 +       depends on GCC_VERSION > 110000
18471 +       depends on X86_64
18472 +       help
18473 +         Generic x86-64 CPU with v4 instructions.
18474 +         Run equally well on all x86-64 CPUs with at least x86-64-v4 support.
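The three generic options track the x86-64 psABI micro-architecture levels: v2 roughly adds SSE4.2 and POPCNT, v3 adds AVX2, BMI2 and FMA, and v4 adds several AVX-512 subsets. A hedged userspace sketch that probes a few v3 marker features at run time with GCC's __builtin_cpu_supports (the feature list is illustrative, not the complete psABI definition):

    #include <stdio.h>

    int main(void)
    {
            __builtin_cpu_init();
            int v3_like = __builtin_cpu_supports("avx2") &&
                          __builtin_cpu_supports("bmi2") &&
                          __builtin_cpu_supports("fma");
            printf("CPU %s the AVX2/BMI2/FMA subset of x86-64-v3\n",
                   v3_like ? "supports" : "lacks");
            return 0;
    }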
18476 +config MNATIVE_INTEL
18477 +       bool "Intel-Native optimizations autodetected by GCC"
18478 +       help
18480 +         GCC 4.2 and above support -march=native, which automatically detects
18481 +         the optimum settings to use based on your processor. Do NOT use this
18482 +         for AMD CPUs.  Intel Only!
18484 +         Enables -march=native
18486 +config MNATIVE_AMD
18487 +       bool "AMD-Native optimizations autodetected by GCC"
18488 +       help
18490 +         GCC 4.2 and above support -march=native, which automatically detects
18491 +         the optimum settings to use based on your processor. Do NOT use this
18492 +         for Intel CPUs.  AMD Only!
18494 +         Enables -march=native
18496  endchoice
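Since -march=native only asks GCC to probe the build host, the split into MNATIVE_INTEL and MNATIVE_AMD exists so the rest of Kconfig can follow vendor-specific paths (compare the X86_P6_NOP and X86_INTEL_USERCOPY lists below, which include MNATIVE_INTEL but not MNATIVE_AMD). A small sketch of how the vendor behind "native" can be read from CPUID leaf 0; this is illustrative userspace code, not part of the patch:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;
            char vendor[13] = { 0 };

            if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
                    return 1;
            /* The vendor string is returned in EBX, EDX, ECX order. */
            memcpy(vendor + 0, &ebx, 4);
            memcpy(vendor + 4, &edx, 4);
            memcpy(vendor + 8, &ecx, 4);
            printf("%s -> %s\n", vendor,
                   !strcmp(vendor, "GenuineIntel") ? "MNATIVE_INTEL" :
                   !strcmp(vendor, "AuthenticAMD") ? "MNATIVE_AMD" : "other");
            return 0;
    }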
18498  config X86_GENERIC
18499 @@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT
18500  config X86_L1_CACHE_SHIFT
18501         int
18502         default "7" if MPENTIUM4 || MPSC
18503 -       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
18504 +       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4
18505         default "4" if MELAN || M486SX || M486 || MGEODEGX1
18506         default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
18508 @@ -336,11 +644,11 @@ config X86_ALIGNMENT_16
18510  config X86_INTEL_USERCOPY
18511         def_bool y
18512 -       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
18513 +       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL
18515  config X86_USE_PPRO_CHECKSUM
18516         def_bool y
18517 -       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
18518 +       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
18520  config X86_USE_3DNOW
18521         def_bool y
18522 @@ -360,26 +668,26 @@ config X86_USE_3DNOW
18523  config X86_P6_NOP
18524         def_bool y
18525         depends on X86_64
18526 -       depends on (MCORE2 || MPENTIUM4 || MPSC)
18527 +       depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL)
18529  config X86_TSC
18530         def_bool y
18531 -       depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
18532 +       depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64
18534  config X86_CMPXCHG64
18535         def_bool y
18536 -       depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
18537 +       depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
18539  # this should be set for all -march=.. options where the compiler
18540  # generates cmov.
18541  config X86_CMOV
18542         def_bool y
18543 -       depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
18544 +       depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
18546  config X86_MINIMUM_CPU_FAMILY
18547         int
18548         default "64" if X86_64
18549 -       default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
18550 +       default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
18551         default "5" if X86_32 && X86_CMPXCHG64
18552         default "4"
18554 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
18555 index 9a85eae37b17..3d7b305bc301 100644
18556 --- a/arch/x86/Makefile
18557 +++ b/arch/x86/Makefile
18558 @@ -33,6 +33,7 @@ REALMODE_CFLAGS += -ffreestanding
18559  REALMODE_CFLAGS += -fno-stack-protector
18560  REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
18561  REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
18562 +REALMODE_CFLAGS += $(CLANG_FLAGS)
18563  export REALMODE_CFLAGS
18565  # BITS is used as extension for files which are available in a 32 bit
18566 @@ -113,11 +114,48 @@ else
18567          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
18568          cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
18569          cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
18571 -        cflags-$(CONFIG_MCORE2) += \
18572 -                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
18573 -       cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
18574 -               $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
18575 +        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3)
18576 +        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
18577 +        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
18578 +        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
18579 +        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
18580 +        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
18581 +        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
18582 +        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-mno-tbm)
18583 +        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
18584 +        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-mno-tbm)
18585 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
18586 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
18587 +        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
18588 +        cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
18589 +        cflags-$(CONFIG_MZEN3) += $(call cc-option,-march=znver3)
18591 +        cflags-$(CONFIG_MNATIVE_INTEL) += $(call cc-option,-march=native)
18592 +        cflags-$(CONFIG_MNATIVE_AMD) += $(call cc-option,-march=native)
18593 +        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell)
18594 +        cflags-$(CONFIG_MCORE2) += $(call cc-option,-march=core2)
18595 +        cflags-$(CONFIG_MNEHALEM) += $(call cc-option,-march=nehalem)
18596 +        cflags-$(CONFIG_MWESTMERE) += $(call cc-option,-march=westmere)
18597 +        cflags-$(CONFIG_MSILVERMONT) += $(call cc-option,-march=silvermont)
18598 +        cflags-$(CONFIG_MGOLDMONT) += $(call cc-option,-march=goldmont)
18599 +        cflags-$(CONFIG_MGOLDMONTPLUS) += $(call cc-option,-march=goldmont-plus)
18600 +        cflags-$(CONFIG_MSANDYBRIDGE) += $(call cc-option,-march=sandybridge)
18601 +        cflags-$(CONFIG_MIVYBRIDGE) += $(call cc-option,-march=ivybridge)
18602 +        cflags-$(CONFIG_MHASWELL) += $(call cc-option,-march=haswell)
18603 +        cflags-$(CONFIG_MBROADWELL) += $(call cc-option,-march=broadwell)
18604 +        cflags-$(CONFIG_MSKYLAKE) += $(call cc-option,-march=skylake)
18605 +        cflags-$(CONFIG_MSKYLAKEX) += $(call cc-option,-march=skylake-avx512)
18606 +        cflags-$(CONFIG_MCANNONLAKE) += $(call cc-option,-march=cannonlake)
18607 +        cflags-$(CONFIG_MICELAKE) += $(call cc-option,-march=icelake-client)
18608 +        cflags-$(CONFIG_MCASCADELAKE) += $(call cc-option,-march=cascadelake)
18609 +        cflags-$(CONFIG_MCOOPERLAKE) += $(call cc-option,-march=cooperlake)
18610 +        cflags-$(CONFIG_MTIGERLAKE) += $(call cc-option,-march=tigerlake)
18611 +        cflags-$(CONFIG_MSAPPHIRERAPIDS) += $(call cc-option,-march=sapphirerapids)
18612 +        cflags-$(CONFIG_MROCKETLAKE) += $(call cc-option,-march=rocketlake)
18613 +        cflags-$(CONFIG_MALDERLAKE) += $(call cc-option,-march=alderlake)
18614 +        cflags-$(CONFIG_GENERIC_CPU2) += $(call cc-option,-march=x86-64-v2)
18615 +        cflags-$(CONFIG_GENERIC_CPU3) += $(call cc-option,-march=x86-64-v3)
18616 +        cflags-$(CONFIG_GENERIC_CPU4) += $(call cc-option,-march=x86-64-v4)
18617          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
18618          KBUILD_CFLAGS += $(cflags-y)
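Every cc-option entry above degrades gracefully: a -march flag is only added when the compiler accepts it. The baseline a given build actually ended up with is therefore easiest to confirm from GCC's predefined ISA macros, e.g. with echo | gcc -march=znver2 -dM -E - | grep AVX. A minimal translation-unit check along the same lines (the level mapping in the comments is approximate):

    #include <stdio.h>

    int main(void)
    {
    #ifdef __AVX2__
            puts("baseline includes AVX2 (x86-64-v3 territory)");
    #elif defined(__SSE4_2__)
            puts("baseline includes SSE4.2 (x86-64-v2 territory)");
    #else
            puts("plain x86-64 baseline");
    #endif
            return 0;
    }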
18620 @@ -169,11 +207,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
18621         KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
18622  endif
18624 -ifdef CONFIG_LTO_CLANG
18625 -KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
18626 -                  -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
18627 -endif
18629  # Workaround for a gcc prelease that unfortunately was shipped in a suse release
18630  KBUILD_CFLAGS += -Wno-sign-compare
18632 @@ -193,7 +226,12 @@ ifdef CONFIG_RETPOLINE
18633    endif
18634  endif
18636 -KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
18637 +KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
18639 +ifdef CONFIG_LTO_CLANG
18640 +KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
18641 +                  -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
18642 +endif
18644  ifdef CONFIG_X86_NEED_RELOCS
18645  LDFLAGS_vmlinux := --emit-relocs --discard-none
18646 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
18647 index e0bc3988c3fa..6e5522aebbbd 100644
18648 --- a/arch/x86/boot/compressed/Makefile
18649 +++ b/arch/x86/boot/compressed/Makefile
18650 @@ -46,6 +46,7 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
18651  # Disable relocation relaxation in case the link is not PIE.
18652  KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
18653  KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
18654 +KBUILD_CFLAGS += $(CLANG_FLAGS)
18656 # sev-es.c indirectly includes inat-table.h which is generated during
18657  # compilation and stored in $(objtree). Add the directory to the includes so
18658 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
18659 index e94874f4bbc1..ae1fe558a2d8 100644
18660 --- a/arch/x86/boot/compressed/head_64.S
18661 +++ b/arch/x86/boot/compressed/head_64.S
18662 @@ -172,11 +172,21 @@ SYM_FUNC_START(startup_32)
18663          */
18664         call    get_sev_encryption_bit
18665         xorl    %edx, %edx
18666 +#ifdef CONFIG_AMD_MEM_ENCRYPT
18667         testl   %eax, %eax
18668         jz      1f
18669         subl    $32, %eax       /* Encryption bit is always above bit 31 */
18670         bts     %eax, %edx      /* Set encryption mask for page tables */
18671 +       /*
18672 +        * Mark SEV as active in sev_status so that startup32_check_sev_cbit()
18673 +        * will do a check. The sev_status memory will be fully initialized
18674 +        * with the contents of MSR_AMD_SEV_STATUS later in
18675 +        * set_sev_encryption_mask(). For now it is sufficient to know that SEV
18676 +        * is active.
18677 +        */
18678 +       movl    $1, rva(sev_status)(%ebp)
18679  1:
18680 +#endif
18682         /* Initialize Page tables to 0 */
18683         leal    rva(pgtable)(%ebx), %edi
18684 @@ -261,6 +271,9 @@ SYM_FUNC_START(startup_32)
18685         movl    %esi, %edx
18686  1:
18687  #endif
18688 +       /* Check if the C-bit position is correct when SEV is active */
18689 +       call    startup32_check_sev_cbit
18691         pushl   $__KERNEL_CS
18692         pushl   %eax
18694 @@ -786,6 +799,78 @@ SYM_DATA_START_LOCAL(loaded_image_proto)
18695  SYM_DATA_END(loaded_image_proto)
18696  #endif
18699 + * Check for the correct C-bit position when the startup_32 boot-path is used.
18700 + *
18701 + * The check makes use of the fact that all memory is encrypted when paging is
18702 + * disabled. The function creates 64 bits of random data using the RDRAND
18703 + * instruction. RDRAND is mandatory for SEV guests, so always available. If the
18704 + * hypervisor violates that, the kernel will crash right here.
18705 + *
18706 + * The 64 bits of random data are stored to a memory location and at the same
18707 + * time kept in the %eax and %ebx registers. Since encryption is always active
18708 + * when paging is off the random data will be stored encrypted in main memory.
18709 + *
18710 + * Then paging is enabled. When the C-bit position is correct all memory is
18711 + * still mapped encrypted and comparing the register values with memory will
18712 + * succeed. An incorrect C-bit position will map all memory unencrypted, so that
18713 + * the compare will use the encrypted random data and fail.
18714 + */
18715 +       __HEAD
18716 +       .code32
18717 +SYM_FUNC_START(startup32_check_sev_cbit)
18718 +#ifdef CONFIG_AMD_MEM_ENCRYPT
18719 +       pushl   %eax
18720 +       pushl   %ebx
18721 +       pushl   %ecx
18722 +       pushl   %edx
18724 +       /* Check for non-zero sev_status */
18725 +       movl    rva(sev_status)(%ebp), %eax
18726 +       testl   %eax, %eax
18727 +       jz      4f
18729 +       /*
18730 +        * Get two 32-bit random values - Don't bail out if RDRAND fails
18731 +        * because it is better to prevent forward progress than to continue
18732 +        * without a random value.
18733 +        */
18734 +1:     rdrand  %eax
18735 +       jnc     1b
18736 +2:     rdrand  %ebx
18737 +       jnc     2b
18739 +       /* Store to memory and keep it in the registers */
18740 +       movl    %eax, rva(sev_check_data)(%ebp)
18741 +       movl    %ebx, rva(sev_check_data+4)(%ebp)
18743 +       /* Enable paging to see if encryption is active */
18744 +       movl    %cr0, %edx                       /* Backup %cr0 in %edx */
18745 +       movl    $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
18746 +       movl    %ecx, %cr0
18748 +       cmpl    %eax, rva(sev_check_data)(%ebp)
18749 +       jne     3f
18750 +       cmpl    %ebx, rva(sev_check_data+4)(%ebp)
18751 +       jne     3f
18753 +       movl    %edx, %cr0      /* Restore previous %cr0 */
18755 +       jmp     4f
18757 +3:     /* Check failed - hlt the machine */
18758 +       hlt
18759 +       jmp     3b
18762 +       popl    %edx
18763 +       popl    %ecx
18764 +       popl    %ebx
18765 +       popl    %eax
18766 +#endif
18767 +       ret
18768 +SYM_FUNC_END(startup32_check_sev_cbit)
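Restated in C, the check relies on one invariant: with paging disabled every store is encrypted, so after paging is enabled a correct C-bit decrypts the data back while a wrong C-bit returns ciphertext. A hedged sketch of the assembly's control flow; rdrand64(), read_cr0(), write_cr0() and halt() are hypothetical stand-ins for the privileged instructions:

    #define X86_CR0_PE 0x00000001UL
    #define X86_CR0_PG 0x80000000UL

    extern unsigned int sev_status;
    extern volatile unsigned long long sev_check_data;
    unsigned long long rdrand64(void);  /* retried until CF=1, as in the asm */
    unsigned long read_cr0(void);
    void write_cr0(unsigned long val);
    void halt(void);

    void check_sev_cbit(void)
    {
            unsigned long long random;
            unsigned long saved_cr0;

            if (!sev_status)                 /* SEV inactive: nothing to verify */
                    return;

            random = rdrand64();
            sev_check_data = random;         /* paging off: stored encrypted */

            saved_cr0 = read_cr0();
            write_cr0(X86_CR0_PG | X86_CR0_PE);      /* enable paging */

            /* Correct C-bit: reads decrypt and match. Wrong C-bit: the
             * mapping is unencrypted, so the comparison sees ciphertext. */
            if (sev_check_data != random)
                    for (;;)
                            halt();          /* check failed: hlt the machine */

            write_cr0(saved_cr0);
    }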
18770  /*
18771   * Stack and heap for uncompression
18772   */
18773 diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
18774 index aa561795efd1..a6dea4e8a082 100644
18775 --- a/arch/x86/boot/compressed/mem_encrypt.S
18776 +++ b/arch/x86/boot/compressed/mem_encrypt.S
18777 @@ -23,12 +23,6 @@ SYM_FUNC_START(get_sev_encryption_bit)
18778         push    %ecx
18779         push    %edx
18781 -       /* Check if running under a hypervisor */
18782 -       movl    $1, %eax
18783 -       cpuid
18784 -       bt      $31, %ecx               /* Check the hypervisor bit */
18785 -       jnc     .Lno_sev
18787         movl    $0x80000000, %eax       /* CPUID to check the highest leaf */
18788         cpuid
18789         cmpl    $0x8000001f, %eax       /* See if 0x8000001f is available */
18790 diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
18791 index 646da46e8d10..1dfb8af48a3c 100644
18792 --- a/arch/x86/crypto/poly1305_glue.c
18793 +++ b/arch/x86/crypto/poly1305_glue.c
18794 @@ -16,7 +16,7 @@
18795  #include <asm/simd.h>
18797  asmlinkage void poly1305_init_x86_64(void *ctx,
18798 -                                    const u8 key[POLY1305_KEY_SIZE]);
18799 +                                    const u8 key[POLY1305_BLOCK_SIZE]);
18800  asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
18801                                        const size_t len, const u32 padbit);
18802  asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
18803 @@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx)
18804         state->is_base2_26 = 0;
18807 -static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
18808 +static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
18810         poly1305_init_x86_64(ctx, key);
18812 @@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
18813                 poly1305_emit_avx(ctx, mac, nonce);
18816 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
18817 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
18819         poly1305_simd_init(&dctx->h, key);
18820         dctx->s[0] = get_unaligned_le32(&key[16]);
18821 diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
18822 index a1c9f496fca6..4d0111f44d79 100644
18823 --- a/arch/x86/entry/syscalls/syscall_32.tbl
18824 +++ b/arch/x86/entry/syscalls/syscall_32.tbl
18825 @@ -447,3 +447,7 @@
18826  440    i386    process_madvise         sys_process_madvise
18827  441    i386    epoll_pwait2            sys_epoll_pwait2                compat_sys_epoll_pwait2
18828  442    i386    mount_setattr           sys_mount_setattr
18829 +443    i386    futex_wait              sys_futex_wait
18830 +444    i386    futex_wake              sys_futex_wake
18831 +445    i386    futex_waitv             sys_futex_waitv                 compat_sys_futex_waitv
18832 +446    i386    futex_requeue           sys_futex_requeue               compat_sys_futex_requeue
18833 diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
18834 index 7bf01cbe582f..61c0b47365e3 100644
18835 --- a/arch/x86/entry/syscalls/syscall_64.tbl
18836 +++ b/arch/x86/entry/syscalls/syscall_64.tbl
18837 @@ -364,6 +364,10 @@
18838  440    common  process_madvise         sys_process_madvise
18839  441    common  epoll_pwait2            sys_epoll_pwait2
18840  442    common  mount_setattr           sys_mount_setattr
18841 +443    common  futex_wait              sys_futex_wait
18842 +444    common  futex_wake              sys_futex_wake
18843 +445    common  futex_waitv             sys_futex_waitv
18844 +446    common  futex_requeue           sys_futex_requeue
18847  # Due to a historical design error, certain syscalls are numbered differently
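The four new entries come from the futex2 series carried by this patch; the numbers 443-446 are fixed by the tables above, but the argument layout is not shown here. A sketch of a raw futex_wait call, under the assumption (taken from the futex2 series, not from these tables) that the prototype is futex_wait(uaddr, val, flags, timeout) with FUTEX_32 = 2:

    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define NR_futex_wait 443        /* from the syscall tables above */
    #define FUTEX_32      2u         /* assumed flag: 32-bit futex word */

    static unsigned int futex_word;  /* current value: 0 */

    int main(void)
    {
            /* Expected value 1 differs from the stored 0, so on a patched
             * kernel the call should fail fast with EAGAIN, not block. */
            if (syscall(NR_futex_wait, &futex_word, 1u, FUTEX_32, NULL) < 0)
                    perror("futex_wait");
            return 0;
    }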
18848 diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
18849 index 1c7cfac7e64a..5264daa8859f 100644
18850 --- a/arch/x86/entry/vdso/vdso2c.h
18851 +++ b/arch/x86/entry/vdso/vdso2c.h
18852 @@ -35,7 +35,7 @@ static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
18853         if (offset + len > data_len)
18854                 fail("section to extract overruns input data");
18856 -       fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len);
18857 +       fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len);
18858         BITSFUNC(copy)(outfile, data + offset, len);
18859         fprintf(outfile, "\n};\n\n");
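The vdso2c fix swaps %lu for %zu because len is a size_t, and size_t is not guaranteed to be unsigned long on every host the tool is built on. The portable specifier in isolation:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
            size_t len = 42;
            /* %zu always matches size_t, regardless of the host ABI */
            printf("static const unsigned char name[%zu] = {};\n", len);
            return 0;
    }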
18861 diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
18862 index be50ef8572cc..6a98a7651621 100644
18863 --- a/arch/x86/events/amd/iommu.c
18864 +++ b/arch/x86/events/amd/iommu.c
18865 @@ -81,12 +81,12 @@ static struct attribute_group amd_iommu_events_group = {
18866  };
18868  struct amd_iommu_event_desc {
18869 -       struct kobj_attribute attr;
18870 +       struct device_attribute attr;
18871         const char *event;
18872  };
18874 -static ssize_t _iommu_event_show(struct kobject *kobj,
18875 -                               struct kobj_attribute *attr, char *buf)
18876 +static ssize_t _iommu_event_show(struct device *dev,
18877 +                               struct device_attribute *attr, char *buf)
18879         struct amd_iommu_event_desc *event =
18880                 container_of(attr, struct amd_iommu_event_desc, attr);
18881 diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
18882 index 7f014d450bc2..582c0ffb5e98 100644
18883 --- a/arch/x86/events/amd/uncore.c
18884 +++ b/arch/x86/events/amd/uncore.c
18885 @@ -275,14 +275,14 @@ static struct attribute_group amd_uncore_attr_group = {
18886  };
18888  #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                        \
18889 -static ssize_t __uncore_##_var##_show(struct kobject *kobj,            \
18890 -                               struct kobj_attribute *attr,            \
18891 +static ssize_t __uncore_##_var##_show(struct device *dev,              \
18892 +                               struct device_attribute *attr,          \
18893                                 char *page)                             \
18894  {                                                                      \
18895         BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
18896         return sprintf(page, _format "\n");                             \
18897  }                                                                      \
18898 -static struct kobj_attribute format_attr_##_var =                      \
18899 +static struct device_attribute format_attr_##_var =                    \
18900         __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
18902  DEFINE_UNCORE_FORMAT_ATTR(event12,     event,          "config:0-7,32-35");
18903 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
18904 index c57ec8e27907..4c18e7fb58f5 100644
18905 --- a/arch/x86/events/intel/core.c
18906 +++ b/arch/x86/events/intel/core.c
18907 @@ -5741,7 +5741,7 @@ __init int intel_pmu_init(void)
18908          * Check all LBR MSRs here.
18909          * Disable LBR access if any LBR MSRs can not be accessed.
18910          */
18911 -       if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
18912 +       if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
18913                 x86_pmu.lbr_nr = 0;
18914         for (i = 0; i < x86_pmu.lbr_nr; i++) {
18915                 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
18916 diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
18917 index 5eb3bdf36a41..06b0789d61b9 100644
18918 --- a/arch/x86/include/asm/idtentry.h
18919 +++ b/arch/x86/include/asm/idtentry.h
18920 @@ -588,6 +588,21 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC,  xenpv_exc_machine_check);
18921  #endif
18923  /* NMI */
18925 +#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
18927 + * Special NOIST entry point for VMX which invokes this on the kernel
18928 + * stack. asm_exc_nmi() requires an IST to work correctly vs. the NMI
18929 + * 'executing' marker.
18930 + *
18931 + * On 32-bit this just uses the regular NMI entry point because 32-bit does
18932 + * not have ISTs.
18933 + */
18934 +DECLARE_IDTENTRY(X86_TRAP_NMI,         exc_nmi_noist);
18935 +#else
18936 +#define asm_exc_nmi_noist              asm_exc_nmi
18937 +#endif
18939  DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,     exc_nmi);
18940  #ifdef CONFIG_XEN_PV
18941  DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,     xenpv_exc_nmi);
18942 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
18943 index 3768819693e5..eec2dcca2f39 100644
18944 --- a/arch/x86/include/asm/kvm_host.h
18945 +++ b/arch/x86/include/asm/kvm_host.h
18946 @@ -1753,6 +1753,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
18947                     unsigned long icr, int op_64_bit);
18949  void kvm_define_user_return_msr(unsigned index, u32 msr);
18950 +int kvm_probe_user_return_msr(u32 msr);
18951  int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
18953  u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
18954 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18955 index a02c67291cfc..a6b5cfe1fc5a 100644
18956 --- a/arch/x86/include/asm/pgtable.h
18957 +++ b/arch/x86/include/asm/pgtable.h
18958 @@ -846,7 +846,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
18960  static inline int pmd_bad(pmd_t pmd)
18962 -       return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
18963 +       return ((pmd_flags(pmd) | _PAGE_ACCESSED) & ~_PAGE_USER) != _KERNPG_TABLE;
18966  static inline unsigned long pages_to_mb(unsigned long npg)
18967 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18968 index f1b9ed5efaa9..908bcaea1361 100644
18969 --- a/arch/x86/include/asm/processor.h
18970 +++ b/arch/x86/include/asm/processor.h
18971 @@ -804,8 +804,10 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
18973  #ifdef CONFIG_CPU_SUP_AMD
18974  extern u32 amd_get_nodes_per_socket(void);
18975 +extern u32 amd_get_highest_perf(void);
18976  #else
18977  static inline u32 amd_get_nodes_per_socket(void)       { return 0; }
18978 +static inline u32 amd_get_highest_perf(void)           { return 0; }
18979  #endif
18981  static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18982 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
18983 index 75884d2cdec3..4e6a08d4c7e5 100644
18984 --- a/arch/x86/include/asm/vermagic.h
18985 +++ b/arch/x86/include/asm/vermagic.h
18986 @@ -17,6 +17,48 @@
18987  #define MODULE_PROC_FAMILY "586MMX "
18988  #elif defined CONFIG_MCORE2
18989  #define MODULE_PROC_FAMILY "CORE2 "
18990 +#elif defined CONFIG_MNATIVE_INTEL
18991 +#define MODULE_PROC_FAMILY "NATIVE_INTEL "
18992 +#elif defined CONFIG_MNATIVE_AMD
18993 +#define MODULE_PROC_FAMILY "NATIVE_AMD "
18994 +#elif defined CONFIG_MNEHALEM
18995 +#define MODULE_PROC_FAMILY "NEHALEM "
18996 +#elif defined CONFIG_MWESTMERE
18997 +#define MODULE_PROC_FAMILY "WESTMERE "
18998 +#elif defined CONFIG_MSILVERMONT
18999 +#define MODULE_PROC_FAMILY "SILVERMONT "
19000 +#elif defined CONFIG_MGOLDMONT
19001 +#define MODULE_PROC_FAMILY "GOLDMONT "
19002 +#elif defined CONFIG_MGOLDMONTPLUS
19003 +#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
19004 +#elif defined CONFIG_MSANDYBRIDGE
19005 +#define MODULE_PROC_FAMILY "SANDYBRIDGE "
19006 +#elif defined CONFIG_MIVYBRIDGE
19007 +#define MODULE_PROC_FAMILY "IVYBRIDGE "
19008 +#elif defined CONFIG_MHASWELL
19009 +#define MODULE_PROC_FAMILY "HASWELL "
19010 +#elif defined CONFIG_MBROADWELL
19011 +#define MODULE_PROC_FAMILY "BROADWELL "
19012 +#elif defined CONFIG_MSKYLAKE
19013 +#define MODULE_PROC_FAMILY "SKYLAKE "
19014 +#elif defined CONFIG_MSKYLAKEX
19015 +#define MODULE_PROC_FAMILY "SKYLAKEX "
19016 +#elif defined CONFIG_MCANNONLAKE
19017 +#define MODULE_PROC_FAMILY "CANNONLAKE "
19018 +#elif defined CONFIG_MICELAKE
19019 +#define MODULE_PROC_FAMILY "ICELAKE "
19020 +#elif defined CONFIG_MCASCADELAKE
19021 +#define MODULE_PROC_FAMILY "CASCADELAKE "
19022 +#elif defined CONFIG_MCOOPERLAKE
19023 +#define MODULE_PROC_FAMILY "COOPERLAKE "
19024 +#elif defined CONFIG_MTIGERLAKE
19025 +#define MODULE_PROC_FAMILY "TIGERLAKE "
19026 +#elif defined CONFIG_MSAPPHIRERAPIDS
19027 +#define MODULE_PROC_FAMILY "SAPPHIRERAPIDS "
19028 +#elif defined CONFIG_MROCKETLAKE
19029 +#define MODULE_PROC_FAMILY "ROCKETLAKE "
19030 +#elif defined CONFIG_MALDERLAKE
19031 +#define MODULE_PROC_FAMILY "ALDERLAKE "
19032  #elif defined CONFIG_MATOM
19033  #define MODULE_PROC_FAMILY "ATOM "
19034  #elif defined CONFIG_M686
19035 @@ -35,6 +77,30 @@
19036  #define MODULE_PROC_FAMILY "K7 "
19037  #elif defined CONFIG_MK8
19038  #define MODULE_PROC_FAMILY "K8 "
19039 +#elif defined CONFIG_MK8SSE3
19040 +#define MODULE_PROC_FAMILY "K8SSE3 "
19041 +#elif defined CONFIG_MK10
19042 +#define MODULE_PROC_FAMILY "K10 "
19043 +#elif defined CONFIG_MBARCELONA
19044 +#define MODULE_PROC_FAMILY "BARCELONA "
19045 +#elif defined CONFIG_MBOBCAT
19046 +#define MODULE_PROC_FAMILY "BOBCAT "
19047 +#elif defined CONFIG_MBULLDOZER
19048 +#define MODULE_PROC_FAMILY "BULLDOZER "
19049 +#elif defined CONFIG_MPILEDRIVER
19050 +#define MODULE_PROC_FAMILY "PILEDRIVER "
19051 +#elif defined CONFIG_MSTEAMROLLER
19052 +#define MODULE_PROC_FAMILY "STEAMROLLER "
19053 +#elif defined CONFIG_MJAGUAR
19054 +#define MODULE_PROC_FAMILY "JAGUAR "
19055 +#elif defined CONFIG_MEXCAVATOR
19056 +#define MODULE_PROC_FAMILY "EXCAVATOR "
19057 +#elif defined CONFIG_MZEN
19058 +#define MODULE_PROC_FAMILY "ZEN "
19059 +#elif defined CONFIG_MZEN2
19060 +#define MODULE_PROC_FAMILY "ZEN2 "
19061 +#elif defined CONFIG_MZEN3
19062 +#define MODULE_PROC_FAMILY "ZEN3 "
19063  #elif defined CONFIG_MELAN
19064  #define MODULE_PROC_FAMILY "ELAN "
19065  #elif defined CONFIG_MCRUSOE
19066 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
19067 index 52bc217ca8c3..c9ddd233e32f 100644
19068 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
19069 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
19070 @@ -1671,6 +1671,9 @@ static __init int uv_system_init_hubless(void)
19071         if (rc < 0)
19072                 return rc;
19074 +       /* Set section block size for current node memory */
19075 +       set_block_size();
19077         /* Create user access node */
19078         if (rc >= 0)
19079                 uv_setup_proc_files(1);
19080 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
19081 index 347a956f71ca..eedb2b320946 100644
19082 --- a/arch/x86/kernel/cpu/amd.c
19083 +++ b/arch/x86/kernel/cpu/amd.c
19084 @@ -1170,3 +1170,19 @@ void set_dr_addr_mask(unsigned long mask, int dr)
19085                 break;
19086         }
19089 +u32 amd_get_highest_perf(void)
19091 +       struct cpuinfo_x86 *c = &boot_cpu_data;
19093 +       if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
19094 +                              (c->x86_model >= 0x70 && c->x86_model < 0x80)))
19095 +               return 166;
19097 +       if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
19098 +                              (c->x86_model >= 0x40 && c->x86_model < 0x70)))
19099 +               return 166;
19101 +       return 255;
19103 +EXPORT_SYMBOL_GPL(amd_get_highest_perf);
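amd_get_highest_perf() reports the CPPC highest-performance value: 166 for the listed Zen 2 and Zen 3 model ranges and the architectural maximum 255 otherwise. A plausible consumer-side use is sizing the boost headroom against nominal performance; in the sketch below the nominal_perf value and the fixed-point shift are illustrative stand-ins, since the caller is not part of this hunk:

    #include <stdint.h>
    #include <stdio.h>

    #define CAPACITY_SHIFT 10   /* fixed-point scale, 1024 == 1.0 */

    static uint64_t boost_ratio(uint32_t highest_perf, uint32_t nominal_perf)
    {
            return ((uint64_t)highest_perf << CAPACITY_SHIFT) / nominal_perf;
    }

    int main(void)
    {
            /* With an assumed nominal_perf of 138, highest_perf 166 gives
             * roughly 1.2x boost headroom (1231/1024). */
            printf("ratio = %llu/1024\n",
                   (unsigned long long)boost_ratio(166, 138));
            return 0;
    }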
19104 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
19105 index ab640abe26b6..1e576cc831c1 100644
19106 --- a/arch/x86/kernel/cpu/common.c
19107 +++ b/arch/x86/kernel/cpu/common.c
19108 @@ -1850,7 +1850,7 @@ static inline void setup_getcpu(int cpu)
19109         unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
19110         struct desc_struct d = { };
19112 -       if (boot_cpu_has(X86_FEATURE_RDTSCP))
19113 +       if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
19114                 write_rdtscp_aux(cpudata);
19116         /* Store CPU and node number in limit. */
19117 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
19118 index b935e1b5f115..6a6318e9590c 100644
19119 --- a/arch/x86/kernel/cpu/microcode/core.c
19120 +++ b/arch/x86/kernel/cpu/microcode/core.c
19121 @@ -629,16 +629,16 @@ static ssize_t reload_store(struct device *dev,
19122         if (val != 1)
19123                 return size;
19125 -       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
19126 -       if (tmp_ret != UCODE_NEW)
19127 -               return size;
19129         get_online_cpus();
19131         ret = check_online_cpus();
19132         if (ret)
19133                 goto put;
19135 +       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
19136 +       if (tmp_ret != UCODE_NEW)
19137 +               goto put;
19139         mutex_lock(&microcode_mutex);
19140         ret = microcode_reload_late();
19141         mutex_unlock(&microcode_mutex);
19142 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
19143 index 22aad412f965..629c4994f165 100644
19144 --- a/arch/x86/kernel/e820.c
19145 +++ b/arch/x86/kernel/e820.c
19146 @@ -31,8 +31,8 @@
19147   *       - inform the user about the firmware's notion of memory layout
19148   *         via /sys/firmware/memmap
19149   *
19150 - *       - the hibernation code uses it to generate a kernel-independent MD5
19151 - *         fingerprint of the physical memory layout of a system.
19152 + *       - the hibernation code uses it to generate a kernel-independent CRC32
19153 + *         checksum of the physical memory layout of a system.
19154   *
19155   * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
19156   *   passed to us by the bootloader - the major difference between
19157 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
19158 index df776cdca327..0bb9fe021bbe 100644
19159 --- a/arch/x86/kernel/kprobes/core.c
19160 +++ b/arch/x86/kernel/kprobes/core.c
19161 @@ -139,6 +139,8 @@ NOKPROBE_SYMBOL(synthesize_relcall);
19162  int can_boost(struct insn *insn, void *addr)
19164         kprobe_opcode_t opcode;
19165 +       insn_byte_t prefix;
19166 +       int i;
19168         if (search_exception_tables((unsigned long)addr))
19169                 return 0;       /* Page fault may occur on this address. */
19170 @@ -151,9 +153,14 @@ int can_boost(struct insn *insn, void *addr)
19171         if (insn->opcode.nbytes != 1)
19172                 return 0;
19174 -       /* Can't boost Address-size override prefix */
19175 -       if (unlikely(inat_is_address_size_prefix(insn->attr)))
19176 -               return 0;
19177 +       for_each_insn_prefix(insn, i, prefix) {
19178 +               insn_attr_t attr;
19180 +               attr = inat_get_opcode_attribute(prefix);
19181 +               /* Can't boost Address-size override prefix and CS override prefix */
19182 +               if (prefix == 0x2e || inat_is_address_size_prefix(attr))
19183 +                       return 0;
19184 +       }
19186         opcode = insn->opcode.bytes[0];
19188 @@ -178,8 +185,8 @@ int can_boost(struct insn *insn, void *addr)
19189                 /* clear and set flags are boostable */
19190                 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
19191         default:
19192 -               /* CS override prefix and call are not boostable */
19193 -               return (opcode != 0x2e && opcode != 0x9a);
19194 +               /* call is not boostable */
19195 +               return opcode != 0x9a;
19196         }
19199 @@ -448,7 +455,11 @@ static void set_resume_flags(struct kprobe *p, struct insn *insn)
19200                 break;
19201  #endif
19202         case 0xff:
19203 -               opcode = insn->opcode.bytes[1];
19204 +               /*
19205 +                * Since 0xff is an extended group opcode, the instruction
19206 +                * is determined by the MOD/RM byte.
19207 +                */
19208 +               opcode = insn->modrm.bytes[0];
19209                 if ((opcode & 0x30) == 0x10) {
19210                         /*
19211                          * call absolute, indirect
19212 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
19213 index bf250a339655..2ef961cf4cfc 100644
19214 --- a/arch/x86/kernel/nmi.c
19215 +++ b/arch/x86/kernel/nmi.c
19216 @@ -524,6 +524,16 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
19217                 mds_user_clear_cpu_buffers();
19220 +#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
19221 +DEFINE_IDTENTRY_RAW(exc_nmi_noist)
19223 +       exc_nmi(regs);
19225 +#endif
19226 +#if IS_MODULE(CONFIG_KVM_INTEL)
19227 +EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
19228 +#endif
19230  void stop_nmi(void)
19232         ignore_nmis++;
19233 diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
19234 index cdc04d091242..ecb20b17b7df 100644
19235 --- a/arch/x86/kernel/sev-es-shared.c
19236 +++ b/arch/x86/kernel/sev-es-shared.c
19237 @@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
19239  static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
19241 +       ghcb->save.sw_exit_code = 0;
19242         memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
19245 @@ -186,7 +187,6 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
19246          * make it accessible to the hypervisor.
19247          *
19248          * In particular, check for:
19249 -        *      - Hypervisor CPUID bit
19250          *      - Availability of CPUID leaf 0x8000001f
19251          *      - SEV CPUID bit.
19252          *
19253 @@ -194,10 +194,7 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
19254          * can't be checked here.
19255          */
19257 -       if ((fn == 1 && !(regs->cx & BIT(31))))
19258 -               /* Hypervisor bit */
19259 -               goto fail;
19260 -       else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
19261 +       if (fn == 0x80000000 && (regs->ax < 0x8000001f))
19262                 /* SEV leaf check */
19263                 goto fail;
19264         else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
19265 diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
19266 index 04a780abb512..e0cdab7cb632 100644
19267 --- a/arch/x86/kernel/sev-es.c
19268 +++ b/arch/x86/kernel/sev-es.c
19269 @@ -191,8 +191,18 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
19270         if (unlikely(data->ghcb_active)) {
19271                 /* GHCB is already in use - save its contents */
19273 -               if (unlikely(data->backup_ghcb_active))
19274 -                       return NULL;
19275 +               if (unlikely(data->backup_ghcb_active)) {
19276 +                       /*
19277 +                        * Backup-GHCB is also already in use. There is no way
19278 +                        * to continue here so just kill the machine. To make
19279 +                        * panic() work, mark GHCBs inactive so that messages
19280 +                        * can be printed out.
19281 +                        */
19282 +                       data->ghcb_active        = false;
19283 +                       data->backup_ghcb_active = false;
19285 +                       panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
19286 +               }
19288                 /* Mark backup_ghcb active before writing to it */
19289                 data->backup_ghcb_active = true;
19290 @@ -209,24 +219,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
19291         return ghcb;
19294 -static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
19296 -       struct sev_es_runtime_data *data;
19297 -       struct ghcb *ghcb;
19299 -       data = this_cpu_read(runtime_data);
19300 -       ghcb = &data->ghcb_page;
19302 -       if (state->ghcb) {
19303 -               /* Restore GHCB from Backup */
19304 -               *ghcb = *state->ghcb;
19305 -               data->backup_ghcb_active = false;
19306 -               state->ghcb = NULL;
19307 -       } else {
19308 -               data->ghcb_active = false;
19309 -       }
19312  /* Needed in vc_early_forward_exception */
19313  void do_early_exception(struct pt_regs *regs, int trapnr);
19315 @@ -296,31 +288,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
19316         u16 d2;
19317         u8  d1;
19319 -       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
19320 -       if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
19321 -               memcpy(dst, buf, size);
19322 -               return ES_OK;
19323 -       }
19325 +       /*
19326 +        * This function uses __put_user() independent of whether kernel or user
19327 +        * memory is accessed. This works fine because __put_user() does no
19328 +        * sanity checks of the pointer being accessed. All it does is
19329 +        * report whether the access failed.
19330 +        *
19331 +        * Also, this function runs in atomic context, so __put_user() is not
19332 +        * allowed to sleep. The page-fault handler detects that it is running
19333 +        * in atomic context and will not try to take mmap_sem and handle the
19334 +        * fault, so additional pagefault_enable()/disable() calls are not
19335 +        * needed.
19336 +        *
19337 +        * The access can't be done via copy_to_user() here because
19338 +        * vc_write_mem() must not use string instructions to access unsafe
19339 +        * memory. The reason is that MOVS is emulated by the #VC handler by
19340 +        * splitting the move up into a read and a write and taking a nested #VC
19341 +        * exception on whichever of them is the MMIO access. Using string
19342 +        * instructions here would cause infinite nesting.
19343 +        */
19344         switch (size) {
19345         case 1:
19346                 memcpy(&d1, buf, 1);
19347 -               if (put_user(d1, target))
19348 +               if (__put_user(d1, target))
19349                         goto fault;
19350                 break;
19351         case 2:
19352                 memcpy(&d2, buf, 2);
19353 -               if (put_user(d2, target))
19354 +               if (__put_user(d2, target))
19355                         goto fault;
19356                 break;
19357         case 4:
19358                 memcpy(&d4, buf, 4);
19359 -               if (put_user(d4, target))
19360 +               if (__put_user(d4, target))
19361                         goto fault;
19362                 break;
19363         case 8:
19364                 memcpy(&d8, buf, 8);
19365 -               if (put_user(d8, target))
19366 +               if (__put_user(d8, target))
19367                         goto fault;
19368                 break;
19369         default:
19370 @@ -351,30 +356,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
19371         u16 d2;
19372         u8  d1;
19374 -       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
19375 -       if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
19376 -               memcpy(buf, src, size);
19377 -               return ES_OK;
19378 -       }
19380 +       /*
19381 +        * This function uses __get_user() independent of whether kernel or user
19382 +        * memory is accessed. This works fine because __get_user() does no
19383 +        * sanity checks of the pointer being accessed. All it does is
19384 +        * report whether the access failed.
19385 +        *
19386 +        * Also, this function runs in atomic context, so __get_user() is not
19387 +        * allowed to sleep. The page-fault handler detects that it is running
19388 +        * in atomic context and will not try to take mmap_sem and handle the
19389 +        * fault, so additional pagefault_enable()/disable() calls are not
19390 +        * needed.
19391 +        *
19392 +        * The access can't be done via copy_from_user() here because
19393 +        * vc_read_mem() must not use string instructions to access unsafe
19394 +        * memory. The reason is that MOVS is emulated by the #VC handler by
19395 +        * splitting the move up into a read and a write and taking a nested #VC
19396 +        * exception on whichever of them is the MMIO access. Using string
19397 +        * instructions here would cause infinite nesting.
19398 +        */
19399         switch (size) {
19400         case 1:
19401 -               if (get_user(d1, s))
19402 +               if (__get_user(d1, s))
19403                         goto fault;
19404                 memcpy(buf, &d1, 1);
19405                 break;
19406         case 2:
19407 -               if (get_user(d2, s))
19408 +               if (__get_user(d2, s))
19409                         goto fault;
19410                 memcpy(buf, &d2, 2);
19411                 break;
19412         case 4:
19413 -               if (get_user(d4, s))
19414 +               if (__get_user(d4, s))
19415                         goto fault;
19416                 memcpy(buf, &d4, 4);
19417                 break;
19418         case 8:
19419 -               if (get_user(d8, s))
19420 +               if (__get_user(d8, s))
19421                         goto fault;
19422                 memcpy(buf, &d8, 8);
19423                 break;
19424 @@ -434,6 +452,29 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
19425  /* Include code shared with pre-decompression boot stage */
19426  #include "sev-es-shared.c"
19428 +static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
19430 +       struct sev_es_runtime_data *data;
19431 +       struct ghcb *ghcb;
19433 +       data = this_cpu_read(runtime_data);
19434 +       ghcb = &data->ghcb_page;
19436 +       if (state->ghcb) {
19437 +               /* Restore GHCB from Backup */
19438 +               *ghcb = *state->ghcb;
19439 +               data->backup_ghcb_active = false;
19440 +               state->ghcb = NULL;
19441 +       } else {
19442 +               /*
19443 +                * Invalidate the GHCB so a VMGEXIT instruction issued
19444 +                * from userspace won't appear to be valid.
19445 +                */
19446 +               vc_ghcb_invalidate(ghcb);
19447 +               data->ghcb_active = false;
19448 +       }
19451  void noinstr __sev_es_nmi_complete(void)
19453         struct ghcb_state state;
19454 @@ -1228,6 +1269,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
19455         case X86_TRAP_UD:
19456                 exc_invalid_op(ctxt->regs);
19457                 break;
19458 +       case X86_TRAP_PF:
19459 +               write_cr2(ctxt->fi.cr2);
19460 +               exc_page_fault(ctxt->regs, error_code);
19461 +               break;
19462         case X86_TRAP_AC:
19463                 exc_alignment_check(ctxt->regs, error_code);
19464                 break;
19465 @@ -1257,7 +1302,6 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
19466   */
19467  DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
19469 -       struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
19470         irqentry_state_t irq_state;
19471         struct ghcb_state state;
19472         struct es_em_ctxt ctxt;
19473 @@ -1283,16 +1327,6 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
19474          */
19476         ghcb = sev_es_get_ghcb(&state);
19477 -       if (!ghcb) {
19478 -               /*
19479 -                * Mark GHCBs inactive so that panic() is able to print the
19480 -                * message.
19481 -                */
19482 -               data->ghcb_active        = false;
19483 -               data->backup_ghcb_active = false;
19485 -               panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
19486 -       }
19488         vc_ghcb_invalidate(ghcb);
19489         result = vc_init_em_ctxt(&ctxt, regs, error_code);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 16703c35a944..363b36bbd791 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -458,29 +458,52 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
        return false;
 }

+static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+{
+       if (c->phys_proc_id == o->phys_proc_id &&
+           c->cpu_die_id == o->cpu_die_id)
+               return true;
+       return false;
+}
+
 /*
- * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
+ * Unlike the other levels, we do not enforce keeping a
+ * multicore group inside a NUMA node.  If this happens, we will
+ * discard the MC level of the topology later.
+ */
+static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+{
+       if (c->phys_proc_id == o->phys_proc_id)
+               return true;
+       return false;
+}
+
+/*
+ * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
  *
- * These are Intel CPUs that enumerate an LLC that is shared by
- * multiple NUMA nodes. The LLC on these systems is shared for
- * off-package data access but private to the NUMA node (half
- * of the package) for on-package access.
+ * Any Intel CPU that has multiple nodes per package and does not
+ * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
  *
- * CPUID (the source of the information about the LLC) can only
- * enumerate the cache as being shared *or* unshared, but not
- * this particular configuration. The CPU in this case enumerates
- * the cache to be shared across the entire package (spanning both
- * NUMA nodes).
+ * When in SNC mode, these CPUs enumerate an LLC that is shared
+ * by multiple NUMA nodes. The LLC is shared for off-package data
+ * access but private to the NUMA node (half of the package) for
+ * on-package access. CPUID (the source of the information about
+ * the LLC) can only enumerate the cache as shared or unshared,
+ * but not this particular configuration.
  */

-static const struct x86_cpu_id snc_cpu[] = {
-       X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+static const struct x86_cpu_id intel_cod_cpu[] = {
+       X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0),       /* COD */
+       X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0),     /* COD */
+       X86_MATCH_INTEL_FAM6_MODEL(ANY, 1),             /* SNC */
        {}
 };

 static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
+       const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
        int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
+       bool intel_snc = id && id->driver_data;

        /* Do not match if we do not have a valid APICID for cpu: */
        if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
@@ -495,32 +518,12 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
         * means 'c' does not share the LLC of 'o'. This will be
         * reflected to userspace.
         */
-       if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
+       if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
                return false;

        return topology_sane(c, o, "llc");
 }

-/*
- * Unlike the other levels, we do not enforce keeping a
- * multicore group inside a NUMA node.  If this happens, we will
- * discard the MC level of the topology later.
- */
-static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
-{
-       if (c->phys_proc_id == o->phys_proc_id)
-               return true;
-       return false;
-}
-
-static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
-{
-       if ((c->phys_proc_id == o->phys_proc_id) &&
-               (c->cpu_die_id == o->cpu_die_id))
-               return true;
-       return false;
-}
-

 #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
 static inline int x86_sched_itmt_flags(void)
@@ -592,14 +595,23 @@ void set_cpu_sibling_map(int cpu)
        for_each_cpu(i, cpu_sibling_setup_mask) {
                o = &cpu_data(i);

+               if (match_pkg(c, o) && !topology_same_node(c, o))
+                       x86_has_numa_in_package = true;
+
                if ((i == cpu) || (has_smt && match_smt(c, o)))
                        link_mask(topology_sibling_cpumask, cpu, i);

                if ((i == cpu) || (has_mp && match_llc(c, o)))
                        link_mask(cpu_llc_shared_mask, cpu, i);

+               if ((i == cpu) || (has_mp && match_die(c, o)))
+                       link_mask(topology_die_cpumask, cpu, i);
        }

+       threads = cpumask_weight(topology_sibling_cpumask(cpu));
+       if (threads > __max_smt_threads)
+               __max_smt_threads = threads;
+
        /*
         * This needs a separate iteration over the cpus because we rely on all
         * topology_sibling_cpumask links to be set-up.
@@ -613,8 +625,7 @@ void set_cpu_sibling_map(int cpu)
                        /*
                         *  Does this new cpu bringup a new core?
                         */
-                       if (cpumask_weight(
-                           topology_sibling_cpumask(cpu)) == 1) {
+                       if (threads == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
@@ -631,16 +642,7 @@ void set_cpu_sibling_map(int cpu)
                        } else if (i != cpu && !c->booted_cores)
                                c->booted_cores = cpu_data(i).booted_cores;
                }
-               if (match_pkg(c, o) && !topology_same_node(c, o))
-                       x86_has_numa_in_package = true;
-
-               if ((i == cpu) || (has_mp && match_die(c, o)))
-                       link_mask(topology_die_cpumask, cpu, i);
        }
-
-       threads = cpumask_weight(topology_sibling_cpumask(cpu));
-       if (threads > __max_smt_threads)
-               __max_smt_threads = threads;
 }

 /* maps the cpu to the sched domain representing multi-core */
@@ -2044,7 +2046,7 @@ static bool amd_set_max_freq_ratio(void)
                return false;
        }

-       highest_perf = perf_caps.highest_perf;
+       highest_perf = amd_get_highest_perf();
        nominal_perf = perf_caps.nominal_perf;

        if (!highest_perf || !nominal_perf) {
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 6bd2f8b830e4..62f795352c02 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -589,7 +589,8 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
        case 7:
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                entry->eax = 0;
-               entry->ecx = F(RDPID);
+               if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
+                       entry->ecx = F(RDPID);
                ++array->nent;
        default:
                break;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index f7970ba6219f..8fc71e70857d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4220,7 +4220,7 @@ static bool valid_cr(int nr)
        }
 }

-static int check_cr_read(struct x86_emulate_ctxt *ctxt)
+static int check_cr_access(struct x86_emulate_ctxt *ctxt)
 {
        if (!valid_cr(ctxt->modrm_reg))
                return emulate_ud(ctxt);
@@ -4228,80 +4228,6 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }

-static int check_cr_write(struct x86_emulate_ctxt *ctxt)
-{
-       u64 new_val = ctxt->src.val64;
-       int cr = ctxt->modrm_reg;
-       u64 efer = 0;
-
-       static u64 cr_reserved_bits[] = {
-               0xffffffff00000000ULL,
-               0, 0, 0, /* CR3 checked later */
-               CR4_RESERVED_BITS,
-               0, 0, 0,
-               CR8_RESERVED_BITS,
-       };
-
-       if (!valid_cr(cr))
-               return emulate_ud(ctxt);
-
-       if (new_val & cr_reserved_bits[cr])
-               return emulate_gp(ctxt, 0);
-
-       switch (cr) {
-       case 0: {
-               u64 cr4;
-               if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
-                   ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
-                       return emulate_gp(ctxt, 0);
-
-               cr4 = ctxt->ops->get_cr(ctxt, 4);
-               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
-
-               if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
-                   !(cr4 & X86_CR4_PAE))
-                       return emulate_gp(ctxt, 0);
-
-               break;
-               }
-       case 3: {
-               u64 rsvd = 0;
-
-               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
-               if (efer & EFER_LMA) {
-                       u64 maxphyaddr;
-                       u32 eax, ebx, ecx, edx;
-
-                       eax = 0x80000008;
-                       ecx = 0;
-                       if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
-                                                &edx, true))
-                               maxphyaddr = eax & 0xff;
-                       else
-                               maxphyaddr = 36;
-                       rsvd = rsvd_bits(maxphyaddr, 63);
-                       if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
-                               rsvd &= ~X86_CR3_PCID_NOFLUSH;
-               }
-
-               if (new_val & rsvd)
-                       return emulate_gp(ctxt, 0);
-
-               break;
-               }
-       case 4: {
-               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
-
-               if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
-                       return emulate_gp(ctxt, 0);
-
-               break;
-               }
-       }
-
-       return X86EMUL_CONTINUE;
-}
-
 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
 {
        unsigned long dr7;
@@ -4576,7 +4502,7 @@ static const struct opcode group8[] = {
  * from the register case of group9.
  */
 static const struct gprefix pfx_0f_c7_7 = {
-       N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
+       N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
 };


@@ -4841,10 +4767,10 @@ static const struct opcode twobyte_table[256] = {
        D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
        D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
        /* 0x20 - 0x2F */
-       DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
+       DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
        DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
        IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
-                                               check_cr_write),
+                                               check_cr_access),
        IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
                                                check_dr_write),
        N, N, N, N,
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 0d359115429a..f016838faedd 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -468,6 +468,7 @@ enum x86_intercept {
        x86_intercept_clgi,
        x86_intercept_skinit,
        x86_intercept_rdtscp,
+       x86_intercept_rdpid,
        x86_intercept_icebp,
        x86_intercept_wbinvd,
        x86_intercept_monitor,
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cc369b9ad8f1..fa023f3feb25 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -296,6 +296,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)

                atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
        }
+
+       /* Check if there are APF page ready requests pending */
+       if (enabled)
+               kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
 }

 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
@@ -1909,8 +1913,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
        if (!apic->lapic_timer.hv_timer_in_use)
                goto out;
        WARN_ON(rcuwait_active(&vcpu->wait));
-       cancel_hv_timer(apic);
        apic_timer_expired(apic, false);
+       cancel_hv_timer(apic);

        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                advance_periodic_target_expiration(apic);
@@ -2261,6 +2265,8 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
                if (value & MSR_IA32_APICBASE_ENABLE) {
                        kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
                        static_branch_slow_dec_deferred(&apic_hw_disabled);
+                       /* Check if there are APF page ready requests pending */
+                       kvm_make_request(KVM_REQ_APF_READY, vcpu);
                } else {
                        static_branch_inc(&apic_hw_disabled.key);
                        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 951dae4e7175..cd0faa187674 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3193,14 +3193,14 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
                    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
                        mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
-               } else {
+               } else if (mmu->pae_root) {
                        for (i = 0; i < 4; ++i)
                                if (mmu->pae_root[i] != 0)
                                        mmu_free_root_page(kvm,
                                                           &mmu->pae_root[i],
                                                           &invalid_list);
-                       mmu->root_hpa = INVALID_PAGE;
                }
+               mmu->root_hpa = INVALID_PAGE;
                mmu->root_pgd = 0;
        }

@@ -3312,9 +3312,23 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
         * the shadow page table may be a PAE or a long mode page table.
         */
        pm_mask = PT_PRESENT_MASK;
-       if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
+       if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
                pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
+
+               /*
+                * Allocate the page for the PDPTEs when shadowing 32-bit NPT
+                * with 64-bit only when needed.  Unlike 32-bit NPT, it doesn't
+                * need to be in low mem.  See also lm_root below.
+                */
+               if (!vcpu->arch.mmu->pae_root) {
+                       WARN_ON_ONCE(!tdp_enabled);
+
+                       vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+                       if (!vcpu->arch.mmu->pae_root)
+                               return -ENOMEM;
+               }
+       }

        for (i = 0; i < 4; ++i) {
                MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
                if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
@@ -3337,21 +3351,19 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);

        /*
-        * If we shadow a 32 bit page table with a long mode page
-        * table we enter this path.
+        * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
+        * tables are allocated and initialized at MMU creation as there is no
+        * equivalent level in the guest's NPT to shadow.  Allocate the tables
+        * on demand, as running a 32-bit L1 VMM is very rare.  The PDP is
+        * handled above (to share logic with PAE), deal with the PML4 here.
         */
        if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
                if (vcpu->arch.mmu->lm_root == NULL) {
-                       /*
-                        * The additional page necessary for this is only
-                        * allocated on demand.
-                        */
-
                        u64 *lm_root;

                        lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
-                       if (lm_root == NULL)
-                               return 1;
+                       if (!lm_root)
+                               return -ENOMEM;

                        lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;

@@ -3653,6 +3665,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
        struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        bool async;

+       /*
+        * Retry the page fault if the gfn hit a memslot that is being deleted
+        * or moved.  This ensures any existing SPTEs for the old memslot will
+        * be zapped before KVM inserts a new MMIO SPTE for the gfn.
+        */
+       if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
+               return true;
+
        /* Don't expose private memslots to L2. */
        if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
                *pfn = KVM_PFN_NOSLOT;
@@ -4615,12 +4635,17 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
        struct kvm_mmu *context = &vcpu->arch.guest_mmu;
        union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);

-       context->shadow_root_level = new_role.base.level;
-
        __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);

-       if (new_role.as_u64 != context->mmu_role.as_u64)
+       if (new_role.as_u64 != context->mmu_role.as_u64) {
                shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+
+               /*
+                * Override the level set by the common init helper, nested TDP
+                * always uses the host's TDP configuration.
+                */
+               context->shadow_root_level = new_role.base.level;
+       }
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);

@@ -5240,9 +5265,11 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
         * while the PDP table is a per-vCPU construct that's allocated at MMU
         * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
         * x86_64.  Therefore we need to allocate the PDP table in the first
-        * 4GB of memory, which happens to fit the DMA32 zone.  Except for
-        * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
-        * skip allocating the PDP table.
+        * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
+        * generally doesn't use PAE paging and can skip allocating the PDP
+        * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
+        * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
+        * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
         */
        if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
                return 0;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 874ea309279f..dbc6214d69de 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -87,7 +87,7 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
        return true;
 }

-static int sev_asid_new(struct kvm_sev_info *sev)
+static int sev_asid_new(bool es_active)
 {
        int pos, min_asid, max_asid;
        bool retry = true;
@@ -98,8 +98,8 @@ static int sev_asid_new(struct kvm_sev_info *sev)
         * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
         * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
         */
-       min_asid = sev->es_active ? 0 : min_sev_asid - 1;
-       max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
+       min_asid = es_active ? 0 : min_sev_asid - 1;
+       max_asid = es_active ? min_sev_asid - 1 : max_sev_asid;
 again:
        pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
        if (pos >= max_asid) {
@@ -179,13 +179,17 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       bool es_active = argp->id == KVM_SEV_ES_INIT;
        int asid, ret;

+       if (kvm->created_vcpus)
+               return -EINVAL;
+
        ret = -EBUSY;
        if (unlikely(sev->active))
                return ret;

-       asid = sev_asid_new(sev);
+       asid = sev_asid_new(es_active);
        if (asid < 0)
                return ret;

@@ -194,6 +198,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
                goto e_free;

        sev->active = true;
+       sev->es_active = es_active;
        sev->asid = asid;
        INIT_LIST_HEAD(&sev->regions_list);

@@ -204,16 +209,6 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
        return ret;
 }

-static int sev_es_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
-{
-       if (!sev_es)
-               return -ENOTTY;
-
-       to_kvm_svm(kvm)->sev_info.es_active = true;
-
-       return sev_guest_init(kvm, argp);
-}
-
 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
 {
        struct sev_data_activate *data;
@@ -564,6 +559,7 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_update_vmsa *vmsa;
+       struct kvm_vcpu *vcpu;
        int i, ret;

        if (!sev_es_guest(kvm))
@@ -573,8 +569,8 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (!vmsa)
                return -ENOMEM;

-       for (i = 0; i < kvm->created_vcpus; i++) {
-               struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               struct vcpu_svm *svm = to_svm(vcpu);

                /* Perform some pre-encryption checks against the VMSA */
                ret = sev_es_sync_vmsa(svm);
@@ -1127,12 +1123,15 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
        mutex_lock(&kvm->lock);

        switch (sev_cmd.id) {
+       case KVM_SEV_ES_INIT:
+               if (!sev_es) {
+                       r = -ENOTTY;
+                       goto out;
+               }
+               fallthrough;
        case KVM_SEV_INIT:
                r = sev_guest_init(kvm, &sev_cmd);
                break;
-       case KVM_SEV_ES_INIT:
-               r = sev_es_guest_init(kvm, &sev_cmd);
-               break;
        case KVM_SEV_LAUNCH_START:
                r = sev_launch_start(kvm, &sev_cmd);
                break;
@@ -1349,8 +1348,11 @@ void __init sev_hardware_setup(void)
                goto out;

        sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
-       if (!sev_reclaim_asid_bitmap)
+       if (!sev_reclaim_asid_bitmap) {
+               bitmap_free(sev_asid_bitmap);
+               sev_asid_bitmap = NULL;
                goto out;
+       }

        pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1);
        sev_supported = true;
@@ -1666,7 +1668,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
        return -EINVAL;
 }

-static void pre_sev_es_run(struct vcpu_svm *svm)
+void sev_es_unmap_ghcb(struct vcpu_svm *svm)
 {
        if (!svm->ghcb)
                return;
@@ -1702,9 +1704,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
        int asid = sev_get_asid(svm->vcpu.kvm);

-       /* Perform any SEV-ES pre-run actions */
-       pre_sev_es_run(svm);
-
        /* Assign the asid allocated with this SEV guest */
        svm->asid = asid;

@@ -2104,5 +2103,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
         * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
         * non-zero value.
         */
+       if (!svm->ghcb)
+               return;
+
        ghcb_set_sw_exit_info_2(svm->ghcb, 1);
 }
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 58a45bb139f8..9a6825feaf53 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -564,9 +564,8 @@ static int svm_cpu_init(int cpu)
        clear_page(page_address(sd->save_area));

        if (svm_sev_enabled()) {
-               sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
-                                             sizeof(void *),
-                                             GFP_KERNEL);
+               sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *),
+                                       GFP_KERNEL);
                if (!sd->sev_vmcbs)
                        goto free_save_area;
        }
@@ -969,21 +968,6 @@ static __init int svm_hardware_setup(void)
                kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
        }

-       if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev) {
-               sev_hardware_setup();
-       } else {
-               sev = false;
-               sev_es = false;
-       }
-
-       svm_adjust_mmio_mask();
-
-       for_each_possible_cpu(cpu) {
-               r = svm_cpu_init(cpu);
-               if (r)
-                       goto err;
-       }
-
        /*
         * KVM's MMU doesn't support using 2-level paging for itself, and thus
         * NPT isn't supported if the host is using 2-level paging since host
@@ -998,6 +982,21 @@ static __init int svm_hardware_setup(void)
        kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
        pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

+       if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev && npt_enabled) {
+               sev_hardware_setup();
+       } else {
+               sev = false;
+               sev_es = false;
+       }
+
+       svm_adjust_mmio_mask();
+
+       for_each_possible_cpu(cpu) {
+               r = svm_cpu_init(cpu);
+               if (r)
+                       goto err;
+       }
+
        if (nrips) {
                if (!boot_cpu_has(X86_FEATURE_NRIPS))
                        nrips = false;
@@ -1417,6 +1416,9 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
        struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
        unsigned int i;

+       if (sev_es_guest(vcpu->kvm))
+               sev_es_unmap_ghcb(svm);
+
        if (svm->guest_state_loaded)
                return;

@@ -1898,7 +1900,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)

 static int pf_interception(struct vcpu_svm *svm)
 {
-       u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
+       u64 fault_address = svm->vmcb->control.exit_info_2;
        u64 error_code = svm->vmcb->control.exit_info_1;

        return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
@@ -2738,6 +2740,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_TSC_AUX:
                if (!boot_cpu_has(X86_FEATURE_RDTSCP))
                        return 1;
+               if (!msr_info->host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+                       return 1;
                msr_info->data = svm->tsc_aux;
                break;
        /*
@@ -2809,7 +2815,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       if (!sev_es_guest(svm->vcpu.kvm) || !err)
+       if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
                return kvm_complete_insn_gp(&svm->vcpu, err);

        ghcb_set_sw_exit_info_1(svm->ghcb, 1);
@@ -2946,6 +2952,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                if (!boot_cpu_has(X86_FEATURE_RDTSCP))
                        return 1;

+               if (!msr->host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+                       return 1;
+
                /*
                 * This is rare, so we update the MSR here instead of using
                 * direct_access_msrs.  Doing that would require a rdmsr in
@@ -3804,15 +3815,15 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
         * have them in state 'on' as recorded before entering guest mode.
         * Same as enter_from_user_mode().
         *
-        * guest_exit_irqoff() restores host context and reinstates RCU if
-        * enabled and required.
+        * context_tracking_guest_exit() restores host context and reinstates
+        * RCU if enabled and required.
         *
         * This needs to be done before the below as native_read_msr()
         * contains a tracepoint and x86_spec_ctrl_restore_host() calls
         * into world and some more.
         */
        lockdep_hardirqs_off(CALLER_ADDR0);
-       guest_exit_irqoff();
+       context_tracking_guest_exit();

        instrumentation_begin();
        trace_hardirqs_off_finish();
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 39e071fdab0c..98da0b91f273 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -571,6 +571,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_create_vcpu(struct vcpu_svm *svm);
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
+void sev_es_unmap_ghcb(struct vcpu_svm *svm);

 /* vmenter.S */

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index bcca0b80e0d0..4ba2a43e188b 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -619,6 +619,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
        }

        /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
+#ifdef CONFIG_X86_64
        nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
                                             MSR_FS_BASE, MSR_TYPE_RW);

@@ -627,6 +628,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,

        nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
                                             MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+#endif

        /*
         * Checking the L0->L1 bitmap is trying to verify two things:
@@ -3098,15 +3100,8 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
                        nested_vmx_handle_enlightened_vmptrld(vcpu, false);

                if (evmptrld_status == EVMPTRLD_VMFAIL ||
-                   evmptrld_status == EVMPTRLD_ERROR) {
-                       pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
-                                            __func__);
-                       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       vcpu->run->internal.suberror =
-                               KVM_INTERNAL_ERROR_EMULATION;
-                       vcpu->run->internal.ndata = 0;
+                   evmptrld_status == EVMPTRLD_ERROR)
                        return false;
-               }
        }

        return true;
@@ -3194,8 +3189,16 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)

 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
-       if (!nested_get_evmcs_page(vcpu))
+       if (!nested_get_evmcs_page(vcpu)) {
+               pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
+                                    __func__);
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror =
+                       KVM_INTERNAL_ERROR_EMULATION;
+               vcpu->run->internal.ndata = 0;
+
                return false;
+       }

        if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
                return false;
@@ -4422,7 +4425,15 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
        /* trying to cancel vmlaunch/vmresume is a bug */
        WARN_ON_ONCE(vmx->nested.nested_run_pending);

-       kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+       if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+               /*
+                * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
+                * Enlightened VMCS after migration and we still need to
+                * do that when something is forcing L2->L1 exit prior to
+                * the first L2 run.
+                */
+               (void)nested_get_evmcs_page(vcpu);
+       }

        /* Service the TLB flush request for L2 before switching to L1. */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
@@ -4601,9 +4612,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
        else if (addr_size == 0)
                off = (gva_t)sign_extend64(off, 15);
        if (base_is_valid)
-               off += kvm_register_read(vcpu, base_reg);
+               off += kvm_register_readl(vcpu, base_reg);
        if (index_is_valid)
-               off += kvm_register_read(vcpu, index_reg) << scaling;
+               off += kvm_register_readl(vcpu, index_reg) << scaling;
        vmx_get_segment(vcpu, &s, seg_reg);

        /*
@@ -5479,16 +5490,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
                if (!nested_vmx_check_eptp(vcpu, new_eptp))
                        return 1;

-               kvm_mmu_unload(vcpu);
                mmu->ept_ad = accessed_dirty;
                mmu->mmu_role.base.ad_disabled = !accessed_dirty;
                vmcs12->ept_pointer = new_eptp;
-               /*
-                * TODO: Check what's the correct approach in case
-                * mmu reload fails. Currently, we just let the next
-                * reload potentially fail
-                */
-               kvm_mmu_reload(vcpu);
+
+               kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
        }

        return 0;
@@ -5717,7 +5723,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,

        /* Decode instruction info and find the field to access */
        vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-       field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+       field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));

        /* Out-of-range fields always cause a VM exit from L2 to L1 */
        if (field >> 15)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 29b40e092d13..ae63d59be38c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -36,6 +36,7 @@
 #include <asm/debugreg.h>
 #include <asm/desc.h>
 #include <asm/fpu/internal.h>
+#include <asm/idtentry.h>
 #include <asm/io.h>
 #include <asm/irq_remapping.h>
 #include <asm/kexec.h>
@@ -156,9 +157,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
        MSR_IA32_SPEC_CTRL,
        MSR_IA32_PRED_CMD,
        MSR_IA32_TSC,
+#ifdef CONFIG_X86_64
        MSR_FS_BASE,
        MSR_GS_BASE,
        MSR_KERNEL_GS_BASE,
+#endif
        MSR_IA32_SYSENTER_CS,
        MSR_IA32_SYSENTER_ESP,
        MSR_IA32_SYSENTER_EIP,
@@ -1731,7 +1734,8 @@ static void setup_msrs(struct vcpu_vmx *vmx)
        if (update_transition_efer(vmx))
                vmx_setup_uret_msr(vmx, MSR_EFER);

-       if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
+       if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)  ||
+           guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID))
                vmx_setup_uret_msr(vmx, MSR_TSC_AUX);

        vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
@@ -1930,7 +1934,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_TSC_AUX:
                if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
                        return 1;
                goto find_uret_msr;
        case MSR_IA32_DEBUGCTLMSR:
@@ -2227,7 +2232,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_TSC_AUX:
                if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
                        return 1;
                /* Check reserved bit, higher 32 bits should be zero */
                if ((data >> 32) != 0)
@@ -4299,7 +4305,23 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
                                                  xsaves_enabled, false);
        }

-       vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP);
+       /*
+        * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
+        * feature is exposed to the guest.  This creates a virtualization hole
+        * if both are supported in hardware but only one is exposed to the
+        * guest, but letting the guest execute RDTSCP or RDPID when either one
+        * is advertised is preferable to emulating the advertised instruction
+        * in KVM on #UD, and obviously better than incorrectly injecting #UD.
+        */
+       if (cpu_has_vmx_rdtscp()) {
+               bool rdpid_or_rdtscp_enabled =
+                       guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
+                       guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
+
+               vmx_adjust_secondary_exec_control(vmx, &exec_control,
+                                                 SECONDARY_EXEC_ENABLE_RDTSCP,
+                                                 rdpid_or_rdtscp_enabled, false);
+       }
        vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);

        vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
@@ -5062,12 +5084,12 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                case 3:
                        WARN_ON_ONCE(enable_unrestricted_guest);
                        val = kvm_read_cr3(vcpu);
-                       kvm_register_write(vcpu, reg, val);
+                       kvm_register_writel(vcpu, reg, val);
                        trace_kvm_cr_read(cr, val);
                        return kvm_skip_emulated_instruction(vcpu);
                case 8:
                        val = kvm_get_cr8(vcpu);
-                       kvm_register_write(vcpu, reg, val);
+                       kvm_register_writel(vcpu, reg, val);
                        trace_kvm_cr_read(cr, val);
                        return kvm_skip_emulated_instruction(vcpu);
                }
@@ -5140,7 +5162,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
                unsigned long val;

                kvm_get_dr(vcpu, dr, &val);
-               kvm_register_write(vcpu, reg, val);
+               kvm_register_writel(vcpu, reg, val);
                err = 0;
        } else {
                err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg));
@@ -5792,7 +5814,6 @@ void dump_vmcs(void)
        u32 vmentry_ctl, vmexit_ctl;
        u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
        unsigned long cr4;
-       u64 efer;

        if (!dump_invalid_vmcs) {
                pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
@@ -5804,7 +5825,6 @@ void dump_vmcs(void)
        cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
        cr4 = vmcs_readl(GUEST_CR4);
-       efer = vmcs_read64(GUEST_IA32_EFER);
        secondary_exec_control = 0;
        if (cpu_has_secondary_exec_ctrls())
                secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
@@ -5816,9 +5836,7 @@ void dump_vmcs(void)
        pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
               cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
        pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
-       if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
-           (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
-       {
+       if (cpu_has_vmx_ept()) {
                pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
                       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
                pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
@@ -5844,7 +5862,8 @@ void dump_vmcs(void)
        if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
            (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
                pr_err("EFER =     0x%016llx  PAT = 0x%016llx\n",
-                      efer, vmcs_read64(GUEST_IA32_PAT));
+                      vmcs_read64(GUEST_IA32_EFER),
+                      vmcs_read64(GUEST_IA32_PAT));
        pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
               vmcs_read64(GUEST_IA32_DEBUGCTL),
               vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
@@ -6395,18 +6414,17 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)

 void vmx_do_interrupt_nmi_irqoff(unsigned long entry);

-static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
+static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
+                                       unsigned long entry)
 {
-       unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
-       gate_desc *desc = (gate_desc *)host_idt_base + vector;
-
        kvm_before_interrupt(vcpu);
-       vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
+       vmx_do_interrupt_nmi_irqoff(entry);
        kvm_after_interrupt(vcpu);
 }

 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
 {
+       const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
        u32 intr_info = vmx_get_intr_info(&vmx->vcpu);

        /* if exit due to PF check for async PF */
@@ -6417,18 +6435,20 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
                kvm_machine_check();
        /* We need to handle NMIs before interrupts are enabled */
        else if (is_nmi(intr_info))
-               handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info);
+               handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
 }

 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 {
        u32 intr_info = vmx_get_intr_info(vcpu);
+       unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
+       gate_desc *desc = (gate_desc *)host_idt_base + vector;

        if (WARN_ONCE(!is_external_intr(intr_info),
            "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
                return;

-       handle_interrupt_nmi_irqoff(vcpu, intr_info);
+       handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
 }

 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
@@ -6681,15 +6701,15 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
         * have them in state 'on' as recorded before entering guest mode.
         * Same as enter_from_user_mode().
         *
-        * guest_exit_irqoff() restores host context and reinstates RCU if
-        * enabled and required.
+        * context_tracking_guest_exit() restores host context and reinstates
+        * RCU if enabled and required.
         *
         * This needs to be done before the below as native_read_msr()
         * contains a tracepoint and x86_spec_ctrl_restore_host() calls
         * into world and some more.
         */
        lockdep_hardirqs_off(CALLER_ADDR0);
-       guest_exit_irqoff();
+       context_tracking_guest_exit();

        instrumentation_begin();
        trace_hardirqs_off_finish();
@@ -6894,12 +6914,9 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)

        for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
                u32 index = vmx_uret_msrs_list[i];
-               u32 data_low, data_high;
                int j = vmx->nr_uret_msrs;

-               if (rdmsr_safe(index, &data_low, &data_high) < 0)
-                       continue;
-               if (wrmsr_safe(index, data_low, data_high) < 0)
+               if (kvm_probe_user_return_msr(index))
                        continue;

                vmx->guest_uret_msrs[j].slot = i;
@@ -6938,9 +6955,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
        bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);

        vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
+#ifdef CONFIG_X86_64
        vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
        vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
        vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+#endif
        vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
        vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
        vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
@@ -7330,9 +7349,11 @@ static __init void vmx_set_cpu_caps(void)
        if (!cpu_has_vmx_xsaves())
                kvm_cpu_cap_clear(X86_FEATURE_XSAVES);

-       /* CPUID 0x80000001 */
-       if (!cpu_has_vmx_rdtscp())
+       /* CPUID 0x80000001 and 0x7 (RDPID) */
+       if (!cpu_has_vmx_rdtscp()) {
                kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+               kvm_cpu_cap_clear(X86_FEATURE_RDPID);
+       }

        if (cpu_has_vmx_waitpkg())
                kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
@@ -7388,8 +7409,9 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
        /*
         * RDPID causes #UD if disabled through secondary execution controls.
         * Because it is marked as EmulateOnUD, we need to intercept it here.
+        * Note, RDPID is hidden behind ENABLE_RDTSCP.
         */
-       case x86_intercept_rdtscp:
+       case x86_intercept_rdpid:
                if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
                        exception->vector = UD_VECTOR;
                        exception->error_code_valid = false;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ee0dc58ac3a5..86678f8b3502 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -335,6 +335,22 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
        }
 }

+int kvm_probe_user_return_msr(u32 msr)
+{
+       u64 val;
+       int ret;
+
+       preempt_disable();
+       ret = rdmsrl_safe(msr, &val);
+       if (ret)
+               goto out;
+       ret = wrmsrl_safe(msr, val);
+out:
+       preempt_enable();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
+
 void kvm_define_user_return_msr(unsigned slot, u32 msr)
 {
        BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
@@ -1072,10 +1088,15 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                return 0;
        }

-       if (is_long_mode(vcpu) && kvm_vcpu_is_illegal_gpa(vcpu, cr3))
+       /*
+        * Do not condition the GPA check on long mode, this helper is used to
+        * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
+        * the current vCPU mode is accurate.
+        */
+       if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
                return 1;
-       else if (is_pae_paging(vcpu) &&
-                !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
+
+       if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
                return 1;

        kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
@@ -5859,7 +5880,8 @@ static void kvm_init_msr_list(void)
                                continue;
                        break;
                case MSR_TSC_AUX:
-                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
+                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
+                           !kvm_cpu_cap_has(X86_FEATURE_RDPID))
                                continue;
                        break;
                case MSR_IA32_UMWAIT_CONTROL:
@@ -7959,6 +7981,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)

 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);

+/*
+ * Indirection to move queue_work() out of the tk_core.seq write held
+ * region to prevent possible deadlocks against time accessors which
+ * are invoked with work related locks held.
+ */
+static void pvclock_irq_work_fn(struct irq_work *w)
+{
+       queue_work(system_long_wq, &pvclock_gtod_work);
+}
+
+static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
+
 /*
  * Notification about pvclock gtod data update.
  */
@@ -7970,13 +8004,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,

        update_pvclock_gtod(tk);

-       /* disable master clock if host does not trust, or does not
-        * use, TSC based clocksource.
+       /*
+        * Disable master clock if host does not trust, or does not use,
+        * TSC based clocksource. Delegate queue_work() to irq_work as
+        * this is invoked with tk_core.seq write held.
         */
        if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
            atomic_read(&kvm_guest_has_master_clock) != 0)
-               queue_work(system_long_wq, &pvclock_gtod_work);
-
+               irq_work_queue(&pvclock_irq_work);
        return 0;
 }

@@ -8091,6 +8126,8 @@ void kvm_arch_exit(void)
        cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
 #ifdef CONFIG_X86_64
        pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
+       irq_work_sync(&pvclock_irq_work);
+       cancel_work_sync(&pvclock_gtod_work);
 #endif
        kvm_x86_ops.hardware_enable = NULL;
        kvm_mmu_module_exit();
@@ -9199,6 +9236,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        local_irq_disable();
        kvm_after_interrupt(vcpu);

+       /*
+        * Wait until after servicing IRQs to account guest time so that any
+        * ticks that occurred while running the guest are properly accounted
+        * to the guest.  Waiting until IRQs are enabled degrades the accuracy
+        * of accounting via context tracking, but the loss of accuracy is
+        * acceptable for all known use cases.
+        */
+       vtime_account_guest_exit();
+
        if (lapic_in_kernel(vcpu)) {
                s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
                if (delta != S64_MIN) {
@@ -11020,6 +11066,9 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)

 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
+       if (vcpu->arch.guest_state_protected)
+               return true;
+
        return vcpu->arch.preempted_in_kernel;
 }

@@ -11290,7 +11339,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
        if (!kvm_pv_async_pf_enabled(vcpu))
                return true;
        else
-               return apf_pageready_slot_free(vcpu);
+               return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
 }

 void kvm_arch_start_assignment(struct kvm *kvm)
@@ -11539,7 +11588,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)

                fallthrough;
        case INVPCID_TYPE_ALL_INCL_GLOBAL:
-               kvm_mmu_unload(vcpu);
+               kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                return kvm_skip_emulated_instruction(vcpu);

        default:
20759 diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
20760 index ae17250e1efe..7f27bb65a572 100644
20761 --- a/arch/x86/kvm/xen.c
20762 +++ b/arch/x86/kvm/xen.c
20763 @@ -673,7 +673,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
20764         bool longmode;
20765         u64 input, params[6];
20767 -       input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
20768 +       input = (u64)kvm_register_readl(vcpu, VCPU_REGS_RAX);
20770         /* Hyper-V hypercalls get bit 31 set in EAX */
20771         if ((input & 0x80000000) &&
20772 diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
20773 index 75a0915b0d01..40bbe56bde32 100644
20774 --- a/arch/x86/lib/msr-smp.c
20775 +++ b/arch/x86/lib/msr-smp.c
20776 @@ -252,7 +252,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info)
20777         rv->err = wrmsr_safe_regs(rv->regs);
20780 -int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
20781 +int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
20783         int err;
20784         struct msr_regs_info rv;
20785 @@ -265,7 +265,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
20787  EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
20789 -int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
20790 +int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
20792         int err;
20793         struct msr_regs_info rv;
20794 diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
20795 index 6c5eb6f3f14f..a19374d26101 100644
20796 --- a/arch/x86/mm/mem_encrypt_identity.c
20797 +++ b/arch/x86/mm/mem_encrypt_identity.c
20798 @@ -503,14 +503,10 @@ void __init sme_enable(struct boot_params *bp)
20800  #define AMD_SME_BIT    BIT(0)
20801  #define AMD_SEV_BIT    BIT(1)
20802 -       /*
20803 -        * Set the feature mask (SME or SEV) based on whether we are
20804 -        * running under a hypervisor.
20805 -        */
20806 -       eax = 1;
20807 -       ecx = 0;
20808 -       native_cpuid(&eax, &ebx, &ecx, &edx);
20809 -       feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
20811 +       /* Check the SEV MSR to see whether SEV or SME is enabled */
20812 +       sev_status   = __rdmsr(MSR_AMD64_SEV);
20813 +       feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
20815         /*
20816          * Check for the SME/SEV feature:
20817 @@ -530,19 +526,26 @@ void __init sme_enable(struct boot_params *bp)
20819         /* Check if memory encryption is enabled */
20820         if (feature_mask == AMD_SME_BIT) {
20821 +               /*
20822 +                * No SME if Hypervisor bit is set. This check is here to
20823 +                * prevent a guest from trying to enable SME. For running as a
20824 +                * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
20825 +                * might be other hypervisors which emulate that MSR as non-zero
20826 +                * or even pass it through to the guest.
20827 +                * A malicious hypervisor can still trick a guest into this
20828 +                * path, but there is no way to protect against that.
20829 +                */
20830 +               eax = 1;
20831 +               ecx = 0;
20832 +               native_cpuid(&eax, &ebx, &ecx, &edx);
20833 +               if (ecx & BIT(31))
20834 +                       return;
20836                 /* For SME, check the SYSCFG MSR */
20837                 msr = __rdmsr(MSR_K8_SYSCFG);
20838                 if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
20839                         return;
20840         } else {
20841 -               /* For SEV, check the SEV MSR */
20842 -               msr = __rdmsr(MSR_AMD64_SEV);
20843 -               if (!(msr & MSR_AMD64_SEV_ENABLED))
20844 -                       return;
20846 -               /* Save SEV_STATUS to avoid reading MSR again */
20847 -               sev_status = msr;
20849                 /* SEV state cannot be controlled by a command line option */
20850                 sme_me_mask = me_mask;
20851                 sev_enabled = true;
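
With the reorder above, sev_status is read exactly once and the CPUID hypervisor check only guards the SME (bare-metal) leg. Reduced to a fragment, the guest test the hunk adds is:

	unsigned int eax = 1, ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (ecx & BIT(31))	/* CPUID.01H:ECX[31], the hypervisor bit */
		return;		/* running as a guest: never enable SME */
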
20852 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
20853 index f6a9e2e36642..1c27e6f43f80 100644
20854 --- a/arch/x86/mm/pgtable.c
20855 +++ b/arch/x86/mm/pgtable.c
20856 @@ -550,7 +550,7 @@ int ptep_test_and_clear_young(struct vm_area_struct *vma,
20857         return ret;
20860 -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
20861 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
20862  int pmdp_test_and_clear_young(struct vm_area_struct *vma,
20863                               unsigned long addr, pmd_t *pmdp)
20865 @@ -562,6 +562,9 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
20867         return ret;
20869 +#endif
20871 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
20872  int pudp_test_and_clear_young(struct vm_area_struct *vma,
20873                               unsigned long addr, pud_t *pudp)
20875 diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
20876 index cd3914fc9f3d..e94e0050a583 100644
20877 --- a/arch/x86/power/hibernate.c
20878 +++ b/arch/x86/power/hibernate.c
20879 @@ -13,8 +13,8 @@
20880  #include <linux/kdebug.h>
20881  #include <linux/cpu.h>
20882  #include <linux/pgtable.h>
20884 -#include <crypto/hash.h>
20885 +#include <linux/types.h>
20886 +#include <linux/crc32.h>
20888  #include <asm/e820/api.h>
20889  #include <asm/init.h>
20890 @@ -54,95 +54,33 @@ int pfn_is_nosave(unsigned long pfn)
20891         return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
20895 -#define MD5_DIGEST_SIZE 16
20897  struct restore_data_record {
20898         unsigned long jump_address;
20899         unsigned long jump_address_phys;
20900         unsigned long cr3;
20901         unsigned long magic;
20902 -       u8 e820_digest[MD5_DIGEST_SIZE];
20903 +       unsigned long e820_checksum;
20904  };
20906 -#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
20907  /**
20908 - * get_e820_md5 - calculate md5 according to given e820 table
20909 + * compute_e820_crc32 - calculate crc32 of a given e820 table
20910   *
20911   * @table: the e820 table to be calculated
20912 - * @buf: the md5 result to be stored to
20913 + *
20914 + * Return: the resulting checksum
20915   */
20916 -static int get_e820_md5(struct e820_table *table, void *buf)
20917 +static inline u32 compute_e820_crc32(struct e820_table *table)
20919 -       struct crypto_shash *tfm;
20920 -       struct shash_desc *desc;
20921 -       int size;
20922 -       int ret = 0;
20924 -       tfm = crypto_alloc_shash("md5", 0, 0);
20925 -       if (IS_ERR(tfm))
20926 -               return -ENOMEM;
20928 -       desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
20929 -                      GFP_KERNEL);
20930 -       if (!desc) {
20931 -               ret = -ENOMEM;
20932 -               goto free_tfm;
20933 -       }
20935 -       desc->tfm = tfm;
20937 -       size = offsetof(struct e820_table, entries) +
20938 +       int size = offsetof(struct e820_table, entries) +
20939                 sizeof(struct e820_entry) * table->nr_entries;
20941 -       if (crypto_shash_digest(desc, (u8 *)table, size, buf))
20942 -               ret = -EINVAL;
20944 -       kfree_sensitive(desc);
20946 -free_tfm:
20947 -       crypto_free_shash(tfm);
20948 -       return ret;
20951 -static int hibernation_e820_save(void *buf)
20953 -       return get_e820_md5(e820_table_firmware, buf);
20956 -static bool hibernation_e820_mismatch(void *buf)
20958 -       int ret;
20959 -       u8 result[MD5_DIGEST_SIZE];
20961 -       memset(result, 0, MD5_DIGEST_SIZE);
20962 -       /* If there is no digest in suspend kernel, let it go. */
20963 -       if (!memcmp(result, buf, MD5_DIGEST_SIZE))
20964 -               return false;
20966 -       ret = get_e820_md5(e820_table_firmware, result);
20967 -       if (ret)
20968 -               return true;
20970 -       return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
20972 -#else
20973 -static int hibernation_e820_save(void *buf)
20975 -       return 0;
20978 -static bool hibernation_e820_mismatch(void *buf)
20980 -       /* If md5 is not builtin for restore kernel, let it go. */
20981 -       return false;
20982 +       return ~crc32_le(~0, (unsigned char const *)table, size);
20984 -#endif
20986  #ifdef CONFIG_X86_64
20987 -#define RESTORE_MAGIC  0x23456789ABCDEF01UL
20988 +#define RESTORE_MAGIC  0x23456789ABCDEF02UL
20989  #else
20990 -#define RESTORE_MAGIC  0x12345678UL
20991 +#define RESTORE_MAGIC  0x12345679UL
20992  #endif
20994  /**
20995 @@ -179,7 +117,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
20996          */
20997         rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
20999 -       return hibernation_e820_save(rdr->e820_digest);
21000 +       rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
21001 +       return 0;
21004  /**
21005 @@ -200,7 +139,7 @@ int arch_hibernation_header_restore(void *addr)
21006         jump_address_phys = rdr->jump_address_phys;
21007         restore_cr3 = rdr->cr3;
21009 -       if (hibernation_e820_mismatch(rdr->e820_digest)) {
21010 +       if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
21011                 pr_crit("Hibernate inconsistent memory map detected!\n");
21012                 return -ENODEV;
21013         }
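
Since crc32_le() works on a pre-inverted running value, seeding with ~0 and inverting the result gives a conventional CRC-32 over the table header plus its nr_entries entries. The save/restore contract, condensed from the two hunks above:

	/* save side */
	rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);

	/* restore side: any firmware memory-map change between suspend
	 * and resume makes the recomputed checksum differ */
	if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware))
		return -ENODEV;

Bumping RESTORE_MAGIC alongside the layout change is what prevents an old image, whose header still carries an MD5 digest, from being restored against the new struct.
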
21014 diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
21015 index dc0a337f985b..8183ddb3700c 100644
21016 --- a/arch/x86/xen/enlighten_pv.c
21017 +++ b/arch/x86/xen/enlighten_pv.c
21018 @@ -1276,16 +1276,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
21019         /* Get mfn list */
21020         xen_build_dynamic_phys_to_machine();
21022 +       /* Work out if we support NX */
21023 +       get_cpu_cap(&boot_cpu_data);
21024 +       x86_configure_nx();
21026         /*
21027          * Set up kernel GDT and segment registers, mainly so that
21028          * -fstack-protector code can be executed.
21029          */
21030         xen_setup_gdt(0);
21032 -       /* Work out if we support NX */
21033 -       get_cpu_cap(&boot_cpu_data);
21034 -       x86_configure_nx();
21036         /* Determine virtual and physical address sizes */
21037         get_cpu_address_sizes(&boot_cpu_data);
21039 diff --git a/block/Kconfig b/block/Kconfig
21040 index a2297edfdde8..f688ea5f0dbd 100644
21041 --- a/block/Kconfig
21042 +++ b/block/Kconfig
21043 @@ -83,7 +83,7 @@ config BLK_DEV_INTEGRITY_T10
21045  config BLK_DEV_ZONED
21046         bool "Zoned block device support"
21047 -       select MQ_IOSCHED_DEADLINE
21048 +       select IOSCHED_BFQ
21049         help
21050         Block layer zoned block device support. This option enables
21051         support for ZAC/ZBC/ZNS host-managed and host-aware zoned block
21052 diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
21053 index 2f2158e05a91..e58b2953ac16 100644
21054 --- a/block/Kconfig.iosched
21055 +++ b/block/Kconfig.iosched
21056 @@ -5,13 +5,11 @@ menu "IO Schedulers"
21058  config MQ_IOSCHED_DEADLINE
21059         tristate "MQ deadline I/O scheduler"
21060 -       default y
21061         help
21062           MQ version of the deadline IO scheduler.
21064  config MQ_IOSCHED_KYBER
21065         tristate "Kyber I/O scheduler"
21066 -       default y
21067         help
21068           The Kyber I/O scheduler is a low-overhead scheduler suitable for
21069           multiqueue and other fast devices. Given target latencies for reads and
21070 @@ -20,6 +18,7 @@ config MQ_IOSCHED_KYBER
21072  config IOSCHED_BFQ
21073         tristate "BFQ I/O scheduler"
21074 +       default y
21075         help
21076         BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
21077         of the device among all processes according to their weights,
21078 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
21079 index 95586137194e..bc319931d2b3 100644
21080 --- a/block/bfq-iosched.c
21081 +++ b/block/bfq-iosched.c
21082 @@ -1012,7 +1012,7 @@ static void
21083  bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
21084                       struct bfq_io_cq *bic, bool bfq_already_existing)
21086 -       unsigned int old_wr_coeff = bfqq->wr_coeff;
21087 +       unsigned int old_wr_coeff = 1;
21088         bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
21090         if (bic->saved_has_short_ttime)
21091 @@ -1033,7 +1033,13 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
21092         bfqq->ttime = bic->saved_ttime;
21093         bfqq->io_start_time = bic->saved_io_start_time;
21094         bfqq->tot_idle_time = bic->saved_tot_idle_time;
21095 -       bfqq->wr_coeff = bic->saved_wr_coeff;
21096 +       /*
21097 +        * Restore weight coefficient only if low_latency is on
21098 +        */
21099 +       if (bfqd->low_latency) {
21100 +               old_wr_coeff = bfqq->wr_coeff;
21101 +               bfqq->wr_coeff = bic->saved_wr_coeff;
21102 +       }
21103         bfqq->service_from_wr = bic->saved_service_from_wr;
21104         bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
21105         bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
21106 @@ -2257,10 +2263,9 @@ static void bfq_remove_request(struct request_queue *q,
21110 -static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
21111 +static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
21112                 unsigned int nr_segs)
21114 -       struct request_queue *q = hctx->queue;
21115         struct bfq_data *bfqd = q->elevator->elevator_data;
21116         struct request *free = NULL;
21117         /*
21118 diff --git a/block/blk-iocost.c b/block/blk-iocost.c
21119 index 98d656bdb42b..4fbc875f7cb2 100644
21120 --- a/block/blk-iocost.c
21121 +++ b/block/blk-iocost.c
21122 @@ -1073,7 +1073,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
21124         lockdep_assert_held(&ioc->lock);
21126 -       inuse = clamp_t(u32, inuse, 1, active);
21127 +       /*
21128 +        * For an active leaf node, its inuse shouldn't be zero or exceed
21129 +        * @active. An active internal node's inuse is solely determined by the
21130 +        * inuse to active ratio of its children regardless of @inuse.
21131 +        */
21132 +       if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
21133 +               inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
21134 +                                          iocg->child_active_sum);
21135 +       } else {
21136 +               inuse = clamp_t(u32, inuse, 1, active);
21137 +       }
21139         iocg->last_inuse = iocg->inuse;
21140         if (save)
21141 @@ -1090,7 +1100,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
21142                 /* update the level sums */
21143                 parent->child_active_sum += (s32)(active - child->active);
21144                 parent->child_inuse_sum += (s32)(inuse - child->inuse);
21145 -               /* apply the udpates */
21146 +               /* apply the updates */
21147                 child->active = active;
21148                 child->inuse = inuse;
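
To make the comment above concrete with illustrative numbers: an inner node whose children sum to child_active_sum = 200 and child_inuse_sum = 150 has 3/4 of its activation in use, so propagating active = 100 yields:

	/* 150/200 of the children's budget is in use; mirror that ratio */
	inuse = DIV64_U64_ROUND_UP(100 * 150, 200);	/* = 75 */

The ratio therefore propagates up the hierarchy intact, which the old clamp_t() on the caller-supplied @inuse could not guarantee.
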
21150 diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
21151 index e1e997af89a0..fdeb9773b55c 100644
21152 --- a/block/blk-mq-sched.c
21153 +++ b/block/blk-mq-sched.c
21154 @@ -348,14 +348,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
21155                 unsigned int nr_segs)
21157         struct elevator_queue *e = q->elevator;
21158 -       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
21159 -       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
21160 +       struct blk_mq_ctx *ctx;
21161 +       struct blk_mq_hw_ctx *hctx;
21162         bool ret = false;
21163         enum hctx_type type;
21165         if (e && e->type->ops.bio_merge)
21166 -               return e->type->ops.bio_merge(hctx, bio, nr_segs);
21167 +               return e->type->ops.bio_merge(q, bio, nr_segs);
21169 +       ctx = blk_mq_get_ctx(q);
21170 +       hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
21171         type = hctx->type;
21172         if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
21173             list_empty_careful(&ctx->rq_lists[type]))
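
Note the reordering above is not just cosmetic: the per-CPU ctx and the hctx mapping are now looked up only when the default merge path actually needs them, and a scheduler's ->bio_merge() derives them itself if required, as kyber does further down. The hook's new shape (foo_ names illustrative):

	static bool foo_bio_merge(struct request_queue *q, struct bio *bio,
				  unsigned int nr_segs)
	{
		struct foo_data *fd = q->elevator->elevator_data;
		...
	}
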
21174 diff --git a/block/blk-mq.c b/block/blk-mq.c
21175 index d4d7c1caa439..0e120547ccb7 100644
21176 --- a/block/blk-mq.c
21177 +++ b/block/blk-mq.c
21178 @@ -2216,8 +2216,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
21179                 /* Bypass scheduler for flush requests */
21180                 blk_insert_flush(rq);
21181                 blk_mq_run_hw_queue(data.hctx, true);
21182 -       } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
21183 -                               !blk_queue_nonrot(q))) {
21184 +       } else if (plug && (q->nr_hw_queues == 1 ||
21185 +                  blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
21186 +                  q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
21187                 /*
21188                  * Use plugging if we have a ->commit_rqs() hook as well, as
21189                  * we know the driver uses bd->last in a smart fashion.
21190 @@ -3269,10 +3270,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
21191  /* tags can _not_ be used after returning from blk_mq_exit_queue */
21192  void blk_mq_exit_queue(struct request_queue *q)
21194 -       struct blk_mq_tag_set   *set = q->tag_set;
21195 +       struct blk_mq_tag_set *set = q->tag_set;
21197 -       blk_mq_del_queue_tag_set(q);
21198 +       /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
21199         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
21200 +       /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
21201 +       blk_mq_del_queue_tag_set(q);
21204  static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
21205 diff --git a/block/elevator.c b/block/elevator.c
21206 index 293c5c81397a..71111fa80628 100644
21207 --- a/block/elevator.c
21208 +++ b/block/elevator.c
21209 @@ -616,15 +616,15 @@ static inline bool elv_support_iosched(struct request_queue *q)
21212  /*
21213 - * For single queue devices, default to using mq-deadline. If we have multiple
21214 - * queues or mq-deadline is not available, default to "none".
21215 + * For single queue devices, default to using bfq. If we have multiple
21216 + * queues or bfq is not available, default to "none".
21217   */
21218  static struct elevator_type *elevator_get_default(struct request_queue *q)
21220         if (q->nr_hw_queues != 1)
21221                 return NULL;
21223 -       return elevator_get(q, "mq-deadline", false);
21224 +       return elevator_get(q, "bfq", false);
21227  /*
21228 diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
21229 index 33d34d69cade..79b69d7046d6 100644
21230 --- a/block/kyber-iosched.c
21231 +++ b/block/kyber-iosched.c
21232 @@ -560,11 +560,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
21233         }
21236 -static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
21237 +static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
21238                 unsigned int nr_segs)
21240 +       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
21241 +       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
21242         struct kyber_hctx_data *khd = hctx->sched_data;
21243 -       struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
21244         struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
21245         unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
21246         struct list_head *rq_list = &kcq->rq_list[sched_domain];
21247 diff --git a/block/mq-deadline.c b/block/mq-deadline.c
21248 index f3631a287466..3aabcd2a7893 100644
21249 --- a/block/mq-deadline.c
21250 +++ b/block/mq-deadline.c
21251 @@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
21252         return ELEVATOR_NO_MERGE;
21255 -static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
21256 +static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
21257                 unsigned int nr_segs)
21259 -       struct request_queue *q = hctx->queue;
21260         struct deadline_data *dd = q->elevator->elevator_data;
21261         struct request *free = NULL;
21262         bool ret;
21263 diff --git a/crypto/api.c b/crypto/api.c
21264 index ed08cbd5b9d3..c4eda56cff89 100644
21265 --- a/crypto/api.c
21266 +++ b/crypto/api.c
21267 @@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
21269         struct crypto_alg *alg;
21271 -       if (unlikely(!mem))
21272 +       if (IS_ERR_OR_NULL(mem))
21273                 return;
21275         alg = tfm->__crt_alg;
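
Widening the NULL check to IS_ERR_OR_NULL() lets cleanup paths free a tfm unconditionally, even when the allocation itself failed and the variable still holds an ERR_PTR. A hypothetical caller this protects:

	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
	...
	crypto_free_shash(tfm);	/* now harmless even if tfm is an
				   ERR_PTR: crypto_destroy_tfm() bails
				   out before dereferencing it */
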
21276 diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
21277 index a057ecb1288d..6cd7f7025df4 100644
21278 --- a/crypto/async_tx/async_xor.c
21279 +++ b/crypto/async_tx/async_xor.c
21280 @@ -233,6 +233,7 @@ async_xor_offs(struct page *dest, unsigned int offset,
21281                 if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
21282                         src_cnt--;
21283                         src_list++;
21284 +                       src_offs++;
21285                 }
21287                 /* wait for any prerequisite operations */
21288 diff --git a/crypto/rng.c b/crypto/rng.c
21289 index a888d84b524a..fea082b25fe4 100644
21290 --- a/crypto/rng.c
21291 +++ b/crypto/rng.c
21292 @@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
21293         u8 *buf = NULL;
21294         int err;
21296 -       crypto_stats_get(alg);
21297         if (!seed && slen) {
21298                 buf = kmalloc(slen, GFP_KERNEL);
21299 -               if (!buf) {
21300 -                       crypto_alg_put(alg);
21301 +               if (!buf)
21302                         return -ENOMEM;
21303 -               }
21305                 err = get_random_bytes_wait(buf, slen);
21306 -               if (err) {
21307 -                       crypto_alg_put(alg);
21308 +               if (err)
21309                         goto out;
21310 -               }
21311                 seed = buf;
21312         }
21314 +       crypto_stats_get(alg);
21315         err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
21316         crypto_stats_rng_seed(alg, err);
21317  out:
21318 diff --git a/crypto/zstd.c b/crypto/zstd.c
21319 index 1a3309f066f7..154a969c83a8 100644
21320 --- a/crypto/zstd.c
21321 +++ b/crypto/zstd.c
21322 @@ -18,22 +18,22 @@
21323  #define ZSTD_DEF_LEVEL 3
21325  struct zstd_ctx {
21326 -       ZSTD_CCtx *cctx;
21327 -       ZSTD_DCtx *dctx;
21328 +       zstd_cctx *cctx;
21329 +       zstd_dctx *dctx;
21330         void *cwksp;
21331         void *dwksp;
21332  };
21334 -static ZSTD_parameters zstd_params(void)
21335 +static zstd_parameters zstd_params(void)
21337 -       return ZSTD_getParams(ZSTD_DEF_LEVEL, 0, 0);
21338 +       return zstd_get_params(ZSTD_DEF_LEVEL, 0);
21341  static int zstd_comp_init(struct zstd_ctx *ctx)
21343         int ret = 0;
21344 -       const ZSTD_parameters params = zstd_params();
21345 -       const size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams);
21346 +       const zstd_parameters params = zstd_params();
21347 +       const size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
21349         ctx->cwksp = vzalloc(wksp_size);
21350         if (!ctx->cwksp) {
21351 @@ -41,7 +41,7 @@ static int zstd_comp_init(struct zstd_ctx *ctx)
21352                 goto out;
21353         }
21355 -       ctx->cctx = ZSTD_initCCtx(ctx->cwksp, wksp_size);
21356 +       ctx->cctx = zstd_init_cctx(ctx->cwksp, wksp_size);
21357         if (!ctx->cctx) {
21358                 ret = -EINVAL;
21359                 goto out_free;
21360 @@ -56,7 +56,7 @@ static int zstd_comp_init(struct zstd_ctx *ctx)
21361  static int zstd_decomp_init(struct zstd_ctx *ctx)
21363         int ret = 0;
21364 -       const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
21365 +       const size_t wksp_size = zstd_dctx_workspace_bound();
21367         ctx->dwksp = vzalloc(wksp_size);
21368         if (!ctx->dwksp) {
21369 @@ -64,7 +64,7 @@ static int zstd_decomp_init(struct zstd_ctx *ctx)
21370                 goto out;
21371         }
21373 -       ctx->dctx = ZSTD_initDCtx(ctx->dwksp, wksp_size);
21374 +       ctx->dctx = zstd_init_dctx(ctx->dwksp, wksp_size);
21375         if (!ctx->dctx) {
21376                 ret = -EINVAL;
21377                 goto out_free;
21378 @@ -152,10 +152,10 @@ static int __zstd_compress(const u8 *src, unsigned int slen,
21380         size_t out_len;
21381         struct zstd_ctx *zctx = ctx;
21382 -       const ZSTD_parameters params = zstd_params();
21383 +       const zstd_parameters params = zstd_params();
21385 -       out_len = ZSTD_compressCCtx(zctx->cctx, dst, *dlen, src, slen, params);
21386 -       if (ZSTD_isError(out_len))
21387 +       out_len = zstd_compress_cctx(zctx->cctx, dst, *dlen, src, slen, &params);
21388 +       if (zstd_is_error(out_len))
21389                 return -EINVAL;
21390         *dlen = out_len;
21391         return 0;
21392 @@ -182,8 +182,8 @@ static int __zstd_decompress(const u8 *src, unsigned int slen,
21393         size_t out_len;
21394         struct zstd_ctx *zctx = ctx;
21396 -       out_len = ZSTD_decompressDCtx(zctx->dctx, dst, *dlen, src, slen);
21397 -       if (ZSTD_isError(out_len))
21398 +       out_len = zstd_decompress_dctx(zctx->dctx, dst, *dlen, src, slen);
21399 +       if (zstd_is_error(out_len))
21400                 return -EINVAL;
21401         *dlen = out_len;
21402         return 0;
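
The whole-file conversion above is mechanical: the bundled ZSTD_* API becomes the kernel-style zstd_* wrapper, and parameters move to pass-by-pointer. Collected into one compress-side sequence (assembled from the hunks above, error handling elided):

	const zstd_parameters params = zstd_get_params(ZSTD_DEF_LEVEL, 0);
	const size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = vzalloc(wksp_size);
	zstd_cctx *cctx = zstd_init_cctx(wksp, wksp_size);

	size_t out_len = zstd_compress_cctx(cctx, dst, *dlen, src, slen, &params);
	if (zstd_is_error(out_len))
		return -EINVAL;
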
21403 diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c
21404 index c1ec087dca13..b2d0d4266f62 100644
21405 --- a/drivers/accessibility/speakup/speakup_acntpc.c
21406 +++ b/drivers/accessibility/speakup/speakup_acntpc.c
21407 @@ -198,7 +198,7 @@ static void do_catch_up(struct spk_synth *synth)
21408                 full_time_val = full_time->u.n.value;
21409                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21410                 if (synth_full()) {
21411 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
21412 +                       schedule_msec_hrtimeout((full_time_val));
21413                         continue;
21414                 }
21415                 set_current_state(TASK_RUNNING);
21416 @@ -226,7 +226,7 @@ static void do_catch_up(struct spk_synth *synth)
21417                         jiffy_delta_val = jiffy_delta->u.n.value;
21418                         delay_time_val = delay_time->u.n.value;
21419                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21420 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21421 +                       schedule_msec_hrtimeout(delay_time_val);
21422                         jiff_max = jiffies + jiffy_delta_val;
21423                 }
21424         }
21425 diff --git a/drivers/accessibility/speakup/speakup_apollo.c b/drivers/accessibility/speakup/speakup_apollo.c
21426 index cd63581b2e99..d636157a2844 100644
21427 --- a/drivers/accessibility/speakup/speakup_apollo.c
21428 +++ b/drivers/accessibility/speakup/speakup_apollo.c
21429 @@ -165,7 +165,7 @@ static void do_catch_up(struct spk_synth *synth)
21430                 if (!synth->io_ops->synth_out(synth, ch)) {
21431                         synth->io_ops->tiocmset(synth, 0, UART_MCR_RTS);
21432                         synth->io_ops->tiocmset(synth, UART_MCR_RTS, 0);
21433 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
21434 +                       schedule_msec_hrtimeout(full_time_val);
21435                         continue;
21436                 }
21437                 if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
21438 diff --git a/drivers/accessibility/speakup/speakup_decext.c b/drivers/accessibility/speakup/speakup_decext.c
21439 index 092cfd08a9e1..e7fc85f8ce5c 100644
21440 --- a/drivers/accessibility/speakup/speakup_decext.c
21441 +++ b/drivers/accessibility/speakup/speakup_decext.c
21442 @@ -180,7 +180,7 @@ static void do_catch_up(struct spk_synth *synth)
21443                 if (ch == '\n')
21444                         ch = 0x0D;
21445                 if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
21446 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21447 +                       schedule_msec_hrtimeout(delay_time_val);
21448                         continue;
21449                 }
21450                 set_current_state(TASK_RUNNING);
21451 diff --git a/drivers/accessibility/speakup/speakup_decpc.c b/drivers/accessibility/speakup/speakup_decpc.c
21452 index dec314dee214..2a5deb5256b2 100644
21453 --- a/drivers/accessibility/speakup/speakup_decpc.c
21454 +++ b/drivers/accessibility/speakup/speakup_decpc.c
21455 @@ -398,7 +398,7 @@ static void do_catch_up(struct spk_synth *synth)
21456                 if (ch == '\n')
21457                         ch = 0x0D;
21458                 if (dt_sendchar(ch)) {
21459 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21460 +                       schedule_msec_hrtimeout((delay_time_val));
21461                         continue;
21462                 }
21463                 set_current_state(TASK_RUNNING);
21464 diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
21465 index 580ec796816b..67c156b90ddb 100644
21466 --- a/drivers/accessibility/speakup/speakup_dectlk.c
21467 +++ b/drivers/accessibility/speakup/speakup_dectlk.c
21468 @@ -256,7 +256,7 @@ static void do_catch_up(struct spk_synth *synth)
21469                 if (ch == '\n')
21470                         ch = 0x0D;
21471                 if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
21472 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21473 +                       schedule_msec_hrtimeout(delay_time_val);
21474                         continue;
21475                 }
21476                 set_current_state(TASK_RUNNING);
21477 diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c
21478 index 92838d3ae9eb..b687cb4d3268 100644
21479 --- a/drivers/accessibility/speakup/speakup_dtlk.c
21480 +++ b/drivers/accessibility/speakup/speakup_dtlk.c
21481 @@ -211,7 +211,7 @@ static void do_catch_up(struct spk_synth *synth)
21482                 delay_time_val = delay_time->u.n.value;
21483                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21484                 if (synth_full()) {
21485 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21486 +                       schedule_msec_hrtimeout((delay_time_val));
21487                         continue;
21488                 }
21489                 set_current_state(TASK_RUNNING);
21490 @@ -227,7 +227,7 @@ static void do_catch_up(struct spk_synth *synth)
21491                         delay_time_val = delay_time->u.n.value;
21492                         jiffy_delta_val = jiffy_delta->u.n.value;
21493                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21494 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21495 +                       schedule_msec_hrtimeout((delay_time_val));
21496                         jiff_max = jiffies + jiffy_delta_val;
21497                 }
21498         }
21499 diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c
21500 index 311f4aa0be22..99c523fdcc98 100644
21501 --- a/drivers/accessibility/speakup/speakup_keypc.c
21502 +++ b/drivers/accessibility/speakup/speakup_keypc.c
21503 @@ -199,7 +199,7 @@ static void do_catch_up(struct spk_synth *synth)
21504                 full_time_val = full_time->u.n.value;
21505                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21506                 if (synth_full()) {
21507 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
21508 +                       schedule_msec_hrtimeout((full_time_val));
21509                         continue;
21510                 }
21511                 set_current_state(TASK_RUNNING);
21512 @@ -232,7 +232,7 @@ static void do_catch_up(struct spk_synth *synth)
21513                         jiffy_delta_val = jiffy_delta->u.n.value;
21514                         delay_time_val = delay_time->u.n.value;
21515                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21516 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
21517 +                       schedule_msec_hrtimeout(delay_time_val);
21518                         jiff_max = jiffies + jiffy_delta_val;
21519                 }
21520         }
21521 diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
21522 index 2b8699673bac..bf0cbdaf564f 100644
21523 --- a/drivers/accessibility/speakup/synth.c
21524 +++ b/drivers/accessibility/speakup/synth.c
21525 @@ -93,12 +93,8 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
21526                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21527                 if (ch == '\n')
21528                         ch = synth->procspeech;
21529 -               if (unicode)
21530 -                       ret = synth->io_ops->synth_out_unicode(synth, ch);
21531 -               else
21532 -                       ret = synth->io_ops->synth_out(synth, ch);
21533 -               if (!ret) {
21534 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
21535 +               if (!synth->io_ops->synth_out(synth, ch)) {
21536 +                       schedule_msec_hrtimeout(full_time_val);
21537                         continue;
21538                 }
21539                 if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
21540 @@ -108,11 +104,9 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
21541                         full_time_val = full_time->u.n.value;
21542                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
21543                         if (synth->io_ops->synth_out(synth, synth->procspeech))
21544 -                               schedule_timeout(
21545 -                                       msecs_to_jiffies(delay_time_val));
21546 +                               schedule_msec_hrtimeout(delay_time_val);
21547                         else
21548 -                               schedule_timeout(
21549 -                                       msecs_to_jiffies(full_time_val));
21550 +                               schedule_msec_hrtimeout(full_time_val);
21551                         jiff_max = jiffies + jiffy_delta_val;
21552                 }
21553                 set_current_state(TASK_RUNNING);
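
Every speakup hunk above is the same substitution, trading jiffies granularity for a millisecond-accurate hrtimer sleep; schedule_msec_hrtimeout() is a helper this patch set introduces elsewhere, not a mainline API. Side by side:

	/* before: rounds the delay up to whole jiffies */
	schedule_timeout(msecs_to_jiffies(delay_time_val));

	/* after: hrtimer-backed, takes milliseconds directly */
	schedule_msec_hrtimeout(delay_time_val);
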
21554 diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
21555 index f2d0e5915dab..0a0a982f9c28 100644
21556 --- a/drivers/acpi/arm64/gtdt.c
21557 +++ b/drivers/acpi/arm64/gtdt.c
21558 @@ -329,7 +329,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
21559                                         int index)
21561         struct platform_device *pdev;
21562 -       int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
21563 +       int irq;
21565         /*
21566          * According to SBSA specification the size of refresh and control
21567 @@ -338,7 +338,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
21568         struct resource res[] = {
21569                 DEFINE_RES_MEM(wd->control_frame_address, SZ_4K),
21570                 DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K),
21571 -               DEFINE_RES_IRQ(irq),
21572 +               {},
21573         };
21574         int nr_res = ARRAY_SIZE(res);
21576 @@ -348,10 +348,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
21578         if (!(wd->refresh_frame_address && wd->control_frame_address)) {
21579                 pr_err(FW_BUG "failed to get the Watchdog base address.\n");
21580 -               acpi_unregister_gsi(wd->timer_interrupt);
21581                 return -EINVAL;
21582         }
21584 +       irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
21585 +       res[2] = (struct resource)DEFINE_RES_IRQ(irq);
21586         if (irq <= 0) {
21587                 pr_warn("failed to map the Watchdog interrupt.\n");
21588                 nr_res--;
21589 @@ -364,7 +365,8 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
21590          */
21591         pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res);
21592         if (IS_ERR(pdev)) {
21593 -               acpi_unregister_gsi(wd->timer_interrupt);
21594 +               if (irq > 0)
21595 +                       acpi_unregister_gsi(wd->timer_interrupt);
21596                 return PTR_ERR(pdev);
21597         }
21599 diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
21600 index 69057fcd2c04..a5e6fd0bafa1 100644
21601 --- a/drivers/acpi/cppc_acpi.c
21602 +++ b/drivers/acpi/cppc_acpi.c
21603 @@ -119,23 +119,15 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
21604   */
21605  #define NUM_RETRIES 500ULL
21607 -struct cppc_attr {
21608 -       struct attribute attr;
21609 -       ssize_t (*show)(struct kobject *kobj,
21610 -                       struct attribute *attr, char *buf);
21611 -       ssize_t (*store)(struct kobject *kobj,
21612 -                       struct attribute *attr, const char *c, ssize_t count);
21615  #define define_one_cppc_ro(_name)              \
21616 -static struct cppc_attr _name =                        \
21617 +static struct kobj_attribute _name =           \
21618  __ATTR(_name, 0444, show_##_name, NULL)
21620  #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
21622  #define show_cppc_data(access_fn, struct_name, member_name)            \
21623         static ssize_t show_##member_name(struct kobject *kobj,         \
21624 -                                       struct attribute *attr, char *buf) \
21625 +                               struct kobj_attribute *attr, char *buf) \
21626         {                                                               \
21627                 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);           \
21628                 struct struct_name st_name = {0};                       \
21629 @@ -161,7 +153,7 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
21630  show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
21632  static ssize_t show_feedback_ctrs(struct kobject *kobj,
21633 -               struct attribute *attr, char *buf)
21634 +               struct kobj_attribute *attr, char *buf)
21636         struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
21637         struct cppc_perf_fb_ctrs fb_ctrs = {0};
21638 diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
21639 index 7b54dc95d36b..4058e0241091 100644
21640 --- a/drivers/acpi/custom_method.c
21641 +++ b/drivers/acpi/custom_method.c
21642 @@ -42,6 +42,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
21643                                    sizeof(struct acpi_table_header)))
21644                         return -EFAULT;
21645                 uncopied_bytes = max_size = table.length;
21646 +               /* free any buffer left over from a previous write */
21647 +               kfree(buf);
21648                 buf = kzalloc(max_size, GFP_KERNEL);
21649                 if (!buf)
21650                         return -ENOMEM;
21651 @@ -55,6 +57,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
21652             (*ppos + count < count) ||
21653             (count > uncopied_bytes)) {
21654                 kfree(buf);
21655 +               buf = NULL;
21656                 return -EINVAL;
21657         }
21659 @@ -76,7 +79,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
21660                 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
21661         }
21663 -       kfree(buf);
21664         return count;
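
buf in cm_write() is static, and a table upload may span several write() calls, so the fix has to manage the buffer's lifetime across entries: free whatever a previous, possibly aborted, upload left behind before reallocating, and NULL the pointer when bailing out so the next entry cannot double-free it. In outline:

	static char *buf;	/* persists across write() calls */

	kfree(buf);		/* drop any earlier allocation */
	buf = kzalloc(max_size, GFP_KERNEL);
	...
	kfree(buf);
	buf = NULL;		/* error path: defuse the stale pointer */
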
21667 diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
21668 index 096153761ebc..58876248b192 100644
21669 --- a/drivers/acpi/device_pm.c
21670 +++ b/drivers/acpi/device_pm.c
21671 @@ -1310,6 +1310,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
21672                 {"PNP0C0B", }, /* Generic ACPI fan */
21673                 {"INT3404", }, /* Fan */
21674                 {"INTC1044", }, /* Fan for Tiger Lake generation */
21675 +               {"INTC1048", }, /* Fan for Alder Lake generation */
21676                 {}
21677         };
21678         struct acpi_device *adev = ACPI_COMPANION(dev);
21679 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
21680 index 6efe7edd7b1e..345777bf7af9 100644
21681 --- a/drivers/acpi/scan.c
21682 +++ b/drivers/acpi/scan.c
21683 @@ -701,6 +701,7 @@ int acpi_device_add(struct acpi_device *device,
21685                 result = acpi_device_set_name(device, acpi_device_bus_id);
21686                 if (result) {
21687 +                       kfree_const(acpi_device_bus_id->bus_id);
21688                         kfree(acpi_device_bus_id);
21689                         goto err_unlock;
21690                 }
21691 diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
21692 index 53b22e26266c..2d821ed78453 100644
21693 --- a/drivers/android/Kconfig
21694 +++ b/drivers/android/Kconfig
21695 @@ -9,7 +9,7 @@ config ANDROID
21696  if ANDROID
21698  config ANDROID_BINDER_IPC
21699 -       bool "Android Binder IPC Driver"
21700 +       tristate "Android Binder IPC Driver"
21701         depends on MMU
21702         default n
21703         help
21704 @@ -21,8 +21,8 @@ config ANDROID_BINDER_IPC
21705           between said processes.
21707  config ANDROID_BINDERFS
21708 -       bool "Android Binderfs filesystem"
21709 -       depends on ANDROID_BINDER_IPC
21710 +       tristate "Android Binderfs filesystem"
21711 +       depends on (ANDROID_BINDER_IPC=y) || (ANDROID_BINDER_IPC=m && m)
21712         default n
21713         help
21714           Binderfs is a pseudo-filesystem for the Android Binder IPC driver
21715 diff --git a/drivers/android/Makefile b/drivers/android/Makefile
21716 index c9d3d0c99c25..b9d5ce8deca2 100644
21717 --- a/drivers/android/Makefile
21718 +++ b/drivers/android/Makefile
21719 @@ -1,6 +1,10 @@
21720  # SPDX-License-Identifier: GPL-2.0-only
21721  ccflags-y += -I$(src)                  # needed for trace events
21723 -obj-$(CONFIG_ANDROID_BINDERFS)         += binderfs.o
21724 -obj-$(CONFIG_ANDROID_BINDER_IPC)       += binder.o binder_alloc.o
21725 -obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
21726 +binder_linux-y := binder.o binder_alloc.o
21727 +obj-$(CONFIG_ANDROID_BINDER_IPC) += binder_linux.o
21728 +binder_linux-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
21729 +binder_linux-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
21731 +# binder-$(CONFIG_ANDROID_BINDER_IPC) := binder.o binder_alloc.o
21732 +# binder-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
21733 diff --git a/drivers/android/binder.c b/drivers/android/binder.c
21734 index c119736ca56a..569850551e88 100644
21735 --- a/drivers/android/binder.c
21736 +++ b/drivers/android/binder.c
21737 @@ -5788,9 +5788,20 @@ static int __init binder_init(void)
21738         return ret;
21741 -device_initcall(binder_init);
21742 +module_init(binder_init);
21743 +/*
21744 + * binder will have no exit function since binderfs instances can be mounted
21745 + * multiple times and also in user namespaces, finding and destroying them all
21746 + * is not feasible without introducing insane locking. Just ignoring existing
21747 + * instances on module unload also wouldn't work since we would lose track of
21748 + * what major number was dynamically allocated and also what minor numbers are
21749 + * already given out. So this would get us into all kinds of issues with device
21750 + * number reuse. So simply don't allow unloading unless we are forced to do so.
21751 + */
21753 +MODULE_AUTHOR("Google, Inc.");
21754 +MODULE_DESCRIPTION("Driver for Android binder device");
21755 +MODULE_LICENSE("GPL v2");
21757  #define CREATE_TRACE_POINTS
21758  #include "binder_trace.h"
21760 -MODULE_LICENSE("GPL v2");
21761 diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
21762 index 7caf74ad2405..07c11e8d6dad 100644
21763 --- a/drivers/android/binder_alloc.c
21764 +++ b/drivers/android/binder_alloc.c
21765 @@ -38,8 +38,7 @@ enum {
21766  };
21767  static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
21769 -module_param_named(debug_mask, binder_alloc_debug_mask,
21770 -                  uint, 0644);
21771 +module_param_named(alloc_debug_mask, binder_alloc_debug_mask, uint, 0644);
21773  #define binder_alloc_debug(mask, x...) \
21774         do { \
21775 diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
21776 index 6e8e001381af..e4e0678e2781 100644
21777 --- a/drivers/android/binder_alloc.h
21778 +++ b/drivers/android/binder_alloc.h
21779 @@ -6,6 +6,7 @@
21780  #ifndef _LINUX_BINDER_ALLOC_H
21781  #define _LINUX_BINDER_ALLOC_H
21783 +#include <linux/kconfig.h>
21784  #include <linux/rbtree.h>
21785  #include <linux/list.h>
21786  #include <linux/mm.h>
21787 @@ -109,7 +110,7 @@ struct binder_alloc {
21788         size_t pages_high;
21789  };
21791 -#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
21792 +#if IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_SELFTEST)
21793  void binder_selftest_alloc(struct binder_alloc *alloc);
21794  #else
21795  static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
21796 diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
21797 index 6cd79011e35d..da5bcb3203dc 100644
21798 --- a/drivers/android/binder_internal.h
21799 +++ b/drivers/android/binder_internal.h
21800 @@ -5,6 +5,7 @@
21802  #include <linux/export.h>
21803  #include <linux/fs.h>
21804 +#include <linux/kconfig.h>
21805  #include <linux/list.h>
21806  #include <linux/miscdevice.h>
21807  #include <linux/mutex.h>
21808 @@ -77,7 +78,7 @@ extern const struct file_operations binder_fops;
21810  extern char *binder_devices_param;
21812 -#ifdef CONFIG_ANDROID_BINDERFS
21813 +#if IS_ENABLED(CONFIG_ANDROID_BINDERFS)
21814  extern bool is_binderfs_device(const struct inode *inode);
21815  extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
21816                                            const struct file_operations *fops,
21817 @@ -98,7 +99,7 @@ static inline struct dentry *binderfs_create_file(struct dentry *dir,
21818  static inline void binderfs_remove_file(struct dentry *dentry) {}
21819  #endif
21821 -#ifdef CONFIG_ANDROID_BINDERFS
21822 +#if IS_ENABLED(CONFIG_ANDROID_BINDERFS)
21823  extern int __init init_binderfs(void);
21824  #else
21825  static inline int __init init_binderfs(void)
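
The #ifdef-to-IS_ENABLED() conversions here are what let the Kconfig symbols go tristate: a plain #ifdef CONFIG_FOO is only true for =y, whereas IS_ENABLED() also covers =m. Illustration, assuming CONFIG_ANDROID_BINDERFS=m:

	#ifdef CONFIG_ANDROID_BINDERFS		/* false for =m: stubs win */
	#endif

	#if IS_ENABLED(CONFIG_ANDROID_BINDERFS)	/* true for =y and =m */
	#endif
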
21826 diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
21827 index e80ba93c62a9..1a18e9dbb2a6 100644
21828 --- a/drivers/android/binderfs.c
21829 +++ b/drivers/android/binderfs.c
21830 @@ -113,7 +113,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
21831         struct super_block *sb = ref_inode->i_sb;
21832         struct binderfs_info *info = sb->s_fs_info;
21833  #if defined(CONFIG_IPC_NS)
21834 -       bool use_reserve = (info->ipc_ns == &init_ipc_ns);
21835 +       bool use_reserve = (info->ipc_ns == show_init_ipc_ns());
21836  #else
21837         bool use_reserve = true;
21838  #endif
21839 @@ -402,7 +402,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
21840         struct dentry *root = sb->s_root;
21841         struct binderfs_info *info = sb->s_fs_info;
21842  #if defined(CONFIG_IPC_NS)
21843 -       bool use_reserve = (info->ipc_ns == &init_ipc_ns);
21844 +       bool use_reserve = (info->ipc_ns == show_init_ipc_ns());
21845  #else
21846         bool use_reserve = true;
21847  #endif
21848 @@ -682,7 +682,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
21849                 return -ENOMEM;
21850         info = sb->s_fs_info;
21852 -       info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
21853 +       info->ipc_ns = get_ipc_ns_exported(current->nsproxy->ipc_ns);
21855         info->root_gid = make_kgid(sb->s_user_ns, 0);
21856         if (!gid_valid(info->root_gid))
21857 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
21858 index 00ba8e5a1ccc..33192a8f687d 100644
21859 --- a/drivers/ata/ahci.c
21860 +++ b/drivers/ata/ahci.c
21861 @@ -1772,6 +1772,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
21862                 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
21864  #ifdef CONFIG_ARM64
21865 +       if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
21866 +           pdev->device == 0xa235 &&
21867 +           pdev->revision < 0x30)
21868 +               hpriv->flags |= AHCI_HFLAG_NO_SXS;
21870         if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
21871                 hpriv->irq_handler = ahci_thunderx_irq_handler;
21872  #endif
21873 diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
21874 index 98b8baa47dc5..d1f284f0c83d 100644
21875 --- a/drivers/ata/ahci.h
21876 +++ b/drivers/ata/ahci.h
21877 @@ -242,6 +242,7 @@ enum {
21878                                                         suspend/resume */
21879         AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP
21880                                                         from phy_power_on() */
21881 +       AHCI_HFLAG_NO_SXS               = (1 << 28), /* SXS not supported */
21883         /* ap->flags bits */
21885 diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
21886 index 5b32df5d33ad..6e9c5ade4c2e 100644
21887 --- a/drivers/ata/ahci_brcm.c
21888 +++ b/drivers/ata/ahci_brcm.c
21889 @@ -86,7 +86,8 @@ struct brcm_ahci_priv {
21890         u32 port_mask;
21891         u32 quirks;
21892         enum brcm_ahci_version version;
21893 -       struct reset_control *rcdev;
21894 +       struct reset_control *rcdev_rescal;
21895 +       struct reset_control *rcdev_ahci;
21896  };
21898  static inline u32 brcm_sata_readreg(void __iomem *addr)
21899 @@ -352,8 +353,8 @@ static int brcm_ahci_suspend(struct device *dev)
21900         else
21901                 ret = 0;
21903 -       if (priv->version != BRCM_SATA_BCM7216)
21904 -               reset_control_assert(priv->rcdev);
21905 +       reset_control_assert(priv->rcdev_ahci);
21906 +       reset_control_rearm(priv->rcdev_rescal);
21908         return ret;
21910 @@ -365,10 +366,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
21911         struct brcm_ahci_priv *priv = hpriv->plat_data;
21912         int ret = 0;
21914 -       if (priv->version == BRCM_SATA_BCM7216)
21915 -               ret = reset_control_reset(priv->rcdev);
21916 -       else
21917 -               ret = reset_control_deassert(priv->rcdev);
21918 +       ret = reset_control_deassert(priv->rcdev_ahci);
21919 +       if (ret)
21920 +               return ret;
21921 +       ret = reset_control_reset(priv->rcdev_rescal);
21922         if (ret)
21923                 return ret;
21925 @@ -434,7 +435,6 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21927         const struct of_device_id *of_id;
21928         struct device *dev = &pdev->dev;
21929 -       const char *reset_name = NULL;
21930         struct brcm_ahci_priv *priv;
21931         struct ahci_host_priv *hpriv;
21932         struct resource *res;
21933 @@ -456,15 +456,15 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21934         if (IS_ERR(priv->top_ctrl))
21935                 return PTR_ERR(priv->top_ctrl);
21937 -       /* Reset is optional depending on platform and named differently */
21938 -       if (priv->version == BRCM_SATA_BCM7216)
21939 -               reset_name = "rescal";
21940 -       else
21941 -               reset_name = "ahci";
21943 -       priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
21944 -       if (IS_ERR(priv->rcdev))
21945 -               return PTR_ERR(priv->rcdev);
21946 +       if (priv->version == BRCM_SATA_BCM7216) {
21947 +               priv->rcdev_rescal = devm_reset_control_get_optional_shared(
21948 +                       &pdev->dev, "rescal");
21949 +               if (IS_ERR(priv->rcdev_rescal))
21950 +                       return PTR_ERR(priv->rcdev_rescal);
21951 +       }
21952 +       priv->rcdev_ahci = devm_reset_control_get_optional(&pdev->dev, "ahci");
21953 +       if (IS_ERR(priv->rcdev_ahci))
21954 +               return PTR_ERR(priv->rcdev_ahci);
21956         hpriv = ahci_platform_get_resources(pdev, 0);
21957         if (IS_ERR(hpriv))
21958 @@ -485,10 +485,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21959                 break;
21960         }
21962 -       if (priv->version == BRCM_SATA_BCM7216)
21963 -               ret = reset_control_reset(priv->rcdev);
21964 -       else
21965 -               ret = reset_control_deassert(priv->rcdev);
21966 +       ret = reset_control_reset(priv->rcdev_rescal);
21967 +       if (ret)
21968 +               return ret;
21969 +       ret = reset_control_deassert(priv->rcdev_ahci);
21970         if (ret)
21971                 return ret;
21973 @@ -539,8 +539,8 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21974  out_disable_clks:
21975         ahci_platform_disable_clks(hpriv);
21976  out_reset:
21977 -       if (priv->version != BRCM_SATA_BCM7216)
21978 -               reset_control_assert(priv->rcdev);
21979 +       reset_control_assert(priv->rcdev_ahci);
21980 +       reset_control_rearm(priv->rcdev_rescal);
21981         return ret;
21984 diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
21985 index ea5bf5f4cbed..fec2e9754aed 100644
21986 --- a/drivers/ata/libahci.c
21987 +++ b/drivers/ata/libahci.c
21988 @@ -493,6 +493,11 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
21989                 cap |= HOST_CAP_ALPM;
21990         }
21992 +       if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
21993 +               dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
21994 +               cap &= ~HOST_CAP_SXS;
21995 +       }
21997         if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
21998                 dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
21999                          port_map, hpriv->force_port_map);
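
AHCI_HFLAG_NO_SXS follows the established hpriv quirk pattern: the PCI probe tags the affected controller (here a Huawei device, 0xa235 below revision 0x30) and the generic code masks the capability so nothing downstream ever sees it. The two halves, condensed from the hunks above:

	/* ahci.c probe */
	hpriv->flags |= AHCI_HFLAG_NO_SXS;

	/* libahci.c ahci_save_initial_config() */
	if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS))
		cap &= ~HOST_CAP_SXS;
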
22000 diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
22001 index de638dafce21..b2f552088291 100644
22002 --- a/drivers/ata/libahci_platform.c
22003 +++ b/drivers/ata/libahci_platform.c
22004 @@ -582,11 +582,13 @@ int ahci_platform_init_host(struct platform_device *pdev,
22005         int i, irq, n_ports, rc;
22007         irq = platform_get_irq(pdev, 0);
22008 -       if (irq <= 0) {
22009 +       if (irq < 0) {
22010                 if (irq != -EPROBE_DEFER)
22011                         dev_err(dev, "no irq\n");
22012                 return irq;
22013         }
22014 +       if (!irq)
22015 +               return -EINVAL;
22017         hpriv->irq = irq;
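
The IRQ changes in this and the next few hunks all encode the same rule: platform_get_irq() returns a negative errno on failure (including -EPROBE_DEFER, which must be propagated silently), and although it is documented never to return 0, a zero still has to be rejected explicitly because IRQ 0 is not a usable interrupt number. A sketch of the resulting idiom (helper name hypothetical):

    #include <linux/platform_device.h>

    static int foo_get_irq(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;     /* propagate, including -EPROBE_DEFER */
            if (!irq)
                    return -EINVAL; /* IRQ 0 is not a valid interrupt */
            return irq;
    }
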
22019 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
22020 index e9cf31f38450..63f39440a9b4 100644
22021 --- a/drivers/ata/pata_arasan_cf.c
22022 +++ b/drivers/ata/pata_arasan_cf.c
22023 @@ -818,12 +818,19 @@ static int arasan_cf_probe(struct platform_device *pdev)
22024         else
22025                 quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
22027 -       /* if irq is 0, support only PIO */
22028 -       acdev->irq = platform_get_irq(pdev, 0);
22029 -       if (acdev->irq)
22030 +       /*
22031 +        * If there's an error getting IRQ (or we do get IRQ0),
22032 +        * support only PIO
22033 +        */
22034 +       ret = platform_get_irq(pdev, 0);
22035 +       if (ret > 0) {
22036 +               acdev->irq = ret;
22037                 irq_handler = arasan_cf_interrupt;
22038 -       else
22039 +       } else if (ret == -EPROBE_DEFER) {
22040 +               return ret;
22041 +       } else {
22042                 quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
22043 +       }
22045         acdev->pbase = res->start;
22046         acdev->vbase = devm_ioremap(&pdev->dev, res->start,
22047 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
22048 index d1644a8ef9fa..abc0e87ca1a8 100644
22049 --- a/drivers/ata/pata_ixp4xx_cf.c
22050 +++ b/drivers/ata/pata_ixp4xx_cf.c
22051 @@ -165,8 +165,12 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
22052                 return -ENOMEM;
22054         irq = platform_get_irq(pdev, 0);
22055 -       if (irq)
22056 +       if (irq > 0)
22057                 irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
22058 +       else if (irq < 0)
22059 +               return irq;
22060 +       else
22061 +               return -EINVAL;
22063         /* Setup expansion bus chip selects */
22064         *data->cs0_cfg = data->cs0_bits;
22065 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
22066 index 664ef658a955..b62446ea5f40 100644
22067 --- a/drivers/ata/sata_mv.c
22068 +++ b/drivers/ata/sata_mv.c
22069 @@ -4097,6 +4097,10 @@ static int mv_platform_probe(struct platform_device *pdev)
22070                 n_ports = mv_platform_data->n_ports;
22071                 irq = platform_get_irq(pdev, 0);
22072         }
22073 +       if (irq < 0)
22074 +               return irq;
22075 +       if (!irq)
22076 +               return -EINVAL;
22078         host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
22079         hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
22080 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
22081 index 653c8c6ac7a7..aedeb2dc1a18 100644
22082 --- a/drivers/base/devtmpfs.c
22083 +++ b/drivers/base/devtmpfs.c
22084 @@ -419,7 +419,6 @@ static int __init devtmpfs_setup(void *p)
22085         init_chroot(".");
22086  out:
22087         *(int *)p = err;
22088 -       complete(&setup_done);
22089         return err;
22092 @@ -432,6 +431,7 @@ static int __ref devtmpfsd(void *p)
22094         int err = devtmpfs_setup(p);
22096 +       complete(&setup_done);
22097         if (err)
22098                 return err;
22099         devtmpfs_work_loop();
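
Moving complete() from the __init setup function into its non-__init caller matters for ordering: once setup_done fires, the waiter may proceed and init memory may eventually be freed, so presumably the thread must not still be executing __init text at that point. A minimal sketch of the corrected ordering, assuming the same split between an __init setup step and a long-lived worker:

    #include <linux/completion.h>
    #include <linux/init.h>

    static DECLARE_COMPLETION(setup_done);

    static int __init setup(void *p)
    {
            *(int *)p = 0;          /* result lands in the waiter's stack slot */
            return 0;
    }

    static int __ref worker(void *p)
    {
            int err = setup(p);

            complete(&setup_done);  /* signal only after setup() has returned */
            return err;
    }

The waiter pairs this with wait_for_completion(&setup_done) before reading back the error value.
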
22100 diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
22101 index 78355095e00d..7e2c79e2a88b 100644
22102 --- a/drivers/base/firmware_loader/main.c
22103 +++ b/drivers/base/firmware_loader/main.c
22104 @@ -465,6 +465,8 @@ static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
22105  static char fw_path_para[256];
22106  static const char * const fw_path[] = {
22107         fw_path_para,
22108 +       "/etc/firmware/" UTS_RELEASE,
22109 +       "/etc/firmware",
22110         "/lib/firmware/updates/" UTS_RELEASE,
22111         "/lib/firmware/updates",
22112         "/lib/firmware/" UTS_RELEASE,
22113 diff --git a/drivers/base/node.c b/drivers/base/node.c
22114 index f449dbb2c746..2c36f61d30bc 100644
22115 --- a/drivers/base/node.c
22116 +++ b/drivers/base/node.c
22117 @@ -268,21 +268,20 @@ static void node_init_cache_dev(struct node *node)
22118         if (!dev)
22119                 return;
22121 +       device_initialize(dev);
22122         dev->parent = &node->dev;
22123         dev->release = node_cache_release;
22124         if (dev_set_name(dev, "memory_side_cache"))
22125 -               goto free_dev;
22126 +               goto put_device;
22128 -       if (device_register(dev))
22129 -               goto free_name;
22130 +       if (device_add(dev))
22131 +               goto put_device;
22133         pm_runtime_no_callbacks(dev);
22134         node->cache_dev = dev;
22135         return;
22136 -free_name:
22137 -       kfree_const(dev->kobj.name);
22138 -free_dev:
22139 -       kfree(dev);
22140 +put_device:
22141 +       put_device(dev);
22144  /**
22145 @@ -319,25 +318,24 @@ void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
22146                 return;
22148         dev = &info->dev;
22149 +       device_initialize(dev);
22150         dev->parent = node->cache_dev;
22151         dev->release = node_cacheinfo_release;
22152         dev->groups = cache_groups;
22153         if (dev_set_name(dev, "index%d", cache_attrs->level))
22154 -               goto free_cache;
22155 +               goto put_device;
22157         info->cache_attrs = *cache_attrs;
22158 -       if (device_register(dev)) {
22159 +       if (device_add(dev)) {
22160                 dev_warn(&node->dev, "failed to add cache level:%d\n",
22161                          cache_attrs->level);
22162 -               goto free_name;
22163 +               goto put_device;
22164         }
22165         pm_runtime_no_callbacks(dev);
22166         list_add_tail(&info->node, &node->cache_attrs);
22167         return;
22168 -free_name:
22169 -       kfree_const(dev->kobj.name);
22170 -free_cache:
22171 -       kfree(info);
22172 +put_device:
22173 +       put_device(dev);
22176  static void node_remove_caches(struct node *node)
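
The node.c conversion is the canonical two-step registration pattern: call device_initialize() first so that every later failure, including a failed dev_set_name() or device_add(), can be unwound with a single put_device(), which frees the kobject name and invokes ->release(). After device_initialize(), freeing the struct directly with kfree() bypasses the kobject refcount, which is exactly what the old free_name/free_dev labels risked. A minimal sketch (names hypothetical):

    #include <linux/device.h>
    #include <linux/slab.h>

    static void foo_release(struct device *dev)
    {
            kfree(dev);             /* ->release owns the final free */
    }

    static struct device *foo_create_child(struct device *parent)
    {
            struct device *dev;

            dev = kzalloc(sizeof(*dev), GFP_KERNEL);
            if (!dev)
                    return NULL;

            device_initialize(dev); /* refcounting is live from here on */
            dev->parent = parent;
            dev->release = foo_release;

            if (dev_set_name(dev, "foo") || device_add(dev)) {
                    put_device(dev);        /* frees the name, calls ->release */
                    return NULL;
            }
            return dev;
    }
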
22177 diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
22178 index fe1dad68aee4..ae011f2bc537 100644
22179 --- a/drivers/base/power/runtime.c
22180 +++ b/drivers/base/power/runtime.c
22181 @@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev)
22182         dev->power.request_pending = false;
22183         dev->power.request = RPM_REQ_NONE;
22184         dev->power.deferred_resume = false;
22185 +       dev->power.needs_force_resume = 0;
22186         INIT_WORK(&dev->power.work, pm_runtime_work);
22188         dev->power.timer_expires = 0;
22189 @@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev)
22190          * its parent, but set its status to RPM_SUSPENDED anyway in case this
22191          * function will be called again for it in the meantime.
22192          */
22193 -       if (pm_runtime_need_not_resume(dev))
22194 +       if (pm_runtime_need_not_resume(dev)) {
22195                 pm_runtime_set_suspended(dev);
22196 -       else
22197 +       } else {
22198                 __update_runtime_status(dev, RPM_SUSPENDED);
22199 +               dev->power.needs_force_resume = 1;
22200 +       }
22202         return 0;
22204 @@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev)
22205         int (*callback)(struct device *);
22206         int ret = 0;
22208 -       if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
22209 +       if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
22210                 goto out;
22212         /*
22213 @@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev)
22215         pm_runtime_mark_last_busy(dev);
22216  out:
22217 +       dev->power.needs_force_resume = 0;
22218         pm_runtime_enable(dev);
22219         return ret;
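
The new needs_force_resume flag only matters for drivers that reuse their runtime-PM callbacks for system sleep; it lets pm_runtime_force_resume() skip devices that were already runtime-suspended before the system transition instead of resuming them spuriously. Such drivers typically wire the pair up like this (a generic sketch, not tied to any particular driver):

    #include <linux/pm_runtime.h>

    static int foo_runtime_suspend(struct device *dev)
    {
            return 0;       /* put the hardware into a low-power state here */
    }

    static int foo_runtime_resume(struct device *dev)
    {
            return 0;       /* power the hardware back up here */
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                    pm_runtime_force_resume)
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };
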
22221 diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
22222 index ff2ee87987c7..211a335a608d 100644
22223 --- a/drivers/base/regmap/regmap-debugfs.c
22224 +++ b/drivers/base/regmap/regmap-debugfs.c
22225 @@ -660,6 +660,7 @@ void regmap_debugfs_exit(struct regmap *map)
22226                 regmap_debugfs_free_dump_cache(map);
22227                 mutex_unlock(&map->cache_lock);
22228                 kfree(map->debugfs_name);
22229 +               map->debugfs_name = NULL;
22230         } else {
22231                 struct regmap_debugfs_node *node, *tmp;
22233 diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
22234 index fa3719ef80e4..88310ac9ce90 100644
22235 --- a/drivers/base/swnode.c
22236 +++ b/drivers/base/swnode.c
22237 @@ -1032,6 +1032,7 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
22238         }
22240         set_secondary_fwnode(dev, &swnode->fwnode);
22241 +       software_node_notify(dev, KOBJ_ADD);
22243         return 0;
22245 @@ -1105,8 +1106,8 @@ int software_node_notify(struct device *dev, unsigned long action)
22247         switch (action) {
22248         case KOBJ_ADD:
22249 -               ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
22250 -                                       "software_node");
22251 +               ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
22252 +                                              "software_node");
22253                 if (ret)
22254                         break;
22256 diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
22257 index 104b713f4055..d601e49f80e0 100644
22258 --- a/drivers/block/ataflop.c
22259 +++ b/drivers/block/ataflop.c
22260 @@ -729,8 +729,12 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
22261         unsigned long   flags;
22262         int ret;
22264 -       if (type)
22265 +       if (type) {
22266                 type--;
22267 +               if (type >= NUM_DISK_MINORS ||
22268 +                   minor2disktype[type].drive_types > DriveType)
22269 +                       return -EINVAL;
22270 +       }
22272         q = unit[drive].disk[type]->queue;
22273         blk_mq_freeze_queue(q);
22274 @@ -742,11 +746,6 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
22275         local_irq_restore(flags);
22277         if (type) {
22278 -               if (type >= NUM_DISK_MINORS ||
22279 -                   minor2disktype[type].drive_types > DriveType) {
22280 -                       ret = -EINVAL;
22281 -                       goto out;
22282 -               }
22283                 type = minor2disktype[type].index;
22284                 UDT = &atari_disk_type[type];
22285         }
22286 @@ -2002,7 +2001,10 @@ static void ataflop_probe(dev_t dev)
22287         int drive = MINOR(dev) & 3;
22288         int type  = MINOR(dev) >> 2;
22290 -       if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
22291 +       if (type)
22292 +               type--;
22294 +       if (drive >= FD_MAX_UNITS || type >= NUM_DISK_MINORS)
22295                 return;
22296         mutex_lock(&ataflop_probe_lock);
22297         if (!unit[drive].disk[type]) {
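
The do_format() fix is validate-before-use: the old code indexed unit[drive].disk[type]->queue before range-checking type, so a crafted minor number could read past minor2disktype. Consolidated, the hardened shape (using the driver's own symbols) is:

    /* check the index before it is used to dereference anything */
    if (type) {
            type--;
            if (type >= NUM_DISK_MINORS ||
                minor2disktype[type].drive_types > DriveType)
                    return -EINVAL;
    }
    q = unit[drive].disk[type]->queue;      /* now known to be in range */
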
22298 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
22299 index 4ff71b579cfc..974da561b8e5 100644
22300 --- a/drivers/block/nbd.c
22301 +++ b/drivers/block/nbd.c
22302 @@ -1980,7 +1980,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
22303          * config ref and try to destroy the workqueue from inside the work
22304          * queue.
22305          */
22306 -       flush_workqueue(nbd->recv_workq);
22307 +       if (nbd->recv_workq)
22308 +               flush_workqueue(nbd->recv_workq);
22309         if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
22310                                &nbd->config->runtime_flags))
22311                 nbd_config_put(nbd);
22312 diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
22313 index bfcab1c782b5..dae54dd1aeac 100644
22314 --- a/drivers/block/null_blk/zoned.c
22315 +++ b/drivers/block/null_blk/zoned.c
22316 @@ -180,6 +180,7 @@ int null_register_zoned_dev(struct nullb *nullb)
22317  void null_free_zoned_dev(struct nullb_device *dev)
22319         kvfree(dev->zones);
22320 +       dev->zones = NULL;
22323  int null_report_zones(struct gendisk *disk, sector_t sector,
22324 diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
22325 index d4aa6bfc9555..49ad400a5225 100644
22326 --- a/drivers/block/rnbd/rnbd-clt-sysfs.c
22327 +++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
22328 @@ -432,10 +432,14 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
22329          * i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because
22330          * the sysfs link was already removed.
22331          */
22332 -       if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) {
22333 -               sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
22334 +       if (dev->blk_symlink_name) {
22335 +               if (try_module_get(THIS_MODULE)) {
22336 +                       sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
22337 +                       module_put(THIS_MODULE);
22338 +               }
22339 +               /* The symlink name must always be freed. */
22340                 kfree(dev->blk_symlink_name);
22341 -               module_put(THIS_MODULE);
22342 +               dev->blk_symlink_name = NULL;
22343         }
22346 @@ -479,11 +483,7 @@ static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
22347         while ((s = strchr(pathname, '/')))
22348                 s[0] = '!';
22350 -       ret = snprintf(buf, len, "%s", pathname);
22351 -       if (ret >= len)
22352 -               return -ENAMETOOLONG;
22354 -       ret = snprintf(buf, len, "%s@%s", buf, dev->sess->sessname);
22355 +       ret = snprintf(buf, len, "%s@%s", pathname, dev->sess->sessname);
22356         if (ret >= len)
22357                 return -ENAMETOOLONG;
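
The removed two-step formatting was subtly broken: snprintf(buf, len, "%s@%s", buf, ...) passes buf as both destination and source, which is undefined behavior, and the first call's truncation check became redundant once the string is built in one go. The surviving idiom, roughly (helper name hypothetical):

    #include <linux/kernel.h>

    static int foo_format_name(char *buf, size_t len,
                               const char *path, const char *sess)
    {
            int ret = snprintf(buf, len, "%s@%s", path, sess);

            if (ret >= len)         /* output would not fit: truncated */
                    return -ENAMETOOLONG;
            return 0;
    }
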
22359 diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
22360 index 45a470076652..5ab7319ff2ea 100644
22361 --- a/drivers/block/rnbd/rnbd-clt.c
22362 +++ b/drivers/block/rnbd/rnbd-clt.c
22363 @@ -693,7 +693,11 @@ static void remap_devs(struct rnbd_clt_session *sess)
22364                 return;
22365         }
22367 -       rtrs_clt_query(sess->rtrs, &attrs);
22368 +       err = rtrs_clt_query(sess->rtrs, &attrs);
22369 +       if (err) {
22370 +               pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
22371 +               return;
22372 +       }
22373         mutex_lock(&sess->lock);
22374         sess->max_io_size = attrs.max_io_size;
22376 @@ -1234,7 +1238,11 @@ find_and_get_or_create_sess(const char *sessname,
22377                 err = PTR_ERR(sess->rtrs);
22378                 goto wake_up_and_put;
22379         }
22380 -       rtrs_clt_query(sess->rtrs, &attrs);
22382 +       err = rtrs_clt_query(sess->rtrs, &attrs);
22383 +       if (err)
22384 +               goto close_rtrs;
22386         sess->max_io_size = attrs.max_io_size;
22387         sess->queue_depth = attrs.queue_depth;
22389 diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
22390 index 537d499dad3b..73d980840531 100644
22391 --- a/drivers/block/rnbd/rnbd-clt.h
22392 +++ b/drivers/block/rnbd/rnbd-clt.h
22393 @@ -87,7 +87,7 @@ struct rnbd_clt_session {
22394         DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
22395         int     __percpu        *cpu_rr; /* per-cpu var for CPU round-robin */
22396         atomic_t                busy;
22397 -       int                     queue_depth;
22398 +       size_t                  queue_depth;
22399         u32                     max_io_size;
22400         struct blk_mq_tag_set   tag_set;
22401         struct mutex            lock; /* protects state and devs_list */
22402 diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
22403 index a6a68d44f517..677770f32843 100644
22404 --- a/drivers/block/rnbd/rnbd-srv.c
22405 +++ b/drivers/block/rnbd/rnbd-srv.c
22406 @@ -341,7 +341,9 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
22407         struct rnbd_srv_session *sess = sess_dev->sess;
22409         sess_dev->keep_id = true;
22410 -       mutex_lock(&sess->lock);
22411 +       /* Closing has already been started by the client's close message. */
22412 +       if (!mutex_trylock(&sess->lock))
22413 +               return;
22414         rnbd_srv_destroy_dev_session_sysfs(sess_dev);
22415         mutex_unlock(&sess->lock);
22417 diff --git a/drivers/block/swim.c b/drivers/block/swim.c
22418 index cc6a0bc6c005..ac5c170d76e8 100644
22419 --- a/drivers/block/swim.c
22420 +++ b/drivers/block/swim.c
22421 @@ -328,7 +328,7 @@ static inline void swim_motor(struct swim __iomem *base,
22422                         if (swim_readbit(base, MOTOR_ON))
22423                                 break;
22424                         set_current_state(TASK_INTERRUPTIBLE);
22425 -                       schedule_timeout(1);
22426 +                       schedule_min_hrtimeout();
22427                 }
22428         } else if (action == OFF) {
22429                 swim_action(base, MOTOR_OFF);
22430 @@ -347,7 +347,7 @@ static inline void swim_eject(struct swim __iomem *base)
22431                 if (!swim_readbit(base, DISK_IN))
22432                         break;
22433                 set_current_state(TASK_INTERRUPTIBLE);
22434 -               schedule_timeout(1);
22435 +               schedule_min_hrtimeout();
22436         }
22437         swim_select(base, RELAX);
22439 @@ -372,6 +372,6 @@ static inline int swim_step(struct swim __iomem *base)
22441                 set_current_state(TASK_INTERRUPTIBLE);
22442 -               schedule_timeout(1);
22443 +               schedule_min_hrtimeout();
22445                 swim_select(base, RELAX);
22446                 if (!swim_readbit(base, STEP))
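
schedule_min_hrtimeout() is not a mainline API; it comes from the hrtimeout changes carried by xanmod (presumably added elsewhere in this patch) and sleeps for the shortest hrtimer-backed interval rather than a full jiffy, so polling loops like these wake after roughly a millisecond instead of up to 10 ms at HZ=100. The conversion pattern, assuming that helper is present in the tree:

    #include <linux/sched.h>

    static void foo_poll_wait(void)
    {
            set_current_state(TASK_INTERRUPTIBLE);
            schedule_min_hrtimeout();       /* provided by the hrtimeout patches */
    }
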
22447 diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
22448 index b0c71d3a81a0..bda5c815e441 100644
22449 --- a/drivers/block/xen-blkback/common.h
22450 +++ b/drivers/block/xen-blkback/common.h
22451 @@ -313,6 +313,7 @@ struct xen_blkif {
22453         struct work_struct      free_work;
22454         unsigned int            nr_ring_pages;
22455 +       bool                    multi_ref;
22456         /* All rings for this device. */
22457         struct xen_blkif_ring   *rings;
22458         unsigned int            nr_rings;
22459 diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
22460 index c2aaf690352c..125b22205d38 100644
22461 --- a/drivers/block/xen-blkback/xenbus.c
22462 +++ b/drivers/block/xen-blkback/xenbus.c
22463 @@ -998,14 +998,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
22464         for (i = 0; i < nr_grefs; i++) {
22465                 char ring_ref_name[RINGREF_NAME_LEN];
22467 -               snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
22468 +               if (blkif->multi_ref)
22469 +                       snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
22470 +               else {
22471 +                       WARN_ON(i != 0);
22472 +                       snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
22473 +               }
22475                 err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
22476                                    "%u", &ring_ref[i]);
22478                 if (err != 1) {
22479 -                       if (nr_grefs == 1)
22480 -                               break;
22482                         err = -EINVAL;
22483                         xenbus_dev_fatal(dev, err, "reading %s/%s",
22484                                          dir, ring_ref_name);
22485 @@ -1013,18 +1016,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
22486                 }
22487         }
22489 -       if (err != 1) {
22490 -               WARN_ON(nr_grefs != 1);
22492 -               err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
22493 -                                  &ring_ref[0]);
22494 -               if (err != 1) {
22495 -                       err = -EINVAL;
22496 -                       xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
22497 -                       return err;
22498 -               }
22499 -       }
22501         err = -ENOMEM;
22502         for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
22503                 req = kzalloc(sizeof(*req), GFP_KERNEL);
22504 @@ -1129,10 +1120,15 @@ static int connect_ring(struct backend_info *be)
22505                  blkif->nr_rings, blkif->blk_protocol, protocol,
22506                  blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
22508 -       ring_page_order = xenbus_read_unsigned(dev->otherend,
22509 -                                              "ring-page-order", 0);
22511 -       if (ring_page_order > xen_blkif_max_ring_order) {
22512 +       err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
22513 +                          &ring_page_order);
22514 +       if (err != 1) {
22515 +               blkif->nr_ring_pages = 1;
22516 +               blkif->multi_ref = false;
22517 +       } else if (ring_page_order <= xen_blkif_max_ring_order) {
22518 +               blkif->nr_ring_pages = 1 << ring_page_order;
22519 +               blkif->multi_ref = true;
22520 +       } else {
22521                 err = -EINVAL;
22522                 xenbus_dev_fatal(dev, err,
22523                                  "requested ring page order %d exceeds max:%d",
22524 @@ -1141,8 +1137,6 @@ static int connect_ring(struct backend_info *be)
22525                 return err;
22526         }
22528 -       blkif->nr_ring_pages = 1 << ring_page_order;
22530         if (blkif->nr_rings == 1)
22531                 return read_per_ring_refs(&blkif->rings[0], dev->otherend);
22532         else {
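
The rewritten negotiation distinguishes two frontend generations by whether "ring-page-order" exists at all: legacy frontends omit it and publish a single "ring-ref" key, while multi-page frontends publish it together with "ring-ref0".."ring-refN". The new multi_ref flag records which key naming to use later. A sketch of the decision (helper name hypothetical):

    #include <xen/xenbus.h>

    static int foo_read_ring_order(struct xenbus_device *dev,
                                   unsigned int max_order,
                                   unsigned int *nr_pages, bool *multi_ref)
    {
            unsigned int order;

            if (xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order",
                             "%u", &order) != 1) {
                    *nr_pages = 1;          /* legacy: single "ring-ref" */
                    *multi_ref = false;
            } else if (order <= max_order) {
                    *nr_pages = 1 << order; /* modern: "ring-ref%u" keys */
                    *multi_ref = true;
            } else {
                    return -EINVAL;         /* order beyond what we support */
            }
            return 0;
    }
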
22533 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
22534 index 5cbfbd948f67..4a901508e48e 100644
22535 --- a/drivers/bluetooth/btusb.c
22536 +++ b/drivers/bluetooth/btusb.c
22537 @@ -399,7 +399,9 @@ static const struct usb_device_id blacklist_table[] = {
22539         /* MediaTek Bluetooth devices */
22540         { USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
22541 -         .driver_info = BTUSB_MEDIATEK },
22542 +         .driver_info = BTUSB_MEDIATEK |
22543 +                        BTUSB_WIDEBAND_SPEECH |
22544 +                        BTUSB_VALID_LE_STATES },
22546         /* Additional MediaTek MT7615E Bluetooth devices */
22547         { USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
22548 diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
22549 index c2546bf229fb..08c28740dc4e 100644
22550 --- a/drivers/bus/mhi/core/boot.c
22551 +++ b/drivers/bus/mhi/core/boot.c
22552 @@ -389,7 +389,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
22553  void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
22555         const struct firmware *firmware = NULL;
22556 -       struct image_info *image_info;
22557         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22558         const char *fw_name;
22559         void *buf;
22560 @@ -491,44 +490,42 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
22561  fw_load_ee_pthru:
22562         /* Transitioning into MHI RESET->READY state */
22563         ret = mhi_ready_state_transition(mhi_cntrl);
22565 -       if (!mhi_cntrl->fbc_download)
22566 -               return;
22568         if (ret) {
22569                 dev_err(dev, "MHI did not enter READY state\n");
22570                 goto error_ready_state;
22571         }
22573 -       /* Wait for the SBL event */
22574 -       ret = wait_event_timeout(mhi_cntrl->state_event,
22575 -                                mhi_cntrl->ee == MHI_EE_SBL ||
22576 -                                MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
22577 -                                msecs_to_jiffies(mhi_cntrl->timeout_ms));
22578 +       dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
22579 +       return;
22581 -       if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
22582 -               dev_err(dev, "MHI did not enter SBL\n");
22583 -               goto error_ready_state;
22584 +error_ready_state:
22585 +       if (mhi_cntrl->fbc_download) {
22586 +               mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
22587 +               mhi_cntrl->fbc_image = NULL;
22588         }
22590 -       /* Start full firmware image download */
22591 -       image_info = mhi_cntrl->fbc_image;
22592 +error_fw_load:
22593 +       mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
22594 +       wake_up_all(&mhi_cntrl->state_event);
22597 +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
22599 +       struct image_info *image_info = mhi_cntrl->fbc_image;
22600 +       struct device *dev = &mhi_cntrl->mhi_dev->dev;
22601 +       int ret;
22603 +       if (!image_info)
22604 +               return -EIO;
22606         ret = mhi_fw_load_bhie(mhi_cntrl,
22607                                /* Vector table is the last entry */
22608                                &image_info->mhi_buf[image_info->entries - 1]);
22609         if (ret) {
22610 -               dev_err(dev, "MHI did not load image over BHIe, ret: %d\n",
22611 -                       ret);
22612 -               goto error_fw_load;
22613 +               dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
22614 +               mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
22615 +               wake_up_all(&mhi_cntrl->state_event);
22616         }
22618 -       return;
22620 -error_ready_state:
22621 -       mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
22622 -       mhi_cntrl->fbc_image = NULL;
22624 -error_fw_load:
22625 -       mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
22626 -       wake_up_all(&mhi_cntrl->state_event);
22627 +       return ret;
22629 diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
22630 index be4eebb0971b..08b7f4a06bfc 100644
22631 --- a/drivers/bus/mhi/core/init.c
22632 +++ b/drivers/bus/mhi/core/init.c
22633 @@ -508,8 +508,6 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
22635         /* Setup wake db */
22636         mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
22637 -       mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
22638 -       mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
22639         mhi_cntrl->wake_set = false;
22641         /* Setup channel db address for each channel in tre_ring */
22642 @@ -552,6 +550,7 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
22643         struct mhi_ring *buf_ring;
22644         struct mhi_ring *tre_ring;
22645         struct mhi_chan_ctxt *chan_ctxt;
22646 +       u32 tmp;
22648         buf_ring = &mhi_chan->buf_ring;
22649         tre_ring = &mhi_chan->tre_ring;
22650 @@ -565,7 +564,19 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
22651         vfree(buf_ring->base);
22653         buf_ring->base = tre_ring->base = NULL;
22654 +       tre_ring->ctxt_wp = NULL;
22655         chan_ctxt->rbase = 0;
22656 +       chan_ctxt->rlen = 0;
22657 +       chan_ctxt->rp = 0;
22658 +       chan_ctxt->wp = 0;
22660 +       tmp = chan_ctxt->chcfg;
22661 +       tmp &= ~CHAN_CTX_CHSTATE_MASK;
22662 +       tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
22663 +       chan_ctxt->chcfg = tmp;
22665 +       /* Update to all cores */
22666 +       smp_wmb();
22669  int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
22670 @@ -863,12 +874,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
22671         u32 soc_info;
22672         int ret, i;
22674 -       if (!mhi_cntrl)
22675 -               return -EINVAL;
22677 -       if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
22678 +       if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
22679 +           !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
22680             !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
22681 -           !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
22682 +           !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq)
22683                 return -EINVAL;
22685         ret = parse_config(mhi_cntrl, config);
22686 @@ -890,8 +899,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
22687         INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
22688         init_waitqueue_head(&mhi_cntrl->state_event);
22690 -       mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
22691 -                               ("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
22692 +       mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
22693         if (!mhi_cntrl->hiprio_wq) {
22694                 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
22695                 ret = -ENOMEM;
22696 @@ -1296,7 +1304,8 @@ static int mhi_driver_remove(struct device *dev)
22698                 mutex_lock(&mhi_chan->mutex);
22700 -               if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
22701 +               if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
22702 +                    ch_state[dir] == MHI_CH_STATE_STOP) &&
22703                     !mhi_chan->offload_ch)
22704                         mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
22706 diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
22707 index 6f80ec30c0cd..6f37439e5247 100644
22708 --- a/drivers/bus/mhi/core/internal.h
22709 +++ b/drivers/bus/mhi/core/internal.h
22710 @@ -619,6 +619,7 @@ int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
22711  int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
22712  int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
22713                  enum mhi_cmd_type cmd);
22714 +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
22715  static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
22717         return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
22718 diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
22719 index 4e0131b94056..61c37b23dd71 100644
22720 --- a/drivers/bus/mhi/core/main.c
22721 +++ b/drivers/bus/mhi/core/main.c
22722 @@ -242,10 +242,17 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
22723         smp_wmb();
22726 +static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
22728 +       return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
22731  int mhi_destroy_device(struct device *dev, void *data)
22733 +       struct mhi_chan *ul_chan, *dl_chan;
22734         struct mhi_device *mhi_dev;
22735         struct mhi_controller *mhi_cntrl;
22736 +       enum mhi_ee_type ee = MHI_EE_MAX;
22738         if (dev->bus != &mhi_bus_type)
22739                 return 0;
22740 @@ -257,6 +264,17 @@ int mhi_destroy_device(struct device *dev, void *data)
22741         if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
22742                 return 0;
22744 +       ul_chan = mhi_dev->ul_chan;
22745 +       dl_chan = mhi_dev->dl_chan;
22747 +       /*
22748 +        * If an execution environment is specified, remove only the devices
22749 +        * that started in it (based on the channels' ee_mask) as we move on
22750 +        * to a different execution environment.
22751 +        */
22752 +       if (data)
22753 +               ee = *(enum mhi_ee_type *)data;
22755         /*
22756          * For the suspend and resume case, this function will get called
22757          * without mhi_unregister_controller(). Hence, we need to drop the
22758 @@ -264,11 +282,19 @@ int mhi_destroy_device(struct device *dev, void *data)
22759          * be sure that there will be no instances of mhi_dev left after
22760          * this.
22761          */
22762 -       if (mhi_dev->ul_chan)
22763 -               put_device(&mhi_dev->ul_chan->mhi_dev->dev);
22764 +       if (ul_chan) {
22765 +               if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
22766 +                       return 0;
22768 -       if (mhi_dev->dl_chan)
22769 -               put_device(&mhi_dev->dl_chan->mhi_dev->dev);
22770 +               put_device(&ul_chan->mhi_dev->dev);
22771 +       }
22773 +       if (dl_chan) {
22774 +               if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
22775 +                       return 0;
22777 +               put_device(&dl_chan->mhi_dev->dev);
22778 +       }
22780         dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
22781                  mhi_dev->name);
22782 @@ -383,7 +409,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
22783         struct mhi_event_ctxt *er_ctxt =
22784                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
22785         struct mhi_ring *ev_ring = &mhi_event->ring;
22786 -       void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22787 +       dma_addr_t ptr = er_ctxt->rp;
22788 +       void *dev_rp;
22790 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
22791 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22792 +                       "Event ring rp points outside of the event ring\n");
22793 +               return IRQ_HANDLED;
22794 +       }
22796 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
22798         /* Only proceed if event ring has pending events */
22799         if (ev_ring->rp == dev_rp)
22800 @@ -409,7 +444,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
22801         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22802         enum mhi_state state = MHI_STATE_MAX;
22803         enum mhi_pm_state pm_state = 0;
22804 -       enum mhi_ee_type ee = 0;
22805 +       enum mhi_ee_type ee = MHI_EE_MAX;
22807         write_lock_irq(&mhi_cntrl->pm_lock);
22808         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
22809 @@ -418,8 +453,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
22810         }
22812         state = mhi_get_mhi_state(mhi_cntrl);
22813 -       ee = mhi_cntrl->ee;
22814 -       mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
22815 +       ee = mhi_get_exec_env(mhi_cntrl);
22816         dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
22817                 TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
22818                 TO_MHI_STATE_STR(state));
22819 @@ -431,27 +465,30 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
22820         }
22821         write_unlock_irq(&mhi_cntrl->pm_lock);
22823 -        /* If device supports RDDM don't bother processing SYS error */
22824 -       if (mhi_cntrl->rddm_image) {
22825 -               /* host may be performing a device power down already */
22826 -               if (!mhi_is_active(mhi_cntrl))
22827 -                       goto exit_intvec;
22828 +       if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
22829 +               goto exit_intvec;
22831 -               if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
22832 +       switch (ee) {
22833 +       case MHI_EE_RDDM:
22834 +               /* proceed if power down is not already in progress */
22835 +               if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
22836                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
22837 +                       mhi_cntrl->ee = ee;
22838                         wake_up_all(&mhi_cntrl->state_event);
22839                 }
22840 -               goto exit_intvec;
22841 -       }
22843 -       if (pm_state == MHI_PM_SYS_ERR_DETECT) {
22844 +               break;
22845 +       case MHI_EE_PBL:
22846 +       case MHI_EE_EDL:
22847 +       case MHI_EE_PTHRU:
22848 +               mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
22849 +               mhi_cntrl->ee = ee;
22850                 wake_up_all(&mhi_cntrl->state_event);
22852 -               /* For fatal errors, we let controller decide next step */
22853 -               if (MHI_IN_PBL(ee))
22854 -                       mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
22855 -               else
22856 -                       mhi_pm_sys_err_handler(mhi_cntrl);
22857 +               mhi_pm_sys_err_handler(mhi_cntrl);
22858 +               break;
22859 +       default:
22860 +               wake_up_all(&mhi_cntrl->state_event);
22861 +               mhi_pm_sys_err_handler(mhi_cntrl);
22862 +               break;
22863         }
22865  exit_intvec:
22866 @@ -536,6 +573,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
22867                 struct mhi_buf_info *buf_info;
22868                 u16 xfer_len;
22870 +               if (!is_valid_ring_ptr(tre_ring, ptr)) {
22871 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
22872 +                               "Event element points outside of the tre ring\n");
22873 +                       break;
22874 +               }
22875                 /* Get the TRB this event points to */
22876                 ev_tre = mhi_to_virtual(tre_ring, ptr);
22878 @@ -570,8 +612,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
22879                         /* notify client */
22880                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
22882 -                       if (mhi_chan->dir == DMA_TO_DEVICE)
22883 +                       if (mhi_chan->dir == DMA_TO_DEVICE) {
22884                                 atomic_dec(&mhi_cntrl->pending_pkts);
22885 +                               /* Release the reference got from mhi_queue() */
22886 +                               mhi_cntrl->runtime_put(mhi_cntrl);
22887 +                       }
22889                         /*
22890                          * Recycle the buffer if buffer is pre-allocated,
22891 @@ -695,6 +740,12 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
22892         struct mhi_chan *mhi_chan;
22893         u32 chan;
22895 +       if (!is_valid_ring_ptr(mhi_ring, ptr)) {
22896 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22897 +                       "Event element points outside of the cmd ring\n");
22898 +               return;
22899 +       }
22901         cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
22903         chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
22904 @@ -719,6 +770,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22905         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22906         u32 chan;
22907         int count = 0;
22908 +       dma_addr_t ptr = er_ctxt->rp;
22910         /*
22911          * This is a quick check to avoid unnecessary event processing
22912 @@ -728,7 +780,13 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22913         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
22914                 return -EIO;
22916 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22917 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
22918 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22919 +                       "Event ring rp points outside of the event ring\n");
22920 +               return -EIO;
22921 +       }
22923 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
22924         local_rp = ev_ring->rp;
22926         while (dev_rp != local_rp) {
22927 @@ -834,6 +892,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22928                          */
22929                         if (chan < mhi_cntrl->max_chan) {
22930                                 mhi_chan = &mhi_cntrl->mhi_chan[chan];
22931 +                               if (!mhi_chan->configured)
22932 +                                       break;
22933                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
22934                                 event_quota--;
22935                         }
22936 @@ -845,7 +905,15 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22938                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
22939                 local_rp = ev_ring->rp;
22940 -               dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22942 +               ptr = er_ctxt->rp;
22943 +               if (!is_valid_ring_ptr(ev_ring, ptr)) {
22944 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
22945 +                               "Event ring rp points outside of the event ring\n");
22946 +                       return -EIO;
22947 +               }
22949 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
22950                 count++;
22951         }
22953 @@ -868,11 +936,18 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
22954         int count = 0;
22955         u32 chan;
22956         struct mhi_chan *mhi_chan;
22957 +       dma_addr_t ptr = er_ctxt->rp;
22959         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
22960                 return -EIO;
22962 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22963 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
22964 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22965 +                       "Event ring rp points outside of the event ring\n");
22966 +               return -EIO;
22967 +       }
22969 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
22970         local_rp = ev_ring->rp;
22972         while (dev_rp != local_rp && event_quota > 0) {
22973 @@ -886,7 +961,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
22974                  * Only process the event ring elements whose channel
22975                  * ID is within the maximum supported range.
22976                  */
22977 -               if (chan < mhi_cntrl->max_chan) {
22978 +               if (chan < mhi_cntrl->max_chan &&
22979 +                   mhi_cntrl->mhi_chan[chan].configured) {
22980                         mhi_chan = &mhi_cntrl->mhi_chan[chan];
22982                         if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
22983 @@ -900,7 +976,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
22985                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
22986                 local_rp = ev_ring->rp;
22987 -               dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22989 +               ptr = er_ctxt->rp;
22990 +               if (!is_valid_ring_ptr(ev_ring, ptr)) {
22991 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
22992 +                               "Event ring rp points outside of the event ring\n");
22993 +                       return -EIO;
22994 +               }
22996 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
22997                 count++;
22998         }
22999         read_lock_bh(&mhi_cntrl->pm_lock);
23000 @@ -1004,9 +1088,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
23001         if (unlikely(ret))
23002                 goto exit_unlock;
23004 -       /* trigger M3 exit if necessary */
23005 -       if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
23006 -               mhi_trigger_resume(mhi_cntrl);
23007 +       /* The packet is queued, so take a usage ref to exit M3 if necessary.
23008 +        * For a host->device buffer the balancing put is done on buffer
23009 +        * completion; for a device->host buffer it is done after ringing the DB.
23010 +        */
23011 +       mhi_cntrl->runtime_get(mhi_cntrl);
23013         /* Assert dev_wake (to exit/prevent M1/M2)*/
23014         mhi_cntrl->wake_toggle(mhi_cntrl);
23015 @@ -1014,12 +1100,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
23016         if (mhi_chan->dir == DMA_TO_DEVICE)
23017                 atomic_inc(&mhi_cntrl->pending_pkts);
23019 -       if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
23020 -               ret = -EIO;
23021 -               goto exit_unlock;
23022 -       }
23023 +       if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
23024 +               mhi_ring_chan_db(mhi_cntrl, mhi_chan);
23026 -       mhi_ring_chan_db(mhi_cntrl, mhi_chan);
23027 +       if (dir == DMA_FROM_DEVICE)
23028 +               mhi_cntrl->runtime_put(mhi_cntrl);
23030  exit_unlock:
23031         read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
23032 @@ -1365,6 +1450,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
23033         struct mhi_ring *ev_ring;
23034         struct device *dev = &mhi_cntrl->mhi_dev->dev;
23035         unsigned long flags;
23036 +       dma_addr_t ptr;
23038         dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
23040 @@ -1372,7 +1458,15 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
23042         /* mark all stale events related to channel as STALE event */
23043         spin_lock_irqsave(&mhi_event->lock, flags);
23044 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
23046 +       ptr = er_ctxt->rp;
23047 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
23048 +               dev_err(&mhi_cntrl->mhi_dev->dev,
23049 +                       "Event ring rp points outside of the event ring\n");
23050 +               dev_rp = ev_ring->rp;
23051 +       } else {
23052 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
23053 +       }
23055         local_rp = ev_ring->rp;
23056         while (dev_rp != local_rp) {
23057 @@ -1403,8 +1497,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
23058         while (tre_ring->rp != tre_ring->wp) {
23059                 struct mhi_buf_info *buf_info = buf_ring->rp;
23061 -               if (mhi_chan->dir == DMA_TO_DEVICE)
23062 +               if (mhi_chan->dir == DMA_TO_DEVICE) {
23063                         atomic_dec(&mhi_cntrl->pending_pkts);
23064 +                       /* Release the reference got from mhi_queue() */
23065 +                       mhi_cntrl->runtime_put(mhi_cntrl);
23066 +               }
23068                 if (!buf_info->pre_mapped)
23069                         mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
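
All of the is_valid_ring_ptr() checks added above guard the same trust boundary: er_ctxt->rp is written by the device, so a misbehaving endpoint could otherwise steer mhi_to_virtual() outside the ring buffer. The test itself is a plain bounds check against the ring's DMA window; a self-contained sketch (the struct here is a hypothetical stand-in for the fields used from struct mhi_ring):

    #include <linux/types.h>

    struct ring_window {
            dma_addr_t iommu_base;  /* DMA address of the first ring element */
            u64 len;                /* ring length in bytes */
    };

    static bool ring_ptr_is_valid(const struct ring_window *ring, dma_addr_t addr)
    {
            return addr >= ring->iommu_base &&
                   addr < ring->iommu_base + ring->len;
    }

Every caller treats a failed check as fatal for that event (IRQ_HANDLED, -EIO, or falling back to the local read pointer) rather than dereferencing a translated rogue address.
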
23070 diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
23071 index 681960c72d2a..87d3b73bcade 100644
23072 --- a/drivers/bus/mhi/core/pm.c
23073 +++ b/drivers/bus/mhi/core/pm.c
23074 @@ -377,24 +377,28 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
23076         struct mhi_event *mhi_event;
23077         struct device *dev = &mhi_cntrl->mhi_dev->dev;
23078 +       enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
23079         int i, ret;
23081         dev_dbg(dev, "Processing Mission Mode transition\n");
23083         write_lock_irq(&mhi_cntrl->pm_lock);
23084         if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
23085 -               mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
23086 +               ee = mhi_get_exec_env(mhi_cntrl);
23088 -       if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
23089 +       if (!MHI_IN_MISSION_MODE(ee)) {
23090                 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
23091                 write_unlock_irq(&mhi_cntrl->pm_lock);
23092                 wake_up_all(&mhi_cntrl->state_event);
23093                 return -EIO;
23094         }
23095 +       mhi_cntrl->ee = ee;
23096         write_unlock_irq(&mhi_cntrl->pm_lock);
23098         wake_up_all(&mhi_cntrl->state_event);
23100 +       device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
23101 +                             mhi_destroy_device);
23102         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
23104         /* Force MHI to be in M0 state before continuing */
23105 @@ -755,6 +759,8 @@ void mhi_pm_st_worker(struct work_struct *work)
23106                          * either SBL or AMSS states
23107                          */
23108                         mhi_create_devices(mhi_cntrl);
23109 +                       if (mhi_cntrl->fbc_download)
23110 +                               mhi_download_amss_image(mhi_cntrl);
23111                         break;
23112                 case DEV_ST_TRANSITION_MISSION_MODE:
23113                         mhi_pm_mission_mode_transition(mhi_cntrl);
23114 @@ -1092,7 +1098,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
23115                                                            &val) ||
23116                                         !val,
23117                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
23118 -               if (ret) {
23119 +               if (!ret) {
23120                         ret = -EIO;
23121                         dev_info(dev, "Failed to reset MHI due to syserr state\n");
23122                         goto error_bhi_offset;
23123 diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
23124 index 20673a4b4a3c..ef549c695b55 100644
23125 --- a/drivers/bus/mhi/pci_generic.c
23126 +++ b/drivers/bus/mhi/pci_generic.c
23127 @@ -230,6 +230,21 @@ static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
23128         }
23131 +static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
23133 +       /* no-op */
23136 +static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
23138 +       /* no-op */
23141 +static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
23143 +       /* no-op */
23146  static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
23148         struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
23149 @@ -433,6 +448,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
23150         mhi_cntrl->status_cb = mhi_pci_status_cb;
23151         mhi_cntrl->runtime_get = mhi_pci_runtime_get;
23152         mhi_cntrl->runtime_put = mhi_pci_runtime_put;
23153 +       mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
23154 +       mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
23155 +       mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
23157         err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
23158         if (err)
23159 @@ -498,6 +516,12 @@ static void mhi_pci_remove(struct pci_dev *pdev)
23160         mhi_unregister_controller(mhi_cntrl);
23163 +static void mhi_pci_shutdown(struct pci_dev *pdev)
23165 +       mhi_pci_remove(pdev);
23166 +       pci_set_power_state(pdev, PCI_D3hot);
23169  static void mhi_pci_reset_prepare(struct pci_dev *pdev)
23171         struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
23172 @@ -668,6 +692,7 @@ static struct pci_driver mhi_pci_driver = {
23173         .id_table       = mhi_pci_id_table,
23174         .probe          = mhi_pci_probe,
23175         .remove         = mhi_pci_remove,
23176 +       .shutdown       = mhi_pci_shutdown,
23177         .err_handler    = &mhi_pci_err_handler,
23178         .driver.pm      = &mhi_pci_pm_ops
23179  };
23180 diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
23181 index 03ddcf426887..0b8f53a688b8 100644
23182 --- a/drivers/bus/qcom-ebi2.c
23183 +++ b/drivers/bus/qcom-ebi2.c
23184 @@ -353,8 +353,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev)
23186                 /* Figure out the chipselect */
23187                 ret = of_property_read_u32(child, "reg", &csindex);
23188 -               if (ret)
23189 +               if (ret) {
23190 +                       of_node_put(child);
23191                         return ret;
23192 +               }
23194                 if (csindex > 5) {
23195                         dev_err(dev,
23196 diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
23197 index 3d74f237f005..68145e326eb9 100644
23198 --- a/drivers/bus/ti-sysc.c
23199 +++ b/drivers/bus/ti-sysc.c
23200 @@ -635,6 +635,51 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
23201         return 0;
23204 +/* Interconnect instances to probe before l4_per instances */
23205 +static struct resource early_bus_ranges[] = {
23206 +       /* am3/4 l4_wkup */
23207 +       { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
23208 +       /* omap4/5 and dra7 l4_cfg */
23209 +       { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
23210 +       /* omap4 l4_wkup */
23211 +       { .start = 0x4a300000, .end = 0x4a300000 + 0x30000,  },
23212 +       /* omap5 and dra7 l4_wkup without dra7 dcan segment */
23213 +       { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000,  },
23216 +static atomic_t sysc_defer = ATOMIC_INIT(10);
23218 +/**
23219 + * sysc_defer_non_critical - defer non-critical interconnect probing
23220 + * @ddata: device driver data
23221 + *
23222 + * We want to probe l4_cfg and l4_wkup interconnect instances before any
23223 + * l4_per instances as l4_per instances depend on resources on l4_cfg and
23224 + * l4_wkup interconnects.
23225 + */
23226 +static int sysc_defer_non_critical(struct sysc *ddata)
23228 +       struct resource *res;
23229 +       int i;
23231 +       if (!atomic_read(&sysc_defer))
23232 +               return 0;
23234 +       for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
23235 +               res = &early_bus_ranges[i];
23236 +               if (ddata->module_pa >= res->start &&
23237 +                   ddata->module_pa <= res->end) {
23238 +                       atomic_set(&sysc_defer, 0);
23240 +                       return 0;
23241 +               }
23242 +       }
23244 +       atomic_dec_if_positive(&sysc_defer);
23246 +       return -EPROBE_DEFER;
23249  static struct device_node *stdout_path;
23251  static void sysc_init_stdout_path(struct sysc *ddata)
23252 @@ -856,15 +901,19 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
23253         struct device_node *np = ddata->dev->of_node;
23254         int error;
23256 -       if (!of_get_property(np, "reg", NULL))
23257 -               return 0;
23259         error = sysc_parse_and_check_child_range(ddata);
23260         if (error)
23261                 return error;
23263 +       error = sysc_defer_non_critical(ddata);
23264 +       if (error)
23265 +               return error;
23267         sysc_check_children(ddata);
23269 +       if (!of_get_property(np, "reg", NULL))
23270 +               return 0;
23272         error = sysc_parse_registers(ddata);
23273         if (error)
23274                 return error;
23275 diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
23276 index 9874fc1c815b..1831099306aa 100644
23277 --- a/drivers/cdrom/gdrom.c
23278 +++ b/drivers/cdrom/gdrom.c
23279 @@ -743,6 +743,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
23280  static int probe_gdrom(struct platform_device *devptr)
23282         int err;
23284 +       /*
23285 +        * Ensure our single static device is fully reinitialized in case it
23286 +        * was used before.
23287 +        */
23288 +       memset(&gd, 0, sizeof(gd));
23290         /* Start the device */
23291         if (gdrom_execute_diagnostic() != 1) {
23292                 pr_warn("ATA Probe for GDROM failed\n");
23293 @@ -831,6 +838,8 @@ static int remove_gdrom(struct platform_device *devptr)
23294         if (gdrom_major)
23295                 unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
23296         unregister_cdrom(gd.cd_info);
23297 +       kfree(gd.cd_info);
23298 +       kfree(gd.toc);
23300         return 0;
23302 @@ -846,7 +855,7 @@ static struct platform_driver gdrom_driver = {
23303  static int __init init_gdrom(void)
23305         int rc;
23306 -       gd.toc = NULL;
23308         rc = platform_driver_register(&gdrom_driver);
23309         if (rc)
23310                 return rc;
23311 @@ -862,8 +871,6 @@ static void __exit exit_gdrom(void)
23313         platform_device_unregister(pd);
23314         platform_driver_unregister(&gdrom_driver);
23315 -       kfree(gd.toc);
23316 -       kfree(gd.cd_info);
23319  module_init(init_gdrom);
23320 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
23321 index c44ad18464f1..ca87178200e0 100644
23322 --- a/drivers/char/ipmi/ipmi_msghandler.c
23323 +++ b/drivers/char/ipmi/ipmi_msghandler.c
23324 @@ -3563,7 +3563,7 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
23325         /* Current message first, to preserve order */
23326         while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
23327                 /* Wait for the message to clear out. */
23328 -               schedule_timeout(1);
23329 +               schedule_min_hrtimeout();
23330         }
23332         /* No need for locks, the interface is down. */
23333 diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
23334 index 0416b9c9d410..9ce5fae0f1cf 100644
23335 --- a/drivers/char/ipmi/ipmi_ssif.c
23336 +++ b/drivers/char/ipmi/ipmi_ssif.c
23337 @@ -1288,7 +1288,7 @@ static void shutdown_ssif(void *send_info)
23339         /* make sure the driver is not looking for flags any more. */
23340         while (ssif_info->ssif_state != SSIF_NORMAL)
23341 -               schedule_timeout(1);
23342 +               schedule_min_hrtimeout();
23344         ssif_info->stopping = true;
23345         del_timer_sync(&ssif_info->watch_timer);
23346 diff --git a/drivers/char/random.c b/drivers/char/random.c
23347 index 0fe9e200e4c8..5d6acfecd919 100644
23348 --- a/drivers/char/random.c
23349 +++ b/drivers/char/random.c
23350 @@ -819,7 +819,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
23352  static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
23354 -       memcpy(&crng->state[0], "expand 32-byte k", 16);
23355 +       chacha_init_consts(crng->state);
23356         _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
23357         crng_init_try_arch(crng);
23358         crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
23359 @@ -827,7 +827,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
23361  static void __init crng_initialize_primary(struct crng_state *crng)
23363 -       memcpy(&crng->state[0], "expand 32-byte k", 16);
23364 +       chacha_init_consts(crng->state);
23365         _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
23366         if (crng_init_try_arch_early(crng) && trust_cpu) {
23367                 invalidate_batched_entropy();
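
chacha_init_consts() loads the standard ChaCha constant "expand 32-byte k" as four native-endian 32-bit words; the memcpy() it replaces copied raw bytes, which only yields the words the spec requires on little-endian machines. The helper amounts to the following (a sketch of the definition in include/crypto/chacha.h):

    static inline void chacha_init_consts(u32 *state)
    {
            state[0] = 0x61707865U; /* "expa" */
            state[1] = 0x3320646eU; /* "nd 3" */
            state[2] = 0x79622d32U; /* "2-by" */
            state[3] = 0x6b206574U; /* "te k" */
    }
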
23368 diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
23369 index 3633ed70f48f..1b18ce5ebab1 100644
23370 --- a/drivers/char/tpm/eventlog/acpi.c
23371 +++ b/drivers/char/tpm/eventlog/acpi.c
23372 @@ -41,6 +41,27 @@ struct acpi_tcpa {
23373         };
23374  };
23376 +/* Check that the given log is indeed a TPM2 log. */
23377 +static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
23379 +       struct tcg_efi_specid_event_head *efispecid;
23380 +       struct tcg_pcr_event *event_header;
23381 +       int n;
23383 +       if (len < sizeof(*event_header))
23384 +               return false;
23385 +       len -= sizeof(*event_header);
23386 +       event_header = bios_event_log;
23388 +       if (len < sizeof(*efispecid))
23389 +               return false;
23390 +       efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
23392 +       n = memcmp(efispecid->signature, TCG_SPECID_SIG,
23393 +                  sizeof(TCG_SPECID_SIG));
23394 +       return n == 0;
23397  /* read binary bios log */
23398  int tpm_read_log_acpi(struct tpm_chip *chip)
23400 @@ -52,6 +73,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
23401         struct acpi_table_tpm2 *tbl;
23402         struct acpi_tpm2_phy *tpm2_phy;
23403         int format;
23404 +       int ret;
23406         log = &chip->log;
23408 @@ -112,6 +134,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
23410         log->bios_event_log_end = log->bios_event_log + len;
23412 +       ret = -EIO;
23413         virt = acpi_os_map_iomem(start, len);
23414         if (!virt)
23415                 goto err;
23416 @@ -119,11 +142,19 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
23417         memcpy_fromio(log->bios_event_log, virt, len);
23419         acpi_os_unmap_iomem(virt, len);
23421 +       if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
23422 +           !tpm_is_tpm2_log(log->bios_event_log, len)) {
23423 +               /* try EFI log next */
23424 +               ret = -ENODEV;
23425 +               goto err;
23426 +       }
23428         return format;
23430  err:
23431         kfree(log->bios_event_log);
23432         log->bios_event_log = NULL;
23433 -       return -EIO;
23434 +       return ret;
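
tpm_is_tpm2_log() decides the format by peeking at the first log entry: a crypto-agile (TPM 2.0) log starts with a TPM 1.2-style event header whose event data carries a spec-ID header signed with the TCG_SPECID_SIG string ("Spec ID Event03"). Abridged layout, as the check above assumes:

    struct tcg_pcr_event {          /* TPM 1.2-style header of the first entry */
            u32 pcr_idx;
            u32 event_type;
            u8  digest[20];
            u32 event_size;
            u8  event[];            /* begins with the spec-ID header below */
    } __packed;

    struct tcg_efi_specid_event_head {
            u8  signature[16];      /* "Spec ID Event03" for a TPM 2.0 log */
            /* ... */
    } __packed;
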
23437 diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
23438 index 7460f230bae4..8512ec76d526 100644
23439 --- a/drivers/char/tpm/eventlog/common.c
23440 +++ b/drivers/char/tpm/eventlog/common.c
23441 @@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
23442         int log_version;
23443         int rc = 0;
23445 +       if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
23446 +               return;
23448         rc = tpm_read_log(chip);
23449         if (rc < 0)
23450                 return;
23451 diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
23452 index 35229e5143ca..e6cb9d525e30 100644
23453 --- a/drivers/char/tpm/eventlog/efi.c
23454 +++ b/drivers/char/tpm/eventlog/efi.c
23455 @@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
23458         struct efi_tcg2_final_events_table *final_tbl = NULL;
23459 +       int final_events_log_size = efi_tpm_final_log_size;
23460         struct linux_efi_tpm_eventlog *log_tbl;
23461         struct tpm_bios_log *log;
23462         u32 log_size;
23463 @@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip)
23464         ret = tpm_log_version;
23466         if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
23467 -           efi_tpm_final_log_size == 0 ||
23468 +           final_events_log_size == 0 ||
23469             tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
23470                 goto out;
23472         final_tbl = memremap(efi.tpm_final_log,
23473 -                            sizeof(*final_tbl) + efi_tpm_final_log_size,
23474 +                            sizeof(*final_tbl) + final_events_log_size,
23475                              MEMREMAP_WB);
23476         if (!final_tbl) {
23477                 pr_err("Could not map UEFI TPM final log\n");
23478 @@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip)
23479                 goto out;
23480         }
23482 -       efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
23483 +       /*
23484 +        * The 'final events log' size excludes the 'final events preboot log'
23485 +        * at its beginning.
23486 +        */
23487 +       final_events_log_size -= log_tbl->final_events_preboot_size;
23489 +       /*
23490 +        * Allocate memory for the 'combined log', to which the
23491 +        * 'final events log' will be appended.
23492 +        */
23493         tmp = krealloc(log->bios_event_log,
23494 -                      log_size + efi_tpm_final_log_size,
23495 +                      log_size + final_events_log_size,
23496                        GFP_KERNEL);
23497         if (!tmp) {
23498                 kfree(log->bios_event_log);
23499 @@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip)
23500         log->bios_event_log = tmp;
23502         /*
23503 -        * Copy any of the final events log that didn't also end up in the
23504 -        * main log. Events can be logged in both if events are generated
23505 +        * Append any of the 'final events log' that didn't also end up in the
23506 +        * 'main log'. Events can be logged in both if events are generated
23507          * between GetEventLog() and ExitBootServices().
23508          */
23509         memcpy((void *)log->bios_event_log + log_size,
23510                final_tbl->events + log_tbl->final_events_preboot_size,
23511 -              efi_tpm_final_log_size);
23512 +              final_events_log_size);
23513 +       /*
23514 +        * The size of the 'combined log' is the size of the 'main log' plus
23515 +        * the size of the 'final events log'.
23516 +        */
23517         log->bios_event_log_end = log->bios_event_log +
23518 -               log_size + efi_tpm_final_log_size;
23519 +               log_size + final_events_log_size;
23521  out:
23522         memunmap(final_tbl);
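
A worked example of the size bookkeeping above, with hypothetical values: log_size = 4096 bytes for the main log, and firmware reporting efi_tpm_final_log_size = 1024 with final_events_preboot_size = 256:

    final_events_log_size = 1024 - 256;             /* 768 bytes left to append  */
    tmp = krealloc(log->bios_event_log,
                   4096 + 768, GFP_KERNEL);         /* room for the combined log */
    memcpy((void *)log->bios_event_log + 4096,
           final_tbl->events + 256, 768);           /* skip the preboot events   */
    log->bios_event_log_end = log->bios_event_log + 4096 + 768;
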
23523 diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
23524 index eff1f12d981a..c84d23951219 100644
23525 --- a/drivers/char/tpm/tpm2-cmd.c
23526 +++ b/drivers/char/tpm/tpm2-cmd.c
23527 @@ -656,6 +656,7 @@ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
23529         if (nr_commands !=
23530             be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
23531 +               rc = -EFAULT;
23532                 tpm_buf_destroy(&buf);
23533                 goto out;
23534         }
23535 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
23536 index a2e0395cbe61..55b9d3965ae1 100644
23537 --- a/drivers/char/tpm/tpm_tis_core.c
23538 +++ b/drivers/char/tpm/tpm_tis_core.c
23539 @@ -709,16 +709,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
23540         cap_t cap;
23541         int ret;
23543 -       /* TPM 2.0 */
23544 -       if (chip->flags & TPM_CHIP_FLAG_TPM2)
23545 -               return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
23547 -       /* TPM 1.2 */
23548         ret = request_locality(chip, 0);
23549         if (ret < 0)
23550                 return ret;
23552 -       ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
23553 +       if (chip->flags & TPM_CHIP_FLAG_TPM2)
23554 +               ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
23555 +       else
23556 +               ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
23558         release_locality(chip, 0);
23560 @@ -1127,12 +1125,20 @@ int tpm_tis_resume(struct device *dev)
23561         if (ret)
23562                 return ret;
23564 -       /* TPM 1.2 requires self-test on resume. This function actually returns
23565 +       /*
23566 +        * TPM 1.2 requires self-test on resume. This function actually returns
23567          * an error code but for an unknown reason it isn't handled.
23568          */
23569 -       if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
23570 +       if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
23571 +               ret = request_locality(chip, 0);
23572 +               if (ret < 0)
23573 +                       return ret;
23575                 tpm1_do_selftest(chip);
23577 +               release_locality(chip, 0);
23578 +       }
23580         return 0;
23582  EXPORT_SYMBOL_GPL(tpm_tis_resume);
23583 diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
23584 index ec9a65e7887d..f19c227d20f4 100644
23585 --- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
23586 +++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
23587 @@ -483,6 +483,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
23588         expected = be32_to_cpup((__be32 *)(buf + 2));
23589         if (expected > buf_len) {
23590                 dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
23591 +               rc = -E2BIG;
23592                 goto out_err;
23593         }
23595 diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
23596 index 6a0059e508e3..93f5d11c830b 100644
23597 --- a/drivers/char/ttyprintk.c
23598 +++ b/drivers/char/ttyprintk.c
23599 @@ -158,12 +158,23 @@ static int tpk_ioctl(struct tty_struct *tty,
23600         return 0;
23604 + * TTY operations hangup function.
23605 + */
23606 +static void tpk_hangup(struct tty_struct *tty)
23608 +       struct ttyprintk_port *tpkp = tty->driver_data;
23610 +       tty_port_hangup(&tpkp->port);
23613  static const struct tty_operations ttyprintk_ops = {
23614         .open = tpk_open,
23615         .close = tpk_close,
23616         .write = tpk_write,
23617         .write_room = tpk_write_room,
23618         .ioctl = tpk_ioctl,
23619 +       .hangup = tpk_hangup,
23620  };
23622  static const struct tty_port_operations null_ops = { };
23623 diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
23624 index a55b37fc2c8b..bc3be5f3eae1 100644
23625 --- a/drivers/clk/clk-ast2600.c
23626 +++ b/drivers/clk/clk-ast2600.c
23627 @@ -61,10 +61,10 @@ static void __iomem *scu_g6_base;
23628  static const struct aspeed_gate_data aspeed_g6_gates[] = {
23629         /*                                  clk rst  name               parent   flags */
23630         [ASPEED_CLK_GATE_MCLK]          = {  0, -1, "mclk-gate",        "mpll",  CLK_IS_CRITICAL }, /* SDRAM */
23631 -       [ASPEED_CLK_GATE_ECLK]          = {  1, -1, "eclk-gate",        "eclk",  0 },   /* Video Engine */
23632 +       [ASPEED_CLK_GATE_ECLK]          = {  1,  6, "eclk-gate",        "eclk",  0 },   /* Video Engine */
23633         [ASPEED_CLK_GATE_GCLK]          = {  2,  7, "gclk-gate",        NULL,    0 },   /* 2D engine */
23634         /* vclk parent - dclk/d1clk/hclk/mclk */
23635 -       [ASPEED_CLK_GATE_VCLK]          = {  3,  6, "vclk-gate",        NULL,    0 },   /* Video Capture */
23636 +       [ASPEED_CLK_GATE_VCLK]          = {  3, -1, "vclk-gate",        NULL,    0 },   /* Video Capture */
23637         [ASPEED_CLK_GATE_BCLK]          = {  4,  8, "bclk-gate",        "bclk",  0 }, /* PCIe/PCI */
23638         /* From dpll */
23639         [ASPEED_CLK_GATE_DCLK]          = {  5, -1, "dclk-gate",        NULL,    CLK_IS_CRITICAL }, /* DAC */
23640 diff --git a/drivers/clk/imx/clk-imx25.c b/drivers/clk/imx/clk-imx25.c
23641 index a66cabfbf94f..66192fe0a898 100644
23642 --- a/drivers/clk/imx/clk-imx25.c
23643 +++ b/drivers/clk/imx/clk-imx25.c
23644 @@ -73,16 +73,6 @@ enum mx25_clks {
23646  static struct clk *clk[clk_max];
23648 -static struct clk ** const uart_clks[] __initconst = {
23649 -       &clk[uart_ipg_per],
23650 -       &clk[uart1_ipg],
23651 -       &clk[uart2_ipg],
23652 -       &clk[uart3_ipg],
23653 -       &clk[uart4_ipg],
23654 -       &clk[uart5_ipg],
23655 -       NULL
23658  static int __init __mx25_clocks_init(void __iomem *ccm_base)
23660         BUG_ON(!ccm_base);
23661 @@ -228,7 +218,7 @@ static int __init __mx25_clocks_init(void __iomem *ccm_base)
23662          */
23663         clk_set_parent(clk[cko_sel], clk[ipg]);
23665 -       imx_register_uart_clocks(uart_clks);
23666 +       imx_register_uart_clocks(6);
23668         return 0;
23670 diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
23671 index 5585ded8b8c6..56a5fc402b10 100644
23672 --- a/drivers/clk/imx/clk-imx27.c
23673 +++ b/drivers/clk/imx/clk-imx27.c
23674 @@ -49,17 +49,6 @@ static const char *ssi_sel_clks[] = { "spll_gate", "mpll", };
23675  static struct clk *clk[IMX27_CLK_MAX];
23676  static struct clk_onecell_data clk_data;
23678 -static struct clk ** const uart_clks[] __initconst = {
23679 -       &clk[IMX27_CLK_PER1_GATE],
23680 -       &clk[IMX27_CLK_UART1_IPG_GATE],
23681 -       &clk[IMX27_CLK_UART2_IPG_GATE],
23682 -       &clk[IMX27_CLK_UART3_IPG_GATE],
23683 -       &clk[IMX27_CLK_UART4_IPG_GATE],
23684 -       &clk[IMX27_CLK_UART5_IPG_GATE],
23685 -       &clk[IMX27_CLK_UART6_IPG_GATE],
23686 -       NULL
23689  static void __init _mx27_clocks_init(unsigned long fref)
23691         BUG_ON(!ccm);
23692 @@ -176,7 +165,7 @@ static void __init _mx27_clocks_init(unsigned long fref)
23694         clk_prepare_enable(clk[IMX27_CLK_EMI_AHB_GATE]);
23696 -       imx_register_uart_clocks(uart_clks);
23697 +       imx_register_uart_clocks(7);
23699         imx_print_silicon_rev("i.MX27", mx27_revision());
23701 diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
23702 index c1df03665c09..0fe5ac210156 100644
23703 --- a/drivers/clk/imx/clk-imx35.c
23704 +++ b/drivers/clk/imx/clk-imx35.c
23705 @@ -82,14 +82,6 @@ enum mx35_clks {
23707  static struct clk *clk[clk_max];
23709 -static struct clk ** const uart_clks[] __initconst = {
23710 -       &clk[ipg],
23711 -       &clk[uart1_gate],
23712 -       &clk[uart2_gate],
23713 -       &clk[uart3_gate],
23714 -       NULL
23717  static void __init _mx35_clocks_init(void)
23719         void __iomem *base;
23720 @@ -243,7 +235,7 @@ static void __init _mx35_clocks_init(void)
23721          */
23722         clk_prepare_enable(clk[scc_gate]);
23724 -       imx_register_uart_clocks(uart_clks);
23725 +       imx_register_uart_clocks(4);
23727         imx_print_silicon_rev("i.MX35", mx35_revision());
23729 diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
23730 index 01e079b81026..e4493846454d 100644
23731 --- a/drivers/clk/imx/clk-imx5.c
23732 +++ b/drivers/clk/imx/clk-imx5.c
23733 @@ -128,30 +128,6 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_
23734  static struct clk *clk[IMX5_CLK_END];
23735  static struct clk_onecell_data clk_data;
23737 -static struct clk ** const uart_clks_mx51[] __initconst = {
23738 -       &clk[IMX5_CLK_UART1_IPG_GATE],
23739 -       &clk[IMX5_CLK_UART1_PER_GATE],
23740 -       &clk[IMX5_CLK_UART2_IPG_GATE],
23741 -       &clk[IMX5_CLK_UART2_PER_GATE],
23742 -       &clk[IMX5_CLK_UART3_IPG_GATE],
23743 -       &clk[IMX5_CLK_UART3_PER_GATE],
23744 -       NULL
23747 -static struct clk ** const uart_clks_mx50_mx53[] __initconst = {
23748 -       &clk[IMX5_CLK_UART1_IPG_GATE],
23749 -       &clk[IMX5_CLK_UART1_PER_GATE],
23750 -       &clk[IMX5_CLK_UART2_IPG_GATE],
23751 -       &clk[IMX5_CLK_UART2_PER_GATE],
23752 -       &clk[IMX5_CLK_UART3_IPG_GATE],
23753 -       &clk[IMX5_CLK_UART3_PER_GATE],
23754 -       &clk[IMX5_CLK_UART4_IPG_GATE],
23755 -       &clk[IMX5_CLK_UART4_PER_GATE],
23756 -       &clk[IMX5_CLK_UART5_IPG_GATE],
23757 -       &clk[IMX5_CLK_UART5_PER_GATE],
23758 -       NULL
23761  static void __init mx5_clocks_common_init(void __iomem *ccm_base)
23763         clk[IMX5_CLK_DUMMY]             = imx_clk_fixed("dummy", 0);
23764 @@ -382,7 +358,7 @@ static void __init mx50_clocks_init(struct device_node *np)
23765         r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
23766         clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
23768 -       imx_register_uart_clocks(uart_clks_mx50_mx53);
23769 +       imx_register_uart_clocks(5);
23771  CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
23773 @@ -488,7 +464,7 @@ static void __init mx51_clocks_init(struct device_node *np)
23774         val |= 1 << 23;
23775         writel(val, MXC_CCM_CLPCR);
23777 -       imx_register_uart_clocks(uart_clks_mx51);
23778 +       imx_register_uart_clocks(3);
23780  CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
23782 @@ -633,6 +609,6 @@ static void __init mx53_clocks_init(struct device_node *np)
23783         r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
23784         clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
23786 -       imx_register_uart_clocks(uart_clks_mx50_mx53);
23787 +       imx_register_uart_clocks(5);
23789  CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
23790 diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
23791 index 521d6136d22c..496900de0b0b 100644
23792 --- a/drivers/clk/imx/clk-imx6q.c
23793 +++ b/drivers/clk/imx/clk-imx6q.c
23794 @@ -140,13 +140,6 @@ static inline int clk_on_imx6dl(void)
23795         return of_machine_is_compatible("fsl,imx6dl");
23798 -static const int uart_clk_ids[] __initconst = {
23799 -       IMX6QDL_CLK_UART_IPG,
23800 -       IMX6QDL_CLK_UART_SERIAL,
23803 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23805  static int ldb_di_sel_by_clock_id(int clock_id)
23807         switch (clock_id) {
23808 @@ -440,7 +433,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
23809         struct device_node *np;
23810         void __iomem *anatop_base, *base;
23811         int ret;
23812 -       int i;
23814         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23815                                           IMX6QDL_CLK_END), GFP_KERNEL);
23816 @@ -982,12 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
23817                                hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
23818         }
23820 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23821 -               int index = uart_clk_ids[i];
23823 -               uart_clks[i] = &hws[index]->clk;
23824 -       }
23826 -       imx_register_uart_clocks(uart_clks);
23827 +       imx_register_uart_clocks(1);
23829  CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
23830 diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
23831 index 29eab05c9068..277365970320 100644
23832 --- a/drivers/clk/imx/clk-imx6sl.c
23833 +++ b/drivers/clk/imx/clk-imx6sl.c
23834 @@ -179,19 +179,11 @@ void imx6sl_set_wait_clk(bool enter)
23835                 imx6sl_enable_pll_arm(false);
23838 -static const int uart_clk_ids[] __initconst = {
23839 -       IMX6SL_CLK_UART,
23840 -       IMX6SL_CLK_UART_SERIAL,
23843 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23845  static void __init imx6sl_clocks_init(struct device_node *ccm_node)
23847         struct device_node *np;
23848         void __iomem *base;
23849         int ret;
23850 -       int i;
23852         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23853                                           IMX6SL_CLK_END), GFP_KERNEL);
23854 @@ -448,12 +440,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
23855         clk_set_parent(hws[IMX6SL_CLK_LCDIF_AXI_SEL]->clk,
23856                        hws[IMX6SL_CLK_PLL2_PFD2]->clk);
23858 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23859 -               int index = uart_clk_ids[i];
23861 -               uart_clks[i] = &hws[index]->clk;
23862 -       }
23864 -       imx_register_uart_clocks(uart_clks);
23865 +       imx_register_uart_clocks(2);
23867  CLK_OF_DECLARE(imx6sl, "fsl,imx6sl-ccm", imx6sl_clocks_init);
23868 diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
23869 index 8e8288bda4d0..31d777f30039 100644
23870 --- a/drivers/clk/imx/clk-imx6sll.c
23871 +++ b/drivers/clk/imx/clk-imx6sll.c
23872 @@ -76,26 +76,10 @@ static u32 share_count_ssi1;
23873  static u32 share_count_ssi2;
23874  static u32 share_count_ssi3;
23876 -static const int uart_clk_ids[] __initconst = {
23877 -       IMX6SLL_CLK_UART1_IPG,
23878 -       IMX6SLL_CLK_UART1_SERIAL,
23879 -       IMX6SLL_CLK_UART2_IPG,
23880 -       IMX6SLL_CLK_UART2_SERIAL,
23881 -       IMX6SLL_CLK_UART3_IPG,
23882 -       IMX6SLL_CLK_UART3_SERIAL,
23883 -       IMX6SLL_CLK_UART4_IPG,
23884 -       IMX6SLL_CLK_UART4_SERIAL,
23885 -       IMX6SLL_CLK_UART5_IPG,
23886 -       IMX6SLL_CLK_UART5_SERIAL,
23889 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23891  static void __init imx6sll_clocks_init(struct device_node *ccm_node)
23893         struct device_node *np;
23894         void __iomem *base;
23895 -       int i;
23897         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23898                                           IMX6SLL_CLK_END), GFP_KERNEL);
23899 @@ -356,13 +340,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
23901         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
23903 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23904 -               int index = uart_clk_ids[i];
23906 -               uart_clks[i] = &hws[index]->clk;
23907 -       }
23909 -       imx_register_uart_clocks(uart_clks);
23910 +       imx_register_uart_clocks(5);
23912         /* Lower the AHB clock rate before changing the clock source. */
23913         clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 99000000);
23914 diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
23915 index 20dcce526d07..fc1bd23d4583 100644
23916 --- a/drivers/clk/imx/clk-imx6sx.c
23917 +++ b/drivers/clk/imx/clk-imx6sx.c
23918 @@ -117,18 +117,10 @@ static u32 share_count_ssi3;
23919  static u32 share_count_sai1;
23920  static u32 share_count_sai2;
23922 -static const int uart_clk_ids[] __initconst = {
23923 -       IMX6SX_CLK_UART_IPG,
23924 -       IMX6SX_CLK_UART_SERIAL,
23927 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23929  static void __init imx6sx_clocks_init(struct device_node *ccm_node)
23931         struct device_node *np;
23932         void __iomem *base;
23933 -       int i;
23935         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23936                                           IMX6SX_CLK_CLK_END), GFP_KERNEL);
23937 @@ -556,12 +548,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
23938         clk_set_parent(hws[IMX6SX_CLK_QSPI1_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
23939         clk_set_parent(hws[IMX6SX_CLK_QSPI2_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
23941 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23942 -               int index = uart_clk_ids[i];
23944 -               uart_clks[i] = &hws[index]->clk;
23945 -       }
23947 -       imx_register_uart_clocks(uart_clks);
23948 +       imx_register_uart_clocks(2);
23950  CLK_OF_DECLARE(imx6sx, "fsl,imx6sx-ccm", imx6sx_clocks_init);
23951 diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
23952 index 22d24a6a05e7..c4e0f1c07192 100644
23953 --- a/drivers/clk/imx/clk-imx7d.c
23954 +++ b/drivers/clk/imx/clk-imx7d.c
23955 @@ -377,23 +377,10 @@ static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_
23956  static struct clk_hw **hws;
23957  static struct clk_hw_onecell_data *clk_hw_data;
23959 -static const int uart_clk_ids[] __initconst = {
23960 -       IMX7D_UART1_ROOT_CLK,
23961 -       IMX7D_UART2_ROOT_CLK,
23962 -       IMX7D_UART3_ROOT_CLK,
23963 -       IMX7D_UART4_ROOT_CLK,
23964 -       IMX7D_UART5_ROOT_CLK,
23965 -       IMX7D_UART6_ROOT_CLK,
23966 -       IMX7D_UART7_ROOT_CLK,
23969 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23971  static void __init imx7d_clocks_init(struct device_node *ccm_node)
23973         struct device_node *np;
23974         void __iomem *base;
23975 -       int i;
23977         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23978                                           IMX7D_CLK_END), GFP_KERNEL);
23979 @@ -897,14 +884,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
23980         hws[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
23981         hws[IMX7D_USB_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
23983 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23984 -               int index = uart_clk_ids[i];
23986 -               uart_clks[i] = &hws[index]->clk;
23987 -       }
23990 -       imx_register_uart_clocks(uart_clks);
23991 +       imx_register_uart_clocks(7);
23994  CLK_OF_DECLARE(imx7d, "fsl,imx7d-ccm", imx7d_clocks_init);
23995 diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
23996 index 634c0b6636b0..779e09105da7 100644
23997 --- a/drivers/clk/imx/clk-imx7ulp.c
23998 +++ b/drivers/clk/imx/clk-imx7ulp.c
23999 @@ -43,19 +43,6 @@ static const struct clk_div_table ulp_div_table[] = {
24000         { /* sentinel */ },
24001  };
24003 -static const int pcc2_uart_clk_ids[] __initconst = {
24004 -       IMX7ULP_CLK_LPUART4,
24005 -       IMX7ULP_CLK_LPUART5,
24008 -static const int pcc3_uart_clk_ids[] __initconst = {
24009 -       IMX7ULP_CLK_LPUART6,
24010 -       IMX7ULP_CLK_LPUART7,
24013 -static struct clk **pcc2_uart_clks[ARRAY_SIZE(pcc2_uart_clk_ids) + 1] __initdata;
24014 -static struct clk **pcc3_uart_clks[ARRAY_SIZE(pcc3_uart_clk_ids) + 1] __initdata;
24016  static void __init imx7ulp_clk_scg1_init(struct device_node *np)
24018         struct clk_hw_onecell_data *clk_data;
24019 @@ -150,7 +137,6 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
24020         struct clk_hw_onecell_data *clk_data;
24021         struct clk_hw **hws;
24022         void __iomem *base;
24023 -       int i;
24025         clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END),
24026                            GFP_KERNEL);
24027 @@ -190,13 +176,7 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
24029         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
24031 -       for (i = 0; i < ARRAY_SIZE(pcc2_uart_clk_ids); i++) {
24032 -               int index = pcc2_uart_clk_ids[i];
24034 -               pcc2_uart_clks[i] = &hws[index]->clk;
24035 -       }
24037 -       imx_register_uart_clocks(pcc2_uart_clks);
24038 +       imx_register_uart_clocks(2);
24040  CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
24042 @@ -205,7 +185,6 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
24043         struct clk_hw_onecell_data *clk_data;
24044         struct clk_hw **hws;
24045         void __iomem *base;
24046 -       int i;
24048         clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END),
24049                            GFP_KERNEL);
24050 @@ -244,13 +223,7 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
24052         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
24054 -       for (i = 0; i < ARRAY_SIZE(pcc3_uart_clk_ids); i++) {
24055 -               int index = pcc3_uart_clk_ids[i];
24057 -               pcc3_uart_clks[i] = &hws[index]->clk;
24058 -       }
24060 -       imx_register_uart_clocks(pcc3_uart_clks);
24061 +       imx_register_uart_clocks(7);
24063  CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
24065 diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
24066 index 6a01eec36dd0..f1919fafb124 100644
24067 --- a/drivers/clk/imx/clk-imx8mm.c
24068 +++ b/drivers/clk/imx/clk-imx8mm.c
24069 @@ -296,20 +296,12 @@ static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "
24070  static struct clk_hw_onecell_data *clk_hw_data;
24071  static struct clk_hw **hws;
24073 -static const int uart_clk_ids[] = {
24074 -       IMX8MM_CLK_UART1_ROOT,
24075 -       IMX8MM_CLK_UART2_ROOT,
24076 -       IMX8MM_CLK_UART3_ROOT,
24077 -       IMX8MM_CLK_UART4_ROOT,
24079 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
24081  static int imx8mm_clocks_probe(struct platform_device *pdev)
24083         struct device *dev = &pdev->dev;
24084         struct device_node *np = dev->of_node;
24085         void __iomem *base;
24086 -       int ret, i;
24087 +       int ret;
24089         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
24090                                           IMX8MM_CLK_END), GFP_KERNEL);
24091 @@ -634,13 +626,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
24092                 goto unregister_hws;
24093         }
24095 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
24096 -               int index = uart_clk_ids[i];
24098 -               uart_hws[i] = &hws[index]->clk;
24099 -       }
24101 -       imx_register_uart_clocks(uart_hws);
24102 +       imx_register_uart_clocks(4);
24104         return 0;
24106 diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
24107 index 324c5fd0aa04..88f6630cd472 100644
24108 --- a/drivers/clk/imx/clk-imx8mn.c
24109 +++ b/drivers/clk/imx/clk-imx8mn.c
24110 @@ -289,20 +289,12 @@ static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "
24111  static struct clk_hw_onecell_data *clk_hw_data;
24112  static struct clk_hw **hws;
24114 -static const int uart_clk_ids[] = {
24115 -       IMX8MN_CLK_UART1_ROOT,
24116 -       IMX8MN_CLK_UART2_ROOT,
24117 -       IMX8MN_CLK_UART3_ROOT,
24118 -       IMX8MN_CLK_UART4_ROOT,
24120 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
24122  static int imx8mn_clocks_probe(struct platform_device *pdev)
24124         struct device *dev = &pdev->dev;
24125         struct device_node *np = dev->of_node;
24126         void __iomem *base;
24127 -       int ret, i;
24128 +       int ret;
24130         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
24131                                           IMX8MN_CLK_END), GFP_KERNEL);
24132 @@ -585,13 +577,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
24133                 goto unregister_hws;
24134         }
24136 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
24137 -               int index = uart_clk_ids[i];
24139 -               uart_hws[i] = &hws[index]->clk;
24140 -       }
24142 -       imx_register_uart_clocks(uart_hws);
24143 +       imx_register_uart_clocks(4);
24145         return 0;
24147 diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
24148 index 2f4e1d674e1c..3e6557e7d559 100644
24149 --- a/drivers/clk/imx/clk-imx8mp.c
24150 +++ b/drivers/clk/imx/clk-imx8mp.c
24151 @@ -414,20 +414,11 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
24152  static struct clk_hw **hws;
24153  static struct clk_hw_onecell_data *clk_hw_data;
24155 -static const int uart_clk_ids[] = {
24156 -       IMX8MP_CLK_UART1_ROOT,
24157 -       IMX8MP_CLK_UART2_ROOT,
24158 -       IMX8MP_CLK_UART3_ROOT,
24159 -       IMX8MP_CLK_UART4_ROOT,
24161 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
24163  static int imx8mp_clocks_probe(struct platform_device *pdev)
24165         struct device *dev = &pdev->dev;
24166         struct device_node *np;
24167         void __iomem *anatop_base, *ccm_base;
24168 -       int i;
24170         np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
24171         anatop_base = of_iomap(np, 0);
24172 @@ -737,13 +728,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
24174         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
24176 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
24177 -               int index = uart_clk_ids[i];
24179 -               uart_clks[i] = &hws[index]->clk;
24180 -       }
24182 -       imx_register_uart_clocks(uart_clks);
24183 +       imx_register_uart_clocks(4);
24185         return 0;
24187 diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
24188 index 4dd4ae9d022b..3e1a10d3f55c 100644
24189 --- a/drivers/clk/imx/clk-imx8mq.c
24190 +++ b/drivers/clk/imx/clk-imx8mq.c
24191 @@ -281,20 +281,12 @@ static const char * const pllout_monitor_sels[] = {"osc_25m", "osc_27m", "dummy"
24192  static struct clk_hw_onecell_data *clk_hw_data;
24193  static struct clk_hw **hws;
24195 -static const int uart_clk_ids[] = {
24196 -       IMX8MQ_CLK_UART1_ROOT,
24197 -       IMX8MQ_CLK_UART2_ROOT,
24198 -       IMX8MQ_CLK_UART3_ROOT,
24199 -       IMX8MQ_CLK_UART4_ROOT,
24201 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
24203  static int imx8mq_clocks_probe(struct platform_device *pdev)
24205         struct device *dev = &pdev->dev;
24206         struct device_node *np = dev->of_node;
24207         void __iomem *base;
24208 -       int err, i;
24209 +       int err;
24211         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
24212                                           IMX8MQ_CLK_END), GFP_KERNEL);
24213 @@ -629,13 +621,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
24214                 goto unregister_hws;
24215         }
24217 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
24218 -               int index = uart_clk_ids[i];
24220 -               uart_hws[i] = &hws[index]->clk;
24221 -       }
24223 -       imx_register_uart_clocks(uart_hws);
24224 +       imx_register_uart_clocks(4);
24226         return 0;
24228 diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
24229 index 47882c51cb85..7cc669934253 100644
24230 --- a/drivers/clk/imx/clk.c
24231 +++ b/drivers/clk/imx/clk.c
24232 @@ -147,8 +147,10 @@ void imx_cscmr1_fixup(u32 *val)
24235  #ifndef MODULE
24236 -static int imx_keep_uart_clocks;
24237 -static struct clk ** const *imx_uart_clocks;
24239 +static bool imx_keep_uart_clocks;
24240 +static int imx_enabled_uart_clocks;
24241 +static struct clk **imx_uart_clocks;
24243  static int __init imx_keep_uart_clocks_param(char *str)
24245 @@ -161,24 +163,45 @@ __setup_param("earlycon", imx_keep_uart_earlycon,
24246  __setup_param("earlyprintk", imx_keep_uart_earlyprintk,
24247               imx_keep_uart_clocks_param, 0);
24249 -void imx_register_uart_clocks(struct clk ** const clks[])
24250 +void imx_register_uart_clocks(unsigned int clk_count)
24252 +       imx_enabled_uart_clocks = 0;
24254 +/* i.MX boards use device trees now.  For build tests without CONFIG_OF, do nothing */
24255 +#ifdef CONFIG_OF
24256         if (imx_keep_uart_clocks) {
24257                 int i;
24259 -               imx_uart_clocks = clks;
24260 -               for (i = 0; imx_uart_clocks[i]; i++)
24261 -                       clk_prepare_enable(*imx_uart_clocks[i]);
24262 +               imx_uart_clocks = kcalloc(clk_count, sizeof(struct clk *), GFP_KERNEL);
24264 +               if (!of_stdout)
24265 +                       return;
24267 +               for (i = 0; i < clk_count; i++) {
24268 +                       imx_uart_clocks[imx_enabled_uart_clocks] = of_clk_get(of_stdout, i);
24270 +                       /* Stop if there are no more of_stdout references */
24271 +                       if (IS_ERR(imx_uart_clocks[imx_enabled_uart_clocks]))
24272 +                               return;
24274 +                       /* Only enable the clock if it's not NULL */
24275 +                       if (imx_uart_clocks[imx_enabled_uart_clocks])
24276 +                               clk_prepare_enable(imx_uart_clocks[imx_enabled_uart_clocks++]);
24277 +               }
24278         }
24279 +#endif
24282  static int __init imx_clk_disable_uart(void)
24284 -       if (imx_keep_uart_clocks && imx_uart_clocks) {
24285 +       if (imx_keep_uart_clocks && imx_enabled_uart_clocks) {
24286                 int i;
24288 -               for (i = 0; imx_uart_clocks[i]; i++)
24289 -                       clk_disable_unprepare(*imx_uart_clocks[i]);
24290 +               for (i = 0; i < imx_enabled_uart_clocks; i++) {
24291 +                       clk_disable_unprepare(imx_uart_clocks[i]);
24292 +                       clk_put(imx_uart_clocks[i]);
24293 +               }
24294 +               kfree(imx_uart_clocks);
24295         }
24297         return 0;
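
With this rework the per-SoC tables of clk pointers disappear: each SoC init now only states how many clocks a console UART may reference, and the core resolves the actual clocks at runtime from the "clocks" phandles of the node /chosen/stdout-path points at (of_stdout), via of_clk_get(). A sketch of the resulting contract (device-tree fragment hypothetical):

    /*
     * chosen { stdout-path = &uart1; };
     * uart1: serial@30860000 {
     *         clocks = <&clk IMX8MM_CLK_UART1_ROOT>, <&clk IMX8MM_CLK_AHB>;
     * };
     */
    imx_register_uart_clocks(4);    /* keep up to the first 4 stdout UART clocks on */
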
24298 diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
24299 index 4f04c8287286..7571603bee23 100644
24300 --- a/drivers/clk/imx/clk.h
24301 +++ b/drivers/clk/imx/clk.h
24302 @@ -11,9 +11,9 @@ extern spinlock_t imx_ccm_lock;
24303  void imx_check_clocks(struct clk *clks[], unsigned int count);
24304  void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
24305  #ifndef MODULE
24306 -void imx_register_uart_clocks(struct clk ** const clks[]);
24307 +void imx_register_uart_clocks(unsigned int clk_count);
24308  #else
24309 -static inline void imx_register_uart_clocks(struct clk ** const clks[])
24310 +static inline void imx_register_uart_clocks(unsigned int clk_count)
24313  #endif
24314 diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
24315 index f5746f9ea929..32ac6b6b7530 100644
24316 --- a/drivers/clk/mvebu/armada-37xx-periph.c
24317 +++ b/drivers/clk/mvebu/armada-37xx-periph.c
24318 @@ -84,6 +84,7 @@ struct clk_pm_cpu {
24319         void __iomem *reg_div;
24320         u8 shift_div;
24321         struct regmap *nb_pm_base;
24322 +       unsigned long l1_expiration;
24323  };
24325  #define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
24326 @@ -440,33 +441,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
24327         return val;
24330 -static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
24332 -       struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
24333 -       struct regmap *base = pm_cpu->nb_pm_base;
24334 -       int load_level;
24336 -       /*
24337 -        * We set the clock parent only if the DVFS is available but
24338 -        * not enabled.
24339 -        */
24340 -       if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
24341 -               return -EINVAL;
24343 -       /* Set the parent clock for all the load level */
24344 -       for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
24345 -               unsigned int reg, mask,  val,
24346 -                       offset = ARMADA_37XX_NB_TBG_SEL_OFF;
24348 -               armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
24350 -               val = index << offset;
24351 -               mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
24352 -               regmap_update_bits(base, reg, mask, val);
24353 -       }
24354 -       return 0;
24357  static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
24358                                             unsigned long parent_rate)
24360 @@ -514,8 +488,10 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
24363  /*
24364 - * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
24365 - * respectively) to L0 frequency (1.2 Ghz) requires a significant
24366 + * Workaround when base CPU frequency is 1000 or 1200 MHz
24367 + *
24368 + * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz
24369 + * respectively) to L0 frequency (1/1.2 GHz) requires a significant
24370   * amount of time to let VDD stabilize to the appropriate
24371   * voltage. This amount of time is large enough that it cannot be
24372   * covered by the hardware countdown register. Due to this, the CPU
24373 @@ -525,26 +501,56 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
24374   * To work around this problem, we prevent switching directly from the
24375   * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
24376   * frequency in-between. The sequence therefore becomes:
24377 - * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
24378 + * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz)
24379  * 2. Sleep 20ms for stabilizing VDD voltage
24380 - * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
24381 + * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz).
24382   */
24383 -static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
24384 +static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu,
24385 +                                  unsigned int new_level, unsigned long rate,
24386 +                                  struct regmap *base)
24388         unsigned int cur_level;
24390 -       if (rate != 1200 * 1000 * 1000)
24391 -               return;
24393         regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
24394         cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
24395 -       if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
24397 +       if (cur_level == new_level)
24398 +               return;
24400 +       /*
24401 +        * System wants to go to L1 on its own. If we are going from L2/L3,
24402 +        * remember when 20ms will expire. If from L0, set the value so that
24403 +        * next switch to L0 won't have to wait.
24404 +        */
24405 +       if (new_level == ARMADA_37XX_DVFS_LOAD_1) {
24406 +               if (cur_level == ARMADA_37XX_DVFS_LOAD_0)
24407 +                       pm_cpu->l1_expiration = jiffies;
24408 +               else
24409 +                       pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20);
24410                 return;
24411 +       }
24413 +       /*
24414 +        * If we are setting to L2/L3, just invalidate L1 expiration time,
24415 +        * sleeping is not needed.
24416 +        */
24417 +       if (rate < 1000*1000*1000)
24418 +               goto invalidate_l1_exp;
24420 +       /*
24421 +        * We are going to L0 with rate >= 1GHz. Check whether we have been at
24422 +        * L1 for long enough time. If not, go to L1 for 20ms.
24423 +        */
24424 +       if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration)
24425 +               goto invalidate_l1_exp;
24427         regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
24428                            ARMADA_37XX_NB_CPU_LOAD_MASK,
24429                            ARMADA_37XX_DVFS_LOAD_1);
24430         msleep(20);
24432 +invalidate_l1_exp:
24433 +       pm_cpu->l1_expiration = 0;
24436  static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
24437 @@ -578,7 +584,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
24438                         reg = ARMADA_37XX_NB_CPU_LOAD;
24439                         mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
24441 -                       clk_pm_cpu_set_rate_wa(rate, base);
24442 +                       /* Apply workaround when base CPU frequency is 1000 or 1200 MHz */
24443 +                       if (parent_rate >= 1000*1000*1000)
24444 +                               clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base);
24446                         regmap_update_bits(base, reg, mask, load_level);
24448 @@ -592,7 +600,6 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
24450  static const struct clk_ops clk_pm_cpu_ops = {
24451         .get_parent = clk_pm_cpu_get_parent,
24452 -       .set_parent = clk_pm_cpu_set_parent,
24453         .round_rate = clk_pm_cpu_round_rate,
24454         .set_rate = clk_pm_cpu_set_rate,
24455         .recalc_rate = clk_pm_cpu_recalc_rate,
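
A worked timeline for the l1_expiration bookkeeping above (instants hypothetical, base frequency 1.2 GHz):

    /*
     * t =  0 ms : cpufreq switches L3 -> L1; l1_expiration is set to
     *             t = 20 ms and no sleep is taken.
     * t =  5 ms : cpufreq asks for L0; the 20 ms at L1 have not elapsed,
     *             so the code stays at L1 and msleep(20) before the
     *             caller selects L0; l1_expiration is then cleared.
     * t = 60 ms : a later L0 -> L1 switch records l1_expiration = jiffies
     *             (already expired), so the next L1 -> L0 switch
     *             proceeds without any extra sleep.
     */
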
24456 diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
24457 index 45cfc57bff92..af6ac17c7dae 100644
24458 --- a/drivers/clk/qcom/a53-pll.c
24459 +++ b/drivers/clk/qcom/a53-pll.c
24460 @@ -93,6 +93,7 @@ static const struct of_device_id qcom_a53pll_match_table[] = {
24461         { .compatible = "qcom,msm8916-a53pll" },
24462         { }
24463  };
24464 +MODULE_DEVICE_TABLE(of, qcom_a53pll_match_table);
24466  static struct platform_driver qcom_a53pll_driver = {
24467         .probe = qcom_a53pll_probe,
24468 diff --git a/drivers/clk/qcom/a7-pll.c b/drivers/clk/qcom/a7-pll.c
24469 index e171d3caf2cf..c4a53e5db229 100644
24470 --- a/drivers/clk/qcom/a7-pll.c
24471 +++ b/drivers/clk/qcom/a7-pll.c
24472 @@ -86,6 +86,7 @@ static const struct of_device_id qcom_a7pll_match_table[] = {
24473         { .compatible = "qcom,sdx55-a7pll" },
24474         { }
24475  };
24476 +MODULE_DEVICE_TABLE(of, qcom_a7pll_match_table);
24478  static struct platform_driver qcom_a7pll_driver = {
24479         .probe = qcom_a7pll_probe,
24480 diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
24481 index 30be87fb222a..bef7899ad0d6 100644
24482 --- a/drivers/clk/qcom/apss-ipq-pll.c
24483 +++ b/drivers/clk/qcom/apss-ipq-pll.c
24484 @@ -81,6 +81,7 @@ static const struct of_device_id apss_ipq_pll_match_table[] = {
24485         { .compatible = "qcom,ipq6018-a53pll" },
24486         { }
24487  };
24488 +MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
24490  static struct platform_driver apss_ipq_pll_driver = {
24491         .probe = apss_ipq_pll_probe,
24492 diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
24493 index 87ee1bad9a9a..4a5d2a914bd6 100644
24494 --- a/drivers/clk/samsung/clk-exynos7.c
24495 +++ b/drivers/clk/samsung/clk-exynos7.c
24496 @@ -537,8 +537,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
24497         GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
24498                 ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
24499                 CLK_IS_CRITICAL, 0),
24500 +       /*
24501 +        * This clock is required for CMU_FSYS1 register access; keep it
24502 +        * enabled permanently until proper runtime PM support is added.
24503 +        */
24504         GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
24505 -               ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
24506 +               ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT |
24507 +               CLK_IS_CRITICAL, 0),
24509         GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
24510                 "dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,
24511 diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
24512 index cd5df9103614..d62778884208 100644
24513 --- a/drivers/clk/socfpga/clk-gate-a10.c
24514 +++ b/drivers/clk/socfpga/clk-gate-a10.c
24515 @@ -146,6 +146,7 @@ static void __init __socfpga_gate_init(struct device_node *node,
24516                 if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) {
24517                         pr_err("%s: failed to find altr,sys-mgr regmap!\n",
24518                                         __func__);
24519 +                       kfree(socfpga_clk);
24520                         return;
24521                 }
24522         }
24523 diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
24524 index 462c84321b2d..1998e9d4cfc0 100644
24525 --- a/drivers/clk/uniphier/clk-uniphier-mux.c
24526 +++ b/drivers/clk/uniphier/clk-uniphier-mux.c
24527 @@ -31,10 +31,10 @@ static int uniphier_clk_mux_set_parent(struct clk_hw *hw, u8 index)
24528  static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
24530         struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
24531 -       int num_parents = clk_hw_get_num_parents(hw);
24532 +       unsigned int num_parents = clk_hw_get_num_parents(hw);
24533         int ret;
24534         unsigned int val;
24535 -       u8 i;
24536 +       unsigned int i;
24538         ret = regmap_read(mux->regmap, mux->reg, &val);
24539         if (ret)
24540 diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
24541 index 92f449ed38e5..abe6afbf3407 100644
24542 --- a/drivers/clk/zynqmp/pll.c
24543 +++ b/drivers/clk/zynqmp/pll.c
24544 @@ -14,10 +14,12 @@
24545   * struct zynqmp_pll - PLL clock
24546   * @hw:                Handle between common and hardware-specific interfaces
24547   * @clk_id:    PLL clock ID
24548 + * @set_pll_mode:      Whether an IOCTL_SET_PLL_FRAC_MODE request has been sent to ATF
24549   */
24550  struct zynqmp_pll {
24551         struct clk_hw hw;
24552         u32 clk_id;
24553 +       bool set_pll_mode;
24554  };
24556  #define to_zynqmp_pll(_hw)     container_of(_hw, struct zynqmp_pll, hw)
24557 @@ -81,6 +83,8 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
24558         if (ret)
24559                 pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
24560                              __func__, clk_name, ret);
24561 +       else
24562 +               clk->set_pll_mode = true;
24565  /**
24566 @@ -100,9 +104,7 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
24567         /* Enable the fractional mode if needed */
24568         rate_div = (rate * FRAC_DIV) / *prate;
24569         f = rate_div % FRAC_DIV;
24570 -       zynqmp_pll_set_mode(hw, !!f);
24572 -       if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
24573 +       if (f) {
24574                 if (rate > PS_PLL_VCO_MAX) {
24575                         fbdiv = rate / PS_PLL_VCO_MAX;
24576                         rate = rate / (fbdiv + 1);
24577 @@ -173,10 +175,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
24578         long rate_div, frac, m, f;
24579         int ret;
24581 -       if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
24582 -               rate_div = (rate * FRAC_DIV) / parent_rate;
24583 +       rate_div = (rate * FRAC_DIV) / parent_rate;
24584 +       f = rate_div % FRAC_DIV;
24585 +       zynqmp_pll_set_mode(hw, !!f);
24587 +       if (f) {
24588                 m = rate_div / FRAC_DIV;
24589 -               f = rate_div % FRAC_DIV;
24590                 m = clamp_t(u32, m, (PLL_FBDIV_MIN), (PLL_FBDIV_MAX));
24591                 rate = parent_rate * m;
24592                 frac = (parent_rate * f) / FRAC_DIV;
24593 @@ -240,9 +244,15 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
24594         u32 clk_id = clk->clk_id;
24595         int ret;
24597 -       if (zynqmp_pll_is_enabled(hw))
24598 +       /*
24599 +        * Don't skip enabling the clock if an IOCTL_SET_PLL_FRAC_MODE
24600 +        * request has been sent to ATF.
24601 +        */
24602 +       if (zynqmp_pll_is_enabled(hw) && (!clk->set_pll_mode))
24603                 return 0;
24605 +       clk->set_pll_mode = false;
24607         ret = zynqmp_pm_clock_enable(clk_id);
24608         if (ret)
24609                 pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
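
The set_pll_mode flag ties the two hunks together: the fractional-mode IOCTL is now issued from set_rate() only, and since ATF latches the new mode on the next enable, zynqmp_pll_enable() must not short-circuit even when the PLL already reads back as enabled. The resulting flow, abridged:

    clk_set_rate(pll, rate);        /* frac remainder? -> IOCTL_SET_PLL_FRAC_MODE,
                                       set_pll_mode = true                         */
    clk_prepare_enable(pll);        /* is_enabled && !set_pll_mode would skip here;
                                       the flag forces a real re-enable            */
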
24610 diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
24611 index 42e7e43b8fcd..b1e2b697b21b 100644
24612 --- a/drivers/clocksource/dw_apb_timer_of.c
24613 +++ b/drivers/clocksource/dw_apb_timer_of.c
24614 @@ -52,18 +52,34 @@ static int __init timer_get_base_and_rate(struct device_node *np,
24615                 return 0;
24617         timer_clk = of_clk_get_by_name(np, "timer");
24618 -       if (IS_ERR(timer_clk))
24619 -               return PTR_ERR(timer_clk);
24620 +       if (IS_ERR(timer_clk)) {
24621 +               ret = PTR_ERR(timer_clk);
24622 +               goto out_pclk_disable;
24623 +       }
24625         ret = clk_prepare_enable(timer_clk);
24626         if (ret)
24627 -               return ret;
24628 +               goto out_timer_clk_put;
24630         *rate = clk_get_rate(timer_clk);
24631 -       if (!(*rate))
24632 -               return -EINVAL;
24633 +       if (!(*rate)) {
24634 +               ret = -EINVAL;
24635 +               goto out_timer_clk_disable;
24636 +       }
24638         return 0;
24640 +out_timer_clk_disable:
24641 +       clk_disable_unprepare(timer_clk);
24642 +out_timer_clk_put:
24643 +       clk_put(timer_clk);
24644 +out_pclk_disable:
24645 +       if (!IS_ERR(pclk)) {
24646 +               clk_disable_unprepare(pclk);
24647 +               clk_put(pclk);
24648 +       }
24649 +       iounmap(*base);
24650 +       return ret;
24653  static int __init add_clockevent(struct device_node *event_timer)
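
The error paths added above follow the usual acquire-in-order, release-in-reverse goto ladder, so a failure at any step releases exactly what the earlier steps took (pclk, the timer clock, the mapping). Generic shape of the idiom (names hypothetical):

    ret = acquire_a();
    if (ret)
            return ret;
    ret = acquire_b();
    if (ret)
            goto err_release_a;
    ret = acquire_c();
    if (ret)
            goto err_release_b;
    return 0;

    err_release_b:
            release_b();
    err_release_a:
            release_a();
            return ret;
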
24654 diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
24655 index 029efc2731b4..6af2470136bd 100644
24656 --- a/drivers/clocksource/ingenic-ost.c
24657 +++ b/drivers/clocksource/ingenic-ost.c
24658 @@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
24659                 return PTR_ERR(ost->regs);
24661         map = device_node_to_regmap(dev->parent->of_node);
24662 -       if (!map) {
24663 +       if (IS_ERR(map)) {
24664                 dev_err(dev, "regmap not found");
24665 -               return -EINVAL;
24666 +               return PTR_ERR(map);
24667         }
24669         ost->clk = devm_clk_get(dev, "ost");
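
device_node_to_regmap() signals failure with an ERR_PTR()-encoded pointer rather than NULL, so the old NULL test could never trigger; the fix checks IS_ERR() and propagates the encoded errno. The canonical shape of the pattern:

    map = device_node_to_regmap(dev->parent->of_node);
    if (IS_ERR(map))
            return PTR_ERR(map);    /* negative errno from the lookup */
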
24670 diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
24671 index 33b3e8aa2cc5..b6f97960d8ee 100644
24672 --- a/drivers/clocksource/timer-ti-dm-systimer.c
24673 +++ b/drivers/clocksource/timer-ti-dm-systimer.c
24674 @@ -2,6 +2,7 @@
24675  #include <linux/clk.h>
24676  #include <linux/clocksource.h>
24677  #include <linux/clockchips.h>
24678 +#include <linux/cpuhotplug.h>
24679  #include <linux/interrupt.h>
24680  #include <linux/io.h>
24681  #include <linux/iopoll.h>
24682 @@ -449,13 +450,13 @@ static int dmtimer_set_next_event(unsigned long cycles,
24683         struct dmtimer_systimer *t = &clkevt->t;
24684         void __iomem *pend = t->base + t->pend;
24686 -       writel_relaxed(0xffffffff - cycles, t->base + t->counter);
24687         while (readl_relaxed(pend) & WP_TCRR)
24688                 cpu_relax();
24689 +       writel_relaxed(0xffffffff - cycles, t->base + t->counter);
24691 -       writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
24692         while (readl_relaxed(pend) & WP_TCLR)
24693                 cpu_relax();
24694 +       writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
24696         return 0;
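
Each of these reorderings applies the OMAP dmtimer posted-write rule: with posting enabled, software must poll the matching write-pending bit before writing a register (the bit means an earlier posted write to it is still in flight), not afterwards. A helper capturing the shape used above (hypothetical, not part of the patch):

    static void dmtimer_write_posted(struct dmtimer_systimer *t,
                                     u32 wp_bit, u32 val, u32 reg_offset)
    {
            /* wait for the previous posted write to this register to retire */
            while (readl_relaxed(t->base + t->pend) & wp_bit)
                    cpu_relax();
            writel_relaxed(val, t->base + reg_offset);
    }
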
24698 @@ -490,18 +491,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
24699         dmtimer_clockevent_shutdown(evt);
24701         /* Looks like we need to first set the load value separately */
24702 -       writel_relaxed(clkevt->period, t->base + t->load);
24703         while (readl_relaxed(pend) & WP_TLDR)
24704                 cpu_relax();
24705 +       writel_relaxed(clkevt->period, t->base + t->load);
24707 -       writel_relaxed(clkevt->period, t->base + t->counter);
24708         while (readl_relaxed(pend) & WP_TCRR)
24709                 cpu_relax();
24710 +       writel_relaxed(clkevt->period, t->base + t->counter);
24712 -       writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
24713 -                      t->base + t->ctrl);
24714         while (readl_relaxed(pend) & WP_TCLR)
24715                 cpu_relax();
24716 +       writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
24717 +                      t->base + t->ctrl);
24719         return 0;
24721 @@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
24722         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
24725 -static int __init dmtimer_clockevent_init(struct device_node *np)
24726 +static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
24727 +                                            struct device_node *np,
24728 +                                            unsigned int features,
24729 +                                            const struct cpumask *cpumask,
24730 +                                            const char *name,
24731 +                                            int rating)
24733 -       struct dmtimer_clockevent *clkevt;
24734         struct clock_event_device *dev;
24735         struct dmtimer_systimer *t;
24736         int error;
24738 -       clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
24739 -       if (!clkevt)
24740 -               return -ENOMEM;
24742         t = &clkevt->t;
24743         dev = &clkevt->dev;
24745 @@ -548,24 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
24746          * We mostly use cpuidle_coupled with ARM local timers for runtime,
24747          * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
24748          */
24749 -       dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
24750 -       dev->rating = 300;
24751 +       dev->features = features;
24752 +       dev->rating = rating;
24753         dev->set_next_event = dmtimer_set_next_event;
24754         dev->set_state_shutdown = dmtimer_clockevent_shutdown;
24755         dev->set_state_periodic = dmtimer_set_periodic;
24756         dev->set_state_oneshot = dmtimer_clockevent_shutdown;
24757 +       dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
24758         dev->tick_resume = dmtimer_clockevent_shutdown;
24759 -       dev->cpumask = cpu_possible_mask;
24760 +       dev->cpumask = cpumask;
24762         dev->irq = irq_of_parse_and_map(np, 0);
24763 -       if (!dev->irq) {
24764 -               error = -ENXIO;
24765 -               goto err_out_free;
24766 -       }
24767 +       if (!dev->irq)
24768 +               return -ENXIO;
24770         error = dmtimer_systimer_setup(np, &clkevt->t);
24771         if (error)
24772 -               goto err_out_free;
24773 +               return error;
24775         clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
24777 @@ -577,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
24778         writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
24780         error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
24781 -                           IRQF_TIMER, "clockevent", clkevt);
24782 +                           IRQF_TIMER, name, clkevt);
24783         if (error)
24784                 goto err_out_unmap;
24786         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
24787         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
24789 -       pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
24790 -               of_find_property(np, "ti,timer-alwon", NULL) ?
24791 +       pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
24792 +               name, of_find_property(np, "ti,timer-alwon", NULL) ?
24793                 "always-on " : "", t->rate, np->parent);
24795 -       clockevents_config_and_register(dev, t->rate,
24796 -                                       3, /* Timer internal resynch latency */
24797 +       return 0;
24799 +err_out_unmap:
24800 +       iounmap(t->base);
24802 +       return error;
24805 +static int __init dmtimer_clockevent_init(struct device_node *np)
24807 +       struct dmtimer_clockevent *clkevt;
24808 +       int error;
24810 +       clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
24811 +       if (!clkevt)
24812 +               return -ENOMEM;
24814 +       error = dmtimer_clkevt_init_common(clkevt, np,
24815 +                                          CLOCK_EVT_FEAT_PERIODIC |
24816 +                                          CLOCK_EVT_FEAT_ONESHOT,
24817 +                                          cpu_possible_mask, "clockevent",
24818 +                                          300);
24819 +       if (error)
24820 +               goto err_out_free;
24822 +       clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
24823 +                                       3, /* Timer internal resync latency */
24824                                         0xffffffff);
24826         if (of_machine_is_compatible("ti,am33xx") ||
24827             of_machine_is_compatible("ti,am43")) {
24828 -               dev->suspend = omap_clockevent_idle;
24829 -               dev->resume = omap_clockevent_unidle;
24830 +               clkevt->dev.suspend = omap_clockevent_idle;
24831 +               clkevt->dev.resume = omap_clockevent_unidle;
24832         }
24834         return 0;
24836 -err_out_unmap:
24837 -       iounmap(t->base);
24839  err_out_free:
24840         kfree(clkevt);
24842         return error;
24845 +/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
24846 +static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
24848 +static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
24850 +       struct dmtimer_clockevent *clkevt;
24851 +       int error;
24853 +       if (!cpu_possible(cpu))
24854 +               return -EINVAL;
24856 +       if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
24857 +           !of_property_read_bool(np->parent, "ti,no-idle"))
24858 +               pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
24860 +       clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
24862 +       error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
24863 +                                          cpumask_of(cpu), "percpu-dmtimer",
24864 +                                          500);
24865 +       if (error)
24866 +               return error;
24868 +       return 0;
24871 +/* See TRM for timer internal resynch latency */
24872 +static int omap_dmtimer_starting_cpu(unsigned int cpu)
24874 +       struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
24875 +       struct clock_event_device *dev = &clkevt->dev;
24876 +       struct dmtimer_systimer *t = &clkevt->t;
24878 +       clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
24879 +       irq_force_affinity(dev->irq, cpumask_of(cpu));
24881 +       return 0;
24884 +static int __init dmtimer_percpu_timer_startup(void)
24886 +       struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
24887 +       struct dmtimer_systimer *t = &clkevt->t;
24889 +       if (t->sysc) {
24890 +               cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
24891 +                                 "clockevents/omap/gptimer:starting",
24892 +                                 omap_dmtimer_starting_cpu, NULL);
24893 +       }
24895 +       return 0;
24897 +subsys_initcall(dmtimer_percpu_timer_startup);
24899 +static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
24901 +       struct device_node *arm_timer;
24903 +       arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
24904 +       if (of_device_is_available(arm_timer)) {
24905 +               pr_warn_once("ARM architected timer wrap issue i940 detected\n");
24906 +               return 0;
24907 +       }
24909 +       if (pa == 0x48034000)           /* dra7 dmtimer3 */
24910 +               return dmtimer_percpu_timer_init(np, 0);
24911 +       else if (pa == 0x48036000)      /* dra7 dmtimer4 */
24912 +               return dmtimer_percpu_timer_init(np, 1);
24914 +       return 0;
24917  /* Clocksource */
24918  static struct dmtimer_clocksource *
24919  to_dmtimer_clocksource(struct clocksource *cs)
24920 @@ -742,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
24921         if (clockevent == pa)
24922                 return dmtimer_clockevent_init(np);
24924 +       if (of_machine_is_compatible("ti,dra7"))
24925 +               return dmtimer_percpu_quirk_init(np, pa);
24927         return 0;
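
The timer-ti-dm reordering above follows the dmtimer posted-write rule: with posted writes enabled, a register may only be written once its write-pending (WP_*) bit has cleared, so each busy-wait must precede its own write rather than follow the previous one. A standalone sketch of that ordering; the register and bit names are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define WP_TCRR (1u << 1)

static volatile uint32_t pend_reg;     /* stand-in for t->base + t->pend */
static volatile uint32_t counter_reg;  /* stand-in for t->base + t->counter */

static void write_counter(uint32_t val)
{
        while (pend_reg & WP_TCRR)     /* wait until the prior write retired */
                ;
        counter_reg = val;             /* only now may the write be issued */
}

int main(void)
{
        write_counter(0xffffffffu - 1000);
        printf("counter = %#x\n", (unsigned)counter_reg);
        return 0;
}
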
24930 diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
24931 index d1bbc16fba4b..7e7450453714 100644
24932 --- a/drivers/cpufreq/acpi-cpufreq.c
24933 +++ b/drivers/cpufreq/acpi-cpufreq.c
24934 @@ -646,7 +646,11 @@ static u64 get_max_boost_ratio(unsigned int cpu)
24935                 return 0;
24936         }
24938 -       highest_perf = perf_caps.highest_perf;
24939 +       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
24940 +               highest_perf = amd_get_highest_perf();
24941 +       else
24942 +               highest_perf = perf_caps.highest_perf;
24944         nominal_perf = perf_caps.nominal_perf;
24946         if (!highest_perf || !nominal_perf) {
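
The acpi-cpufreq hunk above picks the boost numerator from a vendor helper on AMD because the CPPC-reported highest_perf can misstate it there; the surrounding function then turns highest/nominal into a fixed-point boost ratio. A hedged sketch of that arithmetic, assuming the SCHED_CAPACITY_SHIFT = 10 scaling acpi-cpufreq uses and entirely made-up perf values:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10

static uint64_t max_boost_ratio(uint64_t highest_perf, uint64_t nominal_perf)
{
        if (!highest_perf || !nominal_perf || highest_perf <= nominal_perf)
                return 0;  /* no usable boost range */
        return (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;
}

int main(void)
{
        uint64_t ratio = max_boost_ratio(228, 154);  /* illustrative values */

        printf("boost ratio = %llu/1024\n", (unsigned long long)ratio);
        return 0;
}
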
24947 diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
24948 index b4af4094309b..e4782f562e7a 100644
24949 --- a/drivers/cpufreq/armada-37xx-cpufreq.c
24950 +++ b/drivers/cpufreq/armada-37xx-cpufreq.c
24951 @@ -25,6 +25,10 @@
24953  #include "cpufreq-dt.h"
24955 +/* Clk register set */
24956 +#define ARMADA_37XX_CLK_TBG_SEL                0
24957 +#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF        22
24959  /* Power management in North Bridge register set */
24960  #define ARMADA_37XX_NB_L0L1    0x18
24961  #define ARMADA_37XX_NB_L2L3    0x1C
24962 @@ -69,6 +73,8 @@
24963  #define LOAD_LEVEL_NR  4
24965  #define MIN_VOLT_MV 1000
24966 +#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108
24967 +#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155
24969  /*  AVS value for the corresponding voltage (in mV) */
24970  static int avs_map[] = {
24971 @@ -120,10 +126,15 @@ static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
24972   * will be configured then the DVFS will be enabled.
24973   */
24974  static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
24975 -                                                struct clk *clk, u8 *divider)
24976 +                                                struct regmap *clk_base, u8 *divider)
24978 +       u32 cpu_tbg_sel;
24979         int load_lvl;
24980 -       struct clk *parent;
24982 +       /* Determine which TBG clock the CPU is connected to */
24983 +       regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel);
24984 +       cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF;
24985 +       cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK;
24987         for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
24988                 unsigned int reg, mask, val, offset = 0;
24989 @@ -142,6 +153,11 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
24990                 mask = (ARMADA_37XX_NB_CLK_SEL_MASK
24991                         << ARMADA_37XX_NB_CLK_SEL_OFF);
24993 +               /* Set the TBG index; the same TBG is used for all levels */
24994 +               val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF;
24995 +               mask = (ARMADA_37XX_NB_TBG_SEL_MASK
24996 +                       << ARMADA_37XX_NB_TBG_SEL_OFF);
24998                 /*
24999                  * Set cpu divider based on the pre-computed array in
25000                  * order to have balanced step.
25001 @@ -160,14 +176,6 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
25003                 regmap_update_bits(base, reg, mask, val);
25004         }
25006 -       /*
25007 -        * Set cpu clock source, for all the level we keep the same
25008 -        * clock source that the one already configured. For this one
25009 -        * we need to use the clock framework
25010 -        */
25011 -       parent = clk_get_parent(clk);
25012 -       clk_set_parent(clk, parent);
25015  /*
25016 @@ -202,6 +210,8 @@ static u32 armada_37xx_avs_val_match(int target_vm)
25017   * - L2 & L3 voltage should be about 150mv smaller than L0 voltage.
25018   * This function calculates L1 & L2 & L3 AVS values dynamically based
25019   * on L0 voltage and fill all AVS values to the AVS value table.
25020 + * When the base CPU frequency is 1000 or 1200 MHz, there is an additional
25021 + * minimum AVS value for load L1.
25022   */
25023  static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
25024                                                 struct armada_37xx_dvfs *dvfs)
25025 @@ -233,6 +243,19 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
25026                 for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
25027                         dvfs->avs[load_level] = avs_min;
25029 +               /*
25030 +                * Set the AVS values for loads L0 and L1 to their typical initial
25031 +                * values from the Armada 3700 Hardware Specifications when the base
25032 +                * CPU frequency is 1000/1200 MHz.
25033 +                */
25034 +               if (dvfs->cpu_freq_max >= 1000*1000*1000) {
25035 +                       if (dvfs->cpu_freq_max >= 1200*1000*1000)
25036 +                               avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
25037 +                       else
25038 +                               avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
25039 +                       dvfs->avs[0] = dvfs->avs[1] = avs_min;
25040 +               }
25042                 return;
25043         }
25045 @@ -252,6 +275,26 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
25046         target_vm = avs_map[l0_vdd_min] - 150;
25047         target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
25048         dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
25050 +       /*
25051 +        * Fix the AVS value for load L1 when the base CPU frequency is 1000/1200
25052 +        * MHz, otherwise the CPU gets stuck when switching from load L1 to L0.
25053 +        * Also ensure that the AVS value for load L1 is not higher than for L0.
25054 +        */
25055 +       if (dvfs->cpu_freq_max >= 1000*1000*1000) {
25056 +               u32 avs_min_l1;
25058 +               if (dvfs->cpu_freq_max >= 1200*1000*1000)
25059 +                       avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
25060 +               else
25061 +                       avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
25063 +               if (avs_min_l1 > dvfs->avs[0])
25064 +                       avs_min_l1 = dvfs->avs[0];
25066 +               if (dvfs->avs[1] < avs_min_l1)
25067 +                       dvfs->avs[1] = avs_min_l1;
25068 +       }
25071  static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
25072 @@ -358,11 +401,16 @@ static int __init armada37xx_cpufreq_driver_init(void)
25073         struct platform_device *pdev;
25074         unsigned long freq;
25075         unsigned int cur_frequency, base_frequency;
25076 -       struct regmap *nb_pm_base, *avs_base;
25077 +       struct regmap *nb_clk_base, *nb_pm_base, *avs_base;
25078         struct device *cpu_dev;
25079         int load_lvl, ret;
25080         struct clk *clk, *parent;
25082 +       nb_clk_base =
25083 +               syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb");
25084 +       if (IS_ERR(nb_clk_base))
25085 +               return -ENODEV;
25087         nb_pm_base =
25088                 syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
25090 @@ -421,7 +469,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
25091                 return -EINVAL;
25092         }
25094 -       dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
25095 +       dvfs = armada_37xx_cpu_freq_info_get(base_frequency);
25096         if (!dvfs) {
25097                 clk_put(clk);
25098                 return -EINVAL;
25099 @@ -439,7 +487,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
25100         armada37xx_cpufreq_avs_configure(avs_base, dvfs);
25101         armada37xx_cpufreq_avs_setup(avs_base, dvfs);
25103 -       armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
25104 +       armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider);
25105         clk_put(clk);
25107         for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
25108 @@ -473,7 +521,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
25109  remove_opp:
25110         /* clean-up the already added opp before leaving */
25111         while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
25112 -               freq = cur_frequency / dvfs->divider[load_lvl];
25113 +               freq = base_frequency / dvfs->divider[load_lvl];
25114                 dev_pm_opp_remove(cpu_dev, freq);
25115         }
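
The armada-37xx change above replaces a no-op clk_get_parent()/clk_set_parent() pair with reading the CPU's actual TBG parent index from the North Bridge clock register and programming it into every DVFS load level. A standalone sketch of the read/shift/mask step; the field offsets for the destination register are illustrative, not the driver's exact values:

#include <stdint.h>
#include <stdio.h>

#define CLK_TBG_SEL_CPU_OFF 22
#define NB_TBG_SEL_MASK     0x3
#define NB_TBG_SEL_OFF      20   /* illustrative destination offset */

int main(void)
{
        uint32_t clk_reg = 2u << CLK_TBG_SEL_CPU_OFF; /* pretend regmap_read() result */
        uint32_t tbg_sel = (clk_reg >> CLK_TBG_SEL_CPU_OFF) & NB_TBG_SEL_MASK;

        /* The same TBG index is then written into each load-level field */
        uint32_t val  = tbg_sel << NB_TBG_SEL_OFF;
        uint32_t mask = NB_TBG_SEL_MASK << NB_TBG_SEL_OFF;

        printf("update_bits(mask=%#x, val=%#x)\n", (unsigned)mask, (unsigned)val);
        return 0;
}
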
25117 diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
25118 index aa39ff31ec9f..b3eae5ec17b2 100644
25119 --- a/drivers/cpufreq/cpufreq_conservative.c
25120 +++ b/drivers/cpufreq/cpufreq_conservative.c
25121 @@ -28,8 +28,8 @@ struct cs_dbs_tuners {
25122  };
25124  /* Conservative governor macros */
25125 -#define DEF_FREQUENCY_UP_THRESHOLD             (80)
25126 -#define DEF_FREQUENCY_DOWN_THRESHOLD           (20)
25127 +#define DEF_FREQUENCY_UP_THRESHOLD             (63)
25128 +#define DEF_FREQUENCY_DOWN_THRESHOLD           (26)
25129  #define DEF_FREQUENCY_STEP                     (5)
25130  #define DEF_SAMPLING_DOWN_FACTOR               (1)
25131  #define MAX_SAMPLING_DOWN_FACTOR               (10)
25132 @@ -47,9 +47,9 @@ static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
25135  /*
25136 - * Every sampling_rate, we check, if current idle time is less than 20%
25137 + * Every sampling_rate we check if the current idle time is less than 37%
25138   * (default), then we try to increase frequency. Every sampling_rate *
25139 - * sampling_down_factor, we check, if current idle time is more than 80%
25140 + * sampling_down_factor we check if the current idle time is more than 74%
25141   * (default), then we try to decrease frequency
25142   *
25143   * Frequency updates happen at minimum steps of 5% (default) of maximum
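
For context on the retuned conservative defaults above (up threshold 80 to 63, down threshold 20 to 26): this governor never jumps, it nudges the frequency by a fixed step whenever load crosses a threshold. A simplified standalone sketch of that decision, not the governor's exact code:

#include <stdio.h>

#define UP_THRESHOLD   63
#define DOWN_THRESHOLD 26
#define FREQ_STEP_PCT   5

static unsigned int next_freq(unsigned int cur, unsigned int max,
                              unsigned int load)
{
        unsigned int step = max * FREQ_STEP_PCT / 100;

        if (load > UP_THRESHOLD && cur + step <= max)
                return cur + step;   /* busy: one step up */
        if (load < DOWN_THRESHOLD && cur >= step)
                return cur - step;   /* idle: one step down */
        return cur;
}

int main(void)
{
        printf("%u\n", next_freq(1000000, 2000000, 70)); /* steps up */
        printf("%u\n", next_freq(1000000, 2000000, 10)); /* steps down */
        return 0;
}
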
25144 diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
25145 index ac361a8b1d3b..611d80122336 100644
25146 --- a/drivers/cpufreq/cpufreq_ondemand.c
25147 +++ b/drivers/cpufreq/cpufreq_ondemand.c
25148 @@ -18,10 +18,10 @@
25149  #include "cpufreq_ondemand.h"
25151  /* On-demand governor macros */
25152 -#define DEF_FREQUENCY_UP_THRESHOLD             (80)
25153 -#define DEF_SAMPLING_DOWN_FACTOR               (1)
25154 +#define DEF_FREQUENCY_UP_THRESHOLD             (63)
25155 +#define DEF_SAMPLING_DOWN_FACTOR               (100)
25156  #define MAX_SAMPLING_DOWN_FACTOR               (100000)
25157 -#define MICRO_FREQUENCY_UP_THRESHOLD           (95)
25158 +#define MICRO_FREQUENCY_UP_THRESHOLD           (70)
25159  #define MICRO_FREQUENCY_MIN_SAMPLE_RATE                (10000)
25160  #define MIN_FREQUENCY_UP_THRESHOLD             (1)
25161  #define MAX_FREQUENCY_UP_THRESHOLD             (100)
25162 @@ -127,7 +127,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
25165  /*
25166 - * Every sampling_rate, we check, if current idle time is less than 20%
25167 + * Every sampling_rate we check if the current idle time is less than 37%
25168   * (default), then we try to increase frequency. Else, we adjust the frequency
25169   * proportional to load.
25170   */
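
The ondemand retune above behaves differently from conservative: above the up threshold (now 63) the governor jumps straight to the maximum frequency, below it the target scales with load, and raising DEF_SAMPLING_DOWN_FACTOR to 100 re-evaluates a CPU already at full speed 100x less often, favoring throughput. A simplified standalone sketch of the mapping:

#include <stdio.h>

#define UP_THRESHOLD 63

static unsigned int od_target(unsigned int min, unsigned int max,
                              unsigned int load)
{
        if (load > UP_THRESHOLD)
                return max;                    /* jump straight to max */
        /* proportional: min + load% of the min..max span */
        return min + load * (max - min) / 100;
}

int main(void)
{
        printf("%u\n", od_target(800000, 2400000, 80)); /* -> 2400000 */
        printf("%u\n", od_target(800000, 2400000, 40)); /* -> 1440000 */
        return 0;
}
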
25171 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
25172 index 5175ae3cac44..34196c107de6 100644
25173 --- a/drivers/cpufreq/intel_pstate.c
25174 +++ b/drivers/cpufreq/intel_pstate.c
25175 @@ -3054,6 +3054,14 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
25176         {}
25177  };
25179 +static bool intel_pstate_hwp_is_enabled(void)
25181 +       u64 value;
25183 +       rdmsrl(MSR_PM_ENABLE, value);
25184 +       return !!(value & 0x1);
25187  static int __init intel_pstate_init(void)
25189         const struct x86_cpu_id *id;
25190 @@ -3072,8 +3080,12 @@ static int __init intel_pstate_init(void)
25191                  * Avoid enabling HWP for processors without EPP support,
25192                  * because that means incomplete HWP implementation which is a
25193                  * corner case and supporting it is generally problematic.
25194 +                *
25195 +                * If HWP is enabled already, though, there is no choice but to
25196 +                * deal with it.
25197                  */
25198 -               if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
25199 +               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
25200 +                   intel_pstate_hwp_is_enabled()) {
25201                         hwp_active++;
25202                         hwp_mode_bdw = id->driver_data;
25203                         intel_pstate.attr = hwp_cpufreq_attrs;
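
The intel_pstate check above reads MSR_PM_ENABLE (0x770), whose bit 0, once set by firmware or a bootloader, keeps HWP enabled until reset; that is why the driver must cope when it finds the bit already set. For experimentation, the same bit can be read from userspace through the msr driver (assuming root and that /dev/cpu/0/msr exists):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_PM_ENABLE 0x770

int main(void)
{
        uint64_t val = 0;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        /* the msr device reads the MSR whose index equals the file offset */
        if (fd < 0 || pread(fd, &val, sizeof(val), MSR_PM_ENABLE) != sizeof(val)) {
                perror("msr");
                return 1;
        }
        printf("HWP %s\n", (val & 0x1) ? "enabled" : "disabled");
        close(fd);
        return 0;
}
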
25204 diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
25205 index 0844fadc4be8..334f83e56120 100644
25206 --- a/drivers/cpuidle/Kconfig.arm
25207 +++ b/drivers/cpuidle/Kconfig.arm
25208 @@ -107,7 +107,7 @@ config ARM_TEGRA_CPUIDLE
25210  config ARM_QCOM_SPM_CPUIDLE
25211         bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
25212 -       depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64
25213 +       depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
25214         select ARM_CPU_SUSPEND
25215         select CPU_IDLE_MULTIPLE_DRIVERS
25216         select DT_IDLE_STATES
25217 diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
25218 index 191966dc8d02..29c5e83500d3 100644
25219 --- a/drivers/cpuidle/cpuidle-tegra.c
25220 +++ b/drivers/cpuidle/cpuidle-tegra.c
25221 @@ -135,13 +135,13 @@ static int tegra_cpuidle_c7_enter(void)
25223         int err;
25225 -       if (tegra_cpuidle_using_firmware()) {
25226 -               err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
25227 -               if (err)
25228 -                       return err;
25229 +       err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
25230 +       if (err && err != -ENOSYS)
25231 +               return err;
25233 -               return call_firmware_op(do_idle, 0);
25234 -       }
25235 +       err = call_firmware_op(do_idle, 0);
25236 +       if (err != -ENOSYS)
25237 +               return err;
25239         return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend);
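
The cpuidle-tegra rework above replaces an up-front "is firmware present" test with simply calling the firmware op and treating -ENOSYS as "not implemented, fall back", so genuine firmware errors still propagate. A standalone sketch with firmware_idle()/native_idle() as hypothetical stand-ins:

#include <errno.h>
#include <stdio.h>

static int firmware_idle(void) { return -ENOSYS; } /* no firmware op */
static int native_idle(void)   { puts("native idle"); return 0; }

static int enter_idle(void)
{
        int err = firmware_idle();

        if (err != -ENOSYS)
                return err;      /* handled (or truly failed) by firmware */
        return native_idle();    /* firmware op not implemented */
}

int main(void)
{
        return enter_idle() ? 1 : 0;
}
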
25241 diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
25242 index 856fb2045656..b8e75210a0e3 100644
25243 --- a/drivers/crypto/allwinner/Kconfig
25244 +++ b/drivers/crypto/allwinner/Kconfig
25245 @@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
25246  config CRYPTO_DEV_SUN8I_CE_HASH
25247         bool "Enable support for hash on sun8i-ce"
25248         depends on CRYPTO_DEV_SUN8I_CE
25249 -       select MD5
25250 -       select SHA1
25251 -       select SHA256
25252 -       select SHA512
25253 +       select CRYPTO_MD5
25254 +       select CRYPTO_SHA1
25255 +       select CRYPTO_SHA256
25256 +       select CRYPTO_SHA512
25257         help
25258           Say y to enable support for hash algorithms.
25260 @@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
25261  config CRYPTO_DEV_SUN8I_SS_HASH
25262         bool "Enable support for hash on sun8i-ss"
25263         depends on CRYPTO_DEV_SUN8I_SS
25264 -       select MD5
25265 -       select SHA1
25266 -       select SHA256
25267 +       select CRYPTO_MD5
25268 +       select CRYPTO_SHA1
25269 +       select CRYPTO_SHA256
25270         help
25271           Say y to enable support for hash algorithms.
25272 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
25273 index c2e6f5ed1d79..dec79fa3ebaf 100644
25274 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
25275 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
25276 @@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
25277                                     sizeof(struct sun4i_cipher_req_ctx) +
25278                                     crypto_skcipher_reqsize(op->fallback_tfm));
25280 -       err = pm_runtime_get_sync(op->ss->dev);
25281 +       err = pm_runtime_resume_and_get(op->ss->dev);
25282         if (err < 0)
25283                 goto error_pm;
25285 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
25286 index 709905ec4680..02a2d34845f2 100644
25287 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
25288 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
25289 @@ -459,7 +459,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
25290          * this info could be useful
25291          */
25293 -       err = pm_runtime_get_sync(ss->dev);
25294 +       err = pm_runtime_resume_and_get(ss->dev);
25295         if (err < 0)
25296                 goto error_pm;
25298 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
25299 index c1b4585e9bbc..d28292762b32 100644
25300 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
25301 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
25302 @@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
25303         algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
25304         op->ss = algt->ss;
25306 -       err = pm_runtime_get_sync(op->ss->dev);
25307 +       err = pm_runtime_resume_and_get(op->ss->dev);
25308         if (err < 0)
25309                 return err;
25311 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
25312 index 443160a114bb..491fcb7b81b4 100644
25313 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
25314 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
25315 @@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
25316         algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
25317         ss = algt->ss;
25319 -       err = pm_runtime_get_sync(ss->dev);
25320 +       err = pm_runtime_resume_and_get(ss->dev);
25321         if (err < 0)
25322                 return err;
25324 diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
25325 index 158422ff5695..00194d1d9ae6 100644
25326 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
25327 +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
25328 @@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
25329         if (err)
25330                 goto error_alg;
25332 -       err = pm_runtime_get_sync(ce->dev);
25333 +       err = pm_runtime_resume_and_get(ce->dev);
25334         if (err < 0)
25335                 goto error_alg;
25337 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
25338 index ed2a69f82e1c..7c355bc2fb06 100644
25339 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
25340 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
25341 @@ -351,7 +351,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
25342         op->enginectx.op.prepare_request = NULL;
25343         op->enginectx.op.unprepare_request = NULL;
25345 -       err = pm_runtime_get_sync(op->ss->dev);
25346 +       err = pm_runtime_resume_and_get(op->ss->dev);
25347         if (err < 0) {
25348                 dev_err(op->ss->dev, "pm error %d\n", err);
25349                 goto error_pm;
25350 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
25351 index e0ddc684798d..80e89066dbd1 100644
25352 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
25353 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
25354 @@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
25355         if (err)
25356                 goto error_alg;
25358 -       err = pm_runtime_get_sync(ss->dev);
25359 +       err = pm_runtime_resume_and_get(ss->dev);
25360         if (err < 0)
25361                 goto error_alg;
25363 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
25364 index 11cbcbc83a7b..64446b86c927 100644
25365 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
25366 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
25367 @@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
25368         bf = (__le32 *)pad;
25370         result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
25371 -       if (!result)
25372 +       if (!result) {
25373 +               kfree(pad);
25374                 return -ENOMEM;
25375 +       }
25377         for (i = 0; i < MAX_SG; i++) {
25378                 rctx->t_dst[i].addr = 0;
25379 @@ -435,11 +437,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
25380         dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
25381         dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
25383 -       kfree(pad);
25385         memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
25386 -       kfree(result);
25387  theend:
25388 +       kfree(pad);
25389 +       kfree(result);
25390         crypto_finalize_hash_request(engine, breq, err);
25391         return 0;
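
The sun8i-ss-hash fix above routes every exit through a single label so both temporary buffers are always freed; previously a failed result allocation leaked pad, and the success path freed the buffers in scattered places. A standalone sketch of the single-exit shape:

#include <stdlib.h>

static int run(int fail_second)
{
        int err = 0;
        char *pad = NULL, *result = NULL;

        pad = calloc(1, 64);
        if (!pad)
                return -12;                 /* -ENOMEM, nothing to free yet */

        result = fail_second ? NULL : calloc(1, 64);
        if (!result) {
                err = -12;
                goto theend;                /* pad still freed below */
        }

        /* ... use pad and result ... */

theend:
        free(pad);
        free(result);                       /* free(NULL) is a no-op */
        return err;
}

int main(void)
{
        return run(1) ? 0 : 1;
}
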
25393 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
25394 index 08a1473b2145..3191527928e4 100644
25395 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
25396 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
25397 @@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
25398         dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
25399         if (dma_mapping_error(ss->dev, dma_iv)) {
25400                 dev_err(ss->dev, "Cannot DMA MAP IV\n");
25401 -               return -EFAULT;
25402 +               err = -EFAULT;
25403 +               goto err_free;
25404         }
25406         dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
25407 @@ -167,6 +168,7 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
25408                 memcpy(ctx->seed, d + dlen, ctx->slen);
25409         }
25410         memzero_explicit(d, todo);
25411 +err_free:
25412         kfree(d);
25414         return err;
25415 diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
25416 index cb9b4c4e371e..3e0d1d6922ba 100644
25417 --- a/drivers/crypto/ccp/sev-dev.c
25418 +++ b/drivers/crypto/ccp/sev-dev.c
25419 @@ -150,6 +150,9 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
25421         sev = psp->sev_data;
25423 +       if (data && WARN_ON_ONCE(!virt_addr_valid(data)))
25424 +               return -EINVAL;
25426         /* Get the physical address of the command buffer */
25427         phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
25428         phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
25429 @@ -987,7 +990,7 @@ int sev_dev_init(struct psp_device *psp)
25430         if (!sev->vdata) {
25431                 ret = -ENODEV;
25432                 dev_err(dev, "sev: missing driver data\n");
25433 -               goto e_err;
25434 +               goto e_sev;
25435         }
25437         psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
25438 @@ -1002,6 +1005,8 @@ int sev_dev_init(struct psp_device *psp)
25440  e_irq:
25441         psp_clear_sev_irq_handler(psp);
25442 +e_sev:
25443 +       devm_kfree(dev, sev);
25444  e_err:
25445         psp->sev_data = NULL;
25447 diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
25448 index 5e697a90ea7f..bcb81fef4211 100644
25449 --- a/drivers/crypto/ccp/tee-dev.c
25450 +++ b/drivers/crypto/ccp/tee-dev.c
25451 @@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
25452         if (!start_addr)
25453                 return -ENOMEM;
25455 +       memset(start_addr, 0x0, ring_size);
25456         rb_mgr->ring_start = start_addr;
25457         rb_mgr->ring_size = ring_size;
25458         rb_mgr->ring_pa = __psp_pa(start_addr);
25459 @@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
25460                           void *buf, size_t len, struct tee_ring_cmd **resp)
25462         struct tee_ring_cmd *cmd;
25463 -       u32 rptr, wptr;
25464         int nloop = 1000, ret = 0;
25465 +       u32 rptr;
25467         *resp = NULL;
25469         mutex_lock(&tee->rb_mgr.mutex);
25471 -       wptr = tee->rb_mgr.wptr;
25473 -       /* Check if ring buffer is full */
25474 +       /* Loop until empty entry found in ring buffer */
25475         do {
25476 +               /* Get pointer to ring buffer command entry */
25477 +               cmd = (struct tee_ring_cmd *)
25478 +                       (tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
25480                 rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
25482 -               if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
25483 +               /* Check if the ring buffer is full or the command entry is
25484 +                * still waiting for a response from the TEE
25485 +                */
25486 +               if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
25487 +                     cmd->flag == CMD_WAITING_FOR_RESPONSE))
25488                         break;
25490 -               dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
25491 -                        rptr, wptr);
25492 +               dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
25493 +                       rptr, tee->rb_mgr.wptr);
25495 -               /* Wait if ring buffer is full */
25496 +               /* Wait if ring buffer is full or TEE is processing data */
25497                 mutex_unlock(&tee->rb_mgr.mutex);
25498                 schedule_timeout_interruptible(msecs_to_jiffies(10));
25499                 mutex_lock(&tee->rb_mgr.mutex);
25501         } while (--nloop);
25503 -       if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
25504 -               dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
25505 -                       rptr, wptr);
25506 +       if (!nloop &&
25507 +           (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
25508 +            cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
25509 +               dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
25510 +                       rptr, tee->rb_mgr.wptr, cmd->flag);
25511                 ret = -EBUSY;
25512                 goto unlock;
25513         }
25515 -       /* Pointer to empty data entry in ring buffer */
25516 -       cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
25517 +       /* Do not submit the command if the PSP was disabled while
25518 +        * processing a command in another thread
25519 +        */
25520 +       if (psp_dead) {
25521 +               ret = -EBUSY;
25522 +               goto unlock;
25523 +       }
25525         /* Write command data into ring buffer */
25526         cmd->cmd_id = cmd_id;
25527 @@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
25528         memset(&cmd->buf[0], 0, sizeof(cmd->buf));
25529         memcpy(&cmd->buf[0], buf, len);
25531 +       /* Indicate driver is waiting for response */
25532 +       cmd->flag = CMD_WAITING_FOR_RESPONSE;
25534         /* Update local copy of write pointer */
25535         tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
25536         if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
25537 @@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
25538                 return ret;
25540         ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
25541 -       if (ret)
25542 +       if (ret) {
25543 +               resp->flag = CMD_RESPONSE_TIMEDOUT;
25544                 return ret;
25545 +       }
25547         memcpy(buf, &resp->buf[0], len);
25548         *status = resp->status;
25550 +       resp->flag = CMD_RESPONSE_COPIED;
25552         return 0;
25554  EXPORT_SYMBOL(psp_tee_process_cmd);
25555 diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
25556 index f09960112115..49d26158b71e 100644
25557 --- a/drivers/crypto/ccp/tee-dev.h
25558 +++ b/drivers/crypto/ccp/tee-dev.h
25559 @@ -1,6 +1,6 @@
25560  /* SPDX-License-Identifier: MIT */
25561  /*
25562 - * Copyright 2019 Advanced Micro Devices, Inc.
25563 + * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
25564   *
25565   * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
25566   * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
25567 @@ -18,7 +18,7 @@
25568  #include <linux/mutex.h>
25570  #define TEE_DEFAULT_TIMEOUT            10
25571 -#define MAX_BUFFER_SIZE                        992
25572 +#define MAX_BUFFER_SIZE                        988
25574  /**
25575   * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
25576 @@ -81,6 +81,20 @@ enum tee_cmd_state {
25577         TEE_CMD_STATE_COMPLETED,
25578  };
25580 +/**
25581 + * enum cmd_resp_state - TEE command's response status maintained by driver
25582 + * @CMD_RESPONSE_INVALID:      initial state when no command is written to ring
25583 + * @CMD_WAITING_FOR_RESPONSE:  driver waiting for response from TEE
25584 + * @CMD_RESPONSE_TIMEDOUT:     failed to get response from TEE
25585 + * @CMD_RESPONSE_COPIED:       driver has copied response from TEE
25586 + */
25587 +enum cmd_resp_state {
25588 +       CMD_RESPONSE_INVALID,
25589 +       CMD_WAITING_FOR_RESPONSE,
25590 +       CMD_RESPONSE_TIMEDOUT,
25591 +       CMD_RESPONSE_COPIED,
25594  /**
25595   * struct tee_ring_cmd - Structure of the command buffer in TEE ring
25596   * @cmd_id:      refers to &enum tee_cmd_id. Command id for the ring buffer
25597 @@ -91,6 +105,7 @@ enum tee_cmd_state {
25598   * @pdata:       private data (currently unused)
25599   * @res1:        reserved region
25600   * @buf:         TEE command specific buffer
25601 + * @flag:       refers to &enum cmd_resp_state
25602   */
25603  struct tee_ring_cmd {
25604         u32 cmd_id;
25605 @@ -100,6 +115,7 @@ struct tee_ring_cmd {
25606         u64 pdata;
25607         u32 res1[2];
25608         u8 buf[MAX_BUFFER_SIZE];
25609 +       u32 flag;
25611         /* Total size: 1024 bytes */
25612  } __packed;
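
In the tee-dev.h change above, the command buffer shrinks from 992 to 988 bytes exactly so the new u32 flag fits while each ring entry stays 1024 bytes. A standalone sketch of that bookkeeping; the fields ahead of buf[] are partly guessed from context and total 32 bytes here purely for illustration:

#include <assert.h>
#include <stdint.h>

#define MAX_BUFFER_SIZE 988   /* was 992; 4 bytes ceded to the flag */

struct tee_ring_cmd {
        uint32_t cmd_id;
        uint32_t cmd_state;   /* assumed field */
        uint32_t status;      /* assumed field */
        uint32_t res0[1];     /* assumed field */
        uint64_t pdata;
        uint32_t res1[2];
        uint8_t  buf[MAX_BUFFER_SIZE];
        uint32_t flag;        /* the new response-state field */
} __attribute__((packed));

static_assert(sizeof(struct tee_ring_cmd) == 1024,
              "ring entry must stay 1024 bytes");

int main(void) { return 0; }
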
25613 diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
25614 index f5a336634daa..405ff957b837 100644
25615 --- a/drivers/crypto/chelsio/chcr_algo.c
25616 +++ b/drivers/crypto/chelsio/chcr_algo.c
25617 @@ -769,13 +769,14 @@ static inline void create_wreq(struct chcr_context *ctx,
25618         struct uld_ctx *u_ctx = ULD_CTX(ctx);
25619         unsigned int tx_channel_id, rx_channel_id;
25620         unsigned int txqidx = 0, rxqidx = 0;
25621 -       unsigned int qid, fid;
25622 +       unsigned int qid, fid, portno;
25624         get_qidxs(req, &txqidx, &rxqidx);
25625         qid = u_ctx->lldi.rxq_ids[rxqidx];
25626         fid = u_ctx->lldi.rxq_ids[0];
25627 +       portno = rxqidx / ctx->rxq_perchan;
25628         tx_channel_id = txqidx / ctx->txq_perchan;
25629 -       rx_channel_id = rxqidx / ctx->rxq_perchan;
25630 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
25633         chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
25634 @@ -806,6 +807,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
25636         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
25637         struct chcr_context *ctx = c_ctx(tfm);
25638 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25639         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
25640         struct sk_buff *skb = NULL;
25641         struct chcr_wr *chcr_req;
25642 @@ -822,6 +824,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
25643         struct adapter *adap = padap(ctx->dev);
25644         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25646 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25647         nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
25648                               reqctx->dst_ofst);
25649         dst_size = get_space_for_phys_dsgl(nents);
25650 @@ -1580,6 +1583,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
25651         int error = 0;
25652         unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
25654 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25655         transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
25656         req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
25657                                 param->sg_len) <= SGE_MAX_WR_LEN;
25658 @@ -2438,6 +2442,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
25660         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
25661         struct chcr_context *ctx = a_ctx(tfm);
25662 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25663         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
25664         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
25665         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
25666 @@ -2457,6 +2462,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
25667         struct adapter *adap = padap(ctx->dev);
25668         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25670 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25671         if (req->cryptlen == 0)
25672                 return NULL;
25674 @@ -2710,9 +2716,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
25675         struct dsgl_walk dsgl_walk;
25676         unsigned int authsize = crypto_aead_authsize(tfm);
25677         struct chcr_context *ctx = a_ctx(tfm);
25678 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25679         u32 temp;
25680         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25682 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25683         dsgl_walk_init(&dsgl_walk, phys_cpl);
25684         dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
25685         temp = req->assoclen + req->cryptlen +
25686 @@ -2752,9 +2760,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
25687         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
25688         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
25689         struct chcr_context *ctx = c_ctx(tfm);
25690 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25691         struct dsgl_walk dsgl_walk;
25692         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25694 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25695         dsgl_walk_init(&dsgl_walk, phys_cpl);
25696         dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
25697                          reqctx->dst_ofst);
25698 @@ -2958,6 +2968,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
25700         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
25701         struct chcr_context *ctx = a_ctx(tfm);
25702 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25703         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
25704         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
25705         unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
25706 @@ -2967,6 +2978,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
25707         unsigned int tag_offset = 0, auth_offset = 0;
25708         unsigned int assoclen;
25710 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25712         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
25713                 assoclen = req->assoclen - 8;
25714         else
25715 @@ -3127,6 +3140,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
25717         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
25718         struct chcr_context *ctx = a_ctx(tfm);
25719 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
25720         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
25721         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
25722         struct sk_buff *skb = NULL;
25723 @@ -3143,6 +3157,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
25724         struct adapter *adap = padap(ctx->dev);
25725         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
25727 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
25728         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
25729                 assoclen = req->assoclen - 8;
25731 diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
25732 index 2eaa516b3231..8adcbb327126 100644
25733 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
25734 +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
25735 @@ -546,7 +546,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
25736         crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
25737         ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
25738         if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
25739 -               dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
25740 +               pr_err("get error skcipher iv size!\n");
25741                 return -EINVAL;
25742         }
25744 diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
25745 index b6b25d994af3..2ef312866338 100644
25746 --- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
25747 +++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c
25748 @@ -1649,8 +1649,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
25750         /* Initialize crypto engine */
25751         aes_dev->engine = crypto_engine_alloc_init(dev, true);
25752 -       if (!aes_dev->engine)
25753 +       if (!aes_dev->engine) {
25754 +               rc = -ENOMEM;
25755                 goto list_del;
25756 +       }
25758         rc = crypto_engine_start(aes_dev->engine);
25759         if (rc) {
25760 diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
25761 index c4b97b4160e9..322c51a6936f 100644
25762 --- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
25763 +++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
25764 @@ -1220,8 +1220,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
25766         /* Initialize crypto engine */
25767         hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
25768 -       if (!hcu_dev->engine)
25769 +       if (!hcu_dev->engine) {
25770 +               rc = -ENOMEM;
25771                 goto list_del;
25772 +       }
25774         rc = crypto_engine_start(hcu_dev->engine);
25775         if (rc) {
25776 diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
25777 index a45bdcf3026d..0dd4c6b157de 100644
25778 --- a/drivers/crypto/omap-aes.c
25779 +++ b/drivers/crypto/omap-aes.c
25780 @@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
25781                 dd->err = 0;
25782         }
25784 -       err = pm_runtime_get_sync(dd->dev);
25785 +       err = pm_runtime_resume_and_get(dd->dev);
25786         if (err < 0) {
25787 -               pm_runtime_put_noidle(dd->dev);
25788                 dev_err(dd->dev, "failed to get sync: %d\n", err);
25789                 return err;
25790         }
25791 @@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev)
25792         pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
25794         pm_runtime_enable(dev);
25795 -       err = pm_runtime_get_sync(dev);
25796 +       err = pm_runtime_resume_and_get(dev);
25797         if (err < 0) {
25798                 dev_err(dev, "%s: failed to get_sync(%d)\n",
25799                         __func__, err);
25800 @@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
25802  static int omap_aes_resume(struct device *dev)
25804 -       pm_runtime_get_sync(dev);
25805 +       pm_runtime_resume_and_get(dev);
25806         return 0;
25808  #endif
25809 diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
25810 index 1d1532e8fb6d..067ca5e17d38 100644
25811 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
25812 +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
25813 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
25814         if (ret)
25815                 goto out_err_free_reg;
25817 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25819         ret = adf_dev_init(accel_dev);
25820         if (ret)
25821                 goto out_err_dev_shutdown;
25823 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25825         ret = adf_dev_start(accel_dev);
25826         if (ret)
25827                 goto out_err_dev_stop;
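
The qat probe reordering above (repeated for the c62xvf and dh895xccvf hunks below) publishes the PF_RUNNING status bit only after adf_dev_init() succeeds, so no observer can ever see a "running" device that failed to initialize. A standalone sketch with stand-in names:

#include <stdio.h>

static unsigned long status;
#define PF_RUNNING (1ul << 0)

static int dev_init(void) { return -1; } /* simulate init failure */

static int probe(void)
{
        int ret = dev_init();

        if (ret)
                return ret;          /* bit never set on failure */
        status |= PF_RUNNING;        /* set only once init succeeded */
        return 0;
}

int main(void)
{
        probe();
        printf("running=%lu\n", status & PF_RUNNING);  /* 0 */
        return 0;
}
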
25828 diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
25829 index 04742a6d91ca..51ea88c0b17d 100644
25830 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
25831 +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
25832 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
25833         if (ret)
25834                 goto out_err_free_reg;
25836 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25838         ret = adf_dev_init(accel_dev);
25839         if (ret)
25840                 goto out_err_dev_shutdown;
25842 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25844         ret = adf_dev_start(accel_dev);
25845         if (ret)
25846                 goto out_err_dev_stop;
25847 diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
25848 index c45853463530..e3ad5587be49 100644
25849 --- a/drivers/crypto/qat/qat_common/adf_isr.c
25850 +++ b/drivers/crypto/qat/qat_common/adf_isr.c
25851 @@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
25853         ret = adf_isr_alloc_msix_entry_table(accel_dev);
25854         if (ret)
25855 -               return ret;
25856 -       if (adf_enable_msix(accel_dev))
25857                 goto err_out;
25859 -       if (adf_setup_bh(accel_dev))
25860 -               goto err_out;
25861 +       ret = adf_enable_msix(accel_dev);
25862 +       if (ret)
25863 +               goto err_free_msix_table;
25865 -       if (adf_request_irqs(accel_dev))
25866 -               goto err_out;
25867 +       ret = adf_setup_bh(accel_dev);
25868 +       if (ret)
25869 +               goto err_disable_msix;
25871 +       ret = adf_request_irqs(accel_dev);
25872 +       if (ret)
25873 +               goto err_cleanup_bh;
25875         return 0;
25877 +err_cleanup_bh:
25878 +       adf_cleanup_bh(accel_dev);
25880 +err_disable_msix:
25881 +       adf_disable_msix(&accel_dev->accel_pci_dev);
25883 +err_free_msix_table:
25884 +       adf_isr_free_msix_entry_table(accel_dev);
25886  err_out:
25887 -       adf_isr_resource_free(accel_dev);
25888 -       return -EFAULT;
25889 +       return ret;
25891  EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
25892 diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
25893 index 888c1e047295..8ba28409fb74 100644
25894 --- a/drivers/crypto/qat/qat_common/adf_transport.c
25895 +++ b/drivers/crypto/qat/qat_common/adf_transport.c
25896 @@ -172,6 +172,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
25897                 dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
25898                 dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
25899                                   ring->base_addr, ring->dma_addr);
25900 +               ring->base_addr = NULL;
25901                 return -EFAULT;
25902         }
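
The ring->base_addr = NULL line added above guards the shared teardown path: the buffer was just freed on this error path, and clearing the pointer keeps the later ring cleanup from freeing it a second time. A standalone sketch of the idiom using malloc/free:

#include <stdlib.h>

struct ring { void *base_addr; };

static int init_ring(struct ring *r)
{
        r->base_addr = malloc(64);
        if (!r->base_addr)
                return -12;      /* -ENOMEM */

        /* simulate the alignment check failing */
        free(r->base_addr);
        r->base_addr = NULL;     /* without this, cleanup double-frees */
        return -14;              /* -EFAULT */
}

static void cleanup(struct ring *r)
{
        free(r->base_addr);      /* safe: free(NULL) is a no-op */
}

int main(void)
{
        struct ring r = { 0 };

        if (init_ring(&r))
                cleanup(&r);
        return 0;
}
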
25904 diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
25905 index 38d316a42ba6..888388acb6bd 100644
25906 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
25907 +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
25908 @@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
25909                 goto err_out;
25911         if (adf_setup_pf2vf_bh(accel_dev))
25912 -               goto err_out;
25913 +               goto err_disable_msi;
25915         if (adf_setup_bh(accel_dev))
25916 -               goto err_out;
25917 +               goto err_cleanup_pf2vf_bh;
25919         if (adf_request_msi_irq(accel_dev))
25920 -               goto err_out;
25921 +               goto err_cleanup_bh;
25923         return 0;
25925 +err_cleanup_bh:
25926 +       adf_cleanup_bh(accel_dev);
25928 +err_cleanup_pf2vf_bh:
25929 +       adf_cleanup_pf2vf_bh(accel_dev);
25931 +err_disable_msi:
25932 +       adf_disable_msi(accel_dev);
25934  err_out:
25935 -       adf_vf_isr_resource_free(accel_dev);
25936         return -EFAULT;
25938  EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
25939 diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
25940 index ff78c73c47e3..ea1c6899290d 100644
25941 --- a/drivers/crypto/qat/qat_common/qat_algs.c
25942 +++ b/drivers/crypto/qat/qat_common/qat_algs.c
25943 @@ -719,7 +719,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
25944         struct qat_alg_buf_list *bufl;
25945         struct qat_alg_buf_list *buflout = NULL;
25946         dma_addr_t blp;
25947 -       dma_addr_t bloutp = 0;
25948 +       dma_addr_t bloutp;
25949         struct scatterlist *sg;
25950         size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
25952 @@ -731,6 +731,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
25953         if (unlikely(!bufl))
25954                 return -ENOMEM;
25956 +       for_each_sg(sgl, sg, n, i)
25957 +               bufl->bufers[i].addr = DMA_MAPPING_ERROR;
25959         blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
25960         if (unlikely(dma_mapping_error(dev, blp)))
25961                 goto err_in;
25962 @@ -764,10 +767,14 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
25963                                        dev_to_node(&GET_DEV(inst->accel_dev)));
25964                 if (unlikely(!buflout))
25965                         goto err_in;
25967 +               bufers = buflout->bufers;
25968 +               for_each_sg(sglout, sg, n, i)
25969 +                       bufers[i].addr = DMA_MAPPING_ERROR;
25971                 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
25972                 if (unlikely(dma_mapping_error(dev, bloutp)))
25973                         goto err_out;
25974 -               bufers = buflout->bufers;
25975                 for_each_sg(sglout, sg, n, i) {
25976                         int y = sg_nctr;
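
The qat_algs change above seeds every buffer descriptor with DMA_MAPPING_ERROR before any mapping begins, so if mapping fails partway through the list the unwind code can tell entries that were really mapped (unmap them) from entries that were never touched (skip them). A simplified standalone model of that bookkeeping:

#include <stdio.h>

#define DMA_MAPPING_ERROR (~0ul)
#define N 4

static unsigned long map_one(int i)
{
        return (i == 2) ? DMA_MAPPING_ERROR : 0x1000ul * (i + 1);
}

int main(void)
{
        unsigned long addr[N];
        int i;

        for (i = 0; i < N; i++)
                addr[i] = DMA_MAPPING_ERROR;   /* sentinel first */

        for (i = 0; i < N; i++) {
                addr[i] = map_one(i);
                if (addr[i] == DMA_MAPPING_ERROR)
                        break;                 /* mapping fails mid-list */
        }

        /* unwind: only entries that really got mapped are unmapped */
        for (i = 0; i < N; i++)
                if (addr[i] != DMA_MAPPING_ERROR)
                        printf("unmap %#lx\n", addr[i]);
        return 0;
}
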
25978 diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
25979 index c972554a755e..29999da716cc 100644
25980 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
25981 +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
25982 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
25983         if (ret)
25984                 goto out_err_free_reg;
25986 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25988         ret = adf_dev_init(accel_dev);
25989         if (ret)
25990                 goto out_err_dev_shutdown;
25992 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25994         ret = adf_dev_start(accel_dev);
25995         if (ret)
25996                 goto out_err_dev_stop;
25997 diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
25998 index f300b0a5958a..b0f0502a5bb0 100644
25999 --- a/drivers/crypto/sa2ul.c
26000 +++ b/drivers/crypto/sa2ul.c
26001 @@ -1146,8 +1146,10 @@ static int sa_run(struct sa_req *req)
26002                 mapped_sg->sgt.sgl = src;
26003                 mapped_sg->sgt.orig_nents = src_nents;
26004                 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
26005 -               if (ret)
26006 +               if (ret) {
26007 +                       kfree(rxd);
26008                         return ret;
26009 +               }
26011                 mapped_sg->dir = dir_src;
26012                 mapped_sg->mapped = true;
26013 @@ -1155,8 +1157,10 @@ static int sa_run(struct sa_req *req)
26014                 mapped_sg->sgt.sgl = req->src;
26015                 mapped_sg->sgt.orig_nents = sg_nents;
26016                 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
26017 -               if (ret)
26018 +               if (ret) {
26019 +                       kfree(rxd);
26020                         return ret;
26021 +               }
26023                 mapped_sg->dir = dir_src;
26024                 mapped_sg->mapped = true;
26025 @@ -2350,7 +2354,7 @@ static int sa_ul_probe(struct platform_device *pdev)
26026         dev_set_drvdata(sa_k3_dev, dev_data);
26028         pm_runtime_enable(dev);
26029 -       ret = pm_runtime_get_sync(dev);
26030 +       ret = pm_runtime_resume_and_get(dev);
26031         if (ret < 0) {
26032                 dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
26033                         ret);
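
Two independent fixes land in sa2ul.c: the dma_map_sgtable() error paths stop leaking rxd, and pm_runtime_get_sync() is replaced by pm_runtime_resume_and_get(); the same runtime-PM conversion repeats in the stm32-cryp and stm32-hash hunks below. The point of the newer helper is that it drops the usage counter itself when resume fails, so callers no longer need a pm_runtime_put_noidle() on the error path. A minimal sketch of the idiom (function name is illustrative):

static int sketch_touch_hw(struct device *dev)
{
        int ret;

        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;     /* usage counter already balanced */

        /* ... access the hardware ... */

        pm_runtime_put(dev);
        return 0;
}
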
26034 diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
26035 index 2a4793176c71..7389a0536ff0 100644
26036 --- a/drivers/crypto/stm32/stm32-cryp.c
26037 +++ b/drivers/crypto/stm32/stm32-cryp.c
26038 @@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
26039         int ret;
26040         u32 cfg, hw_mode;
26042 -       pm_runtime_get_sync(cryp->dev);
26043 +       pm_runtime_resume_and_get(cryp->dev);
26045         /* Disable interrupt */
26046         stm32_cryp_write(cryp, CRYP_IMSCR, 0);
26047 @@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
26048         if (!cryp)
26049                 return -ENODEV;
26051 -       ret = pm_runtime_get_sync(cryp->dev);
26052 +       ret = pm_runtime_resume_and_get(cryp->dev);
26053         if (ret < 0)
26054                 return ret;
26056 diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
26057 index 7ac0573ef663..389de9e3302d 100644
26058 --- a/drivers/crypto/stm32/stm32-hash.c
26059 +++ b/drivers/crypto/stm32/stm32-hash.c
26060 @@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
26061  static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
26062                               struct stm32_hash_request_ctx *rctx)
26064 -       pm_runtime_get_sync(hdev->dev);
26065 +       pm_runtime_resume_and_get(hdev->dev);
26067         if (!(HASH_FLAGS_INIT & hdev->flags)) {
26068                 stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
26069 @@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
26070         u32 *preg;
26071         unsigned int i;
26073 -       pm_runtime_get_sync(hdev->dev);
26074 +       pm_runtime_resume_and_get(hdev->dev);
26076         while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
26077                 cpu_relax();
26078 @@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
26080         preg = rctx->hw_context;
26082 -       pm_runtime_get_sync(hdev->dev);
26083 +       pm_runtime_resume_and_get(hdev->dev);
26085         stm32_hash_write(hdev, HASH_IMR, *preg++);
26086         stm32_hash_write(hdev, HASH_STR, *preg++);
26087 @@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
26088         if (!hdev)
26089                 return -ENODEV;
26091 -       ret = pm_runtime_get_sync(hdev->dev);
26092 +       ret = pm_runtime_resume_and_get(hdev->dev);
26093         if (ret < 0)
26094                 return ret;
26096 diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
26097 index bf3047896e41..59ba59bea0f5 100644
26098 --- a/drivers/devfreq/devfreq.c
26099 +++ b/drivers/devfreq/devfreq.c
26100 @@ -387,7 +387,7 @@ static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
26101         devfreq->previous_freq = new_freq;
26103         if (devfreq->suspend_freq)
26104 -               devfreq->resume_freq = cur_freq;
26105 +               devfreq->resume_freq = new_freq;
26107         return err;
26109 @@ -821,7 +821,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
26111         if (devfreq->profile->timer < 0
26112                 || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
26113 -               goto err_out;
26114 +               mutex_unlock(&devfreq->lock);
26115 +               goto err_dev;
26116         }
26118         if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
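
The first devfreq.c hunk records the frequency that was just programmed, not the pre-transition cur_freq, as the frequency to restore after a suspend; the second makes the invalid-timer error path drop devfreq->lock and take the tear-down label instead of leaking the half-initialized device. A sketch of why resume_freq must track the new target (devfreq_set_target() is the static helper patched above; the suspend/resume pairing is assumed from context):

/* suspend path: park the device */
devfreq_set_target(devfreq, devfreq->suspend_freq, 0);

/* resume path: restore the last programmed target.  If resume_freq held
 * the frequency from *before* the final transition, each suspend/resume
 * cycle would roll the device back one step. */
devfreq_set_target(devfreq, devfreq->resume_freq, 0);
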
26119 diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
26120 index f264b70c383e..eadd1eaa2fb5 100644
26121 --- a/drivers/dma-buf/dma-buf.c
26122 +++ b/drivers/dma-buf/dma-buf.c
26123 @@ -760,7 +760,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
26125                 if (dma_buf_is_dynamic(attach->dmabuf)) {
26126                         dma_resv_lock(attach->dmabuf->resv, NULL);
26127 -                       ret = dma_buf_pin(attach);
26128 +                       ret = dmabuf->ops->pin(attach);
26129                         if (ret)
26130                                 goto err_unlock;
26131                 }
26132 @@ -786,7 +786,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
26134  err_unpin:
26135         if (dma_buf_is_dynamic(attach->dmabuf))
26136 -               dma_buf_unpin(attach);
26137 +               dmabuf->ops->unpin(attach);
26139  err_unlock:
26140         if (dma_buf_is_dynamic(attach->dmabuf))
26141 @@ -843,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
26142                 __unmap_dma_buf(attach, attach->sgt, attach->dir);
26144                 if (dma_buf_is_dynamic(attach->dmabuf)) {
26145 -                       dma_buf_unpin(attach);
26146 +                       dmabuf->ops->unpin(attach);
26147                         dma_resv_unlock(attach->dmabuf->resv);
26148                 }
26149         }
26150 @@ -956,7 +956,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
26151         if (dma_buf_is_dynamic(attach->dmabuf)) {
26152                 dma_resv_assert_held(attach->dmabuf->resv);
26153                 if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
26154 -                       r = dma_buf_pin(attach);
26155 +                       r = attach->dmabuf->ops->pin(attach);
26156                         if (r)
26157                                 return ERR_PTR(r);
26158                 }
26159 @@ -968,7 +968,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
26161         if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
26162              !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
26163 -               dma_buf_unpin(attach);
26164 +               attach->dmabuf->ops->unpin(attach);
26166         if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
26167                 attach->sgt = sg_table;
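
All four dma-buf.c hunks make the same substitution: core code stops going through the dma_buf_pin()/dma_buf_unpin() wrappers and calls the exporter's ops->pin/ops->unpin hooks directly. The wrappers belong to the dynamic-importer API and carry checks aimed at external callers; the core already holds the reservation lock at these sites, so it can invoke the hook itself. A sketch of the shape of the call (condensed from the surrounding code, not a complete picture of the API):

if (dma_buf_is_dynamic(attach->dmabuf)) {
        dma_resv_assert_held(attach->dmabuf->resv);
        /* exporter hook, bypassing the importer-facing wrapper */
        ret = attach->dmabuf->ops->pin(attach);
        if (ret)
                return ret;
}
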
26168 diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
26169 index 08d71dafa001..58c8cc8fe0e1 100644
26170 --- a/drivers/dma/dw-edma/dw-edma-core.c
26171 +++ b/drivers/dma/dw-edma/dw-edma-core.c
26172 @@ -937,22 +937,21 @@ int dw_edma_remove(struct dw_edma_chip *chip)
26173         /* Power management */
26174         pm_runtime_disable(dev);
26176 +       /* Deregister eDMA device */
26177 +       dma_async_device_unregister(&dw->wr_edma);
26178         list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
26179                                  vc.chan.device_node) {
26180 -               list_del(&chan->vc.chan.device_node);
26181                 tasklet_kill(&chan->vc.task);
26182 +               list_del(&chan->vc.chan.device_node);
26183         }
26185 +       dma_async_device_unregister(&dw->rd_edma);
26186         list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
26187                                  vc.chan.device_node) {
26188 -               list_del(&chan->vc.chan.device_node);
26189                 tasklet_kill(&chan->vc.task);
26190 +               list_del(&chan->vc.chan.device_node);
26191         }
26193 -       /* Deregister eDMA device */
26194 -       dma_async_device_unregister(&dw->wr_edma);
26195 -       dma_async_device_unregister(&dw->rd_edma);
26197         /* Turn debugfs off */
26198         dw_edma_v0_core_debugfs_off();
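
The dw-edma-core.c hunk reorders remove: each DMA device is unregistered before its channel list is torn down, and each channel's tasklet is killed before the channel is unlinked. The ordering matters because a channel must not vanish from the list while its deferred work can still run. The rule, per direction, with the steps annotated (identifiers as in the hunk):

dma_async_device_unregister(&dw->wr_edma);      /* 1: no new transactions */
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
                         vc.chan.device_node) {
        tasklet_kill(&chan->vc.task);           /* 2: drain deferred work */
        list_del(&chan->vc.chan.device_node);   /* 3: unlink an idle chan */
}
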
26200 diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
26201 index 0db9b82ed8cf..1d8a3876b745 100644
26202 --- a/drivers/dma/idxd/cdev.c
26203 +++ b/drivers/dma/idxd/cdev.c
26204 @@ -39,15 +39,15 @@ struct idxd_user_context {
26205         struct iommu_sva *sva;
26206  };
26208 -enum idxd_cdev_cleanup {
26209 -       CDEV_NORMAL = 0,
26210 -       CDEV_FAILED,
26211 -};
26213  static void idxd_cdev_dev_release(struct device *dev)
26215 -       dev_dbg(dev, "releasing cdev device\n");
26216 -       kfree(dev);
26217 +       struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
26218 +       struct idxd_cdev_context *cdev_ctx;
26219 +       struct idxd_wq *wq = idxd_cdev->wq;
26221 +       cdev_ctx = &ictx[wq->idxd->type];
26222 +       ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
26223 +       kfree(idxd_cdev);
26226  static struct device_type idxd_cdev_device_type = {
26227 @@ -62,14 +62,11 @@ static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
26228         return container_of(cdev, struct idxd_cdev, cdev);
26231 -static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
26233 -       return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
26236  static inline struct idxd_wq *inode_wq(struct inode *inode)
26238 -       return idxd_cdev_wq(inode_idxd_cdev(inode));
26239 +       struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);
26241 +       return idxd_cdev->wq;
26244  static int idxd_cdev_open(struct inode *inode, struct file *filp)
26245 @@ -220,11 +217,10 @@ static __poll_t idxd_cdev_poll(struct file *filp,
26246         struct idxd_user_context *ctx = filp->private_data;
26247         struct idxd_wq *wq = ctx->wq;
26248         struct idxd_device *idxd = wq->idxd;
26249 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
26250         unsigned long flags;
26251         __poll_t out = 0;
26253 -       poll_wait(filp, &idxd_cdev->err_queue, wait);
26254 +       poll_wait(filp, &wq->err_queue, wait);
26255         spin_lock_irqsave(&idxd->dev_lock, flags);
26256         if (idxd->sw_err.valid)
26257                 out = EPOLLIN | EPOLLRDNORM;
26258 @@ -246,98 +242,67 @@ int idxd_cdev_get_major(struct idxd_device *idxd)
26259         return MAJOR(ictx[idxd->type].devt);
26262 -static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
26263 +int idxd_wq_add_cdev(struct idxd_wq *wq)
26265         struct idxd_device *idxd = wq->idxd;
26266 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
26267 -       struct idxd_cdev_context *cdev_ctx;
26268 +       struct idxd_cdev *idxd_cdev;
26269 +       struct cdev *cdev;
26270         struct device *dev;
26271 -       int minor, rc;
26272 +       struct idxd_cdev_context *cdev_ctx;
26273 +       int rc, minor;
26275 -       idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
26276 -       if (!idxd_cdev->dev)
26277 +       idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
26278 +       if (!idxd_cdev)
26279                 return -ENOMEM;
26281 -       dev = idxd_cdev->dev;
26282 -       dev->parent = &idxd->pdev->dev;
26283 -       dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
26284 -                    idxd->id, wq->id);
26285 -       dev->bus = idxd_get_bus_type(idxd);
26287 +       idxd_cdev->wq = wq;
26288 +       cdev = &idxd_cdev->cdev;
26289 +       dev = &idxd_cdev->dev;
26290         cdev_ctx = &ictx[wq->idxd->type];
26291         minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
26292         if (minor < 0) {
26293 -               rc = minor;
26294 -               kfree(dev);
26295 -               goto ida_err;
26296 -       }
26298 -       dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
26299 -       dev->type = &idxd_cdev_device_type;
26300 -       rc = device_register(dev);
26301 -       if (rc < 0) {
26302 -               dev_err(&idxd->pdev->dev, "device register failed\n");
26303 -               goto dev_reg_err;
26304 +               kfree(idxd_cdev);
26305 +               return minor;
26306         }
26307         idxd_cdev->minor = minor;
26309 -       return 0;
26311 - dev_reg_err:
26312 -       ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
26313 -       put_device(dev);
26314 - ida_err:
26315 -       idxd_cdev->dev = NULL;
26316 -       return rc;
26319 -static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
26320 -                                enum idxd_cdev_cleanup cdev_state)
26322 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
26323 -       struct idxd_cdev_context *cdev_ctx;
26325 -       cdev_ctx = &ictx[wq->idxd->type];
26326 -       if (cdev_state == CDEV_NORMAL)
26327 -               cdev_del(&idxd_cdev->cdev);
26328 -       device_unregister(idxd_cdev->dev);
26329 -       /*
26330 -        * The device_type->release() will be called on the device and free
26331 -        * the allocated struct device. We can just forget it.
26332 -        */
26333 -       ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
26334 -       idxd_cdev->dev = NULL;
26335 -       idxd_cdev->minor = -1;
26338 -int idxd_wq_add_cdev(struct idxd_wq *wq)
26340 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
26341 -       struct cdev *cdev = &idxd_cdev->cdev;
26342 -       struct device *dev;
26343 -       int rc;
26344 +       device_initialize(dev);
26345 +       dev->parent = &wq->conf_dev;
26346 +       dev->bus = idxd_get_bus_type(idxd);
26347 +       dev->type = &idxd_cdev_device_type;
26348 +       dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
26350 -       rc = idxd_wq_cdev_dev_setup(wq);
26351 +       rc = dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
26352 +                         idxd->id, wq->id);
26353         if (rc < 0)
26354 -               return rc;
26355 +               goto err;
26357 -       dev = idxd_cdev->dev;
26358 +       wq->idxd_cdev = idxd_cdev;
26359         cdev_init(cdev, &idxd_cdev_fops);
26360 -       cdev_set_parent(cdev, &dev->kobj);
26361 -       rc = cdev_add(cdev, dev->devt, 1);
26362 +       rc = cdev_device_add(cdev, dev);
26363         if (rc) {
26364                 dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
26365 -               idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
26366 -               return rc;
26367 +               goto err;
26368         }
26370 -       init_waitqueue_head(&idxd_cdev->err_queue);
26371         return 0;
26373 + err:
26374 +       put_device(dev);
26375 +       wq->idxd_cdev = NULL;
26376 +       return rc;
26379  void idxd_wq_del_cdev(struct idxd_wq *wq)
26381 -       idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
26382 +       struct idxd_cdev *idxd_cdev;
26383 +       struct idxd_cdev_context *cdev_ctx;
26385 +       cdev_ctx = &ictx[wq->idxd->type];
26386 +       idxd_cdev = wq->idxd_cdev;
26387 +       wq->idxd_cdev = NULL;
26388 +       cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
26389 +       put_device(&idxd_cdev->dev);
26392  int idxd_cdev_register(void)
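
The cdev.c rework is the core of the idxd lifetime fix: struct idxd_cdev now embeds its struct device instead of pointing at a kzalloc'd one, all frees move into the device type's ->release() callback, and registration collapses into cdev_device_add()/cdev_device_del(), which pair the cdev with the device. The key discipline is that once device_initialize() has run, error paths must exit through put_device() rather than kfree(). A minimal sketch of that model (names are illustrative, not the driver's):

struct foo {
        struct device dev;      /* embedded, refcounted */
        int minor;
};

static void foo_release(struct device *dev)
{
        /* the last put_device() lands here; the only legal kfree() */
        kfree(container_of(dev, struct foo, dev));
}

static int foo_create(struct device *parent)
{
        struct foo *foo;
        int rc;

        foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        device_initialize(&foo->dev);   /* refcount live from here on */
        foo->dev.parent = parent;
        foo->dev.release = foo_release; /* idxd sets this via dev->type */

        rc = dev_set_name(&foo->dev, "foo%d", 0);
        if (rc)
                goto err;
        rc = device_add(&foo->dev);
        if (rc)
                goto err;
        return 0;

err:
        put_device(&foo->dev);          /* ends in foo_release() */
        return rc;
}
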
26393 diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
26394 index 31c819544a22..4fef57717049 100644
26395 --- a/drivers/dma/idxd/device.c
26396 +++ b/drivers/dma/idxd/device.c
26397 @@ -19,7 +19,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
26398  /* Interrupt control bits */
26399  void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
26401 -       struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
26402 +       struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
26404         pci_msi_mask_irq(data);
26406 @@ -36,7 +36,7 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
26408  void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
26410 -       struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
26411 +       struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
26413         pci_msi_unmask_irq(data);
26415 @@ -186,8 +186,6 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
26416                 desc->id = i;
26417                 desc->wq = wq;
26418                 desc->cpu = -1;
26419 -               dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
26420 -               desc->txd.tx_submit = idxd_dma_tx_submit;
26421         }
26423         return 0;
26424 @@ -451,7 +449,8 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
26426         if (idxd_device_is_halted(idxd)) {
26427                 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
26428 -               *status = IDXD_CMDSTS_HW_ERR;
26429 +               if (status)
26430 +                       *status = IDXD_CMDSTS_HW_ERR;
26431                 return;
26432         }
26434 @@ -521,7 +520,7 @@ void idxd_device_wqs_clear_state(struct idxd_device *idxd)
26435         lockdep_assert_held(&idxd->dev_lock);
26437         for (i = 0; i < idxd->max_wqs; i++) {
26438 -               struct idxd_wq *wq = &idxd->wqs[i];
26439 +               struct idxd_wq *wq = idxd->wqs[i];
26441                 if (wq->state == IDXD_WQ_ENABLED) {
26442                         idxd_wq_disable_cleanup(wq);
26443 @@ -660,7 +659,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
26444                 ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
26446         for (i = 0; i < idxd->max_groups; i++) {
26447 -               struct idxd_group *group = &idxd->groups[i];
26448 +               struct idxd_group *group = idxd->groups[i];
26450                 idxd_group_config_write(group);
26451         }
26452 @@ -739,7 +738,7 @@ static int idxd_wqs_config_write(struct idxd_device *idxd)
26453         int i, rc;
26455         for (i = 0; i < idxd->max_wqs; i++) {
26456 -               struct idxd_wq *wq = &idxd->wqs[i];
26457 +               struct idxd_wq *wq = idxd->wqs[i];
26459                 rc = idxd_wq_config_write(wq);
26460                 if (rc < 0)
26461 @@ -755,7 +754,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
26463         /* TC-A 0 and TC-B 1 should be defaults */
26464         for (i = 0; i < idxd->max_groups; i++) {
26465 -               struct idxd_group *group = &idxd->groups[i];
26466 +               struct idxd_group *group = idxd->groups[i];
26468                 if (group->tc_a == -1)
26469                         group->tc_a = group->grpcfg.flags.tc_a = 0;
26470 @@ -782,12 +781,12 @@ static int idxd_engines_setup(struct idxd_device *idxd)
26471         struct idxd_group *group;
26473         for (i = 0; i < idxd->max_groups; i++) {
26474 -               group = &idxd->groups[i];
26475 +               group = idxd->groups[i];
26476                 group->grpcfg.engines = 0;
26477         }
26479         for (i = 0; i < idxd->max_engines; i++) {
26480 -               eng = &idxd->engines[i];
26481 +               eng = idxd->engines[i];
26482                 group = eng->group;
26484                 if (!group)
26485 @@ -811,13 +810,13 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
26486         struct device *dev = &idxd->pdev->dev;
26488         for (i = 0; i < idxd->max_groups; i++) {
26489 -               group = &idxd->groups[i];
26490 +               group = idxd->groups[i];
26491                 for (j = 0; j < 4; j++)
26492                         group->grpcfg.wqs[j] = 0;
26493         }
26495         for (i = 0; i < idxd->max_wqs; i++) {
26496 -               wq = &idxd->wqs[i];
26497 +               wq = idxd->wqs[i];
26498                 group = wq->group;
26500                 if (!wq->group)
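
The mechanical &idxd->wqs[i] -> idxd->wqs[i] changes throughout device.c follow from the idxd.h change further down: groups, wqs and engines become arrays of pointers, with each object allocated on its own so its embedded conf_dev can control its lifetime independently; with a flat devm array, a release callback on one element could never free it. Sketch of the two layouts:

/* before: one flat, devm-managed block; lifetime tied to driver bind */
struct idxd_wq *wqs = devm_kcalloc(dev, max_wqs, sizeof(*wqs), GFP_KERNEL);

/* after: a table of pointers; each element is kzalloc'd separately and
 * freed only by its own conf_dev ->release() */
struct idxd_wq **wqs = kcalloc_node(max_wqs, sizeof(*wqs),
                                    GFP_KERNEL, dev_to_node(dev));
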
26501 diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
26502 index a15e50126434..77439b645044 100644
26503 --- a/drivers/dma/idxd/dma.c
26504 +++ b/drivers/dma/idxd/dma.c
26505 @@ -14,7 +14,10 @@
26507  static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
26509 -       return container_of(c, struct idxd_wq, dma_chan);
26510 +       struct idxd_dma_chan *idxd_chan;
26512 +       idxd_chan = container_of(c, struct idxd_dma_chan, chan);
26513 +       return idxd_chan->wq;
26516  void idxd_dma_complete_txd(struct idxd_desc *desc,
26517 @@ -135,7 +138,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
26521 -dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
26522 +static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
26524         struct dma_chan *c = tx->chan;
26525         struct idxd_wq *wq = to_idxd_wq(c);
26526 @@ -156,14 +159,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
26528  static void idxd_dma_release(struct dma_device *device)
26530 +       struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);
26532 +       kfree(idxd_dma);
26535  int idxd_register_dma_device(struct idxd_device *idxd)
26537 -       struct dma_device *dma = &idxd->dma_dev;
26538 +       struct idxd_dma_dev *idxd_dma;
26539 +       struct dma_device *dma;
26540 +       struct device *dev = &idxd->pdev->dev;
26541 +       int rc;
26543 +       idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
26544 +       if (!idxd_dma)
26545 +               return -ENOMEM;
26547 +       dma = &idxd_dma->dma;
26548         INIT_LIST_HEAD(&dma->channels);
26549 -       dma->dev = &idxd->pdev->dev;
26550 +       dma->dev = dev;
26552         dma_cap_set(DMA_PRIVATE, dma->cap_mask);
26553         dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
26554 @@ -179,35 +193,72 @@ int idxd_register_dma_device(struct idxd_device *idxd)
26555         dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
26556         dma->device_free_chan_resources = idxd_dma_free_chan_resources;
26558 -       return dma_async_device_register(&idxd->dma_dev);
26559 +       rc = dma_async_device_register(dma);
26560 +       if (rc < 0) {
26561 +               kfree(idxd_dma);
26562 +               return rc;
26563 +       }
26565 +       idxd_dma->idxd = idxd;
26566 +       /*
26567 +        * This pointer is protected by the refs taken by the dma_chan. It will remain valid
26568 +        * as long as there are outstanding channels.
26569 +        */
26570 +       idxd->idxd_dma = idxd_dma;
26571 +       return 0;
26574  void idxd_unregister_dma_device(struct idxd_device *idxd)
26576 -       dma_async_device_unregister(&idxd->dma_dev);
26577 +       dma_async_device_unregister(&idxd->idxd_dma->dma);
26580  int idxd_register_dma_channel(struct idxd_wq *wq)
26582         struct idxd_device *idxd = wq->idxd;
26583 -       struct dma_device *dma = &idxd->dma_dev;
26584 -       struct dma_chan *chan = &wq->dma_chan;
26585 -       int rc;
26586 +       struct dma_device *dma = &idxd->idxd_dma->dma;
26587 +       struct device *dev = &idxd->pdev->dev;
26588 +       struct idxd_dma_chan *idxd_chan;
26589 +       struct dma_chan *chan;
26590 +       int rc, i;
26592 +       idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
26593 +       if (!idxd_chan)
26594 +               return -ENOMEM;
26596 -       memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
26597 +       chan = &idxd_chan->chan;
26598         chan->device = dma;
26599         list_add_tail(&chan->device_node, &dma->channels);
26601 +       for (i = 0; i < wq->num_descs; i++) {
26602 +               struct idxd_desc *desc = wq->descs[i];
26604 +               dma_async_tx_descriptor_init(&desc->txd, chan);
26605 +               desc->txd.tx_submit = idxd_dma_tx_submit;
26606 +       }
26608         rc = dma_async_device_channel_register(dma, chan);
26609 -       if (rc < 0)
26610 +       if (rc < 0) {
26611 +               kfree(idxd_chan);
26612                 return rc;
26613 +       }
26615 +       wq->idxd_chan = idxd_chan;
26616 +       idxd_chan->wq = wq;
26617 +       get_device(&wq->conf_dev);
26619         return 0;
26622  void idxd_unregister_dma_channel(struct idxd_wq *wq)
26624 -       struct dma_chan *chan = &wq->dma_chan;
26625 +       struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
26626 +       struct dma_chan *chan = &idxd_chan->chan;
26627 +       struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;
26629 -       dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
26630 +       dma_async_device_channel_unregister(&idxd_dma->dma, chan);
26631         list_del(&chan->device_node);
26632 +       kfree(wq->idxd_chan);
26633 +       wq->idxd_chan = NULL;
26634 +       put_device(&wq->conf_dev);
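
dma.c follows the same ownership scheme: the dma_chan moves out of struct idxd_wq into a separately allocated idxd_dma_chan wrapper, descriptors are initialized against that channel at registration time rather than at resource allocation, and the channel pins the wq's conf_dev for as long as it exists. The get/put pairing is the part worth keeping in mind (condensed from the two functions above):

/* register: wrapper allocated, wq pinned */
wq->idxd_chan = idxd_chan;
idxd_chan->wq = wq;
get_device(&wq->conf_dev);      /* chan now holds a wq reference */

/* unregister: wrapper freed, wq reference dropped */
dma_async_device_channel_unregister(&idxd_dma->dma, chan);
list_del(&chan->device_node);
kfree(wq->idxd_chan);
wq->idxd_chan = NULL;
put_device(&wq->conf_dev);      /* may trigger the wq release */
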
26636 diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
26637 index 76014c14f473..89daf746d121 100644
26638 --- a/drivers/dma/idxd/idxd.h
26639 +++ b/drivers/dma/idxd/idxd.h
26640 @@ -8,12 +8,16 @@
26641  #include <linux/percpu-rwsem.h>
26642  #include <linux/wait.h>
26643  #include <linux/cdev.h>
26644 +#include <linux/idr.h>
26645  #include "registers.h"
26647  #define IDXD_DRIVER_VERSION    "1.00"
26649  extern struct kmem_cache *idxd_desc_pool;
26651 +struct idxd_device;
26652 +struct idxd_wq;
26654  #define IDXD_REG_TIMEOUT       50
26655  #define IDXD_DRAIN_TIMEOUT     5000
26657 @@ -33,6 +37,7 @@ struct idxd_device_driver {
26658  struct idxd_irq_entry {
26659         struct idxd_device *idxd;
26660         int id;
26661 +       int vector;
26662         struct llist_head pending_llist;
26663         struct list_head work_list;
26664         /*
26665 @@ -75,10 +80,10 @@ enum idxd_wq_type {
26666  };
26668  struct idxd_cdev {
26669 +       struct idxd_wq *wq;
26670         struct cdev cdev;
26671 -       struct device *dev;
26672 +       struct device dev;
26673         int minor;
26674 -       struct wait_queue_head err_queue;
26675  };
26677  #define IDXD_ALLOCATED_BATCH_SIZE      128U
26678 @@ -96,10 +101,16 @@ enum idxd_complete_type {
26679         IDXD_COMPLETE_DEV_FAIL,
26680  };
26682 +struct idxd_dma_chan {
26683 +       struct dma_chan chan;
26684 +       struct idxd_wq *wq;
26687  struct idxd_wq {
26688         void __iomem *portal;
26689         struct device conf_dev;
26690 -       struct idxd_cdev idxd_cdev;
26691 +       struct idxd_cdev *idxd_cdev;
26692 +       struct wait_queue_head err_queue;
26693         struct idxd_device *idxd;
26694         int id;
26695         enum idxd_wq_type type;
26696 @@ -125,7 +136,7 @@ struct idxd_wq {
26697         int compls_size;
26698         struct idxd_desc **descs;
26699         struct sbitmap_queue sbq;
26700 -       struct dma_chan dma_chan;
26701 +       struct idxd_dma_chan *idxd_chan;
26702         char name[WQ_NAME_SIZE + 1];
26703         u64 max_xfer_bytes;
26704         u32 max_batch_size;
26705 @@ -162,6 +173,11 @@ enum idxd_device_flag {
26706         IDXD_FLAG_PASID_ENABLED,
26707  };
26709 +struct idxd_dma_dev {
26710 +       struct idxd_device *idxd;
26711 +       struct dma_device dma;
26714  struct idxd_device {
26715         enum idxd_type type;
26716         struct device conf_dev;
26717 @@ -178,9 +194,9 @@ struct idxd_device {
26719         spinlock_t dev_lock;    /* spinlock for device */
26720         struct completion *cmd_done;
26721 -       struct idxd_group *groups;
26722 -       struct idxd_wq *wqs;
26723 -       struct idxd_engine *engines;
26724 +       struct idxd_group **groups;
26725 +       struct idxd_wq **wqs;
26726 +       struct idxd_engine **engines;
26728         struct iommu_sva *sva;
26729         unsigned int pasid;
26730 @@ -206,11 +222,10 @@ struct idxd_device {
26732         union sw_err_reg sw_err;
26733         wait_queue_head_t cmd_waitq;
26734 -       struct msix_entry *msix_entries;
26735         int num_wq_irqs;
26736         struct idxd_irq_entry *irq_entries;
26738 -       struct dma_device dma_dev;
26739 +       struct idxd_dma_dev *idxd_dma;
26740         struct workqueue_struct *wq;
26741         struct work_struct work;
26742  };
26743 @@ -242,6 +257,43 @@ extern struct bus_type dsa_bus_type;
26744  extern struct bus_type iax_bus_type;
26746  extern bool support_enqcmd;
26747 +extern struct device_type dsa_device_type;
26748 +extern struct device_type iax_device_type;
26749 +extern struct device_type idxd_wq_device_type;
26750 +extern struct device_type idxd_engine_device_type;
26751 +extern struct device_type idxd_group_device_type;
26753 +static inline bool is_dsa_dev(struct device *dev)
26755 +       return dev->type == &dsa_device_type;
26758 +static inline bool is_iax_dev(struct device *dev)
26760 +       return dev->type == &iax_device_type;
26763 +static inline bool is_idxd_dev(struct device *dev)
26765 +       return is_dsa_dev(dev) || is_iax_dev(dev);
26768 +static inline bool is_idxd_wq_dev(struct device *dev)
26770 +       return dev->type == &idxd_wq_device_type;
26773 +static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
26775 +       if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
26776 +               return true;
26777 +       return false;
26780 +static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
26782 +       return wq->type == IDXD_WQT_USER;
26785  static inline bool wq_dedicated(struct idxd_wq *wq)
26787 @@ -279,18 +331,6 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
26788         return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
26791 -static inline void idxd_set_type(struct idxd_device *idxd)
26793 -       struct pci_dev *pdev = idxd->pdev;
26795 -       if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
26796 -               idxd->type = IDXD_TYPE_DSA;
26797 -       else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
26798 -               idxd->type = IDXD_TYPE_IAX;
26799 -       else
26800 -               idxd->type = IDXD_TYPE_UNKNOWN;
26803  static inline void idxd_wq_get(struct idxd_wq *wq)
26805         wq->client_count++;
26806 @@ -306,14 +346,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
26807         return wq->client_count;
26808  };
26810 +struct ida *idxd_ida(struct idxd_device *idxd);
26811  const char *idxd_get_dev_name(struct idxd_device *idxd);
26812  int idxd_register_bus_type(void);
26813  void idxd_unregister_bus_type(void);
26814 -int idxd_setup_sysfs(struct idxd_device *idxd);
26815 -void idxd_cleanup_sysfs(struct idxd_device *idxd);
26816 +int idxd_register_devices(struct idxd_device *idxd);
26817 +void idxd_unregister_devices(struct idxd_device *idxd);
26818  int idxd_register_driver(void);
26819  void idxd_unregister_driver(void);
26820  struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
26821 +struct device_type *idxd_get_device_type(struct idxd_device *idxd);
26823  /* device interrupt control */
26824  void idxd_msix_perm_setup(struct idxd_device *idxd);
26825 @@ -363,7 +405,6 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq);
26826  void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
26827  void idxd_dma_complete_txd(struct idxd_desc *desc,
26828                            enum idxd_complete_type comp_type);
26829 -dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
26831  /* cdev */
26832  int idxd_cdev_register(void);
26833 diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
26834 index 6584b0ec07d5..07cf7977a045 100644
26835 --- a/drivers/dma/idxd/init.c
26836 +++ b/drivers/dma/idxd/init.c
26837 @@ -34,8 +34,7 @@ MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
26839  bool support_enqcmd;
26841 -static struct idr idxd_idrs[IDXD_TYPE_MAX];
26842 -static DEFINE_MUTEX(idxd_idr_lock);
26843 +static struct ida idxd_idas[IDXD_TYPE_MAX];
26845  static struct pci_device_id idxd_pci_tbl[] = {
26846         /* DSA ver 1.0 platforms */
26847 @@ -52,6 +51,11 @@ static char *idxd_name[] = {
26848         "iax"
26849  };
26851 +struct ida *idxd_ida(struct idxd_device *idxd)
26853 +       return &idxd_idas[idxd->type];
26856  const char *idxd_get_dev_name(struct idxd_device *idxd)
26858         return idxd_name[idxd->type];
26859 @@ -61,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
26861         struct pci_dev *pdev = idxd->pdev;
26862         struct device *dev = &pdev->dev;
26863 -       struct msix_entry *msix;
26864         struct idxd_irq_entry *irq_entry;
26865         int i, msixcnt;
26866         int rc = 0;
26867 @@ -69,23 +72,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
26868         msixcnt = pci_msix_vec_count(pdev);
26869         if (msixcnt < 0) {
26870                 dev_err(dev, "Not MSI-X interrupt capable.\n");
26871 -               goto err_no_irq;
26872 -       }
26874 -       idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
26875 -                       msixcnt, GFP_KERNEL);
26876 -       if (!idxd->msix_entries) {
26877 -               rc = -ENOMEM;
26878 -               goto err_no_irq;
26879 +               return -ENOSPC;
26880         }
26882 -       for (i = 0; i < msixcnt; i++)
26883 -               idxd->msix_entries[i].entry = i;
26885 -       rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
26886 -       if (rc) {
26887 -               dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
26888 -               goto err_no_irq;
26889 +       rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
26890 +       if (rc != msixcnt) {
26891 +               dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
26892 +               return -ENOSPC;
26893         }
26894         dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
26896 @@ -93,119 +86,236 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
26897          * We implement 1 completion list per MSI-X entry except for
26898          * entry 0, which is for errors and others.
26899          */
26900 -       idxd->irq_entries = devm_kcalloc(dev, msixcnt,
26901 -                                        sizeof(struct idxd_irq_entry),
26902 -                                        GFP_KERNEL);
26903 +       idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
26904 +                                        GFP_KERNEL, dev_to_node(dev));
26905         if (!idxd->irq_entries) {
26906                 rc = -ENOMEM;
26907 -               goto err_no_irq;
26908 +               goto err_irq_entries;
26909         }
26911         for (i = 0; i < msixcnt; i++) {
26912                 idxd->irq_entries[i].id = i;
26913                 idxd->irq_entries[i].idxd = idxd;
26914 +               idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
26915                 spin_lock_init(&idxd->irq_entries[i].list_lock);
26916         }
26918 -       msix = &idxd->msix_entries[0];
26919         irq_entry = &idxd->irq_entries[0];
26920 -       rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
26921 -                                      idxd_misc_thread, 0, "idxd-misc",
26922 -                                      irq_entry);
26923 +       rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler, idxd_misc_thread,
26924 +                                 0, "idxd-misc", irq_entry);
26925         if (rc < 0) {
26926                 dev_err(dev, "Failed to allocate misc interrupt.\n");
26927 -               goto err_no_irq;
26928 +               goto err_misc_irq;
26929         }
26931 -       dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
26932 -               msix->vector);
26933 +       dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
26935         /* first MSI-X entry is not for wq interrupts */
26936         idxd->num_wq_irqs = msixcnt - 1;
26938         for (i = 1; i < msixcnt; i++) {
26939 -               msix = &idxd->msix_entries[i];
26940                 irq_entry = &idxd->irq_entries[i];
26942                 init_llist_head(&idxd->irq_entries[i].pending_llist);
26943                 INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
26944 -               rc = devm_request_threaded_irq(dev, msix->vector,
26945 -                                              idxd_irq_handler,
26946 -                                              idxd_wq_thread, 0,
26947 -                                              "idxd-portal", irq_entry);
26948 +               rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler,
26949 +                                         idxd_wq_thread, 0, "idxd-portal", irq_entry);
26950                 if (rc < 0) {
26951 -                       dev_err(dev, "Failed to allocate irq %d.\n",
26952 -                               msix->vector);
26953 -                       goto err_no_irq;
26954 +                       dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
26955 +                       goto err_wq_irqs;
26956                 }
26957 -               dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
26958 -                       i, msix->vector);
26959 +               dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
26960         }
26962         idxd_unmask_error_interrupts(idxd);
26963         idxd_msix_perm_setup(idxd);
26964         return 0;
26966 - err_no_irq:
26967 + err_wq_irqs:
26968 +       while (--i >= 0) {
26969 +               irq_entry = &idxd->irq_entries[i];
26970 +               free_irq(irq_entry->vector, irq_entry);
26971 +       }
26972 + err_misc_irq:
26973         /* Disable error interrupt generation */
26974         idxd_mask_error_interrupts(idxd);
26975 -       pci_disable_msix(pdev);
26976 + err_irq_entries:
26977 +       pci_free_irq_vectors(pdev);
26978         dev_err(dev, "No usable interrupts\n");
26979         return rc;
26982 -static int idxd_setup_internals(struct idxd_device *idxd)
26983 +static int idxd_setup_wqs(struct idxd_device *idxd)
26985         struct device *dev = &idxd->pdev->dev;
26986 -       int i;
26988 -       init_waitqueue_head(&idxd->cmd_waitq);
26989 -       idxd->groups = devm_kcalloc(dev, idxd->max_groups,
26990 -                                   sizeof(struct idxd_group), GFP_KERNEL);
26991 -       if (!idxd->groups)
26992 -               return -ENOMEM;
26994 -       for (i = 0; i < idxd->max_groups; i++) {
26995 -               idxd->groups[i].idxd = idxd;
26996 -               idxd->groups[i].id = i;
26997 -               idxd->groups[i].tc_a = -1;
26998 -               idxd->groups[i].tc_b = -1;
26999 -       }
27000 +       struct idxd_wq *wq;
27001 +       int i, rc;
27003 -       idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
27004 -                                GFP_KERNEL);
27005 +       idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
27006 +                                GFP_KERNEL, dev_to_node(dev));
27007         if (!idxd->wqs)
27008                 return -ENOMEM;
27010 -       idxd->engines = devm_kcalloc(dev, idxd->max_engines,
27011 -                                    sizeof(struct idxd_engine), GFP_KERNEL);
27012 -       if (!idxd->engines)
27013 -               return -ENOMEM;
27015         for (i = 0; i < idxd->max_wqs; i++) {
27016 -               struct idxd_wq *wq = &idxd->wqs[i];
27017 +               wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
27018 +               if (!wq) {
27019 +                       rc = -ENOMEM;
27020 +                       goto err;
27021 +               }
27023                 wq->id = i;
27024                 wq->idxd = idxd;
27025 +               device_initialize(&wq->conf_dev);
27026 +               wq->conf_dev.parent = &idxd->conf_dev;
27027 +               wq->conf_dev.bus = idxd_get_bus_type(idxd);
27028 +               wq->conf_dev.type = &idxd_wq_device_type;
27029 +               rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
27030 +               if (rc < 0) {
27031 +                       put_device(&wq->conf_dev);
27032 +                       goto err;
27033 +               }
27035                 mutex_init(&wq->wq_lock);
27036 -               wq->idxd_cdev.minor = -1;
27037 +               init_waitqueue_head(&wq->err_queue);
27038                 wq->max_xfer_bytes = idxd->max_xfer_bytes;
27039                 wq->max_batch_size = idxd->max_batch_size;
27040 -               wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
27041 -               if (!wq->wqcfg)
27042 -                       return -ENOMEM;
27043 +               wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
27044 +               if (!wq->wqcfg) {
27045 +                       put_device(&wq->conf_dev);
27046 +                       rc = -ENOMEM;
27047 +                       goto err;
27048 +               }
27049 +               idxd->wqs[i] = wq;
27050         }
27052 +       return 0;
27054 + err:
27055 +       while (--i >= 0)
27056 +               put_device(&idxd->wqs[i]->conf_dev);
27057 +       return rc;
27060 +static int idxd_setup_engines(struct idxd_device *idxd)
27062 +       struct idxd_engine *engine;
27063 +       struct device *dev = &idxd->pdev->dev;
27064 +       int i, rc;
27066 +       idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
27067 +                                    GFP_KERNEL, dev_to_node(dev));
27068 +       if (!idxd->engines)
27069 +               return -ENOMEM;
27071         for (i = 0; i < idxd->max_engines; i++) {
27072 -               idxd->engines[i].idxd = idxd;
27073 -               idxd->engines[i].id = i;
27074 +               engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
27075 +               if (!engine) {
27076 +                       rc = -ENOMEM;
27077 +                       goto err;
27078 +               }
27080 +               engine->id = i;
27081 +               engine->idxd = idxd;
27082 +               device_initialize(&engine->conf_dev);
27083 +               engine->conf_dev.parent = &idxd->conf_dev;
27084 +               engine->conf_dev.type = &idxd_engine_device_type;
27085 +               rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
27086 +               if (rc < 0) {
27087 +                       put_device(&engine->conf_dev);
27088 +                       goto err;
27089 +               }
27091 +               idxd->engines[i] = engine;
27092         }
27094 -       idxd->wq = create_workqueue(dev_name(dev));
27095 -       if (!idxd->wq)
27096 +       return 0;
27098 + err:
27099 +       while (--i >= 0)
27100 +               put_device(&idxd->engines[i]->conf_dev);
27101 +       return rc;
27104 +static int idxd_setup_groups(struct idxd_device *idxd)
27106 +       struct device *dev = &idxd->pdev->dev;
27107 +       struct idxd_group *group;
27108 +       int i, rc;
27110 +       idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
27111 +                                   GFP_KERNEL, dev_to_node(dev));
27112 +       if (!idxd->groups)
27113                 return -ENOMEM;
27115 +       for (i = 0; i < idxd->max_groups; i++) {
27116 +               group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
27117 +               if (!group) {
27118 +                       rc = -ENOMEM;
27119 +                       goto err;
27120 +               }
27122 +               group->id = i;
27123 +               group->idxd = idxd;
27124 +               device_initialize(&group->conf_dev);
27125 +               group->conf_dev.parent = &idxd->conf_dev;
27126 +               group->conf_dev.bus = idxd_get_bus_type(idxd);
27127 +               group->conf_dev.type = &idxd_group_device_type;
27128 +               rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
27129 +               if (rc < 0) {
27130 +                       put_device(&group->conf_dev);
27131 +                       goto err;
27132 +               }
27134 +               idxd->groups[i] = group;
27135 +               group->tc_a = -1;
27136 +               group->tc_b = -1;
27137 +       }
27139 +       return 0;
27141 + err:
27142 +       while (--i >= 0)
27143 +               put_device(&idxd->groups[i]->conf_dev);
27144 +       return rc;
27147 +static int idxd_setup_internals(struct idxd_device *idxd)
27149 +       struct device *dev = &idxd->pdev->dev;
27150 +       int rc, i;
27152 +       init_waitqueue_head(&idxd->cmd_waitq);
27154 +       rc = idxd_setup_wqs(idxd);
27155 +       if (rc < 0)
27156 +               return rc;
27158 +       rc = idxd_setup_engines(idxd);
27159 +       if (rc < 0)
27160 +               goto err_engine;
27162 +       rc = idxd_setup_groups(idxd);
27163 +       if (rc < 0)
27164 +               goto err_group;
27166 +       idxd->wq = create_workqueue(dev_name(dev));
27167 +       if (!idxd->wq) {
27168 +               rc = -ENOMEM;
27169 +               goto err_wkq_create;
27170 +       }
27172         return 0;
27174 + err_wkq_create:
27175 +       for (i = 0; i < idxd->max_groups; i++)
27176 +               put_device(&idxd->groups[i]->conf_dev);
27177 + err_group:
27178 +       for (i = 0; i < idxd->max_engines; i++)
27179 +               put_device(&idxd->engines[i]->conf_dev);
27180 + err_engine:
27181 +       for (i = 0; i < idxd->max_wqs; i++)
27182 +               put_device(&idxd->wqs[i]->conf_dev);
27183 +       return rc;
27186  static void idxd_read_table_offsets(struct idxd_device *idxd)
27187 @@ -275,16 +385,44 @@ static void idxd_read_caps(struct idxd_device *idxd)
27188         }
27191 +static inline void idxd_set_type(struct idxd_device *idxd)
27193 +       struct pci_dev *pdev = idxd->pdev;
27195 +       if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
27196 +               idxd->type = IDXD_TYPE_DSA;
27197 +       else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
27198 +               idxd->type = IDXD_TYPE_IAX;
27199 +       else
27200 +               idxd->type = IDXD_TYPE_UNKNOWN;
27203  static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
27205         struct device *dev = &pdev->dev;
27206         struct idxd_device *idxd;
27207 +       int rc;
27209 -       idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
27210 +       idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
27211         if (!idxd)
27212                 return NULL;
27214         idxd->pdev = pdev;
27215 +       idxd_set_type(idxd);
27216 +       idxd->id = ida_alloc(idxd_ida(idxd), GFP_KERNEL);
27217 +       if (idxd->id < 0)
27218 +               return NULL;
27220 +       device_initialize(&idxd->conf_dev);
27221 +       idxd->conf_dev.parent = dev;
27222 +       idxd->conf_dev.bus = idxd_get_bus_type(idxd);
27223 +       idxd->conf_dev.type = idxd_get_device_type(idxd);
27224 +       rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd_get_dev_name(idxd), idxd->id);
27225 +       if (rc < 0) {
27226 +               put_device(&idxd->conf_dev);
27227 +               return NULL;
27228 +       }
27230         spin_lock_init(&idxd->dev_lock);
27232         return idxd;
27233 @@ -352,31 +490,20 @@ static int idxd_probe(struct idxd_device *idxd)
27235         rc = idxd_setup_internals(idxd);
27236         if (rc)
27237 -               goto err_setup;
27238 +               goto err;
27240         rc = idxd_setup_interrupts(idxd);
27241         if (rc)
27242 -               goto err_setup;
27243 +               goto err;
27245         dev_dbg(dev, "IDXD interrupt setup complete.\n");
27247 -       mutex_lock(&idxd_idr_lock);
27248 -       idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
27249 -       mutex_unlock(&idxd_idr_lock);
27250 -       if (idxd->id < 0) {
27251 -               rc = -ENOMEM;
27252 -               goto err_idr_fail;
27253 -       }
27255         idxd->major = idxd_cdev_get_major(idxd);
27257         dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
27258         return 0;
27260 - err_idr_fail:
27261 -       idxd_mask_error_interrupts(idxd);
27262 -       idxd_mask_msix_vectors(idxd);
27263 - err_setup:
27264 + err:
27265         if (device_pasid_enabled(idxd))
27266                 idxd_disable_system_pasid(idxd);
27267         return rc;
27268 @@ -396,34 +523,37 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
27269         struct idxd_device *idxd;
27270         int rc;
27272 -       rc = pcim_enable_device(pdev);
27273 +       rc = pci_enable_device(pdev);
27274         if (rc)
27275                 return rc;
27277         dev_dbg(dev, "Alloc IDXD context\n");
27278         idxd = idxd_alloc(pdev);
27279 -       if (!idxd)
27280 -               return -ENOMEM;
27281 +       if (!idxd) {
27282 +               rc = -ENOMEM;
27283 +               goto err_idxd_alloc;
27284 +       }
27286         dev_dbg(dev, "Mapping BARs\n");
27287 -       idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
27288 -       if (!idxd->reg_base)
27289 -               return -ENOMEM;
27290 +       idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
27291 +       if (!idxd->reg_base) {
27292 +               rc = -ENOMEM;
27293 +               goto err_iomap;
27294 +       }
27296         dev_dbg(dev, "Set DMA masks\n");
27297         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
27298         if (rc)
27299                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
27300         if (rc)
27301 -               return rc;
27302 +               goto err;
27304         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
27305         if (rc)
27306                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
27307         if (rc)
27308 -               return rc;
27309 +               goto err;
27311 -       idxd_set_type(idxd);
27313         idxd_type_init(idxd);
27315 @@ -435,13 +565,13 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
27316         rc = idxd_probe(idxd);
27317         if (rc) {
27318                 dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
27319 -               return -ENODEV;
27320 +               goto err;
27321         }
27323 -       rc = idxd_setup_sysfs(idxd);
27324 +       rc = idxd_register_devices(idxd);
27325         if (rc) {
27326                 dev_err(dev, "IDXD sysfs setup failed\n");
27327 -               return -ENODEV;
27328 +               goto err;
27329         }
27331         idxd->state = IDXD_DEV_CONF_READY;
27332 @@ -450,6 +580,14 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
27333                  idxd->hw.version);
27335         return 0;
27337 + err:
27338 +       pci_iounmap(pdev, idxd->reg_base);
27339 + err_iomap:
27340 +       put_device(&idxd->conf_dev);
27341 + err_idxd_alloc:
27342 +       pci_disable_device(pdev);
27343 +       return rc;
27346  static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
27347 @@ -495,7 +633,8 @@ static void idxd_shutdown(struct pci_dev *pdev)
27349         for (i = 0; i < msixcnt; i++) {
27350                 irq_entry = &idxd->irq_entries[i];
27351 -               synchronize_irq(idxd->msix_entries[i].vector);
27352 +               synchronize_irq(irq_entry->vector);
27353 +               free_irq(irq_entry->vector, irq_entry);
27354                 if (i == 0)
27355                         continue;
27356                 idxd_flush_pending_llist(irq_entry);
27357 @@ -503,6 +642,9 @@ static void idxd_shutdown(struct pci_dev *pdev)
27358         }
27360         idxd_msix_perm_clear(idxd);
27361 +       pci_free_irq_vectors(pdev);
27362 +       pci_iounmap(pdev, idxd->reg_base);
27363 +       pci_disable_device(pdev);
27364         destroy_workqueue(idxd->wq);
27367 @@ -511,13 +653,10 @@ static void idxd_remove(struct pci_dev *pdev)
27368         struct idxd_device *idxd = pci_get_drvdata(pdev);
27370         dev_dbg(&pdev->dev, "%s called\n", __func__);
27371 -       idxd_cleanup_sysfs(idxd);
27372         idxd_shutdown(pdev);
27373         if (device_pasid_enabled(idxd))
27374                 idxd_disable_system_pasid(idxd);
27375 -       mutex_lock(&idxd_idr_lock);
27376 -       idr_remove(&idxd_idrs[idxd->type], idxd->id);
27377 -       mutex_unlock(&idxd_idr_lock);
27378 +       idxd_unregister_devices(idxd);
27381  static struct pci_driver idxd_pci_driver = {
27382 @@ -547,7 +686,7 @@ static int __init idxd_init_module(void)
27383                 support_enqcmd = true;
27385         for (i = 0; i < IDXD_TYPE_MAX; i++)
27386 -               idr_init(&idxd_idrs[i]);
27387 +               ida_init(&idxd_idas[i]);
27389         err = idxd_register_bus_type();
27390         if (err < 0)
27391 diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
27392 index f1463fc58112..fc0781e3f36d 100644
27393 --- a/drivers/dma/idxd/irq.c
27394 +++ b/drivers/dma/idxd/irq.c
27395 @@ -45,7 +45,7 @@ static void idxd_device_reinit(struct work_struct *work)
27396                 goto out;
27398         for (i = 0; i < idxd->max_wqs; i++) {
27399 -               struct idxd_wq *wq = &idxd->wqs[i];
27400 +               struct idxd_wq *wq = idxd->wqs[i];
27402                 if (wq->state == IDXD_WQ_ENABLED) {
27403                         rc = idxd_wq_enable(wq);
27404 @@ -130,18 +130,18 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
27406                 if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
27407                         int id = idxd->sw_err.wq_idx;
27408 -                       struct idxd_wq *wq = &idxd->wqs[id];
27409 +                       struct idxd_wq *wq = idxd->wqs[id];
27411                         if (wq->type == IDXD_WQT_USER)
27412 -                               wake_up_interruptible(&wq->idxd_cdev.err_queue);
27413 +                               wake_up_interruptible(&wq->err_queue);
27414                 } else {
27415                         int i;
27417                         for (i = 0; i < idxd->max_wqs; i++) {
27418 -                               struct idxd_wq *wq = &idxd->wqs[i];
27419 +                               struct idxd_wq *wq = idxd->wqs[i];
27421                                 if (wq->type == IDXD_WQT_USER)
27422 -                                       wake_up_interruptible(&wq->idxd_cdev.err_queue);
27423 +                                       wake_up_interruptible(&wq->err_queue);
27424                         }
27425                 }
27427 diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
27428 index 18bf4d148989..9586b55abce5 100644
27429 --- a/drivers/dma/idxd/sysfs.c
27430 +++ b/drivers/dma/idxd/sysfs.c
27431 @@ -16,69 +16,6 @@ static char *idxd_wq_type_names[] = {
27432         [IDXD_WQT_USER]         = "user",
27433  };
27435 -static void idxd_conf_device_release(struct device *dev)
27437 -       dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
27440 -static struct device_type idxd_group_device_type = {
27441 -       .name = "group",
27442 -       .release = idxd_conf_device_release,
27445 -static struct device_type idxd_wq_device_type = {
27446 -       .name = "wq",
27447 -       .release = idxd_conf_device_release,
27450 -static struct device_type idxd_engine_device_type = {
27451 -       .name = "engine",
27452 -       .release = idxd_conf_device_release,
27455 -static struct device_type dsa_device_type = {
27456 -       .name = "dsa",
27457 -       .release = idxd_conf_device_release,
27460 -static struct device_type iax_device_type = {
27461 -       .name = "iax",
27462 -       .release = idxd_conf_device_release,
27465 -static inline bool is_dsa_dev(struct device *dev)
27467 -       return dev ? dev->type == &dsa_device_type : false;
27470 -static inline bool is_iax_dev(struct device *dev)
27472 -       return dev ? dev->type == &iax_device_type : false;
27475 -static inline bool is_idxd_dev(struct device *dev)
27477 -       return is_dsa_dev(dev) || is_iax_dev(dev);
27480 -static inline bool is_idxd_wq_dev(struct device *dev)
27482 -       return dev ? dev->type == &idxd_wq_device_type : false;
27485 -static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
27487 -       if (wq->type == IDXD_WQT_KERNEL &&
27488 -           strcmp(wq->name, "dmaengine") == 0)
27489 -               return true;
27490 -       return false;
27493 -static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
27495 -       return wq->type == IDXD_WQT_USER;
27498  static int idxd_config_bus_match(struct device *dev,
27499                                  struct device_driver *drv)
27501 @@ -322,7 +259,7 @@ static int idxd_config_bus_remove(struct device *dev)
27502                 dev_dbg(dev, "%s removing dev %s\n", __func__,
27503                         dev_name(&idxd->conf_dev));
27504                 for (i = 0; i < idxd->max_wqs; i++) {
27505 -                       struct idxd_wq *wq = &idxd->wqs[i];
27506 +                       struct idxd_wq *wq = idxd->wqs[i];
27508                         if (wq->state == IDXD_WQ_DISABLED)
27509                                 continue;
27510 @@ -334,7 +271,7 @@ static int idxd_config_bus_remove(struct device *dev)
27511                 idxd_unregister_dma_device(idxd);
27512                 rc = idxd_device_disable(idxd);
27513                 for (i = 0; i < idxd->max_wqs; i++) {
27514 -                       struct idxd_wq *wq = &idxd->wqs[i];
27515 +                       struct idxd_wq *wq = idxd->wqs[i];
27517                         mutex_lock(&wq->wq_lock);
27518                         idxd_wq_disable_cleanup(wq);
27519 @@ -405,7 +342,7 @@ struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
27520         return idxd_bus_types[idxd->type];
27523 -static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
27524 +struct device_type *idxd_get_device_type(struct idxd_device *idxd)
27526         if (idxd->type == IDXD_TYPE_DSA)
27527                 return &dsa_device_type;
27528 @@ -488,7 +425,7 @@ static ssize_t engine_group_id_store(struct device *dev,
27530         if (prevg)
27531                 prevg->num_engines--;
27532 -       engine->group = &idxd->groups[id];
27533 +       engine->group = idxd->groups[id];
27534         engine->group->num_engines++;
27536         return count;
27537 @@ -512,6 +449,19 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
27538         NULL,
27539  };
27541 +static void idxd_conf_engine_release(struct device *dev)
27542 +{
27543 +       struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
27544 +
27545 +       kfree(engine);
27546 +}
27547 +
27548 +struct device_type idxd_engine_device_type = {
27549 +       .name = "engine",
27550 +       .release = idxd_conf_engine_release,
27551 +       .groups = idxd_engine_attribute_groups,
27552 +};
27553 +
27554  /* Group attributes */
27556  static void idxd_set_free_tokens(struct idxd_device *idxd)
27557 @@ -519,7 +469,7 @@ static void idxd_set_free_tokens(struct idxd_device *idxd)
27558         int i, tokens;
27560         for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
27561 -               struct idxd_group *g = &idxd->groups[i];
27562 +               struct idxd_group *g = idxd->groups[i];
27564                 tokens += g->tokens_reserved;
27565         }
27566 @@ -674,7 +624,7 @@ static ssize_t group_engines_show(struct device *dev,
27567         struct idxd_device *idxd = group->idxd;
27569         for (i = 0; i < idxd->max_engines; i++) {
27570 -               struct idxd_engine *engine = &idxd->engines[i];
27571 +               struct idxd_engine *engine = idxd->engines[i];
27573                 if (!engine->group)
27574                         continue;
27575 @@ -703,7 +653,7 @@ static ssize_t group_work_queues_show(struct device *dev,
27576         struct idxd_device *idxd = group->idxd;
27578         for (i = 0; i < idxd->max_wqs; i++) {
27579 -               struct idxd_wq *wq = &idxd->wqs[i];
27580 +               struct idxd_wq *wq = idxd->wqs[i];
27582                 if (!wq->group)
27583                         continue;
27584 @@ -824,6 +774,19 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
27585         NULL,
27586  };
27588 +static void idxd_conf_group_release(struct device *dev)
27589 +{
27590 +       struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
27591 +
27592 +       kfree(group);
27593 +}
27594 +
27595 +struct device_type idxd_group_device_type = {
27596 +       .name = "group",
27597 +       .release = idxd_conf_group_release,
27598 +       .groups = idxd_group_attribute_groups,
27599 +};
27600 +
27601  /* IDXD work queue attribs */
27602  static ssize_t wq_clients_show(struct device *dev,
27603                                struct device_attribute *attr, char *buf)
27604 @@ -896,7 +859,7 @@ static ssize_t wq_group_id_store(struct device *dev,
27605                 return count;
27606         }
27608 -       group = &idxd->groups[id];
27609 +       group = idxd->groups[id];
27610         prevg = wq->group;
27612         if (prevg)
27613 @@ -960,7 +923,7 @@ static int total_claimed_wq_size(struct idxd_device *idxd)
27614         int wq_size = 0;
27616         for (i = 0; i < idxd->max_wqs; i++) {
27617 -               struct idxd_wq *wq = &idxd->wqs[i];
27618 +               struct idxd_wq *wq = idxd->wqs[i];
27620                 wq_size += wq->size;
27621         }
27622 @@ -1206,8 +1169,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
27623                                   struct device_attribute *attr, char *buf)
27625         struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
27626 +       int minor = -1;
27628 -       return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
27629 +       mutex_lock(&wq->wq_lock);
27630 +       if (wq->idxd_cdev)
27631 +               minor = wq->idxd_cdev->minor;
27632 +       mutex_unlock(&wq->wq_lock);
27634 +       if (minor == -1)
27635 +               return -ENXIO;
27636 +       return sysfs_emit(buf, "%d\n", minor);
27639  static struct device_attribute dev_attr_wq_cdev_minor =
27640 @@ -1356,6 +1327,20 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
27641         NULL,
27642  };
27644 +static void idxd_conf_wq_release(struct device *dev)
27645 +{
27646 +       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
27647 +
27648 +       kfree(wq->wqcfg);
27649 +       kfree(wq);
27650 +}
27651 +
27652 +struct device_type idxd_wq_device_type = {
27653 +       .name = "wq",
27654 +       .release = idxd_conf_wq_release,
27655 +       .groups = idxd_wq_attribute_groups,
27656 +};
27657 +
27658  /* IDXD device attribs */
27659  static ssize_t version_show(struct device *dev, struct device_attribute *attr,
27660                             char *buf)
27661 @@ -1486,7 +1471,7 @@ static ssize_t clients_show(struct device *dev,
27663         spin_lock_irqsave(&idxd->dev_lock, flags);
27664         for (i = 0; i < idxd->max_wqs; i++) {
27665 -               struct idxd_wq *wq = &idxd->wqs[i];
27666 +               struct idxd_wq *wq = idxd->wqs[i];
27668                 count += wq->client_count;
27669         }
27670 @@ -1644,183 +1629,160 @@ static const struct attribute_group *idxd_attribute_groups[] = {
27671         NULL,
27672  };
27674 -static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
27675 +static void idxd_conf_device_release(struct device *dev)
27676  {
27677 -       struct device *dev = &idxd->pdev->dev;
27678 -       int i, rc;
27679 +       struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
27680 +
27681 +       kfree(idxd->groups);
27682 +       kfree(idxd->wqs);
27683 +       kfree(idxd->engines);
27684 +       kfree(idxd->irq_entries);
27685 +       ida_free(idxd_ida(idxd), idxd->id);
27686 +       kfree(idxd);
27687 +}
27688 +
27689 +struct device_type dsa_device_type = {
27690 +       .name = "dsa",
27691 +       .release = idxd_conf_device_release,
27692 +       .groups = idxd_attribute_groups,
27693 +};
27694 +
27695 +struct device_type iax_device_type = {
27696 +       .name = "iax",
27697 +       .release = idxd_conf_device_release,
27698 +       .groups = idxd_attribute_groups,
27699 +};
27700 +
27701 +static int idxd_register_engine_devices(struct idxd_device *idxd)
27702 +{
27703 +       int i, j, rc;
27705         for (i = 0; i < idxd->max_engines; i++) {
27706 -               struct idxd_engine *engine = &idxd->engines[i];
27708 -               engine->conf_dev.parent = &idxd->conf_dev;
27709 -               dev_set_name(&engine->conf_dev, "engine%d.%d",
27710 -                            idxd->id, engine->id);
27711 -               engine->conf_dev.bus = idxd_get_bus_type(idxd);
27712 -               engine->conf_dev.groups = idxd_engine_attribute_groups;
27713 -               engine->conf_dev.type = &idxd_engine_device_type;
27714 -               dev_dbg(dev, "Engine device register: %s\n",
27715 -                       dev_name(&engine->conf_dev));
27716 -               rc = device_register(&engine->conf_dev);
27717 -               if (rc < 0) {
27718 -                       put_device(&engine->conf_dev);
27719 +               struct idxd_engine *engine = idxd->engines[i];
27721 +               rc = device_add(&engine->conf_dev);
27722 +               if (rc < 0)
27723                         goto cleanup;
27724 -               }
27725         }
27727         return 0;
27729  cleanup:
27730 -       while (i--) {
27731 -               struct idxd_engine *engine = &idxd->engines[i];
27732 +       j = i - 1;
27733 +       for (; i < idxd->max_engines; i++)
27734 +               put_device(&idxd->engines[i]->conf_dev);
27736 -               device_unregister(&engine->conf_dev);
27737 -       }
27738 +       while (j--)
27739 +               device_unregister(&idxd->engines[j]->conf_dev);
27740         return rc;
27743 -static int idxd_setup_group_sysfs(struct idxd_device *idxd)
27744 +static int idxd_register_group_devices(struct idxd_device *idxd)
27746 -       struct device *dev = &idxd->pdev->dev;
27747 -       int i, rc;
27748 +       int i, j, rc;
27750         for (i = 0; i < idxd->max_groups; i++) {
27751 -               struct idxd_group *group = &idxd->groups[i];
27753 -               group->conf_dev.parent = &idxd->conf_dev;
27754 -               dev_set_name(&group->conf_dev, "group%d.%d",
27755 -                            idxd->id, group->id);
27756 -               group->conf_dev.bus = idxd_get_bus_type(idxd);
27757 -               group->conf_dev.groups = idxd_group_attribute_groups;
27758 -               group->conf_dev.type = &idxd_group_device_type;
27759 -               dev_dbg(dev, "Group device register: %s\n",
27760 -                       dev_name(&group->conf_dev));
27761 -               rc = device_register(&group->conf_dev);
27762 -               if (rc < 0) {
27763 -                       put_device(&group->conf_dev);
27764 +               struct idxd_group *group = idxd->groups[i];
27766 +               rc = device_add(&group->conf_dev);
27767 +               if (rc < 0)
27768                         goto cleanup;
27769 -               }
27770         }
27772         return 0;
27774  cleanup:
27775 -       while (i--) {
27776 -               struct idxd_group *group = &idxd->groups[i];
27777 +       j = i - 1;
27778 +       for (; i < idxd->max_groups; i++)
27779 +               put_device(&idxd->groups[i]->conf_dev);
27781 -               device_unregister(&group->conf_dev);
27782 -       }
27783 +       while (j--)
27784 +               device_unregister(&idxd->groups[j]->conf_dev);
27785         return rc;
27788 -static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
27789 +static int idxd_register_wq_devices(struct idxd_device *idxd)
27791 -       struct device *dev = &idxd->pdev->dev;
27792 -       int i, rc;
27793 +       int i, rc, j;
27795         for (i = 0; i < idxd->max_wqs; i++) {
27796 -               struct idxd_wq *wq = &idxd->wqs[i];
27798 -               wq->conf_dev.parent = &idxd->conf_dev;
27799 -               dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
27800 -               wq->conf_dev.bus = idxd_get_bus_type(idxd);
27801 -               wq->conf_dev.groups = idxd_wq_attribute_groups;
27802 -               wq->conf_dev.type = &idxd_wq_device_type;
27803 -               dev_dbg(dev, "WQ device register: %s\n",
27804 -                       dev_name(&wq->conf_dev));
27805 -               rc = device_register(&wq->conf_dev);
27806 -               if (rc < 0) {
27807 -                       put_device(&wq->conf_dev);
27808 +               struct idxd_wq *wq = idxd->wqs[i];
27810 +               rc = device_add(&wq->conf_dev);
27811 +               if (rc < 0)
27812                         goto cleanup;
27813 -               }
27814         }
27816         return 0;
27818  cleanup:
27819 -       while (i--) {
27820 -               struct idxd_wq *wq = &idxd->wqs[i];
27821 +       j = i - 1;
27822 +       for (; i < idxd->max_wqs; i++)
27823 +               put_device(&idxd->wqs[i]->conf_dev);
27825 -               device_unregister(&wq->conf_dev);
27826 -       }
27827 +       while (j--)
27828 +               device_unregister(&idxd->wqs[j]->conf_dev);
27829         return rc;
27832 -static int idxd_setup_device_sysfs(struct idxd_device *idxd)
27833 +int idxd_register_devices(struct idxd_device *idxd)
27835         struct device *dev = &idxd->pdev->dev;
27836 -       int rc;
27837 -       char devname[IDXD_NAME_SIZE];
27839 -       sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
27840 -       idxd->conf_dev.parent = dev;
27841 -       dev_set_name(&idxd->conf_dev, "%s", devname);
27842 -       idxd->conf_dev.bus = idxd_get_bus_type(idxd);
27843 -       idxd->conf_dev.groups = idxd_attribute_groups;
27844 -       idxd->conf_dev.type = idxd_get_device_type(idxd);
27846 -       dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
27847 -       rc = device_register(&idxd->conf_dev);
27848 -       if (rc < 0) {
27849 -               put_device(&idxd->conf_dev);
27850 -               return rc;
27851 -       }
27852 +       int rc, i;
27854 -       return 0;
27857 -int idxd_setup_sysfs(struct idxd_device *idxd)
27859 -       struct device *dev = &idxd->pdev->dev;
27860 -       int rc;
27862 -       rc = idxd_setup_device_sysfs(idxd);
27863 -       if (rc < 0) {
27864 -               dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
27865 +       rc = device_add(&idxd->conf_dev);
27866 +       if (rc < 0)
27867                 return rc;
27868 -       }
27870 -       rc = idxd_setup_wq_sysfs(idxd);
27871 +       rc = idxd_register_wq_devices(idxd);
27872         if (rc < 0) {
27873 -               /* unregister conf dev */
27874 -               dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
27875 -               return rc;
27876 +               dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
27877 +               goto err_wq;
27878         }
27880 -       rc = idxd_setup_group_sysfs(idxd);
27881 +       rc = idxd_register_engine_devices(idxd);
27882         if (rc < 0) {
27883 -               /* unregister conf dev */
27884 -               dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
27885 -               return rc;
27886 +               dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
27887 +               goto err_engine;
27888         }
27890 -       rc = idxd_setup_engine_sysfs(idxd);
27891 +       rc = idxd_register_group_devices(idxd);
27892         if (rc < 0) {
27893 -               /* unregister conf dev */
27894 -               dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
27895 -               return rc;
27896 +               dev_dbg(dev, "Group device registering failed: %d\n", rc);
27897 +               goto err_group;
27898         }
27900         return 0;
27902 + err_group:
27903 +       for (i = 0; i < idxd->max_engines; i++)
27904 +               device_unregister(&idxd->engines[i]->conf_dev);
27905 + err_engine:
27906 +       for (i = 0; i < idxd->max_wqs; i++)
27907 +               device_unregister(&idxd->wqs[i]->conf_dev);
27908 + err_wq:
27909 +       device_del(&idxd->conf_dev);
27910 +       return rc;
27913 -void idxd_cleanup_sysfs(struct idxd_device *idxd)
27914 +void idxd_unregister_devices(struct idxd_device *idxd)
27916         int i;
27918         for (i = 0; i < idxd->max_wqs; i++) {
27919 -               struct idxd_wq *wq = &idxd->wqs[i];
27920 +               struct idxd_wq *wq = idxd->wqs[i];
27922                 device_unregister(&wq->conf_dev);
27923         }
27925         for (i = 0; i < idxd->max_engines; i++) {
27926 -               struct idxd_engine *engine = &idxd->engines[i];
27927 +               struct idxd_engine *engine = idxd->engines[i];
27929                 device_unregister(&engine->conf_dev);
27930         }
27932         for (i = 0; i < idxd->max_groups; i++) {
27933 -               struct idxd_group *group = &idxd->groups[i];
27934 +               struct idxd_group *group = idxd->groups[i];
27936                 device_unregister(&group->conf_dev);
27937         }
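
The idxd hunks above all follow from one driver-model rule: a struct device embedded in a driver object must be individually allocated and may only be freed from its device_type release callback, once the last reference is dropped. That is why the arrays of embedded structs become arrays of pointers and each object type gains its own release function. A minimal sketch of the pattern, with a hypothetical struct foo standing in for the idxd objects (not part of the patch):

#include <linux/device.h>
#include <linux/slab.h>

struct foo {
	struct device conf_dev;		/* refcount of conf_dev owns struct foo */
	int id;
};

static void foo_release(struct device *dev)
{
	struct foo *foo = container_of(dev, struct foo, conf_dev);

	kfree(foo);			/* only the release callback frees */
}

static struct device_type foo_device_type = {
	.name	 = "foo",
	.release = foo_release,
};

static struct foo *foo_alloc(struct device *parent)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;
	device_initialize(&foo->conf_dev);	/* refcount = 1, not yet visible */
	foo->conf_dev.parent = parent;
	foo->conf_dev.type = &foo_device_type;
	return foo;
}

After device_add() succeeds the object is torn down with device_unregister(); if device_add() fails, or the device was never added, put_device() drops the last reference and foo_release() runs. Mixing kfree() with a one-shot device_register(), as the old sysfs code effectively did, risks freeing a device someone still holds a reference to.
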
27938 diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
27939 index aae82db542a5..76aacbac5869 100644
27940 --- a/drivers/extcon/extcon-arizona.c
27941 +++ b/drivers/extcon/extcon-arizona.c
27942 @@ -601,7 +601,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27943         struct arizona *arizona = info->arizona;
27944         int id_gpio = arizona->pdata.hpdet_id_gpio;
27945         unsigned int report = EXTCON_JACK_HEADPHONE;
27946 -       int ret, reading;
27947 +       int ret, reading, state;
27948         bool mic = false;
27950         mutex_lock(&info->lock);
27951 @@ -614,12 +614,11 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27952         }
27954         /* If the cable was removed while measuring ignore the result */
27955 -       ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
27956 -       if (ret < 0) {
27957 -               dev_err(arizona->dev, "Failed to check cable state: %d\n",
27958 -                       ret);
27959 +       state = extcon_get_state(info->edev, EXTCON_MECHANICAL);
27960 +       if (state < 0) {
27961 +               dev_err(arizona->dev, "Failed to check cable state: %d\n", state);
27962                 goto out;
27963 -       } else if (!ret) {
27964 +       } else if (!state) {
27965                 dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
27966                 goto done;
27967         }
27968 @@ -667,7 +666,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27969                 gpio_set_value_cansleep(id_gpio, 0);
27971         /* If we have a mic then reenable MICDET */
27972 -       if (mic || info->mic)
27973 +       if (state && (mic || info->mic))
27974                 arizona_start_mic(info);
27976         if (info->hpdet_active) {
27977 @@ -675,7 +674,9 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27978                 info->hpdet_active = false;
27979         }
27981 -       info->hpdet_done = true;
27982 +       /* Do not set hp_det done when the cable has been unplugged */
27983 +       if (state)
27984 +               info->hpdet_done = true;
27986  out:
27987         mutex_unlock(&info->lock);
27988 @@ -1759,25 +1760,6 @@ static int arizona_extcon_remove(struct platform_device *pdev)
27989         bool change;
27990         int ret;
27992 -       ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
27993 -                                      ARIZONA_MICD_ENA, 0,
27994 -                                      &change);
27995 -       if (ret < 0) {
27996 -               dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
27997 -                       ret);
27998 -       } else if (change) {
27999 -               regulator_disable(info->micvdd);
28000 -               pm_runtime_put(info->dev);
28001 -       }
28003 -       gpiod_put(info->micd_pol_gpio);
28005 -       pm_runtime_disable(&pdev->dev);
28007 -       regmap_update_bits(arizona->regmap,
28008 -                          ARIZONA_MICD_CLAMP_CONTROL,
28009 -                          ARIZONA_MICD_CLAMP_MODE_MASK, 0);
28011         if (info->micd_clamp) {
28012                 jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
28013                 jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
28014 @@ -1793,10 +1775,31 @@ static int arizona_extcon_remove(struct platform_device *pdev)
28015         arizona_free_irq(arizona, jack_irq_rise, info);
28016         arizona_free_irq(arizona, jack_irq_fall, info);
28017         cancel_delayed_work_sync(&info->hpdet_work);
28018 +       cancel_delayed_work_sync(&info->micd_detect_work);
28019 +       cancel_delayed_work_sync(&info->micd_timeout_work);
28021 +       ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
28022 +                                      ARIZONA_MICD_ENA, 0,
28023 +                                      &change);
28024 +       if (ret < 0) {
28025 +               dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
28026 +                       ret);
28027 +       } else if (change) {
28028 +               regulator_disable(info->micvdd);
28029 +               pm_runtime_put(info->dev);
28030 +       }
28032 +       regmap_update_bits(arizona->regmap,
28033 +                          ARIZONA_MICD_CLAMP_CONTROL,
28034 +                          ARIZONA_MICD_CLAMP_MODE_MASK, 0);
28035         regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
28036                            ARIZONA_JD1_ENA, 0);
28037         arizona_clk32k_disable(arizona);
28039 +       gpiod_put(info->micd_pol_gpio);
28041 +       pm_runtime_disable(&pdev->dev);
28043         return 0;
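
The arizona remove() reordering encodes a general teardown rule: first cut off the sources of new work (the IRQs), then drain work already queued, and only afterwards power down and release the resources that work might touch. A condensed sketch of that ordering for a hypothetical platform driver (not the Arizona code itself):

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>

struct foo_info {
	int irq;
	struct delayed_work detect_work;
	struct regulator *vdd;
	struct gpio_desc *pol_gpio;
};

static int foo_remove(struct platform_device *pdev)
{
	struct foo_info *info = platform_get_drvdata(pdev);

	free_irq(info->irq, info);			/* 1. no new events */
	cancel_delayed_work_sync(&info->detect_work);	/* 2. drain queued work */
	regulator_disable(info->vdd);			/* 3. power down */
	gpiod_put(info->pol_gpio);			/* 4. release what nothing uses now */
	pm_runtime_disable(&pdev->dev);
	return 0;
}

Before this patch the driver disabled the regulator and put the GPIO before freeing the IRQs, so micd_detect_work or micd_timeout_work could still run against half-torn-down state.
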
28046 diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
28047 index 3f14dffb9669..5dd19dbd67a3 100644
28048 --- a/drivers/firmware/Kconfig
28049 +++ b/drivers/firmware/Kconfig
28050 @@ -237,6 +237,7 @@ config INTEL_STRATIX10_RSU
28051  config QCOM_SCM
28052         bool
28053         depends on ARM || ARM64
28054 +       depends on HAVE_ARM_SMCCC
28055         select RESET_CONTROLLER
28057  config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
28058 diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
28059 index d0dee37ad522..4ceba5ef7895 100644
28060 --- a/drivers/firmware/arm_scpi.c
28061 +++ b/drivers/firmware/arm_scpi.c
28062 @@ -552,8 +552,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
28064         ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
28065                                 sizeof(le_clk_id), &rate, sizeof(rate));
28066 +       if (ret)
28067 +               return 0;
28069 -       return ret ? ret : le32_to_cpu(rate);
28070 +       return le32_to_cpu(rate);
28073  static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
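
The arm_scpi hunk fixes a signedness trap: scpi_clk_get_val() returns unsigned long, so the old "return ret ? ret : le32_to_cpu(rate);" converted a negative errno into an absurdly large clock rate. Returning 0 on error is the conventional "no rate" answer here. A standalone illustration of the conversion (plain C, hypothetical helper names):

#include <stdio.h>

static unsigned long get_rate_buggy(int err, unsigned long rate)
{
	return err ? err : rate;	/* -5 becomes 18446744073709551611 on LP64 */
}

static unsigned long get_rate_fixed(int err, unsigned long rate)
{
	if (err)
		return 0;		/* error: report no rate, not garbage */
	return rate;
}

int main(void)
{
	printf("buggy: %lu\n", get_rate_buggy(-5, 1000000));
	printf("fixed: %lu\n", get_rate_fixed(-5, 1000000));
	return 0;
}
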
28074 diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
28075 index c23466e05e60..d0537573501e 100644
28076 --- a/drivers/firmware/efi/libstub/Makefile
28077 +++ b/drivers/firmware/efi/libstub/Makefile
28078 @@ -13,7 +13,8 @@ cflags-$(CONFIG_X86)          += -m$(BITS) -D__KERNEL__ \
28079                                    -Wno-pointer-sign \
28080                                    $(call cc-disable-warning, address-of-packed-member) \
28081                                    $(call cc-disable-warning, gnu) \
28082 -                                  -fno-asynchronous-unwind-tables
28083 +                                  -fno-asynchronous-unwind-tables \
28084 +                                  $(CLANG_FLAGS)
28086  # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
28087  # disable the stackleak plugin
28088 diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
28089 index 497c13ba98d6..d111833364ba 100644
28090 --- a/drivers/firmware/qcom_scm-smc.c
28091 +++ b/drivers/firmware/qcom_scm-smc.c
28092 @@ -77,8 +77,10 @@ static void __scm_smc_do(const struct arm_smccc_args *smc,
28093         }  while (res->a0 == QCOM_SCM_V2_EBUSY);
28096 -int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
28097 -                struct qcom_scm_res *res, bool atomic)
28099 +int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
28100 +                  enum qcom_scm_convention qcom_convention,
28101 +                  struct qcom_scm_res *res, bool atomic)
28103         int arglen = desc->arginfo & 0xf;
28104         int i;
28105 @@ -87,9 +89,8 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
28106         size_t alloc_len;
28107         gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
28108         u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
28109 -       u32 qcom_smccc_convention =
28110 -                       (qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
28111 -                       ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
28112 +       u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
28113 +                                   ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
28114         struct arm_smccc_res smc_res;
28115         struct arm_smccc_args smc = {0};
28117 @@ -148,4 +149,5 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
28118         }
28120         return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
28123 diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
28124 index f57779fc7ee9..9ac84b5d6ce0 100644
28125 --- a/drivers/firmware/qcom_scm.c
28126 +++ b/drivers/firmware/qcom_scm.c
28127 @@ -113,14 +113,10 @@ static void qcom_scm_clk_disable(void)
28128         clk_disable_unprepare(__scm->bus_clk);
28131 -static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
28132 -                                       u32 cmd_id);
28133 +enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
28134 +static DEFINE_SPINLOCK(scm_query_lock);
28136 -enum qcom_scm_convention qcom_scm_convention;
28137 -static bool has_queried __read_mostly;
28138 -static DEFINE_SPINLOCK(query_lock);
28140 -static void __query_convention(void)
28141 +static enum qcom_scm_convention __get_convention(void)
28143         unsigned long flags;
28144         struct qcom_scm_desc desc = {
28145 @@ -133,36 +129,50 @@ static void __query_convention(void)
28146                 .owner = ARM_SMCCC_OWNER_SIP,
28147         };
28148         struct qcom_scm_res res;
28149 +       enum qcom_scm_convention probed_convention;
28150         int ret;
28151 +       bool forced = false;
28153 -       spin_lock_irqsave(&query_lock, flags);
28154 -       if (has_queried)
28155 -               goto out;
28156 +       if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
28157 +               return qcom_scm_convention;
28159 -       qcom_scm_convention = SMC_CONVENTION_ARM_64;
28160 -       // Device isn't required as there is only one argument - no device
28161 -       // needed to dma_map_single to secure world
28162 -       ret = scm_smc_call(NULL, &desc, &res, true);
28163 +       /*
28164 +        * Device isn't required as there is only one argument - no device
28165 +        * needed to dma_map_single to secure world
28166 +        */
28167 +       probed_convention = SMC_CONVENTION_ARM_64;
28168 +       ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
28169         if (!ret && res.result[0] == 1)
28170 -               goto out;
28171 +               goto found;
28173 +       /*
28174 +        * Some SC7180 firmwares didn't implement the
28175 +        * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
28176 +        * calling conventions on these firmwares. Luckily we don't make any
28177 +        * early calls into the firmware on these SoCs so the device pointer
28178 +        * will be valid here to check if the compatible matches.
28179 +        */
28180 +       if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
28181 +               forced = true;
28182 +               goto found;
28183 +       }
28185 -       qcom_scm_convention = SMC_CONVENTION_ARM_32;
28186 -       ret = scm_smc_call(NULL, &desc, &res, true);
28187 +       probed_convention = SMC_CONVENTION_ARM_32;
28188 +       ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
28189         if (!ret && res.result[0] == 1)
28190 -               goto out;
28192 -       qcom_scm_convention = SMC_CONVENTION_LEGACY;
28193 -out:
28194 -       has_queried = true;
28195 -       spin_unlock_irqrestore(&query_lock, flags);
28196 -       pr_info("qcom_scm: convention: %s\n",
28197 -               qcom_scm_convention_names[qcom_scm_convention]);
28199 +               goto found;
28201 +       probed_convention = SMC_CONVENTION_LEGACY;
28202 +found:
28203 +       spin_lock_irqsave(&scm_query_lock, flags);
28204 +       if (probed_convention != qcom_scm_convention) {
28205 +               qcom_scm_convention = probed_convention;
28206 +               pr_info("qcom_scm: convention: %s%s\n",
28207 +                       qcom_scm_convention_names[qcom_scm_convention],
28208 +                       forced ? " (forced)" : "");
28209 +       }
28210 +       spin_unlock_irqrestore(&scm_query_lock, flags);
28212 -static inline enum qcom_scm_convention __get_convention(void)
28214 -       if (unlikely(!has_queried))
28215 -               __query_convention();
28216         return qcom_scm_convention;
28219 @@ -219,8 +229,8 @@ static int qcom_scm_call_atomic(struct device *dev,
28220         }
28223 -static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
28224 -                                       u32 cmd_id)
28225 +static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
28226 +                                        u32 cmd_id)
28228         int ret;
28229         struct qcom_scm_desc desc = {
28230 @@ -247,7 +257,7 @@ static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
28232         ret = qcom_scm_call(dev, &desc, &res);
28234 -       return ret ? : res.result[0];
28235 +       return ret ? false : !!res.result[0];
28238  /**
28239 @@ -585,9 +595,8 @@ bool qcom_scm_pas_supported(u32 peripheral)
28240         };
28241         struct qcom_scm_res res;
28243 -       ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
28244 -                                          QCOM_SCM_PIL_PAS_IS_SUPPORTED);
28245 -       if (ret <= 0)
28246 +       if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
28247 +                                         QCOM_SCM_PIL_PAS_IS_SUPPORTED))
28248                 return false;
28250         ret = qcom_scm_call(__scm->dev, &desc, &res);
28251 @@ -1060,17 +1069,18 @@ EXPORT_SYMBOL(qcom_scm_ice_set_key);
28252   */
28253  bool qcom_scm_hdcp_available(void)
28255 +       bool avail;
28256         int ret = qcom_scm_clk_enable();
28258         if (ret)
28259                 return ret;
28261 -       ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
28262 +       avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
28263                                                 QCOM_SCM_HDCP_INVOKE);
28265         qcom_scm_clk_disable();
28267 -       return ret > 0;
28268 +       return avail;
28270  EXPORT_SYMBOL(qcom_scm_hdcp_available);
28272 @@ -1242,7 +1252,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
28273         __scm = scm;
28274         __scm->dev = &pdev->dev;
28276 -       __query_convention();
28277 +       __get_convention();
28279         /*
28280          * If requested enable "download mode", from this point on warmboot
28281 diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
28282 index 95cd1ac30ab0..632fe3142462 100644
28283 --- a/drivers/firmware/qcom_scm.h
28284 +++ b/drivers/firmware/qcom_scm.h
28285 @@ -61,8 +61,11 @@ struct qcom_scm_res {
28286  };
28288  #define SCM_SMC_FNID(s, c)     ((((s) & 0xFF) << 8) | ((c) & 0xFF))
28289 -extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
28290 -                       struct qcom_scm_res *res, bool atomic);
28291 +extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
28292 +                         enum qcom_scm_convention qcom_convention,
28293 +                         struct qcom_scm_res *res, bool atomic);
28294 +#define scm_smc_call(dev, desc, res, atomic) \
28295 +       __scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
28297  #define SCM_LEGACY_FNID(s, c)  (((s) << 10) | ((c) & 0x3ff))
28298  extern int scm_legacy_call_atomic(struct device *dev,
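
The header change shows the shape of the whole qcom_scm refactor: the worker takes the calling convention as an explicit parameter, and a macro keeps the old scm_smc_call() signature by forwarding the cached global. That lets the probe path try conventions without mutating shared state mid-probe. A minimal sketch of the pattern with hypothetical names (not the real SCM API):

enum convention { CONV_UNKNOWN, CONV_ARM_32, CONV_ARM_64 };

static enum convention cached_convention = CONV_UNKNOWN;

/* worker: uses exactly the convention it is handed */
static int __do_call(int op, enum convention conv)
{
	(void)op;
	return conv == CONV_ARM_64 ? 0 : -1;	/* stand-in for the real SMC */
}

/* normal callers go through the cached, already-probed convention */
#define do_call(op) __do_call((op), cached_convention)

static enum convention get_convention(void)
{
	if (cached_convention != CONV_UNKNOWN)
		return cached_convention;
	/* probe with an explicit argument; publish the result once */
	cached_convention = (__do_call(0, CONV_ARM_64) == 0) ?
			    CONV_ARM_64 : CONV_ARM_32;
	return cached_convention;
}

In the kernel patch the publication step is additionally guarded by scm_query_lock, so concurrent first callers agree on the logged result.
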
28299 diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
28300 index 7eb9958662dd..83082e2f2e44 100644
28301 --- a/drivers/firmware/xilinx/zynqmp.c
28302 +++ b/drivers/firmware/xilinx/zynqmp.c
28303 @@ -2,7 +2,7 @@
28304  /*
28305   * Xilinx Zynq MPSoC Firmware layer
28306   *
28307 - *  Copyright (C) 2014-2020 Xilinx, Inc.
28308 + *  Copyright (C) 2014-2021 Xilinx, Inc.
28309   *
28310   *  Michal Simek <michal.simek@xilinx.com>
28311   *  Davorin Mista <davorin.mista@aggios.com>
28312 @@ -1280,12 +1280,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
28313  static int zynqmp_firmware_remove(struct platform_device *pdev)
28315         struct pm_api_feature_data *feature_data;
28316 +       struct hlist_node *tmp;
28317         int i;
28319         mfd_remove_devices(&pdev->dev);
28320         zynqmp_pm_api_debugfs_exit();
28322 -       hash_for_each(pm_api_features_map, i, feature_data, hentry) {
28323 +       hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
28324                 hash_del(&feature_data->hentry);
28325                 kfree(feature_data);
28326         }
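
The zynqmp fix is the standard rule for kernel hashtables: freeing entries while iterating requires the _safe variant, which stashes the next pointer in a temporary before the loop body can kfree() the current node. A self-contained sketch mirroring the fixed loop (hypothetical struct, same macros):

#include <linux/hashtable.h>
#include <linux/slab.h>

struct feature_data {
	u32 api_id;
	struct hlist_node hentry;
};

static DEFINE_HASHTABLE(features_map, 4);

static void features_map_free(void)
{
	struct feature_data *fd;
	struct hlist_node *tmp;
	int bkt;

	/*
	 * hash_for_each() would read fd->hentry.next after kfree(fd);
	 * hash_for_each_safe() saved it in tmp before the body ran.
	 */
	hash_for_each_safe(features_map, bkt, tmp, fd, hentry) {
		hash_del(&fd->hentry);
		kfree(fd);
	}
}
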
28327 diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
28328 index 04e47e266f26..b44523ea8c91 100644
28329 --- a/drivers/fpga/dfl-pci.c
28330 +++ b/drivers/fpga/dfl-pci.c
28331 @@ -69,14 +69,16 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
28334  /* PCI Device ID */
28335 -#define PCIE_DEVICE_ID_PF_INT_5_X      0xBCBD
28336 -#define PCIE_DEVICE_ID_PF_INT_6_X      0xBCC0
28337 -#define PCIE_DEVICE_ID_PF_DSC_1_X      0x09C4
28338 -#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
28339 +#define PCIE_DEVICE_ID_PF_INT_5_X              0xBCBD
28340 +#define PCIE_DEVICE_ID_PF_INT_6_X              0xBCC0
28341 +#define PCIE_DEVICE_ID_PF_DSC_1_X              0x09C4
28342 +#define PCIE_DEVICE_ID_INTEL_PAC_N3000         0x0B30
28343 +#define PCIE_DEVICE_ID_INTEL_PAC_D5005         0x0B2B
28344  /* VF Device */
28345 -#define PCIE_DEVICE_ID_VF_INT_5_X      0xBCBF
28346 -#define PCIE_DEVICE_ID_VF_INT_6_X      0xBCC1
28347 -#define PCIE_DEVICE_ID_VF_DSC_1_X      0x09C5
28348 +#define PCIE_DEVICE_ID_VF_INT_5_X              0xBCBF
28349 +#define PCIE_DEVICE_ID_VF_INT_6_X              0xBCC1
28350 +#define PCIE_DEVICE_ID_VF_DSC_1_X              0x09C5
28351 +#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF      0x0B2C
28353  static struct pci_device_id cci_pcie_id_tbl[] = {
28354         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
28355 @@ -86,6 +88,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
28356         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
28357         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
28358         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
28359 +       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
28360 +       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
28361         {0,}
28362  };
28363  MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
28364 diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
28365 index 27defa98092d..fee4d0abf6bf 100644
28366 --- a/drivers/fpga/xilinx-spi.c
28367 +++ b/drivers/fpga/xilinx-spi.c
28368 @@ -233,25 +233,19 @@ static int xilinx_spi_probe(struct spi_device *spi)
28370         /* PROGRAM_B is active low */
28371         conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
28372 -       if (IS_ERR(conf->prog_b)) {
28373 -               dev_err(&spi->dev, "Failed to get PROGRAM_B gpio: %ld\n",
28374 -                       PTR_ERR(conf->prog_b));
28375 -               return PTR_ERR(conf->prog_b);
28376 -       }
28377 +       if (IS_ERR(conf->prog_b))
28378 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
28379 +                                    "Failed to get PROGRAM_B gpio\n");
28381         conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
28382 -       if (IS_ERR(conf->init_b)) {
28383 -               dev_err(&spi->dev, "Failed to get INIT_B gpio: %ld\n",
28384 -                       PTR_ERR(conf->init_b));
28385 -               return PTR_ERR(conf->init_b);
28386 -       }
28387 +       if (IS_ERR(conf->init_b))
28388 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
28389 +                                    "Failed to get INIT_B gpio\n");
28391         conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
28392 -       if (IS_ERR(conf->done)) {
28393 -               dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
28394 -                       PTR_ERR(conf->done));
28395 -               return PTR_ERR(conf->done);
28396 -       }
28397 +       if (IS_ERR(conf->done))
28398 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
28399 +                                    "Failed to get DONE gpio\n");
28401         mgr = devm_fpga_mgr_create(&spi->dev,
28402                                    "Xilinx Slave Serial FPGA Manager",
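
The xilinx-spi cleanup relies on dev_err_probe(), which folds three things into one statement: it returns the error code, logs it with the message, and for -EPROBE_DEFER stays quiet while recording the deferral reason for debugfs instead. A sketch of the resulting idiom (hypothetical "reset" GPIO, not from this driver):

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int foo_get_reset_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);

	/* one line: logs (unless deferring) and returns PTR_ERR() */
	if (IS_ERR(gpio))
		return dev_err_probe(dev, PTR_ERR(gpio),
				     "Failed to get RESET gpio\n");
	*out = gpio;
	return 0;
}

Compared with the removed dev_err()-then-return pairs, this also stops the driver from spamming the log every time the GPIO provider simply is not ready yet.
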
28403 diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
28404 index 1bd9e44df718..05974b760796 100644
28405 --- a/drivers/gpio/gpio-tegra186.c
28406 +++ b/drivers/gpio/gpio-tegra186.c
28407 @@ -444,16 +444,6 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
28408         return 0;
28411 -static int tegra186_irq_set_affinity(struct irq_data *data,
28412 -                                    const struct cpumask *dest,
28413 -                                    bool force)
28415 -       if (data->parent_data)
28416 -               return irq_chip_set_affinity_parent(data, dest, force);
28418 -       return -EINVAL;
28421  static void tegra186_gpio_irq(struct irq_desc *desc)
28423         struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
28424 @@ -700,7 +690,6 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
28425         gpio->intc.irq_unmask = tegra186_irq_unmask;
28426         gpio->intc.irq_set_type = tegra186_irq_set_type;
28427         gpio->intc.irq_set_wake = tegra186_irq_set_wake;
28428 -       gpio->intc.irq_set_affinity = tegra186_irq_set_affinity;
28430         irq = &gpio->gpio.irq;
28431         irq->chip = &gpio->intc;
28432 diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
28433 index 1aacd2a5a1fd..174839f3772f 100644
28434 --- a/drivers/gpio/gpiolib-acpi.c
28435 +++ b/drivers/gpio/gpiolib-acpi.c
28436 @@ -1438,6 +1438,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
28437                         .no_edge_events_on_boot = true,
28438                 },
28439         },
28440 +       {
28441 +               /*
28442 +                * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
28443 +                * external embedded-controller connected via I2C + an ACPI GPIO
28444 +                * event handler on INT33FFC:02 pin 12, causing spurious wakeups.
28445 +                */
28446 +               .matches = {
28447 +                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
28448 +                       DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
28449 +               },
28450 +               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
28451 +                       .ignore_wake = "INT33FC:02@12",
28452 +               },
28453 +       },
28454         {
28455                 /*
28456                  * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
28457 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
28458 index 8a5a8ff5d362..5eee251e3335 100644
28459 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
28460 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
28461 @@ -3613,6 +3613,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
28463         dev_info(adev->dev, "amdgpu: finishing device.\n");
28464         flush_delayed_work(&adev->delayed_init_work);
28465 +       ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
28466         adev->shutdown = true;
28468         kfree(adev->pci_state);
28469 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
28470 index f753e04fee99..a2ac44cc2a6d 100644
28471 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
28472 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
28473 @@ -1355,7 +1355,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
28474                         }
28475                 }
28476         }
28477 -       return r;
28478 +       return 0;
28481  int amdgpu_display_resume_helper(struct amdgpu_device *adev)
28482 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
28483 index d56f4023ebb3..7e8e46c39dbd 100644
28484 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
28485 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
28486 @@ -533,6 +533,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
28488                 if (!ring || !ring->fence_drv.initialized)
28489                         continue;
28490 +               if (!ring->no_scheduler)
28491 +                       drm_sched_fini(&ring->sched);
28492                 r = amdgpu_fence_wait_empty(ring);
28493                 if (r) {
28494                         /* no need to trigger GPU reset as we are unloading */
28495 @@ -541,8 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
28496                 if (ring->fence_drv.irq_src)
28497                         amdgpu_irq_put(adev, ring->fence_drv.irq_src,
28498                                        ring->fence_drv.irq_type);
28499 -               if (!ring->no_scheduler)
28500 -                       drm_sched_fini(&ring->sched);
28502                 del_timer_sync(&ring->fence_drv.fallback_timer);
28503                 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
28504                         dma_fence_put(ring->fence_drv.fences[j]);
28505 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
28506 index 7645223ea0ef..97c11aa47ad0 100644
28507 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
28508 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
28509 @@ -77,6 +77,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
28510                 }
28512                 ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
28513 +               /* flush the cache before commit the IB */
28514 +               ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
28516                 if (!vm)
28517                         ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
28518 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
28519 index 94b069630db3..b4971e90b98c 100644
28520 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
28521 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
28522 @@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
28523         /* Check if we have an idle VMID */
28524         i = 0;
28525         list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
28526 -               fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
28527 +               /* Don't use per engine and per process VMID at the same time */
28528 +               struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
28529 +                       NULL : ring;
28531 +               fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
28532                 if (!fences[i])
28533                         break;
28534                 ++i;
28535 @@ -281,7 +285,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
28536         if (updates && (*id)->flushed_updates &&
28537             updates->context == (*id)->flushed_updates->context &&
28538             !dma_fence_is_later(updates, (*id)->flushed_updates))
28539 -           updates = NULL;
28540 +               updates = NULL;
28542         if ((*id)->owner != vm->immediate.fence_context ||
28543             job->vm_pd_addr != (*id)->pd_gpu_addr ||
28544 @@ -290,6 +294,10 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
28545              !dma_fence_is_signaled((*id)->last_flush))) {
28546                 struct dma_fence *tmp;
28548 +               /* Don't use per engine and per process VMID at the same time */
28549 +               if (adev->vm_manager.concurrent_flush)
28550 +                       ring = NULL;
28552                 /* to prevent one context starved by another context */
28553                 (*id)->pd_gpu_addr = 0;
28554                 tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
28555 @@ -365,12 +373,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
28556                 if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
28557                         needs_flush = true;
28559 -               /* Concurrent flushes are only possible starting with Vega10 and
28560 -                * are broken on Navi10 and Navi14.
28561 -                */
28562 -               if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
28563 -                                   adev->asic_type == CHIP_NAVI10 ||
28564 -                                   adev->asic_type == CHIP_NAVI14))
28565 +               if (needs_flush && !adev->vm_manager.concurrent_flush)
28566                         continue;
28568                 /* Good, we can use this VMID. Remember this submission as
28569 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
28570 index afbbec82a289..9be945d8e72f 100644
28571 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
28572 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
28573 @@ -535,7 +535,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
28574                 for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
28575                         struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
28577 -                       if (!src)
28578 +                       if (!src || !src->funcs || !src->funcs->set)
28579                                 continue;
28580                         for (k = 0; k < src->num_types; k++)
28581                                 amdgpu_irq_update(adev, src, k);
28582 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
28583 index 19c0a3655228..82e9ecf84352 100644
28584 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
28585 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
28586 @@ -519,8 +519,10 @@ static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
28587         pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
28588                                                                 GFP_KERNEL);
28590 -       if (!pmu_entry->pmu.attr_groups)
28591 +       if (!pmu_entry->pmu.attr_groups) {
28592 +               ret = -ENOMEM;
28593                 goto err_attr_group;
28594 +       }
28596         snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
28597                                 adev_to_drm(pmu_entry->adev)->primary->index);
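
The amdgpu_pmu hunk fixes a stale-return-code bug: the kmemdup() failure path jumped to the error label with ret still holding a success value from an earlier call, so the caller saw success for an allocation failure. Every goto into an unwind path must set the error code first. A minimal illustration (plain C, hypothetical steps):

#include <errno.h>
#include <stdlib.h>

static int setup(void)
{
	int ret;
	void *a, *b;

	a = malloc(32);
	if (!a)
		return -ENOMEM;

	ret = 0;		/* earlier step "succeeded" */
	b = malloc(64);
	if (!b) {
		ret = -ENOMEM;	/* without this line we goto with ret == 0 */
		goto err_free_a;
	}
	free(b);
	free(a);
	return 0;

err_free_a:
	free(a);
	return ret;		/* caller now sees a real error, not success */
}

int main(void) { return setup() ? 1 : 0; }
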
28598 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
28599 index 5efa331e3ee8..6b14626c148e 100644
28600 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
28601 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
28602 @@ -267,7 +267,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
28603         *addr += offset & ~PAGE_MASK;
28605         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
28606 -       num_bytes = num_pages * 8;
28607 +       num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
28609         r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
28610                                      AMDGPU_IB_POOL_DELAYED, &job);
28611 @@ -942,7 +942,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
28612                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
28614         /* double check that we don't free the table twice */
28615 -       if (!ttm->sg->sgl)
28616 +       if (!ttm->sg || !ttm->sg->sgl)
28617                 return;
28619         /* unmap the pages mapped to the device */
28620 @@ -1162,13 +1162,13 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
28621         struct amdgpu_ttm_tt *gtt = (void *)ttm;
28622         int r;
28624 -       if (!gtt->bound)
28625 -               return;
28627         /* if the pages have userptr pinning then clear that first */
28628         if (gtt->userptr)
28629                 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
28631 +       if (!gtt->bound)
28632 +               return;
28634         if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
28635                 return;
28637 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
28638 index e2ed4689118a..c6dbc0801604 100644
28639 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
28640 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
28641 @@ -259,7 +259,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
28642                 if ((adev->asic_type == CHIP_POLARIS10 ||
28643                      adev->asic_type == CHIP_POLARIS11) &&
28644                     (adev->uvd.fw_version < FW_1_66_16))
28645 -                       DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
28646 +                       DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
28647                                   version_major, version_minor);
28648         } else {
28649                 unsigned int enc_major, enc_minor, dec_minor;
28650 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
28651 index 326dae31b675..a566bbe26bdd 100644
28652 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
28653 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
28654 @@ -92,13 +92,13 @@ struct amdgpu_prt_cb {
28655  static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
28657         mutex_lock(&vm->eviction_lock);
28658 -       vm->saved_flags = memalloc_nofs_save();
28659 +       vm->saved_flags = memalloc_noreclaim_save();
28662  static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
28664         if (mutex_trylock(&vm->eviction_lock)) {
28665 -               vm->saved_flags = memalloc_nofs_save();
28666 +               vm->saved_flags = memalloc_noreclaim_save();
28667                 return 1;
28668         }
28669         return 0;
28670 @@ -106,7 +106,7 @@ static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
28672  static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
28674 -       memalloc_nofs_restore(vm->saved_flags);
28675 +       memalloc_noreclaim_restore(vm->saved_flags);
28676         mutex_unlock(&vm->eviction_lock);
28679 @@ -3147,6 +3147,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
28681         unsigned i;
28683 +       /* Concurrent flushes are only possible starting with Vega10 and
28684 +        * are broken on Navi10 and Navi14.
28685 +        */
28686 +       adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
28687 +                                             adev->asic_type == CHIP_NAVI10 ||
28688 +                                             adev->asic_type == CHIP_NAVI14);
28689         amdgpu_vmid_mgr_init(adev);
28691         adev->vm_manager.fence_context =
28692 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
28693 index 976a12e5a8b9..4e140288159c 100644
28694 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
28695 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
28696 @@ -331,6 +331,7 @@ struct amdgpu_vm_manager {
28697         /* Handling of VMIDs */
28698         struct amdgpu_vmid_mgr                  id_mgr[AMDGPU_MAX_VMHUBS];
28699         unsigned int                            first_kfd_vmid;
28700 +       bool                                    concurrent_flush;
28702         /* Handling of VM fences */
28703         u64                                     fence_context;
28704 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
28705 index 659b385b27b5..4d3a24fdeb9c 100644
28706 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
28707 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
28708 @@ -468,15 +468,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
28713 + * NOTE psp_xgmi_node_info.num_hops layout is as follows:
28714 + * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
28715 + * num_hops[5:3] = reserved
28716 + * num_hops[2:0] = number of hops
28717 + */
28718  int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
28719                 struct amdgpu_device *peer_adev)
28721         struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
28722 +       uint8_t num_hops_mask = 0x7;
28723         int i;
28725         for (i = 0 ; i < top->num_nodes; ++i)
28726                 if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
28727 -                       return top->nodes[i].num_hops;
28728 +                       return top->nodes[i].num_hops & num_hops_mask;
28729         return  -EINVAL;
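
The xgmi mask matters because num_hops is no longer a plain count: per the layout note added above, bits [7:6] carry the link type and only bits [2:0] are hops, so returning the raw byte would wildly overstate the distance on xGMI3 links. A small decoder for that layout (standalone C; the example value is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define XGMI_NUM_HOPS_MASK	0x07	/* num_hops[2:0] */
#define XGMI_LINK_TYPE_MASK	0xc0	/* num_hops[7:6] */

int main(void)
{
	uint8_t num_hops = 0x42;	/* link type 1 (xGMI3), 2 hops */

	printf("hops: %u\n", num_hops & XGMI_NUM_HOPS_MASK);
	printf("link type: %u\n", (num_hops & XGMI_LINK_TYPE_MASK) >> 6);
	return 0;
}
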
28732 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
28733 index 63691deb7df3..2342c5d216f9 100644
28734 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
28735 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
28736 @@ -1391,9 +1391,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
28737         SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
28738         SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
28739         SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
28740 -       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
28741 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
28742         SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
28743         SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
28744 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
28745         SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
28746         SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
28747         SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
28748 @@ -1411,12 +1412,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
28749         SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
28750         SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
28751         SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
28752 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
28753         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
28754         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
28755         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
28756         SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
28757         SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
28758 -       SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
28759 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
28760  };
28762  static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
28763 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
28764 index 65db88bb6cbc..d2c020a91c0b 100644
28765 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
28766 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
28767 @@ -4864,7 +4864,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
28768         amdgpu_gfx_rlc_enter_safe_mode(adev);
28770         /* Enable 3D CGCG/CGLS */
28771 -       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
28772 +       if (enable) {
28773                 /* write cmd to clear cgcg/cgls ov */
28774                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
28775                 /* unset CGCG override */
28776 @@ -4876,8 +4876,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
28777                 /* enable 3Dcgcg FSM(0x0000363f) */
28778                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
28780 -               data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
28781 -                       RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
28782 +               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
28783 +                       data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
28784 +                               RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
28785 +               else
28786 +                       data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
28788                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
28789                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
28790                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
28791 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
28792 index 2d832fc23119..421d6069c509 100644
28793 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
28794 +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
28795 @@ -59,6 +59,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
28796  MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
28797  MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
28798  MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
28799 +MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
28800  MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
28801  MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
28802  MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
28803 @@ -243,10 +244,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
28804                         chip_name = "polaris10";
28805                 break;
28806         case CHIP_POLARIS12:
28807 -               if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
28808 +               if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
28809                         chip_name = "polaris12_k";
28810 -               else
28811 -                       chip_name = "polaris12";
28812 +               } else {
28813 +                       WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
28814 +                       /* Polaris12 32bit ASIC needs a special MC firmware */
28815 +                       if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
28816 +                               chip_name = "polaris12_32";
28817 +                       else
28818 +                               chip_name = "polaris12";
28819 +               }
28820                 break;
28821         case CHIP_FIJI:
28822         case CHIP_CARRIZO:
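
The CHIP_POLARIS12 hunk above keys the MC firmware choice off a strap probed through an index/data register pair. A stand-alone C sketch of that access pattern; write_reg()/read_reg() and is_polaris12_32bit() are hypothetical stand-ins for the driver's MMIO helpers, and only the 0x05b4dc40 strap value comes from the hunk:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical MMIO helpers standing in for WREG32()/RREG32(). */
extern void write_reg(uint32_t offset, uint32_t value);
extern uint32_t read_reg(uint32_t offset);

/* Index/data pair: latch an index, then read the selected datum. */
static bool is_polaris12_32bit(uint32_t index_reg, uint32_t data_reg,
                               uint32_t debug_index)
{
        write_reg(index_reg, debug_index);
        return read_reg(data_reg) == 0x05b4dc40; /* strap of the 32-bit ASIC */
}
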
28823 diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
28824 index d345e324837d..2a27fe26232b 100644
28825 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
28826 +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
28827 @@ -123,6 +123,10 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
28829  static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
28830         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
28831 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
28832 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
28833 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
28834 +       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
28835         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
28836  };
28838 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
28839 index 1221aa6b40a9..d1045a9b37d9 100644
28840 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c
28841 +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
28842 @@ -1151,7 +1151,6 @@ static int soc15_common_early_init(void *handle)
28843                         adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
28844                                 AMD_CG_SUPPORT_GFX_MGLS |
28845                                 AMD_CG_SUPPORT_GFX_CP_LS |
28846 -                               AMD_CG_SUPPORT_GFX_3D_CGCG |
28847                                 AMD_CG_SUPPORT_GFX_3D_CGLS |
28848                                 AMD_CG_SUPPORT_GFX_CGCG |
28849                                 AMD_CG_SUPPORT_GFX_CGLS |
28850 @@ -1170,7 +1169,6 @@ static int soc15_common_early_init(void *handle)
28851                                 AMD_CG_SUPPORT_GFX_MGLS |
28852                                 AMD_CG_SUPPORT_GFX_RLC_LS |
28853                                 AMD_CG_SUPPORT_GFX_CP_LS |
28854 -                               AMD_CG_SUPPORT_GFX_3D_CGCG |
28855                                 AMD_CG_SUPPORT_GFX_3D_CGLS |
28856                                 AMD_CG_SUPPORT_GFX_CGCG |
28857                                 AMD_CG_SUPPORT_GFX_CGLS |
28858 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
28859 index def583916294..9b844e9fb16f 100644
28860 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
28861 +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
28862 @@ -584,6 +584,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
28863         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
28864                         VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
28865                         AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
28867 +       /* VCN global tiling registers */
28868 +       WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
28869 +               UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
28870  }
28872  static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
28873 diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
28874 index 88626d83e07b..ca8efa5c6978 100644
28875 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
28876 +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
28877 @@ -220,10 +220,8 @@ static int vega10_ih_enable_ring(struct amdgpu_device *adev,
28878         tmp = vega10_ih_rb_cntl(ih, tmp);
28879         if (ih == &adev->irq.ih)
28880                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
28881 -       if (ih == &adev->irq.ih1) {
28882 -               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
28883 +       if (ih == &adev->irq.ih1)
28884                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
28885 -       }
28886         if (amdgpu_sriov_vf(adev)) {
28887                 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
28888                         dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
28889 @@ -265,7 +263,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
28890         u32 ih_chicken;
28891         int ret;
28892         int i;
28893 -       u32 tmp;
28895         /* disable irqs */
28896         ret = vega10_ih_toggle_interrupts(adev, false);
28897 @@ -291,15 +288,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
28898                 }
28899         }
28901 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
28902 -       tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
28903 -                           CLIENT18_IS_STORM_CLIENT, 1);
28904 -       WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
28906 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
28907 -       tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
28908 -       WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
28910         pci_set_master(adev->pdev);
28912         /* enable interrupts */
28913 @@ -345,11 +333,17 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
28914         u32 wptr, tmp;
28915         struct amdgpu_ih_regs *ih_regs;
28917 -       wptr = le32_to_cpu(*ih->wptr_cpu);
28918 -       ih_regs = &ih->ih_regs;
28919 +       if (ih == &adev->irq.ih) {
28920 +               /* Only ring0 supports writeback. On other rings fall back
28921 +                * to register-based code with overflow checking below.
28922 +                */
28923 +               wptr = le32_to_cpu(*ih->wptr_cpu);
28925 -       if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
28926 -               goto out;
28927 +               if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
28928 +                       goto out;
28929 +       }
28931 +       ih_regs = &ih->ih_regs;
28933         /* Double check that the overflow wasn't already cleared. */
28934         wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
28935 @@ -440,15 +434,11 @@ static int vega10_ih_self_irq(struct amdgpu_device *adev,
28936                               struct amdgpu_irq_src *source,
28937                               struct amdgpu_iv_entry *entry)
28938  {
28939 -       uint32_t wptr = cpu_to_le32(entry->src_data[0]);
28941         switch (entry->ring_id) {
28942         case 1:
28943 -               *adev->irq.ih1.wptr_cpu = wptr;
28944                 schedule_work(&adev->irq.ih1_work);
28945                 break;
28946         case 2:
28947 -               *adev->irq.ih2.wptr_cpu = wptr;
28948                 schedule_work(&adev->irq.ih2_work);
28949                 break;
28950         default: break;
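
The vega10_ih_get_wptr() hunk above (mirrored for Vega20 further down) limits the write-back fast path to ring0 and lets ih1/ih2 always take the register read. A self-contained sketch of the resulting shape; the types, the read_reg() helper and the RB_OVERFLOW bit position are assumptions for illustration:

#include <stdbool.h>
#include <stdint.h>

#define RB_OVERFLOW (1u << 31)            /* assumed bit position for this sketch */

struct ih_ring {
        volatile uint32_t *wptr_cpu;      /* DMA write-back slot (ring0 only) */
        uint32_t wptr_reg;                /* MMIO offset of the WPTR register */
};

extern uint32_t read_reg(uint32_t offset);       /* hypothetical MMIO helper */

static uint32_t get_wptr(const struct ih_ring *ih, bool is_ring0)
{
        uint32_t wptr;

        if (is_ring0) {
                /* Fast path: trust the coherent write-back copy unless it
                 * flags an overflow. */
                wptr = *ih->wptr_cpu;
                if (!(wptr & RB_OVERFLOW))
                        return wptr;
        }
        /* ih1/ih2, and ring0 on overflow, read the register directly. */
        return read_reg(ih->wptr_reg);
}
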
28951 diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
28952 index 5a3c867d5881..86dcf448e0c2 100644
28953 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
28954 +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
28955 @@ -104,6 +104,8 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
28957         tmp = RREG32(ih_regs->ih_rb_cntl);
28958         tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
28959 +       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
28961         /* enable_intr field is only valid in ring0 */
28962         if (ih == &adev->irq.ih)
28963                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
28964 @@ -220,10 +222,8 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
28965         tmp = vega20_ih_rb_cntl(ih, tmp);
28966         if (ih == &adev->irq.ih)
28967                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
28968 -       if (ih == &adev->irq.ih1) {
28969 -               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
28970 +       if (ih == &adev->irq.ih1)
28971                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
28972 -       }
28973         if (amdgpu_sriov_vf(adev)) {
28974                 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
28975                         dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
28976 @@ -297,7 +297,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
28977         u32 ih_chicken;
28978         int ret;
28979         int i;
28980 -       u32 tmp;
28982         /* disable irqs */
28983         ret = vega20_ih_toggle_interrupts(adev, false);
28984 @@ -326,15 +325,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
28985                 }
28986         }
28988 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
28989 -       tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
28990 -                           CLIENT18_IS_STORM_CLIENT, 1);
28991 -       WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
28993 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
28994 -       tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
28995 -       WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
28997         pci_set_master(adev->pdev);
28999         /* enable interrupts */
29000 @@ -380,11 +370,17 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
29001         u32 wptr, tmp;
29002         struct amdgpu_ih_regs *ih_regs;
29004 -       wptr = le32_to_cpu(*ih->wptr_cpu);
29005 -       ih_regs = &ih->ih_regs;
29006 +       if (ih == &adev->irq.ih) {
29007 +               /* Only ring0 supports writeback. On other rings fall back
29008 +                * to register-based code with overflow checking below.
29009 +                */
29010 +               wptr = le32_to_cpu(*ih->wptr_cpu);
29012 -       if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
29013 -               goto out;
29014 +               if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
29015 +                       goto out;
29016 +       }
29018 +       ih_regs = &ih->ih_regs;
29020         /* Double check that the overflow wasn't already cleared. */
29021         wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
29022 @@ -476,15 +472,11 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
29023                               struct amdgpu_irq_src *source,
29024                               struct amdgpu_iv_entry *entry)
29025  {
29026 -       uint32_t wptr = cpu_to_le32(entry->src_data[0]);
29028         switch (entry->ring_id) {
29029         case 1:
29030 -               *adev->irq.ih1.wptr_cpu = wptr;
29031                 schedule_work(&adev->irq.ih1_work);
29032                 break;
29033         case 2:
29034 -               *adev->irq.ih2.wptr_cpu = wptr;
29035                 schedule_work(&adev->irq.ih2_work);
29036                 break;
29037         default: break;
29038 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
29039 index 511712c2e382..673d5e34f213 100644
29040 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
29041 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
29042 @@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
29043  {
29044         return single_open(file, show, NULL);
29045  }
29046 +static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
29047 +{
29048 +       seq_printf(m, "echo gpu_id > hang_hws\n");
29049 +       return 0;
29050 +}
29052  static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
29053         const char __user *user_buf, size_t size, loff_t *ppos)
29054 @@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
29055         debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
29056                             kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
29057         debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
29058 -                           NULL, &kfd_debugfs_hang_hws_fops);
29059 +                           kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
29060  }
29062  void kfd_debugfs_fini(void)
29063 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
29064 index 4598a9a58125..a4266c4bca13 100644
29065 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
29066 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
29067 @@ -1128,6 +1128,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
29069  static int initialize_cpsch(struct device_queue_manager *dqm)
29070  {
29071 +       uint64_t num_sdma_queues;
29072 +       uint64_t num_xgmi_sdma_queues;
29074         pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
29076         mutex_init(&dqm->lock_hidden);
29077 @@ -1136,8 +1139,18 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
29078         dqm->active_cp_queue_count = 0;
29079         dqm->gws_queue_count = 0;
29080         dqm->active_runlist = false;
29081 -       dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
29082 -       dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
29084 +       num_sdma_queues = get_num_sdma_queues(dqm);
29085 +       if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
29086 +               dqm->sdma_bitmap = ULLONG_MAX;
29087 +       else
29088 +               dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
29090 +       num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
29091 +       if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
29092 +               dqm->xgmi_sdma_bitmap = ULLONG_MAX;
29093 +       else
29094 +               dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
29096         INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
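
The initialize_cpsch() hunk above replaces ~0ULL >> (64 - n), which is undefined when n is 0 or larger than 64, with an explicit split on the bitmap width. A user-space sketch of the idiom; low_bits_mask() is a hypothetical name:

#include <stdint.h>

/* Set the lowest n bits of a 64-bit mask without ever shifting a 64-bit
 * value by 64 or more, which C leaves undefined; mirrors the
 * BIT_ULL(n) - 1 / ULLONG_MAX split in the hunk. */
static uint64_t low_bits_mask(unsigned int n)
{
        if (n >= 64)
                return UINT64_MAX;          /* every queue available */
        return (UINT64_C(1) << n) - 1;      /* n == 0 yields an empty mask */
}
/* low_bits_mask(8) == 0xff; low_bits_mask(64) == ~0ULL; the old
 * expression was undefined for n == 0. */
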
29098 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
29099 index 66bbca61e3ef..9318936aa805 100644
29100 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
29101 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
29102 @@ -20,6 +20,10 @@
29103   * OTHER DEALINGS IN THE SOFTWARE.
29104   */
29106 +#include <linux/kconfig.h>
29108 +#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
29110  #include <linux/printk.h>
29111  #include <linux/device.h>
29112  #include <linux/slab.h>
29113 @@ -355,3 +359,5 @@ int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
29115         return 0;
29116  }
29118 +#endif
29119 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
29120 index dd23d9fdf6a8..afd420b01a0c 100644
29121 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
29122 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
29123 @@ -23,7 +23,9 @@
29124  #ifndef __KFD_IOMMU_H__
29125  #define __KFD_IOMMU_H__
29127 -#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
29128 +#include <linux/kconfig.h>
29130 +#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
29132  #define KFD_SUPPORT_IOMMU_V2
29134 @@ -46,6 +48,9 @@ static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
29136  static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
29137  {
29138 +#if IS_MODULE(CONFIG_AMD_IOMMU_V2)
29139 +       WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD");
29140 +#endif
29141         return 0;
29142  }
29144 @@ -73,6 +78,6 @@ static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
29145         return 0;
29146  }
29148 -#endif /* defined(CONFIG_AMD_IOMMU_V2) */
29149 +#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */
29151  #endif /* __KFD_IOMMU_H__ */
29152 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
29153 index d699a5cf6c11..b63f55ea8758 100644
29154 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
29155 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
29156 @@ -1191,6 +1191,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
29157         if (adev->dm.dc)
29158                 dc_deinit_callbacks(adev->dm.dc);
29159  #endif
29161 +#if defined(CONFIG_DRM_AMD_DC_DCN)
29162 +       if (adev->dm.vblank_workqueue) {
29163 +               adev->dm.vblank_workqueue->dm = NULL;
29164 +               kfree(adev->dm.vblank_workqueue);
29165 +               adev->dm.vblank_workqueue = NULL;
29166 +       }
29167 +#endif
29169         if (adev->dm.dc->ctx->dmub_srv) {
29170                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
29171                 adev->dm.dc->ctx->dmub_srv = NULL;
29172 @@ -3841,6 +3850,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
29173         scaling_info->src_rect.x = state->src_x >> 16;
29174         scaling_info->src_rect.y = state->src_y >> 16;
29176 +       /*
29177 +        * For reasons we don't (yet) fully understand a non-zero
29178 +        * src_y coordinate into an NV12 buffer can cause a
29179 +        * system hang. To avoid hangs (and maybe be overly cautious)
29180 +        * let's reject both non-zero src_x and src_y.
29181 +        *
29182 +        * We currently know of only one use-case to reproduce a
29183 +        * scenario with non-zero src_x and src_y for NV12, which
29184 +        * is to gesture the YouTube Android app into full screen
29185 +        * on ChromeOS.
29186 +        */
29187 +       if (state->fb &&
29188 +           state->fb->format->format == DRM_FORMAT_NV12 &&
29189 +           (scaling_info->src_rect.x != 0 ||
29190 +            scaling_info->src_rect.y != 0))
29191 +               return -EINVAL;
29193         scaling_info->src_rect.width = state->src_w >> 16;
29194         if (scaling_info->src_rect.width == 0)
29195                 return -EINVAL;
29196 @@ -5863,6 +5889,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
29198         } while (stream == NULL && requested_bpc >= 6);
29200 +       if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
29201 +               DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
29203 +               aconnector->force_yuv420_output = true;
29204 +               stream = create_validate_stream_for_sink(aconnector, drm_mode,
29205 +                                               dm_state, old_stream);
29206 +               aconnector->force_yuv420_output = false;
29207 +       }
29209 +       return stream;
29210  }
29212 @@ -7417,10 +7452,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
29213         int x, y;
29214         int xorigin = 0, yorigin = 0;
29216 -       position->enable = false;
29217 -       position->x = 0;
29218 -       position->y = 0;
29220         if (!crtc || !plane->state->fb)
29221                 return 0;
29223 @@ -7467,7 +7498,7 @@ static void handle_cursor_update(struct drm_plane *plane,
29224         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
29225         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
29226         uint64_t address = afb ? afb->address : 0;
29227 -       struct dc_cursor_position position;
29228 +       struct dc_cursor_position position = {0};
29229         struct dc_cursor_attributes attributes;
29230         int ret;
29232 @@ -9264,7 +9295,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
29234         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
29235         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
29236 -       if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
29237 +       if (!new_cursor_state || !new_primary_state ||
29238 +           !new_cursor_state->fb || !new_primary_state->fb) {
29239                 return 0;
29240         }
29242 @@ -9312,6 +9344,53 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
29244  #endif
29246 +static int validate_overlay(struct drm_atomic_state *state)
29247 +{
29248 +       int i;
29249 +       struct drm_plane *plane;
29250 +       struct drm_plane_state *old_plane_state, *new_plane_state;
29251 +       struct drm_plane_state *primary_state, *overlay_state = NULL;
29253 +       /* Check if primary plane is contained inside overlay */
29254 +       for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
29255 +               if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
29256 +                       if (drm_atomic_plane_disabling(plane->state, new_plane_state))
29257 +                               return 0;
29259 +                       overlay_state = new_plane_state;
29260 +                       continue;
29261 +               }
29262 +       }
29264 +       /* check if we're making changes to the overlay plane */
29265 +       if (!overlay_state)
29266 +               return 0;
29268 +       /* check if overlay plane is enabled */
29269 +       if (!overlay_state->crtc)
29270 +               return 0;
29272 +       /* find the primary plane for the CRTC that the overlay is enabled on */
29273 +       primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
29274 +       if (IS_ERR(primary_state))
29275 +               return PTR_ERR(primary_state);
29277 +       /* check if primary plane is enabled */
29278 +       if (!primary_state->crtc)
29279 +               return 0;
29281 +       /* Perform the bounds check to ensure the overlay plane covers the primary */
29282 +       if (primary_state->crtc_x < overlay_state->crtc_x ||
29283 +           primary_state->crtc_y < overlay_state->crtc_y ||
29284 +           primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
29285 +           primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
29286 +               DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
29287 +               return -EINVAL;
29288 +       }
29290 +       return 0;
29291 +}
29293  /**
29294   * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
29295   * @dev: The DRM device
29296 @@ -9383,7 +9462,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
29297         }
29299  #if defined(CONFIG_DRM_AMD_DC_DCN)
29300 -       if (adev->asic_type >= CHIP_NAVI10) {
29301 +       if (dc_resource_is_dsc_encoding_supported(dc)) {
29302                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
29303                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
29304                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
29305 @@ -9486,6 +9565,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
29306                         goto fail;
29307         }
29309 +       ret = validate_overlay(state);
29310 +       if (ret)
29311 +               goto fail;
29313         /* Add new/modified planes */
29314         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
29315                 ret = dm_update_plane_state(dc, state, plane,
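
The validate_overlay() function added above reduces to a rectangle-containment test between the primary and overlay planes. A plain-C sketch of that check; struct rect and rect_inside() are hypothetical names:

#include <stdbool.h>

struct rect { int x, y, w, h; };

/* The containment test validate_overlay() performs: the primary plane's
 * rectangle must lie fully inside the overlay's rectangle. */
static bool rect_inside(struct rect inner, struct rect outer)
{
        return inner.x >= outer.x &&
               inner.y >= outer.y &&
               inner.x + inner.w <= outer.x + outer.w &&
               inner.y + inner.h <= outer.y + outer.h;
}
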
29316 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
29317 index 8bfe901cf237..52cc81705280 100644
29318 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
29319 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
29320 @@ -68,18 +68,6 @@ struct common_irq_params {
29321         enum dc_irq_source irq_src;
29322  };
29324 -/**
29325 - * struct irq_list_head - Linked-list for low context IRQ handlers.
29326 - *
29327 - * @head: The list_head within &struct handler_data
29328 - * @work: A work_struct containing the deferred handler work
29329 - */
29330 -struct irq_list_head {
29331 -       struct list_head head;
29332 -       /* In case this interrupt needs post-processing, 'work' will be queued*/
29333 -       struct work_struct work;
29336  /**
29337   * struct dm_compressor_info - Buffer info used by frame buffer compression
29338   * @cpu_addr: MMIO cpu addr
29339 @@ -293,7 +281,7 @@ struct amdgpu_display_manager {
29340          * Note that handlers are called in the same order as they were
29341          * registered (FIFO).
29342          */
29343 -       struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
29344 +       struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
29346         /**
29347          * @irq_handler_list_high_tab:
29348 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
29349 index 360952129b6d..29139b34dbe2 100644
29350 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
29351 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
29352 @@ -150,7 +150,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
29353   *
29354   * --- to get dp configuration
29355   *
29356 - * cat link_settings
29357 + * cat /sys/kernel/debug/dri/0/DP-x/link_settings
29358   *
29359   * It will list current, verified, reported, preferred dp configuration.
29360   * current -- for current video mode
29361 @@ -163,7 +163,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
29362   * echo <lane_count>  <link_rate> > link_settings
29363   *
29364   * for example, to force 4 lanes at 2.7 Gbps,
29365 - * echo 4 0xa > link_settings
29366 + * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
29367   *
29368   * spread_spectrum could not be changed dynamically.
29369   *
29370 @@ -171,7 +171,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
29371   * done. please check link settings after force operation to see if HW get
29372   * programming.
29373   *
29374 - * cat link_settings
29375 + * cat /sys/kernel/debug/dri/0/DP-x/link_settings
29376   *
29377   * check current and preferred settings.
29378   *
29379 @@ -255,7 +255,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
29380         int max_param_num = 2;
29381         uint8_t param_nums = 0;
29382         long param[2];
29383 -       bool valid_input = false;
29384 +       bool valid_input = true;
29386         if (size == 0)
29387                 return -EINVAL;
29388 @@ -282,9 +282,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
29389         case LANE_COUNT_ONE:
29390         case LANE_COUNT_TWO:
29391         case LANE_COUNT_FOUR:
29392 -               valid_input = true;
29393                 break;
29394         default:
29395 +               valid_input = false;
29396                 break;
29397         }
29399 @@ -294,9 +294,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
29400         case LINK_RATE_RBR2:
29401         case LINK_RATE_HIGH2:
29402         case LINK_RATE_HIGH3:
29403 -               valid_input = true;
29404                 break;
29405         default:
29406 +               valid_input = false;
29407                 break;
29408         }
29410 @@ -310,10 +310,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
29411          * spread spectrum will not be changed
29412          */
29413         prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
29414 +       prefer_link_settings.use_link_rate_set = false;
29415         prefer_link_settings.lane_count = param[0];
29416         prefer_link_settings.link_rate = param[1];
29418 -       dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
29419 +       dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
29421         kfree(wr_buf);
29422         return size;
29423 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
29424 index 0cdbfcd475ec..71a15f68514b 100644
29425 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
29426 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
29427 @@ -644,6 +644,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
29429         /* File created at /sys/class/drm/card0/device/hdcp_srm*/
29430         hdcp_work[0].attr = data_attr;
29431 +       sysfs_bin_attr_init(&hdcp_work[0].attr);
29433         if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
29434                 DRM_WARN("Failed to create device file hdcp_srm");
29435 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
29436 index e0000c180ed1..8ce10d0973c5 100644
29437 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
29438 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
29439 @@ -82,6 +82,7 @@ struct amdgpu_dm_irq_handler_data {
29440         struct amdgpu_display_manager *dm;
29441         /* DAL irq source which registered for this interrupt. */
29442         enum dc_irq_source irq_source;
29443 +       struct work_struct work;
29444  };
29446  #define DM_IRQ_TABLE_LOCK(adev, flags) \
29447 @@ -111,20 +112,10 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
29448   */
29449  static void dm_irq_work_func(struct work_struct *work)
29450  {
29451 -       struct irq_list_head *irq_list_head =
29452 -               container_of(work, struct irq_list_head, work);
29453 -       struct list_head *handler_list = &irq_list_head->head;
29454 -       struct amdgpu_dm_irq_handler_data *handler_data;
29456 -       list_for_each_entry(handler_data, handler_list, list) {
29457 -               DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
29458 -                               handler_data->irq_source);
29459 +       struct amdgpu_dm_irq_handler_data *handler_data =
29460 +               container_of(work, struct amdgpu_dm_irq_handler_data, work);
29462 -               DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
29463 -                       handler_data->irq_source);
29465 -               handler_data->handler(handler_data->handler_arg);
29466 -       }
29467 +       handler_data->handler(handler_data->handler_arg);
29469         /* Call a DAL subcomponent which registered for interrupt notification
29470          * at INTERRUPT_LOW_IRQ_CONTEXT.
29471 @@ -156,7 +147,7 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
29472                 break;
29473         case INTERRUPT_LOW_IRQ_CONTEXT:
29474         default:
29475 -               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
29476 +               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
29477                 break;
29478         }
29480 @@ -290,7 +281,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
29481                 break;
29482         case INTERRUPT_LOW_IRQ_CONTEXT:
29483         default:
29484 -               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
29485 +               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
29486 +               INIT_WORK(&handler_data->work, dm_irq_work_func);
29487                 break;
29488         }
29490 @@ -372,7 +364,7 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
29491  int amdgpu_dm_irq_init(struct amdgpu_device *adev)
29492  {
29493         int src;
29494 -       struct irq_list_head *lh;
29495 +       struct list_head *lh;
29497         DRM_DEBUG_KMS("DM_IRQ\n");
29499 @@ -381,9 +373,7 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
29500         for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
29501                 /* low context handler list init */
29502                 lh = &adev->dm.irq_handler_list_low_tab[src];
29503 -               INIT_LIST_HEAD(&lh->head);
29504 -               INIT_WORK(&lh->work, dm_irq_work_func);
29506 +               INIT_LIST_HEAD(lh);
29507                 /* high context handler init */
29508                 INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
29509         }
29510 @@ -400,8 +390,11 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
29511  void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
29512  {
29513         int src;
29514 -       struct irq_list_head *lh;
29515 +       struct list_head *lh;
29516 +       struct list_head *entry, *tmp;
29517 +       struct amdgpu_dm_irq_handler_data *handler;
29518         unsigned long irq_table_flags;
29520         DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
29521         for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
29522                 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
29523 @@ -410,7 +403,16 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
29524                  * (because no code can schedule a new one). */
29525                 lh = &adev->dm.irq_handler_list_low_tab[src];
29526                 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
29527 -               flush_work(&lh->work);
29529 +               if (!list_empty(lh)) {
29530 +                       list_for_each_safe(entry, tmp, lh) {
29531 +                               handler = list_entry(
29532 +                                       entry,
29533 +                                       struct amdgpu_dm_irq_handler_data,
29534 +                                       list);
29535 +                               flush_work(&handler->work);
29536 +                       }
29537 +               }
29538         }
29541 @@ -420,6 +422,8 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
29542         struct list_head *hnd_list_h;
29543         struct list_head *hnd_list_l;
29544         unsigned long irq_table_flags;
29545 +       struct list_head *entry, *tmp;
29546 +       struct amdgpu_dm_irq_handler_data *handler;
29548         DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
29550 @@ -430,14 +434,22 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
29551          * will be disabled from manage_dm_interrupts on disable CRTC.
29552          */
29553         for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
29554 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
29555 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
29556                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
29557                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
29558                         dc_interrupt_set(adev->dm.dc, src, false);
29560                 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
29561 -               flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
29563 +               if (!list_empty(hnd_list_l)) {
29564 +                       list_for_each_safe (entry, tmp, hnd_list_l) {
29565 +                               handler = list_entry(
29566 +                                       entry,
29567 +                                       struct amdgpu_dm_irq_handler_data,
29568 +                                       list);
29569 +                               flush_work(&handler->work);
29570 +                       }
29571 +               }
29572                 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
29573         }
29575 @@ -457,7 +469,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
29577         /* re-enable short pulse interrupts HW interrupt */
29578         for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
29579 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
29580 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
29581                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
29582                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
29583                         dc_interrupt_set(adev->dm.dc, src, true);
29584 @@ -483,7 +495,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
29585          * will be enabled from manage_dm_interrupts on enable CRTC.
29586          */
29587         for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
29588 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
29589 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
29590                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
29591                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
29592                         dc_interrupt_set(adev->dm.dc, src, true);
29593 @@ -500,22 +512,53 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
29594  static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
29595                                         enum dc_irq_source irq_source)
29596  {
29597 -       unsigned long irq_table_flags;
29598 -       struct work_struct *work = NULL;
29599 +       struct  list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
29600 +       struct  amdgpu_dm_irq_handler_data *handler_data;
29601 +       bool    work_queued = false;
29603 -       DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
29604 +       if (list_empty(handler_list))
29605 +               return;
29607 +       list_for_each_entry (handler_data, handler_list, list) {
29608 +               if (!queue_work(system_highpri_wq, &handler_data->work)) {
29609 +                       continue;
29610 +               } else {
29611 +                       work_queued = true;
29612 +                       break;
29613 +               }
29614 +       }
29616 -       if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
29617 -               work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
29618 +       if (!work_queued) {
29619 +               struct  amdgpu_dm_irq_handler_data *handler_data_add;
29620 +               /* get the amdgpu_dm_irq_handler_data of the first item in handler_list */
29621 +               handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
29623 -       DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
29624 +               /* allocate a new amdgpu_dm_irq_handler_data */
29625 +               handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
29626 +               if (!handler_data_add) {
29627 +                       DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
29628 +                       return;
29629 +               }
29631 -       if (work) {
29632 -               if (!schedule_work(work))
29633 -                       DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
29634 -                                               irq_source);
29635 -       }
29636 +               /* copy the new amdgpu_dm_irq_handler_data's members from handler_data */
29637 +               handler_data_add->handler       = handler_data->handler;
29638 +               handler_data_add->handler_arg   = handler_data->handler_arg;
29639 +               handler_data_add->dm            = handler_data->dm;
29640 +               handler_data_add->irq_source    = irq_source;
29642 +               list_add_tail(&handler_data_add->list, handler_list);
29644 +               INIT_WORK(&handler_data_add->work, dm_irq_work_func);
29646 +               if (queue_work(system_highpri_wq, &handler_data_add->work))
29647 +                       DRM_DEBUG("Queued work for handling interrupt from "
29648 +                                 "display for IRQ source %d\n",
29649 +                                 irq_source);
29650 +               else
29651 +                       DRM_ERROR("Failed to queue work for handling interrupt "
29652 +                                 "from display for IRQ source %d\n",
29653 +                                 irq_source);
29654 +       }
29655  }
29657  /*
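
The amdgpu_dm_irq.c rework above embeds a work_struct in each handler's data instead of sharing one per IRQ source, and the work function recovers its owner via container_of(). A self-contained sketch of that pattern; work_struct here is a stand-in, not the kernel type:

#include <stddef.h>

/* Stand-in for the kernel type; only its identity matters here. */
struct work_struct { int pending; };

struct handler_data {
        void (*handler)(void *arg);
        void *handler_arg;
        struct work_struct work;        /* one work item per handler */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* The work function recovers its owning handler from the embedded member,
 * so each queued work item runs exactly one handler. */
static void irq_work_func(struct work_struct *work)
{
        struct handler_data *hd =
                container_of(work, struct handler_data, work);

        hd->handler(hd->handler_arg);
}
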
29658 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
29659 index 995ffbbf64e7..1ee27f2f28f1 100644
29660 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
29661 +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
29662 @@ -217,6 +217,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
29663                 if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
29664                         dcn3_clk_mgr_destroy(clk_mgr);
29665                 }
29666 +               if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
29667 +                       dcn3_clk_mgr_destroy(clk_mgr);
29668 +               }
29669                 break;
29671         case FAMILY_VGH:
29672 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
29673 index c7e5a64e06af..81ea5d3a1947 100644
29674 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
29675 +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
29676 @@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
29677         bool force_reset = false;
29678         bool update_uclk = false;
29679         bool p_state_change_support;
29680 +       int total_plane_count;
29682         if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
29683                 return;
29684 @@ -292,7 +293,8 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
29685                 clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
29687         clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
29688 -       p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
29689 +       total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
29690 +       p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
29691         if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
29692                 clk_mgr_base->clks.p_state_change_support = p_state_change_support;
29694 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
29695 index 8f8a13c7cf73..4781279024a9 100644
29696 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
29697 +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
29698 @@ -2398,7 +2398,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
29699                                         if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
29700                                                 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
29702 -                                       dc->hwss.optimize_bandwidth(dc, dc->current_state);
29703 +                                       dc->optimized_required = true;
29705                                 } else {
29706                                         if (dc->optimize_seamless_boot_streams == 0)
29707                                                 dc->hwss.prepare_bandwidth(dc, dc->current_state);
29708 @@ -2545,6 +2546,10 @@ static void commit_planes_for_stream(struct dc *dc,
29709                                                 plane_state->triplebuffer_flips = true;
29710                                 }
29711                         }
29712 +                       if (update_type == UPDATE_TYPE_FULL) {
29713 +                               /* force vsync flip when reconfiguring pipes to prevent underflow */
29714 +                               plane_state->flip_immediate = false;
29715 +                       }
29716                 }
29717         }
29719 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
29720 index bd0101013ec8..440bf0a0e12a 100644
29721 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
29722 +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
29723 @@ -1603,6 +1603,7 @@ static bool dc_link_construct(struct dc_link *link,
29724         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
29726         DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
29727 +       kfree(info);
29728         return true;
29729  device_tag_fail:
29730         link->link_enc->funcs->destroy(&link->link_enc);
29731 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
29732 index 4e87e70237e3..874b132fe1d7 100644
29733 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
29734 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
29735 @@ -283,7 +283,7 @@ struct abm *dce_abm_create(
29736         const struct dce_abm_shift *abm_shift,
29737         const struct dce_abm_mask *abm_mask)
29739 -       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
29740 +       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
29742         if (abm_dce == NULL) {
29743                 BREAK_TO_DEBUGGER();
29744 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
29745 index 277484cf853e..d4be5954d7aa 100644
29746 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
29747 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
29748 @@ -99,7 +99,6 @@ struct dce110_aux_registers {
29749         AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
29750         AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
29751         AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
29752 -       AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
29753         AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
29754         AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
29755         AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
29756 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
29757 index ddc789daf3b1..09d4cb5c97b6 100644
29758 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
29759 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
29760 @@ -1049,7 +1049,7 @@ struct dmcu *dcn10_dmcu_create(
29761         const struct dce_dmcu_shift *dmcu_shift,
29762         const struct dce_dmcu_mask *dmcu_mask)
29764 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
29765 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
29767         if (dmcu_dce == NULL) {
29768                 BREAK_TO_DEBUGGER();
29769 @@ -1070,7 +1070,7 @@ struct dmcu *dcn20_dmcu_create(
29770         const struct dce_dmcu_shift *dmcu_shift,
29771         const struct dce_dmcu_mask *dmcu_mask)
29773 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
29774 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
29776         if (dmcu_dce == NULL) {
29777                 BREAK_TO_DEBUGGER();
29778 @@ -1091,7 +1091,7 @@ struct dmcu *dcn21_dmcu_create(
29779         const struct dce_dmcu_shift *dmcu_shift,
29780         const struct dce_dmcu_mask *dmcu_mask)
29782 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
29783 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
29785         if (dmcu_dce == NULL) {
29786                 BREAK_TO_DEBUGGER();
29787 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
29788 index 69e34bef274c..febccb35ddad 100644
29789 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
29790 +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
29791 @@ -81,13 +81,18 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
29792  {
29793         struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
29794         uint32_t raw_state;
29795 +       enum dmub_status status = DMUB_STATUS_INVALID;
29797         // Send gpint command and wait for ack
29798 -       dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
29800 -       dmub_srv_get_gpint_response(srv, &raw_state);
29802 -       *state = convert_psr_state(raw_state);
29803 +       status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
29805 +       if (status == DMUB_STATUS_OK) {
29806 +               // GPINT was executed, get response
29807 +               dmub_srv_get_gpint_response(srv, &raw_state);
29808 +               *state = convert_psr_state(raw_state);
29809 +       } else
29810 +               // Return invalid state when GPINT times out
29811 +               *state = 0xFF;
29812  }
29814  /*
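
The dmub_psr_get_state() hunk above consumes the GPINT response only when the command status reports completion, falling back to the 0xFF sentinel on timeout. A sketch of the check-then-read shape with hypothetical helpers:

#include <stdint.h>

enum cmd_status { CMD_OK, CMD_TIMEOUT };    /* stand-ins for the status enum */

extern enum cmd_status send_gpint(uint32_t cmd, uint32_t arg,
                                  uint32_t timeout_us);     /* hypothetical */
extern uint32_t read_gpint_response(void);                  /* hypothetical */

/* Only read the mailbox response when the command actually completed;
 * otherwise report a sentinel, as the hunk does. */
static uint32_t query_psr_state(uint32_t get_state_cmd)
{
        if (send_gpint(get_state_cmd, 0, 30) == CMD_OK)
                return read_gpint_response();
        return 0xFF;    /* invalid state on timeout */
}
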
29815 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
29816 index 62cc2651e00c..8774406120fc 100644
29817 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
29818 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
29819 @@ -112,7 +112,7 @@ struct dccg *dccg2_create(
29820         const struct dccg_shift *dccg_shift,
29821         const struct dccg_mask *dccg_mask)
29823 -       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
29824 +       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
29825         struct dccg *base;
29827         if (dccg_dcn == NULL) {
29828 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
29829 index bec7059f6d5d..a1318c31bcfa 100644
29830 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
29831 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
29832 @@ -1,5 +1,5 @@
29833  /*
29834 - * Copyright 2012-17 Advanced Micro Devices, Inc.
29835 + * Copyright 2012-2021 Advanced Micro Devices, Inc.
29836   *
29837   * Permission is hereby granted, free of charge, to any person obtaining a
29838   * copy of this software and associated documentation files (the "Software"),
29839 @@ -181,11 +181,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
29840         else
29841                 Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
29842         */
29843 -       if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
29844 -               + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
29845 -               value = 1;
29846 -       } else
29847 -               value = 0;
29848 +       if (pipe_dest->htotal != 0) {
29849 +               if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
29850 +                       + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
29851 +                       value = 1;
29852 +               } else
29853 +                       value = 0;
29854 +       }
29856         REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
29857  }
29859 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
29860 index 2c2dbfcd8957..bfbc23b76cd5 100644
29861 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
29862 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
29863 @@ -1104,7 +1104,7 @@ struct dpp *dcn20_dpp_create(
29864         uint32_t inst)
29866         struct dcn20_dpp *dpp =
29867 -               kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
29868 +               kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
29870         if (!dpp)
29871                 return NULL;
29872 @@ -1122,7 +1122,7 @@ struct input_pixel_processor *dcn20_ipp_create(
29873         struct dc_context *ctx, uint32_t inst)
29875         struct dcn10_ipp *ipp =
29876 -               kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
29877 +               kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
29879         if (!ipp) {
29880                 BREAK_TO_DEBUGGER();
29881 @@ -1139,7 +1139,7 @@ struct output_pixel_processor *dcn20_opp_create(
29882         struct dc_context *ctx, uint32_t inst)
29884         struct dcn20_opp *opp =
29885 -               kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
29886 +               kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
29888         if (!opp) {
29889                 BREAK_TO_DEBUGGER();
29890 @@ -1156,7 +1156,7 @@ struct dce_aux *dcn20_aux_engine_create(
29891         uint32_t inst)
29893         struct aux_engine_dce110 *aux_engine =
29894 -               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
29895 +               kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
29897         if (!aux_engine)
29898                 return NULL;
29899 @@ -1194,7 +1194,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
29900         uint32_t inst)
29902         struct dce_i2c_hw *dce_i2c_hw =
29903 -               kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
29904 +               kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
29906         if (!dce_i2c_hw)
29907                 return NULL;
29908 @@ -1207,7 +1207,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
29909  struct mpc *dcn20_mpc_create(struct dc_context *ctx)
29911         struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
29912 -                                         GFP_KERNEL);
29913 +                                         GFP_ATOMIC);
29915         if (!mpc20)
29916                 return NULL;
29917 @@ -1225,7 +1225,7 @@ struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
29919         int i;
29920         struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
29921 -                                         GFP_KERNEL);
29922 +                                         GFP_ATOMIC);
29924         if (!hubbub)
29925                 return NULL;
29926 @@ -1253,7 +1253,7 @@ struct timing_generator *dcn20_timing_generator_create(
29927                 uint32_t instance)
29929         struct optc *tgn10 =
29930 -               kzalloc(sizeof(struct optc), GFP_KERNEL);
29931 +               kzalloc(sizeof(struct optc), GFP_ATOMIC);
29933         if (!tgn10)
29934                 return NULL;
29935 @@ -1332,7 +1332,7 @@ static struct clock_source *dcn20_clock_source_create(
29936         bool dp_clk_src)
29938         struct dce110_clk_src *clk_src =
29939 -               kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
29940 +               kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
29942         if (!clk_src)
29943                 return NULL;
29944 @@ -1438,7 +1438,7 @@ struct display_stream_compressor *dcn20_dsc_create(
29945         struct dc_context *ctx, uint32_t inst)
29947         struct dcn20_dsc *dsc =
29948 -               kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
29949 +               kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
29951         if (!dsc) {
29952                 BREAK_TO_DEBUGGER();
29953 @@ -1572,7 +1572,7 @@ struct hubp *dcn20_hubp_create(
29954         uint32_t inst)
29956         struct dcn20_hubp *hubp2 =
29957 -               kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
29958 +               kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
29960         if (!hubp2)
29961                 return NULL;
29962 @@ -3390,7 +3390,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
29964  static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
29966 -       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
29967 +       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
29969         if (!pp_smu)
29970                 return pp_smu;
29971 @@ -4034,7 +4034,7 @@ struct resource_pool *dcn20_create_resource_pool(
29972                 struct dc *dc)
29974         struct dcn20_resource_pool *pool =
29975 -               kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
29976 +               kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
29978         if (!pool)
29979                 return NULL;
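A note on the block of hunks above: they switch every DCN2.0 resource constructor from GFP_KERNEL to GFP_ATOMIC, i.e. from an allocation that may sleep to one that never does; the usual motivation is that these constructors can be reached from a context that must not sleep. Atomic allocations draw on reserves and fail more readily, which is why each call site keeps its NULL check. A rough userspace model of the pattern follows (kzalloc()/GFP_ATOMIC are kernel APIs; calloc() and the struct here are stand-ins):

#include <stdlib.h>

struct dcn20_dpp_model { int inst; };   /* stand-in for struct dcn20_dpp */

static struct dcn20_dpp_model *dpp_create_model(void)
{
        /* kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC) in the kernel:
         * zeroed, non-sleeping, and more likely to return NULL. */
        struct dcn20_dpp_model *dpp = calloc(1, sizeof(*dpp));

        if (!dpp)       /* the NULL check is load-bearing for GFP_ATOMIC */
                return NULL;
        return dpp;
}

int main(void)
{
        free(dpp_create_model());
        return 0;
}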
29980 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
29981 index 06dc1e2e8383..07c8d2e2c09c 100644
29982 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
29983 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
29984 @@ -848,7 +848,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
29986                                         cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part;
29987                                         cmd.mall.cursor_copy_dst.quad_part =
29988 -                                                       plane->address.grph.cursor_cache_addr.quad_part;
29989 +                                                       (plane->address.grph.cursor_cache_addr.quad_part + 2047) & ~2047;
29990                                         cmd.mall.cursor_width = cursor_attr.width;
29991                                         cmd.mall.cursor_height = cursor_attr.height;
29992                                         cmd.mall.cursor_pitch = cursor_attr.pitch;
29993 @@ -858,8 +858,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
29994                                         dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
29996                                         /* Use copied cursor, and it's okay to not switch back */
29997 -                                       cursor_attr.address.quad_part =
29998 -                                                       plane->address.grph.cursor_cache_addr.quad_part;
29999 +                                       cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
30000                                         dc_stream_set_cursor_attributes(stream, &cursor_attr);
30001                                 }
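The two hunks above are connected: (quad_part + 2047) & ~2047 rounds the cursor-cache address up to the next 2048-byte boundary (2048 = 2^11, so the mask clears the low 11 bits), and the cursor attributes are then pointed at that rounded copy destination rather than the raw plane address, keeping the scanout address and the DMUB copy destination in agreement. A standalone check of the rounding arithmetic:

#include <assert.h>
#include <stdint.h>

/* Round x up to the next multiple of 2048; the explicit cast keeps the
 * mask 64-bit wide, matching the 64-bit quad_part in the hunk. */
static uint64_t align_up_2k(uint64_t x)
{
        return (x + 2047) & ~(uint64_t)2047;
}

int main(void)
{
        assert(align_up_2k(0)    == 0);
        assert(align_up_2k(1)    == 2048);
        assert(align_up_2k(2048) == 2048);
        assert(align_up_2k(4095) == 4096);
        return 0;
}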
30003 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
30004 index 3e6f76096119..a7598356f37d 100644
30005 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
30006 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
30007 @@ -143,16 +143,18 @@ static void mpc3_power_on_ogam_lut(
30009         struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
30011 -       if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
30012 -               // Force power on
30013 -               REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0);
30014 -               // Wait for confirmation when powering on
30015 -               if (power_on)
30016 -                       REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
30017 -       } else {
30018 -               REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
30019 -                               MPCC_OGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
30020 -       }
30021 +       /*
30022 +        * Powering on: force memory active so the LUT can be updated.
30023 +        * Powering off: allow entering memory low power mode.
30024 +        *
30025 +        * Memory low power mode is controlled during MPC OGAM LUT init.
30026 +        */
30027 +       REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id],
30028 +                  MPCC_OGAM_MEM_PWR_DIS, power_on != 0);
30030 +       /* Wait for memory to be powered on - we won't be able to write to it otherwise. */
30031 +       if (power_on)
30032 +               REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
30035  static void mpc3_configure_ogam_lut(
30036 @@ -1427,7 +1429,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
30037         .acquire_rmu = mpcc3_acquire_rmu,
30038         .program_3dlut = mpc3_program_3dlut,
30039         .release_rmu = mpcc3_release_rmu,
30040 -       .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
30041 +       .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
30042         .get_mpc_out_mux = mpc1_get_mpc_out_mux,
30044  };
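Two related fixes above: mpc3_power_on_ogam_lut() now always forces the OGAM LUT SRAM on before it is written (previously the force-on path ran only when the mem_low_power debug bit was set) and still waits for MPCC_OGAM_MEM_PWR_STATE to report powered-up, and the dcn30_mpc_funcs table installs the DCN3.0 helper instead of the DCN2.0 mpc20_ one. A toy model of the force-on-then-wait handshake (the registers and their behavior are simulated, not the real REG_UPDATE/REG_WAIT):

#include <stdbool.h>
#include <stdio.h>

static unsigned int mem_pwr_ctrl;       /* models MPCC_OGAM_MEM_PWR_DIS */
static unsigned int mem_pwr_state = 1;  /* models MPCC_OGAM_MEM_PWR_STATE; 0 = up */

static void lut_set_power(bool power_on)
{
        /* REG_UPDATE(MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, power_on) */
        mem_pwr_ctrl = power_on;
        mem_pwr_state = power_on ? 0 : 1;       /* simulated hardware response */

        /* REG_WAIT(..., MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10): LUT writes
         * issued before the SRAM reports powered-up would be lost. */
        if (power_on && mem_pwr_state != 0)
                fprintf(stderr, "LUT SRAM failed to power up\n");
}

int main(void)
{
        lut_set_power(true);    /* force on before programming the LUT */
        lut_set_power(false);   /* allow low-power entry afterwards */
        return 0;
}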
30045 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
30046 index fb7f1dea3c46..9b33182f3abd 100644
30047 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
30048 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
30049 @@ -181,7 +181,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
30050                 },
30051         .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
30052         .num_states = 1,
30053 -       .sr_exit_time_us = 12,
30054 +       .sr_exit_time_us = 15.5,
30055         .sr_enter_plus_exit_time_us = 20,
30056         .urgent_latency_us = 4.0,
30057         .urgent_latency_pixel_data_only_us = 4.0,
30058 @@ -826,10 +826,11 @@ static const struct dc_plane_cap plane_cap = {
30059                         .fp16 = 16000
30060         },
30062 +       /* 6:1 downscaling ratio: 1000/6 = 166.666 */
30063         .max_downscale_factor = {
30064 -                       .argb8888 = 600,
30065 -                       .nv12 = 600,
30066 -                       .fp16 = 600
30067 +                       .argb8888 = 167,
30068 +                       .nv12 = 167,
30069 +                       .fp16 = 167
30070         }
30071  };
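Per the comment added in the hunk, max_downscale_factor reads as the minimum output/input size ratio in thousandths, so 167 ≈ 1000/6 permits roughly 6:1 downscaling where the old 600 only permitted about 1.67:1; the same change repeats for DCN3.01 and DCN3.02 below. A quick worked check of that reading:

#include <stdio.h>

int main(void)
{
        /* factor = smallest allowed output/input ratio, in 1/1000 units */
        int factors[] = { 600, 250, 167 };

        for (int i = 0; i < 3; i++)
                printf("factor %d -> up to %.2f:1 downscale\n",
                       factors[i], 1000.0 / factors[i]);
        /* prints 1.67:1, 4.00:1 and 5.99:1 (~6:1) respectively */
        return 0;
}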
30073 diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
30074 index c494235016e0..00f066f1da0c 100644
30075 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
30076 +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
30077 @@ -843,10 +843,11 @@ static const struct dc_plane_cap plane_cap = {
30078                         .fp16 = 16000
30079         },
30081 +       /* 6:1 downscaling ratio: 1000/6 = 166.666 */
30082         .max_downscale_factor = {
30083 -                       .argb8888 = 600,
30084 -                       .nv12 = 600,
30085 -                       .fp16 = 600
30086 +                       .argb8888 = 167,
30087 +                       .nv12 = 167,
30088 +                       .fp16 = 167
30089         },
30090         64,
30091         64
30092 diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
30093 index 4b659b63f75b..7d9d591de411 100644
30094 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
30095 +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
30096 @@ -164,7 +164,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
30098                 .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
30099                 .num_states = 1,
30100 -               .sr_exit_time_us = 12,
30101 +               .sr_exit_time_us = 15.5,
30102                 .sr_enter_plus_exit_time_us = 20,
30103                 .urgent_latency_us = 4.0,
30104                 .urgent_latency_pixel_data_only_us = 4.0,
30105 @@ -282,10 +282,11 @@ static const struct dc_plane_cap plane_cap = {
30106                                 .nv12 = 16000,
30107                                 .fp16 = 16000
30108                 },
30109 +               /* 6:1 downscaling ratio: 1000/6 = 166.666 */
30110                 .max_downscale_factor = {
30111 -                               .argb8888 = 600,
30112 -                               .nv12 = 600,
30113 -                               .fp16 = 600
30114 +                               .argb8888 = 167,
30115 +                               .nv12 = 167,
30116 +                               .fp16 = 167
30117                 },
30118                 16,
30119                 16
30120 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
30121 index 0f3f510fd83b..9729cf292e84 100644
30122 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
30123 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
30124 @@ -3437,6 +3437,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
30125                         mode_lib->vba.DCCEnabledInAnyPlane = true;
30126                 }
30127         }
30128 +       mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
30129         for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
30130                 locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
30131                                 mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
30132 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
30133 index 210c96cd5b03..51098c2c9854 100644
30134 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
30135 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
30136 @@ -3544,6 +3544,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
30137                         mode_lib->vba.DCCEnabledInAnyPlane = true;
30138                 }
30139         }
30140 +       mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
30141         for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
30142                 locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
30143                                 mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
30144 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
30145 index 72423dc425dc..799bae229e67 100644
30146 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
30147 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
30148 @@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
30149         if (surf_linear) {
30150                 log2_swath_height_l = 0;
30151                 log2_swath_height_c = 0;
30152 -       } else if (!surf_vert) {
30153 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
30154 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
30155         } else {
30156 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
30157 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
30158 +               unsigned int swath_height_l;
30159 +               unsigned int swath_height_c;
30161 +               if (!surf_vert) {
30162 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
30163 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
30164 +               } else {
30165 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
30166 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
30167 +               }
30169 +               if (swath_height_l > 0)
30170 +                       log2_swath_height_l = dml_log2(swath_height_l);
30172 +               if (req128_l && log2_swath_height_l > 0)
30173 +                       log2_swath_height_l -= 1;
30175 +               if (swath_height_c > 0)
30176 +                       log2_swath_height_c = dml_log2(swath_height_c);
30178 +               if (req128_c && log2_swath_height_c > 0)
30179 +                       log2_swath_height_c -= 1;
30180         }
30182         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
30183         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
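The rewrite above (repeated verbatim in the four DML copies that follow) guards a subtraction that could underflow: the old dml_log2(blk256_*) - req128 goes negative whenever the 256-byte block dimension is 1 (log2 == 0) while req128 is set, after which 1 << log2_swath_height shifts by a negative amount, which is undefined behavior. The new code takes the log only for a positive dimension and decrements only while the log is still positive. A standalone model:

#include <assert.h>

/* Integer log2, mirroring dml_log2() for powers of two. */
static unsigned int ilog2u(unsigned int x)
{
        unsigned int r = 0;

        while (x > 1) {
                x >>= 1;
                r++;
        }
        return r;
}

/* Guarded swath-height computation from the patched helpers. */
static unsigned int swath_height(unsigned int blk256, int req128)
{
        unsigned int log2_h = 0;

        if (blk256 > 0)
                log2_h = ilog2u(blk256);
        if (req128 && log2_h > 0)
                log2_h -= 1;            /* the old code could underflow here */
        return 1u << log2_h;
}

int main(void)
{
        assert(swath_height(8, 0) == 8);
        assert(swath_height(8, 1) == 4);
        assert(swath_height(1, 1) == 1);  /* old: 1 << (0 - 1) was undefined */
        return 0;
}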
30185 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
30186 index 9c78446c3a9d..6a6d5970d1d5 100644
30187 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
30188 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
30189 @@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
30190         if (surf_linear) {
30191                 log2_swath_height_l = 0;
30192                 log2_swath_height_c = 0;
30193 -       } else if (!surf_vert) {
30194 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
30195 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
30196         } else {
30197 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
30198 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
30199 +               unsigned int swath_height_l;
30200 +               unsigned int swath_height_c;
30202 +               if (!surf_vert) {
30203 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
30204 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
30205 +               } else {
30206 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
30207 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
30208 +               }
30210 +               if (swath_height_l > 0)
30211 +                       log2_swath_height_l = dml_log2(swath_height_l);
30213 +               if (req128_l && log2_swath_height_l > 0)
30214 +                       log2_swath_height_l -= 1;
30216 +               if (swath_height_c > 0)
30217 +                       log2_swath_height_c = dml_log2(swath_height_c);
30219 +               if (req128_c && log2_swath_height_c > 0)
30220 +                       log2_swath_height_c -= 1;
30221         }
30223         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
30224         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
30226 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
30227 index edd41d358291..dc1c81a6e377 100644
30228 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
30229 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
30230 @@ -277,13 +277,31 @@ static void handle_det_buf_split(
30231         if (surf_linear) {
30232                 log2_swath_height_l = 0;
30233                 log2_swath_height_c = 0;
30234 -       } else if (!surf_vert) {
30235 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
30236 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
30237         } else {
30238 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
30239 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
30240 +               unsigned int swath_height_l;
30241 +               unsigned int swath_height_c;
30243 +               if (!surf_vert) {
30244 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
30245 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
30246 +               } else {
30247 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
30248 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
30249 +               }
30251 +               if (swath_height_l > 0)
30252 +                       log2_swath_height_l = dml_log2(swath_height_l);
30254 +               if (req128_l && log2_swath_height_l > 0)
30255 +                       log2_swath_height_l -= 1;
30257 +               if (swath_height_c > 0)
30258 +                       log2_swath_height_c = dml_log2(swath_height_c);
30260 +               if (req128_c && log2_swath_height_c > 0)
30261 +                       log2_swath_height_c -= 1;
30262         }
30264         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
30265         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
30267 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
30268 index 0f14f205ebe5..04601a767a8f 100644
30269 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
30270 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
30271 @@ -237,13 +237,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
30272         if (surf_linear) {
30273                 log2_swath_height_l = 0;
30274                 log2_swath_height_c = 0;
30275 -       } else if (!surf_vert) {
30276 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
30277 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
30278         } else {
30279 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
30280 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
30281 +               unsigned int swath_height_l;
30282 +               unsigned int swath_height_c;
30284 +               if (!surf_vert) {
30285 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
30286 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
30287 +               } else {
30288 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
30289 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
30290 +               }
30292 +               if (swath_height_l > 0)
30293 +                       log2_swath_height_l = dml_log2(swath_height_l);
30295 +               if (req128_l && log2_swath_height_l > 0)
30296 +                       log2_swath_height_l -= 1;
30298 +               if (swath_height_c > 0)
30299 +                       log2_swath_height_c = dml_log2(swath_height_c);
30301 +               if (req128_c && log2_swath_height_c > 0)
30302 +                       log2_swath_height_c -= 1;
30303         }
30305         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
30306         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
30308 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
30309 index 4c3e9cc30167..414da64f5734 100644
30310 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
30311 +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
30312 @@ -344,13 +344,31 @@ static void handle_det_buf_split(
30313         if (surf_linear) {
30314                 log2_swath_height_l = 0;
30315                 log2_swath_height_c = 0;
30316 -       } else if (!surf_vert) {
30317 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
30318 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
30319         } else {
30320 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
30321 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
30322 +               unsigned int swath_height_l;
30323 +               unsigned int swath_height_c;
30325 +               if (!surf_vert) {
30326 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
30327 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
30328 +               } else {
30329 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
30330 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
30331 +               }
30333 +               if (swath_height_l > 0)
30334 +                       log2_swath_height_l = dml_log2(swath_height_l);
30336 +               if (req128_l && log2_swath_height_l > 0)
30337 +                       log2_swath_height_l -= 1;
30339 +               if (swath_height_c > 0)
30340 +                       log2_swath_height_c = dml_log2(swath_height_c);
30342 +               if (req128_c && log2_swath_height_c > 0)
30343 +                       log2_swath_height_c -= 1;
30344         }
30346         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
30347         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
30349 diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
30350 index 5e384a8a83dc..51855a2624cf 100644
30351 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
30352 +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
30353 @@ -39,7 +39,7 @@
30354  #define HDCP14_KSV_SIZE 5
30355  #define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE
30357 -static const bool hdcp_cmd_is_read[] = {
30358 +static const bool hdcp_cmd_is_read[HDCP_MESSAGE_ID_MAX] = {
30359         [HDCP_MESSAGE_ID_READ_BKSV] = true,
30360         [HDCP_MESSAGE_ID_READ_RI_R0] = true,
30361         [HDCP_MESSAGE_ID_READ_PJ] = true,
30362 @@ -75,7 +75,7 @@ static const bool hdcp_cmd_is_read[] = {
30363         [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
30364  };
30366 -static const uint8_t hdcp_i2c_offsets[] = {
30367 +static const uint8_t hdcp_i2c_offsets[HDCP_MESSAGE_ID_MAX] = {
30368         [HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
30369         [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
30370         [HDCP_MESSAGE_ID_READ_PJ] = 0xA,
30371 @@ -106,7 +106,8 @@ static const uint8_t hdcp_i2c_offsets[] = {
30372         [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
30373         [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
30374         [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
30375 -       [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
30376 +       [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
30377 +       [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0,
30378  };
30380  struct protection_properties {
30381 @@ -184,7 +185,7 @@ static const struct protection_properties hdmi_14_protection = {
30382         .process_transaction = hdmi_14_process_transaction
30383  };
30385 -static const uint32_t hdcp_dpcd_addrs[] = {
30386 +static const uint32_t hdcp_dpcd_addrs[HDCP_MESSAGE_ID_MAX] = {
30387         [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
30388         [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
30389         [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
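Sizing the three lookup tables above with [HDCP_MESSAGE_ID_MAX] guarantees that every valid message ID indexes inside the array, with unlisted IDs reading as zero, instead of letting the array length be dictated by whichever designated initializer happens to be highest; the WRITE_CONTENT_STREAM_TYPE I2C offset is also spelled out. A model of why the explicit bound matters (the enum and values here are invented for illustration):

#include <assert.h>
#include <stdint.h>

enum msg_id { MSG_A, MSG_B, MSG_C, MSG_ID_MAX };

/* Without [MSG_ID_MAX] the array length would be highest index + 1,
 * and indexing it with a larger valid ID would read out of bounds. */
static const uint8_t offsets[MSG_ID_MAX] = {
        [MSG_A] = 0x0,
        [MSG_B] = 0x8,
        /* MSG_C deliberately unlisted: guaranteed 0, and in bounds */
};

int main(void)
{
        assert(sizeof(offsets) == MSG_ID_MAX);
        assert(offsets[MSG_C] == 0);
        return 0;
}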
30390 diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
30391 index 904ce9b88088..afbe8856468a 100644
30392 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
30393 +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
30394 @@ -791,6 +791,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
30395                            TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
30396                         hdcp->connection.is_hdcp2_revoked = 1;
30397                         status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
30398 +               } else {
30399 +                       status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
30400                 }
30401         }
30402         mutex_unlock(&psp->hdcp_context.mutex);
30403 diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
30404 index ed05a30d1139..e2a56a7f3d7a 100644
30405 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
30406 +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
30407 @@ -1526,20 +1526,6 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
30409                 smu10_data->gfx_actual_soft_min_freq = min_freq;
30410                 smu10_data->gfx_actual_soft_max_freq = max_freq;
30412 -               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
30413 -                                       PPSMC_MSG_SetHardMinGfxClk,
30414 -                                       min_freq,
30415 -                                       NULL);
30416 -               if (ret)
30417 -                       return ret;
30419 -               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
30420 -                                       PPSMC_MSG_SetSoftMaxGfxClk,
30421 -                                       max_freq,
30422 -                                       NULL);
30423 -               if (ret)
30424 -                       return ret;
30425         } else if (type == PP_OD_COMMIT_DPM_TABLE) {
30426                 if (size != 0) {
30427                         pr_err("Input parameter number not correct\n");
30428 diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
30429 index 599ec9726601..959143eff651 100644
30430 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
30431 +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
30432 @@ -5160,7 +5160,7 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
30434  out:
30435         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
30436 -                                               1 << power_profile_mode,
30437 +                                               (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
30438                                                 NULL);
30439         hwmgr->power_profile_mode = power_profile_mode;
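The workload-mask fix above accounts for profile mode 0 (the bootup default) having no SMC workload bit: the SMC mask bits appear to be offset by one from the driver's profile enum, so 1 << power_profile_mode set the wrong bit for every mode, and mode 0 now sends an empty mask. A worked check of the mapping (the enum values here are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Mode 0 = bootup default (no workload bit); mode n >= 1 maps to bit n-1. */
static uint32_t workload_mask(unsigned int mode)
{
        return !mode ? 0 : 1u << (mode - 1);
}

int main(void)
{
        for (unsigned int mode = 0; mode <= 3; mode++)
                printf("mode %u -> mask 0x%x\n", mode, workload_mask(mode));
        /* the old code sent 0x2 for mode 1, 0x4 for mode 2, and so on */
        return 0;
}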
30441 diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
30442 index cd905e41080e..ec0037a21331 100644
30443 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
30444 +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
30445 @@ -279,35 +279,25 @@ static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_
30446         if (smu->adev->in_suspend)
30447                 return;
30449 -       /*
30450 -        * mclk, fclk and socclk are interdependent
30451 -        * on each other
30452 -        */
30453         if (clk == SMU_MCLK) {
30454 -               /* reset clock dependency */
30455                 smu->user_dpm_profile.clk_dependency = 0;
30456 -               /* set mclk dependent clocks(fclk and socclk) */
30457                 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
30458         } else if (clk == SMU_FCLK) {
30459 -               /* give priority to mclk, if mclk dependent clocks are set */
30460 +               /* MCLK takes precedence over FCLK */
30461                 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
30462                         return;
30464 -               /* reset clock dependency */
30465                 smu->user_dpm_profile.clk_dependency = 0;
30466 -               /* set fclk dependent clocks(mclk and socclk) */
30467                 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
30468         } else if (clk == SMU_SOCCLK) {
30469 -               /* give priority to mclk, if mclk dependent clocks are set */
30470 +               /* MCLK takes precedence over SOCCLK */
30471                 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
30472                         return;
30474 -               /* reset clock dependency */
30475                 smu->user_dpm_profile.clk_dependency = 0;
30476 -               /* set socclk dependent clocks(mclk and fclk) */
30477                 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
30478         } else
30479 -               /* add clk dependencies here, if any */
30480 +               /* Add clk dependencies here, if any */
30481                 return;
30484 @@ -331,7 +321,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
30485                 return;
30487         /* Enable restore flag */
30488 -       smu->user_dpm_profile.flags = SMU_DPM_USER_PROFILE_RESTORE;
30489 +       smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
30491         /* set the user dpm power limit */
30492         if (smu->user_dpm_profile.power_limit) {
30493 @@ -354,8 +344,8 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
30494                                 ret = smu_force_clk_levels(smu, clk_type,
30495                                                 smu->user_dpm_profile.clk_mask[clk_type]);
30496                                 if (ret)
30497 -                                       dev_err(smu->adev->dev, "Failed to set clock type = %d\n",
30498 -                                                       clk_type);
30499 +                                       dev_err(smu->adev->dev,
30500 +                                               "Failed to set clock type = %d\n", clk_type);
30501                         }
30502                 }
30503         }
30504 @@ -1777,7 +1767,7 @@ int smu_force_clk_levels(struct smu_context *smu,
30506         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
30507                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
30508 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) {
30509 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
30510                         smu->user_dpm_profile.clk_mask[clk_type] = mask;
30511                         smu_set_user_clk_dependencies(smu, clk_type);
30512                 }
30513 @@ -2034,7 +2024,7 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
30514         if (smu->ppt_funcs->set_fan_speed_percent) {
30515                 percent = speed * 100 / smu->fan_max_rpm;
30516                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
30517 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
30518 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
30519                         smu->user_dpm_profile.fan_speed_percent = percent;
30520         }
30522 @@ -2096,6 +2086,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
30523                 dev_err(smu->adev->dev,
30524                         "New power limit (%d) is over the max allowed %d\n",
30525                         limit, smu->max_power_limit);
30526 +               ret = -EINVAL;
30527                 goto out;
30528         }
30530 @@ -2104,7 +2095,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
30532         if (smu->ppt_funcs->set_power_limit) {
30533                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
30534 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
30535 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
30536                         smu->user_dpm_profile.power_limit = limit;
30537         }
30539 @@ -2285,7 +2276,7 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
30541         if (smu->ppt_funcs->set_fan_control_mode) {
30542                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
30543 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
30544 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
30545                         smu->user_dpm_profile.fan_mode = value;
30546         }
30548 @@ -2293,7 +2284,7 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
30550         /* reset user dpm fan speed */
30551         if (!ret && value != AMD_FAN_CTRL_MANUAL &&
30552 -                       smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
30553 +                       !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
30554                 smu->user_dpm_profile.fan_speed_percent = 0;
30556         return ret;
30557 @@ -2335,7 +2326,7 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
30558                 if (speed > 100)
30559                         speed = 100;
30560                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
30561 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
30562 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
30563                         smu->user_dpm_profile.fan_speed_percent = speed;
30564         }
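The amdgpu_smu.c hunks above treat user_dpm_profile.flags as what it is, a bitfield: the restore flag is set with |= so other bits survive, and every test uses & rather than a whole-word equality that would misfire as soon as a second flag exists; the same series also makes smu_set_power_limit() actually return -EINVAL for an over-limit request instead of falling through with a stale 0. A minimal demonstration of the equality-vs-bitwise pitfall (the flag values are stand-ins for SMU_DPM_USER_PROFILE_RESTORE):

#include <assert.h>
#include <stdint.h>

#define PROFILE_RESTORE  (1u << 0)   /* stand-in for the restore flag */
#define PROFILE_OTHER    (1u << 1)   /* hypothetical second flag */

int main(void)
{
        uint32_t flags = PROFILE_OTHER;

        flags |= PROFILE_RESTORE;         /* set, preserving other bits */

        assert(flags & PROFILE_RESTORE);  /* correct bitwise test */
        assert(flags != PROFILE_RESTORE); /* whole-word equality misreads this state */
        return 0;
}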
30566 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
30567 index 6e641f1513d8..fbff3df72e6c 100644
30568 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
30569 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
30570 @@ -1110,7 +1110,6 @@ static int navi10_force_clk_levels(struct smu_context *smu,
30571         case SMU_SOCCLK:
30572         case SMU_MCLK:
30573         case SMU_UCLK:
30574 -       case SMU_DCEFCLK:
30575         case SMU_FCLK:
30576                 /* There is only 2 levels for fine grained DPM */
30577                 if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
30578 @@ -1130,6 +1129,10 @@ static int navi10_force_clk_levels(struct smu_context *smu,
30579                 if (ret)
30580                         return size;
30581                 break;
30582 +       case SMU_DCEFCLK:
30583 +       dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
30584 +               break;
30586         default:
30587                 break;
30588         }
30589 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
30590 index af73e1430af5..61438940c26e 100644
30591 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
30592 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
30593 @@ -1127,7 +1127,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
30594         case SMU_SOCCLK:
30595         case SMU_MCLK:
30596         case SMU_UCLK:
30597 -       case SMU_DCEFCLK:
30598         case SMU_FCLK:
30599                 /* There is only 2 levels for fine grained DPM */
30600                 if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
30601 @@ -1147,6 +1146,9 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
30602                 if (ret)
30603                         goto forec_level_out;
30604                 break;
30605 +       case SMU_DCEFCLK:
30606 +       dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
30607 +               break;
30608         default:
30609                 break;
30610         }
30611 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
30612 index 101eaa20db9b..a80f551771b9 100644
30613 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
30614 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
30615 @@ -1462,7 +1462,6 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
30616                                         long input[], uint32_t size)
30618         int ret = 0;
30619 -       int i;
30620         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
30622         if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
30623 @@ -1535,43 +1534,6 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
30624                         smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
30625                         smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
30626                         smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
30628 -                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
30629 -                                                                       smu->gfx_actual_hard_min_freq, NULL);
30630 -                       if (ret) {
30631 -                               dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
30632 -                               return ret;
30633 -                       }
30635 -                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
30636 -                                                                       smu->gfx_actual_soft_max_freq, NULL);
30637 -                       if (ret) {
30638 -                               dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
30639 -                               return ret;
30640 -                       }
30642 -                       if (smu->adev->pm.fw_version < 0x43f1b00) {
30643 -                               dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
30644 -                               break;
30645 -                       }
30647 -                       for (i = 0; i < smu->cpu_core_num; i++) {
30648 -                               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
30649 -                                                                     (i << 20) | smu->cpu_actual_soft_min_freq,
30650 -                                                                     NULL);
30651 -                               if (ret) {
30652 -                                       dev_err(smu->adev->dev, "Set hard min cclk failed!");
30653 -                                       return ret;
30654 -                               }
30656 -                               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
30657 -                                                                     (i << 20) | smu->cpu_actual_soft_max_freq,
30658 -                                                                     NULL);
30659 -                               if (ret) {
30660 -                                       dev_err(smu->adev->dev, "Set soft max cclk failed!");
30661 -                                       return ret;
30662 -                               }
30663 -                       }
30664                 }
30665                 break;
30666         case PP_OD_COMMIT_DPM_TABLE:
30667 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
30668 index 5493388fcb10..dbe6d0caddb7 100644
30669 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
30670 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
30671 @@ -389,24 +389,6 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
30672                 }
30673                 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
30674                 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
30676 -               ret = smu_cmn_send_smc_msg_with_param(smu,
30677 -                                                               SMU_MSG_SetHardMinGfxClk,
30678 -                                                               smu->gfx_actual_hard_min_freq,
30679 -                                                               NULL);
30680 -               if (ret) {
30681 -                       dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
30682 -                       return ret;
30683 -               }
30685 -               ret = smu_cmn_send_smc_msg_with_param(smu,
30686 -                                                               SMU_MSG_SetSoftMaxGfxClk,
30687 -                                                               smu->gfx_actual_soft_max_freq,
30688 -                                                               NULL);
30689 -               if (ret) {
30690 -                       dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
30691 -                       return ret;
30692 -               }
30693                 break;
30694         case PP_OD_COMMIT_DPM_TABLE:
30695                 if (size != 0) {
30696 diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
30697 index 3bc383d5bf73..49a1d7f3539c 100644
30698 --- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
30699 +++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
30700 @@ -13,9 +13,6 @@
30701  #define has_bit(nr, mask)      (BIT(nr) & (mask))
30702  #define has_bits(bits, mask)   (((bits) & (mask)) == (bits))
30704 -#define dp_for_each_set_bit(bit, mask) \
30705 -       for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
30707  #define dp_wait_cond(__cond, __tries, __min_range, __max_range)        \
30708  ({                                                     \
30709         int num_tries = __tries;                        \
30710 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
30711 index 719a79728e24..06c595378dda 100644
30712 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
30713 +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
30714 @@ -46,8 +46,9 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
30716         struct komeda_component *c;
30717         int i;
30718 +       unsigned long avail_comps = pipe->avail_comps;
30720 -       dp_for_each_set_bit(i, pipe->avail_comps) {
30721 +       for_each_set_bit(i, &avail_comps, 32) {
30722                 c = komeda_pipeline_get_component(pipe, i);
30723                 komeda_component_destroy(mdev, c);
30724         }
30725 @@ -247,6 +248,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
30727         struct komeda_component *c;
30728         int id;
30729 +       unsigned long avail_comps = pipe->avail_comps;
30731         DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",
30732                  pipe->id, pipe->n_layers, pipe->n_scalers,
30733 @@ -258,7 +260,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
30734                  pipe->of_output_links[1] ?
30735                  pipe->of_output_links[1]->full_name : "none");
30737 -       dp_for_each_set_bit(id, pipe->avail_comps) {
30738 +       for_each_set_bit(id, &avail_comps, 32) {
30739                 c = komeda_pipeline_get_component(pipe, id);
30741                 komeda_component_dump(c);
30742 @@ -270,8 +272,9 @@ static void komeda_component_verify_inputs(struct komeda_component *c)
30743         struct komeda_pipeline *pipe = c->pipeline;
30744         struct komeda_component *input;
30745         int id;
30746 +       unsigned long supported_inputs = c->supported_inputs;
30748 -       dp_for_each_set_bit(id, c->supported_inputs) {
30749 +       for_each_set_bit(id, &supported_inputs, 32) {
30750                 input = komeda_pipeline_get_component(pipe, id);
30751                 if (!input) {
30752                         c->supported_inputs &= ~(BIT(id));
30753 @@ -302,8 +305,9 @@ static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
30754         struct komeda_component *c;
30755         struct komeda_layer *layer;
30756         int i, id;
30757 +       unsigned long avail_comps = pipe->avail_comps;
30759 -       dp_for_each_set_bit(id, pipe->avail_comps) {
30760 +       for_each_set_bit(id, &avail_comps, 32) {
30761                 c = komeda_pipeline_get_component(pipe, id);
30762                 komeda_component_verify_inputs(c);
30763         }
30764 @@ -355,13 +359,15 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
30766         struct komeda_component *c;
30767         u32 id;
30768 +       unsigned long avail_comps;
30770         seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
30772         if (pipe->funcs && pipe->funcs->dump_register)
30773                 pipe->funcs->dump_register(pipe, sf);
30775 -       dp_for_each_set_bit(id, pipe->avail_comps) {
30776 +       avail_comps = pipe->avail_comps;
30777 +       for_each_set_bit(id, &avail_comps, 32) {
30778                 c = komeda_pipeline_get_component(pipe, id);
30780                 seq_printf(sf, "\n------%s------\n", c->name);
30781 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
30782 index 5c085116de3f..e672b9cffee3 100644
30783 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
30784 +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
30785 @@ -1231,14 +1231,15 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
30786         struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
30787         struct komeda_component_state *c_st;
30788         struct komeda_component *c;
30789 -       u32 disabling_comps, id;
30790 +       u32 id;
30791 +       unsigned long disabling_comps;
30793         WARN_ON(!old);
30795         disabling_comps = (~new->active_comps) & old->active_comps;
30797         /* unbound all disabling component */
30798 -       dp_for_each_set_bit(id, disabling_comps) {
30799 +       for_each_set_bit(id, &disabling_comps, 32) {
30800                 c = komeda_pipeline_get_component(pipe, id);
30801                 c_st = komeda_component_get_state_and_set_user(c,
30802                                 drm_st, NULL, new->crtc);
30803 @@ -1286,7 +1287,8 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
30804         struct komeda_pipeline_state *old;
30805         struct komeda_component *c;
30806         struct komeda_component_state *c_st;
30807 -       u32 id, disabling_comps = 0;
30808 +       u32 id;
30809 +       unsigned long disabling_comps;
30811         old = komeda_pipeline_get_old_state(pipe, old_state);
30813 @@ -1296,10 +1298,10 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
30814                 disabling_comps = old->active_comps &
30815                                   pipe->standalone_disabled_comps;
30817 -       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
30818 +       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%lx.\n",
30819                          pipe->id, old->active_comps, disabling_comps);
30821 -       dp_for_each_set_bit(id, disabling_comps) {
30822 +       for_each_set_bit(id, &disabling_comps, 32) {
30823                 c = komeda_pipeline_get_component(pipe, id);
30824                 c_st = priv_to_comp_st(c->obj.state);
30826 @@ -1330,16 +1332,17 @@ void komeda_pipeline_update(struct komeda_pipeline *pipe,
30827         struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
30828         struct komeda_pipeline_state *old;
30829         struct komeda_component *c;
30830 -       u32 id, changed_comps = 0;
30831 +       u32 id;
30832 +       unsigned long changed_comps;
30834         old = komeda_pipeline_get_old_state(pipe, old_state);
30836         changed_comps = new->active_comps | old->active_comps;
30838 -       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
30839 +       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
30840                          pipe->id, new->active_comps, changed_comps);
30842 -       dp_for_each_set_bit(id, changed_comps) {
30843 +       for_each_set_bit(id, &changed_comps, 32) {
30844                 c = komeda_pipeline_get_component(pipe, id);
30846                 if (new->active_comps & BIT(c->id))
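The komeda changes above replace dp_for_each_set_bit(), which cast the address of a 32-bit mask to unsigned long * for for_each_set_bit(); on a 64-bit kernel that reads 4 bytes past the variable. Each caller now copies the u32 mask into a genuine unsigned long and iterates at most 32 bits (the DRM_DEBUG_ATOMIC format strings switch to %lx to match). The safe pattern, open-coded in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t avail_comps = 0x00000115;  /* e.g. pipe->avail_comps */
        unsigned long mask = avail_comps;   /* widen before iterating */

        /* for_each_set_bit(id, &mask, 32) in the kernel; open-coded here */
        for (unsigned int id = 0; id < 32; id++)
                if (mask & (1ul << id))
                        printf("component %u present\n", id);
        return 0;
}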
30847 diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
30848 index ea8164e7a6dc..01837bea18c2 100644
30849 --- a/drivers/gpu/drm/ast/ast_drv.c
30850 +++ b/drivers/gpu/drm/ast/ast_drv.c
30851 @@ -30,6 +30,7 @@
30852  #include <linux/module.h>
30853  #include <linux/pci.h>
30855 +#include <drm/drm_atomic_helper.h>
30856  #include <drm/drm_crtc_helper.h>
30857  #include <drm/drm_drv.h>
30858  #include <drm/drm_fb_helper.h>
30859 @@ -138,6 +139,7 @@ static void ast_pci_remove(struct pci_dev *pdev)
30860         struct drm_device *dev = pci_get_drvdata(pdev);
30862         drm_dev_unregister(dev);
30863 +       drm_atomic_helper_shutdown(dev);
30866  static int ast_drm_freeze(struct drm_device *dev)
30867 diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
30868 index 988b270fea5e..758c69aa7232 100644
30869 --- a/drivers/gpu/drm/ast/ast_mode.c
30870 +++ b/drivers/gpu/drm/ast/ast_mode.c
30871 @@ -688,7 +688,7 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
30872         unsigned int offset_x, offset_y;
30874         offset_x = AST_MAX_HWC_WIDTH - fb->width;
30875 -       offset_y = AST_MAX_HWC_WIDTH - fb->height;
30876 +       offset_y = AST_MAX_HWC_HEIGHT - fb->height;
30878         if (state->fb != old_state->fb) {
30879                 /* A new cursor image was installed. */
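A one-identifier copy-paste fix above: the cursor's vertical offset was derived from AST_MAX_HWC_WIDTH instead of AST_MAX_HWC_HEIGHT, which is only harmless while the two constants are equal. A tiny check with deliberately unequal (illustrative) maxima:

#include <assert.h>

#define MAX_HWC_WIDTH  64       /* illustrative values, not the driver's */
#define MAX_HWC_HEIGHT 128

int main(void)
{
        unsigned int fb_width = 32, fb_height = 32;

        /* old: offset_y = MAX_HWC_WIDTH - fb_height (wrong by 64 here) */
        assert(MAX_HWC_WIDTH - fb_width == 32);
        assert(MAX_HWC_HEIGHT - fb_height == 96);
        return 0;
}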
30880 diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
30881 index e4110d6ca7b3..bc60fc4728d7 100644
30882 --- a/drivers/gpu/drm/bridge/Kconfig
30883 +++ b/drivers/gpu/drm/bridge/Kconfig
30884 @@ -67,6 +67,7 @@ config DRM_LONTIUM_LT9611UXC
30885         depends on OF
30886         select DRM_PANEL_BRIDGE
30887         select DRM_KMS_HELPER
30888 +       select DRM_MIPI_DSI
30889         select REGMAP_I2C
30890         help
30891           Driver for Lontium LT9611UXC DSI to HDMI bridge
30892 @@ -151,6 +152,7 @@ config DRM_SII902X
30893         tristate "Silicon Image sii902x RGB/HDMI bridge"
30894         depends on OF
30895         select DRM_KMS_HELPER
30896 +       select DRM_MIPI_DSI
30897         select REGMAP_I2C
30898         select I2C_MUX
30899         select SND_SOC_HDMI_CODEC if SND_SOC
30900 @@ -200,6 +202,7 @@ config DRM_TOSHIBA_TC358767
30901         tristate "Toshiba TC358767 eDP bridge"
30902         depends on OF
30903         select DRM_KMS_HELPER
30904 +       select DRM_MIPI_DSI
30905         select REGMAP_I2C
30906         select DRM_PANEL
30907         help
30908 diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
30909 index 024ea2a570e7..9160fd80dd70 100644
30910 --- a/drivers/gpu/drm/bridge/analogix/Kconfig
30911 +++ b/drivers/gpu/drm/bridge/analogix/Kconfig
30912 @@ -30,6 +30,7 @@ config DRM_ANALOGIX_ANX7625
30913         tristate "Analogix Anx7625 MIPI to DP interface support"
30914         depends on DRM
30915         depends on OF
30916 +       select DRM_MIPI_DSI
30917         help
30918           ANX7625 is an ultra-low power 4K mobile HD transmitter
30919           designed for portable devices. It converts MIPI/DPI to
30920 diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
30921 index 0ddc37551194..c916f4b8907e 100644
30922 --- a/drivers/gpu/drm/bridge/panel.c
30923 +++ b/drivers/gpu/drm/bridge/panel.c
30924 @@ -87,6 +87,18 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
30926  static void panel_bridge_detach(struct drm_bridge *bridge)
30928 +       struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
30929 +       struct drm_connector *connector = &panel_bridge->connector;
30931 +       /*
30932 +        * Cleanup the connector if we know it was initialized.
30933 +        *
30934 +        * FIXME: This wouldn't be needed if the panel_bridge structure was
30935 +        * allocated with drmm_kzalloc(). This might be tricky since the
30936 +        * drm_device pointer can only be retrieved when the bridge is attached.
30937 +        */
30938 +       if (connector->dev)
30939 +               drm_connector_cleanup(connector);
30942  static void panel_bridge_pre_enable(struct drm_bridge *bridge)
30943 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
30944 index 309afe61afdd..9c75c8815056 100644
30945 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
30946 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
30947 @@ -1154,6 +1154,7 @@ static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
30949         req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
30950         drm_dp_encode_sideband_req(&req, msg);
30951 +       msg->path_msg = true;
30954  static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
30955 @@ -2824,15 +2825,21 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
30957         req_type = txmsg->msg[0] & 0x7f;
30958         if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
30959 -               req_type == DP_RESOURCE_STATUS_NOTIFY)
30960 +               req_type == DP_RESOURCE_STATUS_NOTIFY ||
30961 +               req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
30962                 hdr->broadcast = 1;
30963         else
30964                 hdr->broadcast = 0;
30965         hdr->path_msg = txmsg->path_msg;
30966 -       hdr->lct = mstb->lct;
30967 -       hdr->lcr = mstb->lct - 1;
30968 -       if (mstb->lct > 1)
30969 -               memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
30970 +       if (hdr->broadcast) {
30971 +               hdr->lct = 1;
30972 +               hdr->lcr = 6;
30973 +       } else {
30974 +               hdr->lct = mstb->lct;
30975 +               hdr->lcr = mstb->lct - 1;
30976 +       }
30978 +       memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
30980         return 0;
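Above, CLEAR_PAYLOAD_ID_TABLE is flagged as a path message and added to the request types that broadcast; per the MST sideband rules a broadcast header carries a fixed LCT of 1 and LCR of 6 rather than the target branch device's real link counts, and the RAD copy length now follows the header's own LCT. A reduced model of the header fill (the struct is a subset of drm_dp_sideband_msg_hdr with invented names):

#include <stdbool.h>
#include <string.h>

struct msg_hdr_model {
        unsigned int lct, lcr;
        unsigned char rad[8];
        bool broadcast;
};

static void fill_hdr(struct msg_hdr_model *hdr, bool broadcast,
                     unsigned int mstb_lct, const unsigned char *mstb_rad)
{
        hdr->broadcast = broadcast;
        if (broadcast) {
                hdr->lct = 1;           /* fixed for broadcast messages */
                hdr->lcr = 6;
        } else {
                hdr->lct = mstb_lct;
                hdr->lcr = mstb_lct - 1;
        }
        /* RAD length follows the header's own LCT: 0 bytes when lct == 1 */
        memcpy(hdr->rad, mstb_rad, hdr->lct / 2);
}

int main(void)
{
        struct msg_hdr_model hdr = { 0 };
        const unsigned char rad[8] = { 0 };

        fill_hdr(&hdr, true, 3, rad);   /* broadcast: lct=1, lcr=6, no RAD */
        return 0;
}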
30982 diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
30983 index 58f5dc2f6dd5..f6bdec7fa925 100644
30984 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
30985 +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
30986 @@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
30987         .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
30988  };
30990 +static const struct drm_dmi_panel_orientation_data onegx1_pro = {
30991 +       .width = 1200,
30992 +       .height = 1920,
30993 +       .bios_dates = (const char * const []){ "12/17/2020", NULL },
30994 +       .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
30997  static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
30998         .width = 720,
30999         .height = 1280,
31000 @@ -211,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = {
31001                   DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
31002                 },
31003                 .driver_data = (void *)&lcd1200x1920_rightside_up,
31004 +       }, {    /* OneGX1 Pro */
31005 +               .matches = {
31006 +                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
31007 +                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
31008 +                 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
31009 +               },
31010 +               .driver_data = (void *)&onegx1_pro,
31011         }, {    /* VIOS LTH17 */
31012                 .matches = {
31013                   DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
31014 diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
31015 index ad59a51eab6d..e7e1ee2aa352 100644
31016 --- a/drivers/gpu/drm/drm_probe_helper.c
31017 +++ b/drivers/gpu/drm/drm_probe_helper.c
31018 @@ -624,6 +624,7 @@ static void output_poll_execute(struct work_struct *work)
31019         struct drm_connector_list_iter conn_iter;
31020         enum drm_connector_status old_status;
31021         bool repoll = false, changed;
31022 +       u64 old_epoch_counter;
31024         if (!dev->mode_config.poll_enabled)
31025                 return;
31026 @@ -660,8 +661,9 @@ static void output_poll_execute(struct work_struct *work)
31028                 repoll = true;
31030 +               old_epoch_counter = connector->epoch_counter;
31031                 connector->status = drm_helper_probe_detect(connector, NULL, false);
31032 -               if (old_status != connector->status) {
31033 +               if (old_epoch_counter != connector->epoch_counter) {
31034                         const char *old, *new;
31036                         /*
31037 @@ -690,6 +692,9 @@ static void output_poll_execute(struct work_struct *work)
31038                                       connector->base.id,
31039                                       connector->name,
31040                                       old, new);
31041 +                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
31042 +                                     connector->base.id, connector->name,
31043 +                                     old_epoch_counter, connector->epoch_counter);
31045                         changed = true;
31046                 }
31047 diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
31048 index 775d89b6c3fc..97a785aa8839 100644
31049 --- a/drivers/gpu/drm/i915/display/intel_dp.c
31050 +++ b/drivers/gpu/drm/i915/display/intel_dp.c
31051 @@ -1174,44 +1174,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
31052         return -EINVAL;
31055 -/* Optimize link config in order: max bpp, min lanes, min clock */
31056 -static int
31057 -intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
31058 -                                 struct intel_crtc_state *pipe_config,
31059 -                                 const struct link_config_limits *limits)
31061 -       const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
31062 -       int bpp, clock, lane_count;
31063 -       int mode_rate, link_clock, link_avail;
31065 -       for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
31066 -               int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
31068 -               mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
31069 -                                                  output_bpp);
31071 -               for (lane_count = limits->min_lane_count;
31072 -                    lane_count <= limits->max_lane_count;
31073 -                    lane_count <<= 1) {
31074 -                       for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
31075 -                               link_clock = intel_dp->common_rates[clock];
31076 -                               link_avail = intel_dp_max_data_rate(link_clock,
31077 -                                                                   lane_count);
31079 -                               if (mode_rate <= link_avail) {
31080 -                                       pipe_config->lane_count = lane_count;
31081 -                                       pipe_config->pipe_bpp = bpp;
31082 -                                       pipe_config->port_clock = link_clock;
31084 -                                       return 0;
31085 -                               }
31086 -                       }
31087 -               }
31088 -       }
31090 -       return -EINVAL;
31093  static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
31095         int i, num_bpc;
31096 @@ -1461,22 +1423,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
31097             intel_dp_can_bigjoiner(intel_dp))
31098                 pipe_config->bigjoiner = true;
31100 -       if (intel_dp_is_edp(intel_dp))
31101 -               /*
31102 -                * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
31103 -                * section A.1: "It is recommended that the minimum number of
31104 -                * lanes be used, using the minimum link rate allowed for that
31105 -                * lane configuration."
31106 -                *
31107 -                * Note that we fall back to the max clock and lane count for eDP
31108 -                * panels that fail with the fast optimal settings (see
31109 -                * intel_dp->use_max_params), in which case the fast vs. wide
31110 -                * choice doesn't matter.
31111 -                */
31112 -               ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
31113 -       else
31114 -               /* Optimize for slow and wide. */
31115 -               ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
31116 +       /*
31117 +        * Optimize for slow and wide for everything, because there are
31118 +        * eDP 1.3 and 1.4 panels that don't work well with fast and narrow.
31119 +        */
31120 +       ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
31122         /* enable compression if the mode doesn't fit available BW */
31123         drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
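
Per the new comment, every output now takes the "slow and wide" search. A
hedged standalone approximation of that ordering (the loop nesting is an
assumption mirroring the removed _fast variant with the clock preferred over
lanes; it is not copied from intel_dp_compute_link_config_wide): for a given
bpp, pick the lowest link clock first, then the fewest lanes at that clock.

#include <stdbool.h>

static bool pick_slow_and_wide(long long mode_rate,           /* rate needed */
                               const int *rates, int nrates,  /* ascending */
                               int min_lanes, int max_lanes,
                               int *out_rate, int *out_lanes)
{
        for (int r = 0; r < nrates; r++)                      /* min clock */
                for (int lanes = min_lanes; lanes <= max_lanes;
                     lanes <<= 1)                             /* min lanes */
                        if (mode_rate <= (long long)rates[r] * lanes) {
                                *out_rate = rates[r];
                                *out_lanes = lanes;
                                return true;
                        }
        return false;
}

(Judging by the removed variant, the wide helper likewise walks bpp from the
maximum downward and runs a search like this once per bpp.)
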
31124 @@ -4537,7 +4488,18 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
31125         drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
31127         for (;;) {
31128 -               u8 esi[DP_DPRX_ESI_LEN] = {};
31129 +               /*
31130 +                * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
31131 +                * pass in "esi+10" to drm_dp_channel_eq_ok(), which
31132 +                * takes a 6-byte array. So we actually need 16 bytes
31133 +                * here.
31134 +                *
31135 +                * Somebody who knows what the limits actually are
31136 +                * should check this, but for now this is at least
31137 +                * harmless and avoids a valid compiler warning about
31138 +                * using more of the array than we have allocated.
31139 +                */
31140 +               u8 esi[DP_DPRX_ESI_LEN+2] = {};
31141                 bool handled;
31142                 int retry;
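
The over-allocation the comment describes can be pinned down at compile time;
a tiny self-contained check (the constant comes from the comment above, the
assertion itself is illustrative):

#include <assert.h>

#define DP_DPRX_ESI_LEN 14

/* drm_dp_channel_eq_ok() reads 6 bytes starting at esi + 10, so the
 * buffer must reach offset 16 == DP_DPRX_ESI_LEN + 2 */
static_assert(10 + 6 <= DP_DPRX_ESI_LEN + 2,
              "esi[] must cover the 6 bytes read from esi + 10");
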
31144 diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
31145 index f455040fa989..7cbc81da80b7 100644
31146 --- a/drivers/gpu/drm/i915/display/intel_overlay.c
31147 +++ b/drivers/gpu/drm/i915/display/intel_overlay.c
31148 @@ -383,7 +383,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
31149                 i830_overlay_clock_gating(dev_priv, true);
31152 -static void
31153 +__i915_active_call static void
31154  intel_overlay_last_flip_retire(struct i915_active *active)
31156         struct intel_overlay *overlay =
31157 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
31158 index ec28a6cde49b..0b2434e29d00 100644
31159 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
31160 +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
31161 @@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
31162         struct i915_ggtt_view view;
31164         if (i915_gem_object_is_tiled(obj))
31165 -               chunk = roundup(chunk, tile_row_pages(obj));
31166 +               chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
31168         view.type = I915_GGTT_VIEW_PARTIAL;
31169         view.partial.offset = rounddown(page_offset, chunk);
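
Why the "?: 1" matters: roundup() divides by its second argument, and the
GNU C shorthand x ?: y means x ? x : y, so a zero result from
tile_row_pages() degrades to 1 instead of dividing by zero. A simplified
sketch (this roundup() macro is an illustrative stand-in):

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

static unsigned long chunk_for(unsigned long chunk, unsigned long tile_rows)
{
        return roundup(chunk, tile_rows ?: 1);  /* never divides by zero */
}
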
31170 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
31171 index 43028f3539a6..76574e245916 100644
31172 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
31173 +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
31174 @@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
31175             i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
31176                 GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
31177                 i915_gem_object_set_tiling_quirk(obj);
31178 +               GEM_BUG_ON(!list_empty(&obj->mm.link));
31179 +               atomic_inc(&obj->mm.shrink_pin);
31180                 shrinkable = false;
31181         }
31183 diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
31184 index de575fdb033f..21f08e53889c 100644
31185 --- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
31186 +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
31187 @@ -397,7 +397,10 @@ static void emit_batch(struct i915_vma * const vma,
31188         gen7_emit_pipeline_invalidate(&cmds);
31189         batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
31190         batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
31191 -       batch_add(&cmds, 0xffff0000);
31192 +       batch_add(&cmds, 0xffff0000 |
31193 +                       ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
31194 +                        HIZ_RAW_STALL_OPT_DISABLE :
31195 +                        0));
31196         batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
31197         batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
31198         gen7_emit_pipeline_invalidate(&cmds);
31199 diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
31200 index 755522ced60d..3ae16945bd43 100644
31201 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
31202 +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
31203 @@ -630,7 +630,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
31205                 err = pin_pt_dma(vm, pde->pt.base);
31206                 if (err) {
31207 -                       i915_gem_object_put(pde->pt.base);
31208                         free_pd(vm, pde);
31209                         return err;
31210                 }
31211 diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
31212 index 67de2b189598..4b09490c20c0 100644
31213 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
31214 +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
31215 @@ -670,8 +670,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
31216                  * banks of memory are paired and unswizzled on the
31217                  * uneven portion, so leave that as unknown.
31218                  */
31219 -               if (intel_uncore_read(uncore, C0DRB3) ==
31220 -                   intel_uncore_read(uncore, C1DRB3)) {
31221 +               if (intel_uncore_read16(uncore, C0DRB3) ==
31222 +                   intel_uncore_read16(uncore, C1DRB3)) {
31223                         swizzle_x = I915_BIT_6_SWIZZLE_9_10;
31224                         swizzle_y = I915_BIT_6_SWIZZLE_9;
31225                 }
31226 diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
31227 index d1d8ee4a5f16..57578bf28d77 100644
31228 --- a/drivers/gpu/drm/i915/gvt/gvt.c
31229 +++ b/drivers/gpu/drm/i915/gvt/gvt.c
31230 @@ -126,7 +126,7 @@ static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups
31231         return true;
31234 -static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
31235 +static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
31237         int i, j;
31238         struct intel_vgpu_type *type;
31239 @@ -144,7 +144,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
31240                 gvt_vgpu_type_groups[i] = group;
31241         }
31243 -       return true;
31244 +       return 0;
31246  unwind:
31247         for (j = 0; j < i; j++) {
31248 @@ -152,7 +152,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
31249                 kfree(group);
31250         }
31252 -       return false;
31253 +       return -ENOMEM;
31256  static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
31257 @@ -360,7 +360,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
31258                 goto out_clean_thread;
31260         ret = intel_gvt_init_vgpu_type_groups(gvt);
31261 -       if (ret == false) {
31262 +       if (ret) {
31263                 gvt_err("failed to init vgpu type groups: %d\n", ret);
31264                 goto out_clean_types;
31265         }
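
The hunks above convert a bool-returning init helper to the kernel's usual
errno convention: 0 on success, a negative errno on failure, tested with
if (ret). A minimal userspace sketch of that idiom, including the partial
unwind (names here are hypothetical):

#include <errno.h>
#include <stdlib.h>

static int init_groups(void **groups, int n)
{
        for (int i = 0; i < n; i++) {
                groups[i] = malloc(64);
                if (!groups[i]) {
                        while (i--)            /* unwind what succeeded */
                                free(groups[i]);
                        return -ENOMEM;        /* not "false" */
                }
        }
        return 0;                              /* not "true" */
}
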
31266 diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
31267 index 3bc616cc1ad2..ea660e541c90 100644
31268 --- a/drivers/gpu/drm/i915/i915_active.c
31269 +++ b/drivers/gpu/drm/i915/i915_active.c
31270 @@ -1156,7 +1156,8 @@ static int auto_active(struct i915_active *ref)
31271         return 0;
31274 -static void auto_retire(struct i915_active *ref)
31275 +__i915_active_call static void
31276 +auto_retire(struct i915_active *ref)
31278         i915_active_put(ref);
31280 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
31281 index 8e9cb44e66e5..4ecb813c9bc7 100644
31282 --- a/drivers/gpu/drm/i915/i915_drv.c
31283 +++ b/drivers/gpu/drm/i915/i915_drv.c
31284 @@ -1049,6 +1049,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
31285  void i915_driver_shutdown(struct drm_i915_private *i915)
31287         disable_rpm_wakeref_asserts(&i915->runtime_pm);
31288 +       intel_runtime_pm_disable(&i915->runtime_pm);
31289 +       intel_power_domains_disable(i915);
31291         i915_gem_suspend(i915);
31293 @@ -1064,7 +1066,15 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
31294         intel_suspend_encoders(i915);
31295         intel_shutdown_encoders(i915);
31297 +       /*
31298 +        * The only requirement is to reboot with display DC states disabled;
31299 +        * for now, leave all display power wells in the INIT power domain
31300 +        * enabled, matching the driver reload sequence.
31301 +        */
31302 +       intel_power_domains_driver_remove(i915);
31303         enable_rpm_wakeref_asserts(&i915->runtime_pm);
31305 +       intel_runtime_pm_driver_release(&i915->runtime_pm);
31308  static bool suspend_to_idle(struct drm_i915_private *dev_priv)
31309 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
31310 index aa4490934469..19351addb68c 100644
31311 --- a/drivers/gpu/drm/i915/i915_gem.c
31312 +++ b/drivers/gpu/drm/i915/i915_gem.c
31313 @@ -972,12 +972,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
31314                 obj->mm.madv = args->madv;
31316         if (i915_gem_object_has_pages(obj)) {
31317 -               struct list_head *list;
31318 +               unsigned long flags;
31320 -               if (i915_gem_object_is_shrinkable(obj)) {
31321 -                       unsigned long flags;
31323 -                       spin_lock_irqsave(&i915->mm.obj_lock, flags);
31324 +               spin_lock_irqsave(&i915->mm.obj_lock, flags);
31325 +               if (!list_empty(&obj->mm.link)) {
31326 +                       struct list_head *list;
31328                         if (obj->mm.madv != I915_MADV_WILLNEED)
31329                                 list = &i915->mm.purge_list;
31330 @@ -985,8 +984,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
31331                                 list = &i915->mm.shrink_list;
31332                         list_move_tail(&obj->mm.link, list);
31334 -                       spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
31335                 }
31336 +               spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
31337         }
31339         /* if the object is no longer attached, discard its backing storage */
31340 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
31341 index 4b4d8d034782..4ba20f959a71 100644
31342 --- a/drivers/gpu/drm/i915/intel_pm.c
31343 +++ b/drivers/gpu/drm/i915/intel_pm.c
31344 @@ -2993,7 +2993,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
31346  static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
31347                                    const char *name,
31348 -                                  const u16 wm[8])
31349 +                                  const u16 wm[])
31351         int level, max_level = ilk_wm_max_level(dev_priv);
31353 diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
31354 index 7bb31fbee29d..fd8870edde0e 100644
31355 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
31356 +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
31357 @@ -554,7 +554,7 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
31358                 height = state->src_h >> 16;
31359                 cpp = state->fb->format->cpp[0];
31361 -               if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY)
31362 +               if (!priv->soc_info->has_osd || plane->type == DRM_PLANE_TYPE_OVERLAY)
31363                         hwdesc = &priv->dma_hwdescs->hwdesc_f0;
31364                 else
31365                         hwdesc = &priv->dma_hwdescs->hwdesc_f1;
31366 @@ -826,6 +826,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
31367         const struct jz_soc_info *soc_info;
31368         struct ingenic_drm *priv;
31369         struct clk *parent_clk;
31370 +       struct drm_plane *primary;
31371         struct drm_bridge *bridge;
31372         struct drm_panel *panel;
31373         struct drm_encoder *encoder;
31374 @@ -940,9 +941,11 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
31375         if (soc_info->has_osd)
31376                 priv->ipu_plane = drm_plane_from_index(drm, 0);
31378 -       drm_plane_helper_add(&priv->f1, &ingenic_drm_plane_helper_funcs);
31379 +       primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0;
31381 -       ret = drm_universal_plane_init(drm, &priv->f1, 1,
31382 +       drm_plane_helper_add(primary, &ingenic_drm_plane_helper_funcs);
31384 +       ret = drm_universal_plane_init(drm, primary, 1,
31385                                        &ingenic_drm_primary_plane_funcs,
31386                                        priv->soc_info->formats_f1,
31387                                        priv->soc_info->num_formats_f1,
31388 @@ -954,7 +957,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
31390         drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs);
31392 -       ret = drm_crtc_init_with_planes(drm, &priv->crtc, &priv->f1,
31393 +       ret = drm_crtc_init_with_planes(drm, &priv->crtc, primary,
31394                                         NULL, &ingenic_drm_crtc_funcs, NULL);
31395         if (ret) {
31396                 dev_err(dev, "Failed to init CRTC: %i\n", ret);
31397 diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
31398 index 2314c8122992..b3fd3501c412 100644
31399 --- a/drivers/gpu/drm/mcde/mcde_dsi.c
31400 +++ b/drivers/gpu/drm/mcde/mcde_dsi.c
31401 @@ -760,7 +760,7 @@ static void mcde_dsi_start(struct mcde_dsi *d)
31402                 DSI_MCTL_MAIN_DATA_CTL_BTA_EN |
31403                 DSI_MCTL_MAIN_DATA_CTL_READ_EN |
31404                 DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN;
31405 -       if (d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
31406 +       if (!(d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
31407                 val |= DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN;
31408         writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
31410 diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
31411 index 8ee55f9e2954..7fb358167f8d 100644
31412 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
31413 +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
31414 @@ -153,7 +153,7 @@ struct mtk_hdmi_conf {
31415  struct mtk_hdmi {
31416         struct drm_bridge bridge;
31417         struct drm_bridge *next_bridge;
31418 -       struct drm_connector conn;
31419 +       struct drm_connector *curr_conn; /* current connector (only valid when 'enabled') */
31420         struct device *dev;
31421         const struct mtk_hdmi_conf *conf;
31422         struct phy *phy;
31423 @@ -186,11 +186,6 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
31424         return container_of(b, struct mtk_hdmi, bridge);
31427 -static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c)
31429 -       return container_of(c, struct mtk_hdmi, conn);
31432  static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
31434         return readl(hdmi->regs + offset);
31435 @@ -974,7 +969,7 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
31436         ssize_t err;
31438         err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
31439 -                                                      &hdmi->conn, mode);
31440 +                                                      hdmi->curr_conn, mode);
31441         if (err < 0) {
31442                 dev_err(hdmi->dev,
31443                         "Failed to get AVI infoframe from mode: %zd\n", err);
31444 @@ -1054,7 +1049,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
31445         ssize_t err;
31447         err = drm_hdmi_vendor_infoframe_from_display_mode(&frame,
31448 -                                                         &hdmi->conn, mode);
31449 +                                                         hdmi->curr_conn, mode);
31450         if (err) {
31451                 dev_err(hdmi->dev,
31452                         "Failed to get vendor infoframe from mode: %zd\n", err);
31453 @@ -1201,48 +1196,16 @@ mtk_hdmi_update_plugged_status(struct mtk_hdmi *hdmi)
31454                connector_status_connected : connector_status_disconnected;
31457 -static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
31458 -                                                 bool force)
31459 +static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi)
31461 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
31462         return mtk_hdmi_update_plugged_status(hdmi);
31465 -static void hdmi_conn_destroy(struct drm_connector *conn)
31467 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
31469 -       mtk_cec_set_hpd_event(hdmi->cec_dev, NULL, NULL);
31471 -       drm_connector_cleanup(conn);
31474 -static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
31476 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
31477 -       struct edid *edid;
31478 -       int ret;
31480 -       if (!hdmi->ddc_adpt)
31481 -               return -ENODEV;
31483 -       edid = drm_get_edid(conn, hdmi->ddc_adpt);
31484 -       if (!edid)
31485 -               return -ENODEV;
31487 -       hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
31489 -       drm_connector_update_edid_property(conn, edid);
31491 -       ret = drm_add_edid_modes(conn, edid);
31492 -       kfree(edid);
31493 -       return ret;
31496 -static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
31497 -                                   struct drm_display_mode *mode)
31498 +static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
31499 +                                     const struct drm_display_info *info,
31500 +                                     const struct drm_display_mode *mode)
31502 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
31503 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31504         struct drm_bridge *next_bridge;
31506         dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
31507 @@ -1267,74 +1230,57 @@ static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
31508         return drm_mode_validate_size(mode, 0x1fff, 0x1fff);
31511 -static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn)
31513 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
31515 -       return hdmi->bridge.encoder;
31518 -static const struct drm_connector_funcs mtk_hdmi_connector_funcs = {
31519 -       .detect = hdmi_conn_detect,
31520 -       .fill_modes = drm_helper_probe_single_connector_modes,
31521 -       .destroy = hdmi_conn_destroy,
31522 -       .reset = drm_atomic_helper_connector_reset,
31523 -       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
31524 -       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
31527 -static const struct drm_connector_helper_funcs
31528 -               mtk_hdmi_connector_helper_funcs = {
31529 -       .get_modes = mtk_hdmi_conn_get_modes,
31530 -       .mode_valid = mtk_hdmi_conn_mode_valid,
31531 -       .best_encoder = mtk_hdmi_conn_best_enc,
31534  static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
31536         struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
31538 -       if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev)
31539 +       if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev) {
31540 +               static enum drm_connector_status status;
31542 +               status = mtk_hdmi_detect(hdmi);
31543                 drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
31544 +               drm_bridge_hpd_notify(&hdmi->bridge, status);
31545 +       }
31548  /*
31549   * Bridge callbacks
31550   */
31552 +static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridge)
31554 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31556 +       return mtk_hdmi_detect(hdmi);
31559 +static struct edid *mtk_hdmi_bridge_get_edid(struct drm_bridge *bridge,
31560 +                                            struct drm_connector *connector)
31562 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31563 +       struct edid *edid;
31565 +       if (!hdmi->ddc_adpt)
31566 +               return NULL;
31567 +       edid = drm_get_edid(connector, hdmi->ddc_adpt);
31568 +       if (!edid)
31569 +               return NULL;
31570 +       hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
31571 +       return edid;
31574  static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
31575                                   enum drm_bridge_attach_flags flags)
31577         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31578         int ret;
31580 -       if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
31581 -               DRM_ERROR("Fix bridge driver to make connector optional!");
31582 +       if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
31583 +               DRM_ERROR("%s: The flag DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied\n",
31584 +                         __func__);
31585                 return -EINVAL;
31586         }
31588 -       ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
31589 -                                         &mtk_hdmi_connector_funcs,
31590 -                                         DRM_MODE_CONNECTOR_HDMIA,
31591 -                                         hdmi->ddc_adpt);
31592 -       if (ret) {
31593 -               dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
31594 -               return ret;
31595 -       }
31596 -       drm_connector_helper_add(&hdmi->conn, &mtk_hdmi_connector_helper_funcs);
31598 -       hdmi->conn.polled = DRM_CONNECTOR_POLL_HPD;
31599 -       hdmi->conn.interlace_allowed = true;
31600 -       hdmi->conn.doublescan_allowed = false;
31602 -       ret = drm_connector_attach_encoder(&hdmi->conn,
31603 -                                               bridge->encoder);
31604 -       if (ret) {
31605 -               dev_err(hdmi->dev,
31606 -                       "Failed to attach connector to encoder: %d\n", ret);
31607 -               return ret;
31608 -       }
31610         if (hdmi->next_bridge) {
31611                 ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
31612                                         bridge, flags);
31613 @@ -1357,7 +1303,8 @@ static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
31614         return true;
31617 -static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
31618 +static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
31619 +                                          struct drm_bridge_state *old_bridge_state)
31621         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31623 @@ -1368,10 +1315,13 @@ static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
31624         clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
31625         clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
31627 +       hdmi->curr_conn = NULL;
31629         hdmi->enabled = false;
31632 -static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
31633 +static void mtk_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
31634 +                                               struct drm_bridge_state *old_state)
31636         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31638 @@ -1406,7 +1356,8 @@ static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
31639         drm_mode_copy(&hdmi->mode, adjusted_mode);
31642 -static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
31643 +static void mtk_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
31644 +                                             struct drm_bridge_state *old_state)
31646         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31648 @@ -1426,10 +1377,16 @@ static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
31649                 mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
31652 -static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
31653 +static void mtk_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
31654 +                                         struct drm_bridge_state *old_state)
31656 +       struct drm_atomic_state *state = old_state->base.state;
31657         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
31659 +       /* Retrieve the connector through the atomic state. */
31660 +       hdmi->curr_conn = drm_atomic_get_new_connector_for_encoder(state,
31661 +                                                                  bridge->encoder);
31663         mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode);
31664         clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
31665         clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
31666 @@ -1440,13 +1397,19 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
31669  static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
31670 +       .mode_valid = mtk_hdmi_bridge_mode_valid,
31671 +       .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
31672 +       .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
31673 +       .atomic_reset = drm_atomic_helper_bridge_reset,
31674         .attach = mtk_hdmi_bridge_attach,
31675         .mode_fixup = mtk_hdmi_bridge_mode_fixup,
31676 -       .disable = mtk_hdmi_bridge_disable,
31677 -       .post_disable = mtk_hdmi_bridge_post_disable,
31678 +       .atomic_disable = mtk_hdmi_bridge_atomic_disable,
31679 +       .atomic_post_disable = mtk_hdmi_bridge_atomic_post_disable,
31680         .mode_set = mtk_hdmi_bridge_mode_set,
31681 -       .pre_enable = mtk_hdmi_bridge_pre_enable,
31682 -       .enable = mtk_hdmi_bridge_enable,
31683 +       .atomic_pre_enable = mtk_hdmi_bridge_atomic_pre_enable,
31684 +       .atomic_enable = mtk_hdmi_bridge_atomic_enable,
31685 +       .detect = mtk_hdmi_bridge_detect,
31686 +       .get_edid = mtk_hdmi_bridge_get_edid,
31687  };
31689  static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
31690 @@ -1662,8 +1625,10 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
31692         struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
31694 -       memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
31696 +       if (hdmi->enabled)
31697 +               memcpy(buf, hdmi->curr_conn->eld, min(sizeof(hdmi->curr_conn->eld), len));
31698 +       else
31699 +               memset(buf, 0, len);
31700         return 0;
31703 @@ -1755,6 +1720,9 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
31705         hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
31706         hdmi->bridge.of_node = pdev->dev.of_node;
31707 +       hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
31708 +                        | DRM_BRIDGE_OP_HPD;
31709 +       hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
31710         drm_bridge_add(&hdmi->bridge);
31712         ret = mtk_hdmi_clk_enable_audio(hdmi);
31713 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
31714 index 91cf46f84025..3d55e153fa9c 100644
31715 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
31716 +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
31717 @@ -246,7 +246,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
31720  struct a6xx_gmu_oob_bits {
31721 -       int set, ack, set_new, ack_new;
31722 +       int set, ack, set_new, ack_new, clear, clear_new;
31723         const char *name;
31724  };
31726 @@ -260,6 +260,8 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
31727                 .ack = 24,
31728                 .set_new = 30,
31729                 .ack_new = 31,
31730 +               .clear = 24,
31731 +               .clear_new = 31,
31732         },
31734         [GMU_OOB_PERFCOUNTER_SET] = {
31735 @@ -268,18 +270,22 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
31736                 .ack = 25,
31737                 .set_new = 28,
31738                 .ack_new = 30,
31739 +               .clear = 25,
31740 +               .clear_new = 29,
31741         },
31743         [GMU_OOB_BOOT_SLUMBER] = {
31744                 .name = "BOOT_SLUMBER",
31745                 .set = 22,
31746                 .ack = 30,
31747 +               .clear = 30,
31748         },
31750         [GMU_OOB_DCVS_SET] = {
31751                 .name = "GPU_DCVS",
31752                 .set = 23,
31753                 .ack = 31,
31754 +               .clear = 31,
31755         },
31756  };
31758 @@ -335,9 +341,9 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
31759                 return;
31761         if (gmu->legacy)
31762 -               bit = a6xx_gmu_oob_bits[state].ack;
31763 +               bit = a6xx_gmu_oob_bits[state].clear;
31764         else
31765 -               bit = a6xx_gmu_oob_bits[state].ack_new;
31766 +               bit = a6xx_gmu_oob_bits[state].clear_new;
31768         gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
31770 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
31771 index d553f62f4eeb..b4d8e1b01ee4 100644
31772 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
31773 +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
31774 @@ -1153,10 +1153,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
31776         struct device_node *phandle;
31778 -       a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
31779 -       if (IS_ERR(a6xx_gpu->llc_mmio))
31780 -               return;
31782         /*
31783          * There is a different programming path for targets with an mmu500
31784          * attached, so detect if that is the case
31785 @@ -1166,6 +1162,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
31786                 of_device_is_compatible(phandle, "arm,mmu-500"));
31787         of_node_put(phandle);
31789 +       if (a6xx_gpu->have_mmu500)
31790 +               a6xx_gpu->llc_mmio = NULL;
31791 +       else
31792 +               a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
31794         a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
31795         a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
31797 diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
31798 index 189f3533525c..e4444452759c 100644
31799 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
31800 +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
31801 @@ -22,7 +22,7 @@
31802         (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
31804  #define VIG_SM8250_MASK \
31805 -       (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE))
31806 +       (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
31808  #define DMA_SDM845_MASK \
31809         (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
31810 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
31811 index ff2c1d583c79..0392d4dfe270 100644
31812 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
31813 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
31814 @@ -20,7 +20,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
31816         struct mdp5_kms *mdp5_kms = get_kms(encoder);
31817         struct device *dev = encoder->dev->dev;
31818 -       u32 total_lines_x100, vclks_line, cfg;
31819 +       u32 total_lines, vclks_line, cfg;
31820         long vsync_clk_speed;
31821         struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
31822         int pp_id = mixer->pp;
31823 @@ -30,8 +30,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
31824                 return -EINVAL;
31825         }
31827 -       total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
31828 -       if (!total_lines_x100) {
31829 +       total_lines = mode->vtotal * drm_mode_vrefresh(mode);
31830 +       if (!total_lines) {
31831                 DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
31832                               __func__, mode->vtotal, drm_mode_vrefresh(mode));
31833                 return -EINVAL;
31834 @@ -43,15 +43,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
31835                                                         vsync_clk_speed);
31836                 return -EINVAL;
31837         }
31838 -       vclks_line = vsync_clk_speed * 100 / total_lines_x100;
31839 +       vclks_line = vsync_clk_speed / total_lines;
31841         cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
31842                 | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
31843         cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
31845 +       /*
31846 +        * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks
31847 +        * of the vsync_clk, equating to roughly half the desired panel refresh
31848 +        * rate. This is only necessary as a stability fallback if interrupts
31849 +        * from the panel arrive too late or not at all, but it is currently
31850 +        * used by default because these panel interrupts are not wired up yet.
31851 +        */
31852         mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
31853         mdp5_write(mdp5_kms,
31854 -               REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
31855 +               REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
31857         mdp5_write(mdp5_kms,
31858                 REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
31859         mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
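
A worked example of the comment's arithmetic; all numbers below are
illustrative assumptions, not values from the patch:

#include <stdio.h>

int main(void)
{
        unsigned vtotal = 2000, vrefresh = 60;      /* assumed panel timing */
        unsigned long vsync_clk = 19200000;         /* assumed 19.2 MHz */

        unsigned long total_lines = vtotal * vrefresh;       /* 120000 */
        unsigned long vclks_line = vsync_clk / total_lines;  /* 160 */

        /* SYNC_CONFIG_HEIGHT is 2 * vtotal, so the counter wraps every
         * vclks_line * 2 * vtotal ticks of the vsync_clk */
        double period = (double)vclks_line * 2 * vtotal / vsync_clk;
        printf("fallback vsync every %.4f s (~%.1f Hz)\n", period, 1.0 / period);
        return 0;                                   /* ~0.0333 s, ~30 Hz */
}
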
31860 diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
31861 index 82a8673ab8da..d7e4a39a904e 100644
31862 --- a/drivers/gpu/drm/msm/dp/dp_audio.c
31863 +++ b/drivers/gpu/drm/msm/dp/dp_audio.c
31864 @@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
31865         dp_audio_setup_acr(audio);
31866         dp_audio_safe_to_exit_level(audio);
31867         dp_audio_enable(audio, true);
31868 +       dp_display_signal_audio_start(dp_display);
31869         dp_display->audio_enabled = true;
31871  end:
31872 diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
31873 index 5a39da6e1eaf..1784e119269b 100644
31874 --- a/drivers/gpu/drm/msm/dp/dp_display.c
31875 +++ b/drivers/gpu/drm/msm/dp/dp_display.c
31876 @@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
31877         return 0;
31880 +void dp_display_signal_audio_start(struct msm_dp *dp_display)
31882 +       struct dp_display_private *dp;
31884 +       dp = container_of(dp_display, struct dp_display_private, dp_display);
31886 +       reinit_completion(&dp->audio_comp);
31889  void dp_display_signal_audio_complete(struct msm_dp *dp_display)
31891         struct dp_display_private *dp;
31892 @@ -586,10 +595,8 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
31893         mutex_lock(&dp->event_mutex);
31895         state = dp->hpd_state;
31896 -       if (state == ST_CONNECT_PENDING) {
31897 -               dp_display_enable(dp, 0);
31898 +       if (state == ST_CONNECT_PENDING)
31899                 dp->hpd_state = ST_CONNECTED;
31900 -       }
31902         mutex_unlock(&dp->event_mutex);
31904 @@ -651,7 +658,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
31905         dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
31907         /* signal the disconnect event early to ensure proper teardown */
31908 -       reinit_completion(&dp->audio_comp);
31909         dp_display_handle_plugged_change(g_dp_display, false);
31911         dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
31912 @@ -669,10 +675,8 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
31913         mutex_lock(&dp->event_mutex);
31915         state =  dp->hpd_state;
31916 -       if (state == ST_DISCONNECT_PENDING) {
31917 -               dp_display_disable(dp, 0);
31918 +       if (state == ST_DISCONNECT_PENDING)
31919                 dp->hpd_state = ST_DISCONNECTED;
31920 -       }
31922         mutex_unlock(&dp->event_mutex);
31924 @@ -898,7 +902,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
31925         /* wait only if audio was enabled */
31926         if (dp_display->audio_enabled) {
31927                 /* signal the disconnect event */
31928 -               reinit_completion(&dp->audio_comp);
31929                 dp_display_handle_plugged_change(dp_display, false);
31930                 if (!wait_for_completion_timeout(&dp->audio_comp,
31931                                 HZ * 5))
31932 @@ -1272,7 +1275,12 @@ static int dp_pm_resume(struct device *dev)
31934         status = dp_catalog_link_is_connected(dp->catalog);
31936 -       if (status)
31937 +       /*
31938 +        * Cannot declare the display connected unless the
31939 +        * HDMI cable is plugged in and the sink_count of
31940 +        * the dongle becomes 1
31941 +        */
31942 +       if (status && dp->link->sink_count)
31943                 dp->dp_display.is_connected = true;
31944         else
31945                 dp->dp_display.is_connected = false;
31946 diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
31947 index 6092ba1ed85e..5173c89eedf7 100644
31948 --- a/drivers/gpu/drm/msm/dp/dp_display.h
31949 +++ b/drivers/gpu/drm/msm/dp/dp_display.h
31950 @@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
31951  int dp_display_request_irq(struct msm_dp *dp_display);
31952  bool dp_display_check_video_test(struct msm_dp *dp_display);
31953  int dp_display_get_test_bpp(struct msm_dp *dp_display);
31954 +void dp_display_signal_audio_start(struct msm_dp *dp_display);
31955  void dp_display_signal_audio_complete(struct msm_dp *dp_display);
31957  #endif /* _DP_DISPLAY_H_ */
31958 diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
31959 index 5b8fe32022b5..e1c90fa47411 100644
31960 --- a/drivers/gpu/drm/msm/dp/dp_hpd.c
31961 +++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
31962 @@ -34,8 +34,8 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
31964         dp_usbpd->hpd_high = hpd;
31966 -       if (!hpd_priv->dp_cb && !hpd_priv->dp_cb->configure
31967 -                               && !hpd_priv->dp_cb->disconnect) {
31968 +       if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
31969 +                               || !hpd_priv->dp_cb->disconnect) {
31970                 pr_err("hpd dp_cb not initialized\n");
31971                 return -EINVAL;
31972         }
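
The one-character fix above is about short-circuit evaluation: with &&, a
NULL dp_cb still evaluates the next operand and dereferences NULL; with ||,
evaluation stops at the first failing test. A minimal sketch (types are
hypothetical):

struct dp_cb {
        void (*configure)(void);
        void (*disconnect)(void);
};

static int validate_cb(const struct dp_cb *cb)
{
        /* || stops at the first true operand, so cb->configure is only
         * evaluated once cb is known to be non-NULL */
        if (!cb || !cb->configure || !cb->disconnect)
                return -1;      /* -EINVAL in the kernel */
        return 0;
}
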
31973 diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
31974 index 85ad0babc326..d611cc8e54a4 100644
31975 --- a/drivers/gpu/drm/msm/msm_debugfs.c
31976 +++ b/drivers/gpu/drm/msm/msm_debugfs.c
31977 @@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = {
31978  static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
31980         struct msm_drm_private *priv = dev->dev_private;
31981 -       struct msm_gpu *gpu = priv->gpu;
31982         int ret;
31984 -       ret = mutex_lock_interruptible(&priv->mm_lock);
31985 +       ret = mutex_lock_interruptible(&priv->obj_lock);
31986         if (ret)
31987                 return ret;
31989 -       if (gpu) {
31990 -               seq_printf(m, "Active Objects (%s):\n", gpu->name);
31991 -               msm_gem_describe_objects(&gpu->active_list, m);
31992 -       }
31994 -       seq_printf(m, "Inactive Objects:\n");
31995 -       msm_gem_describe_objects(&priv->inactive_dontneed, m);
31996 -       msm_gem_describe_objects(&priv->inactive_willneed, m);
31997 +       msm_gem_describe_objects(&priv->objects, m);
31999 -       mutex_unlock(&priv->mm_lock);
32000 +       mutex_unlock(&priv->obj_lock);
32002         return 0;
32004 diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
32005 index 196907689c82..18ea1c66de71 100644
32006 --- a/drivers/gpu/drm/msm/msm_drv.c
32007 +++ b/drivers/gpu/drm/msm/msm_drv.c
32008 @@ -446,6 +446,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
32010         priv->wq = alloc_ordered_workqueue("msm", 0);
32012 +       INIT_LIST_HEAD(&priv->objects);
32013 +       mutex_init(&priv->obj_lock);
32015         INIT_LIST_HEAD(&priv->inactive_willneed);
32016         INIT_LIST_HEAD(&priv->inactive_dontneed);
32017         mutex_init(&priv->mm_lock);
32018 diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
32019 index 591c47a654e8..6b58e49754cb 100644
32020 --- a/drivers/gpu/drm/msm/msm_drv.h
32021 +++ b/drivers/gpu/drm/msm/msm_drv.h
32022 @@ -174,7 +174,14 @@ struct msm_drm_private {
32023         struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
32024         struct msm_perf_state *perf;
32026 -       /*
32027 +       /**
32028 +        * List of all GEM objects (mainly for debugfs), protected by obj_lock
32029 +        * (acquire obj_lock before any per-GEM-object lock)
32030 +        */
32031 +       struct list_head objects;
32032 +       struct mutex obj_lock;
32034 +       /**
32035          * Lists of inactive GEM objects.  Every bo is either in one of the
32036          * inactive lists (depending on whether or not it is shrinkable) or
32037          * gpu->active_list (for the gpu it is active on[1])
32038 diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
32039 index f091c1e164fa..aeba3eb8ce46 100644
32040 --- a/drivers/gpu/drm/msm/msm_gem.c
32041 +++ b/drivers/gpu/drm/msm/msm_gem.c
32042 @@ -951,7 +951,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
32043         size_t size = 0;
32045         seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
32046 -       list_for_each_entry(msm_obj, list, mm_list) {
32047 +       list_for_each_entry(msm_obj, list, node) {
32048                 struct drm_gem_object *obj = &msm_obj->base;
32049                 seq_puts(m, "   ");
32050                 msm_gem_describe(obj, m);
32051 @@ -970,6 +970,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
32052         struct drm_device *dev = obj->dev;
32053         struct msm_drm_private *priv = dev->dev_private;
32055 +       mutex_lock(&priv->obj_lock);
32056 +       list_del(&msm_obj->node);
32057 +       mutex_unlock(&priv->obj_lock);
32059         mutex_lock(&priv->mm_lock);
32060         list_del(&msm_obj->mm_list);
32061         mutex_unlock(&priv->mm_lock);
32062 @@ -1157,6 +1161,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
32063         list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
32064         mutex_unlock(&priv->mm_lock);
32066 +       mutex_lock(&priv->obj_lock);
32067 +       list_add_tail(&msm_obj->node, &priv->objects);
32068 +       mutex_unlock(&priv->obj_lock);
32070         return obj;
32072  fail:
32073 @@ -1227,6 +1235,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
32074         list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
32075         mutex_unlock(&priv->mm_lock);
32077 +       mutex_lock(&priv->obj_lock);
32078 +       list_add_tail(&msm_obj->node, &priv->objects);
32079 +       mutex_unlock(&priv->obj_lock);
32081         return obj;
32083  fail:
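
A userspace approximation of the tracking pattern these hunks add: one global
list guarded by its own mutex, linked at creation and unlinked on free, so a
debugfs-style walker never needs the shrinker's mm_lock (the pthread mutex
and list names are assumptions):

#include <pthread.h>

struct node { struct node *prev, *next; };

static struct node objects = { &objects, &objects };  /* empty ring */
static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

static void track(struct node *n)
{
        pthread_mutex_lock(&obj_lock);
        n->prev = objects.prev;
        n->next = &objects;
        objects.prev->next = n;
        objects.prev = n;
        pthread_mutex_unlock(&obj_lock);
}

static void untrack(struct node *n)
{
        pthread_mutex_lock(&obj_lock);
        n->prev->next = n->next;
        n->next->prev = n->prev;
        pthread_mutex_unlock(&obj_lock);
}
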
32084 diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
32085 index b3a0a880cbab..99d4c0e9465e 100644
32086 --- a/drivers/gpu/drm/msm/msm_gem.h
32087 +++ b/drivers/gpu/drm/msm/msm_gem.h
32088 @@ -55,8 +55,16 @@ struct msm_gem_object {
32089          */
32090         uint8_t vmap_count;
32092 -       /* And object is either:
32093 -        *  inactive - on priv->inactive_list
32094 +       /**
32095 +        * Node in the list of all objects (mainly for debugfs), protected
32096 +        * by priv->obj_lock
32097 +        */
32098 +       struct list_head node;
32100 +       /**
32101 +        * An object is either:
32102 +        *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
32103 +        *     (depending on purgeability status)
32104          *  active   - on one of the gpu's active_list..  well, at
32105          *     least for now we don't have (I don't think) hw sync between
32106          *     2d and 3d on devices which have both, meaning we need to
32107 diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
32108 index b31d750c425a..5f1722b040f4 100644
32109 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c
32110 +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
32111 @@ -4327,7 +4327,8 @@ static int omap_dsi_register_te_irq(struct dsi_data *dsi,
32112         irq_set_status_flags(te_irq, IRQ_NOAUTOEN);
32114         err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
32115 -                                  IRQF_TRIGGER_RISING, "TE", dsi);
32116 +                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
32117 +                                  "TE", dsi);
32118         if (err) {
32119                 dev_err(dsi->dev, "request irq failed with %d\n", err);
32120                 gpiod_put(dsi->te_gpio);
32121 diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
32122 index b9a0e56f33e2..ef70140c5b09 100644
32123 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
32124 +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
32125 @@ -898,8 +898,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
32126          */
32127         dsi->hs_rate = 349440000;
32128         dsi->lp_rate = 9600000;
32129 -       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
32130 -               MIPI_DSI_MODE_EOT_PACKET;
32131 +       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
32133         /*
32134          * Every new incarnation of this display must have a unique
32135 diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
32136 index 4aac0d1573dd..70560cac53a9 100644
32137 --- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
32138 +++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
32139 @@ -184,9 +184,7 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
32140          * As we only send commands we do not need to be continuously
32141          * clocked.
32142          */
32143 -       dsi->mode_flags =
32144 -               MIPI_DSI_CLOCK_NON_CONTINUOUS |
32145 -               MIPI_DSI_MODE_EOT_PACKET;
32146 +       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
32148         s6->supply = devm_regulator_get(dev, "vdd1");
32149         if (IS_ERR(s6->supply))
32150 diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
32151 index eec74c10ddda..9c3563c61e8c 100644
32152 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
32153 +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
32154 @@ -97,7 +97,6 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
32155         dsi->hs_rate = 349440000;
32156         dsi->lp_rate = 9600000;
32157         dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
32158 -               MIPI_DSI_MODE_EOT_PACKET |
32159                 MIPI_DSI_MODE_VIDEO_BURST;
32161         ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write,
32162 diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
32163 index 4e2dad314c79..e8b1a0e873ea 100644
32164 --- a/drivers/gpu/drm/panel/panel-simple.c
32165 +++ b/drivers/gpu/drm/panel/panel-simple.c
32166 @@ -406,7 +406,7 @@ static int panel_simple_prepare(struct drm_panel *panel)
32167                 if (IS_ERR(p->hpd_gpio)) {
32168                         err = panel_simple_get_hpd_gpio(panel->dev, p, false);
32169                         if (err)
32170 -                               return err;
32171 +                               goto error;
32172                 }
32174                 err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
32175 @@ -418,13 +418,20 @@ static int panel_simple_prepare(struct drm_panel *panel)
32176                 if (err) {
32177                         dev_err(panel->dev,
32178                                 "error waiting for hpd GPIO: %d\n", err);
32179 -                       return err;
32180 +                       goto error;
32181                 }
32182         }
32184         p->prepared_time = ktime_get();
32186         return 0;
32188 +error:
32189 +       gpiod_set_value_cansleep(p->enable_gpio, 0);
32190 +       regulator_disable(p->supply);
32191 +       p->unprepared_time = ktime_get();
32193 +       return err;
32196  static int panel_simple_enable(struct drm_panel *panel)
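
The fix above replaces early returns with a single error label so the
regulator and enable GPIO set up earlier in panel_simple_prepare() are always
wound back. The classic goto-unwind shape, sketched with assumed stubs:

#include <errno.h>

static int power_on(void)   { return 0; }
static void power_off(void) { }
static int wait_hpd(void)   { return -ETIMEDOUT; }  /* assume the wait fails */

static int prepare(void)
{
        int err = power_on();
        if (err)
                return err;     /* nothing to undo yet */

        err = wait_hpd();
        if (err)
                goto error;     /* undo everything done above */

        return 0;

error:
        power_off();
        return err;
}
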
32197 diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
32198 index 065efae213f5..95659a4d15e9 100644
32199 --- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
32200 +++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
32201 @@ -449,8 +449,7 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi)
32202                         MIPI_DSI_MODE_VIDEO_BURST;
32203         else
32204                 dsi->mode_flags =
32205 -                       MIPI_DSI_CLOCK_NON_CONTINUOUS |
32206 -                       MIPI_DSI_MODE_EOT_PACKET;
32207 +                       MIPI_DSI_CLOCK_NON_CONTINUOUS;
32209         acx->supply = devm_regulator_get(dev, "vddi");
32210         if (IS_ERR(acx->supply))
32211 diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
32212 index 7c1b3481b785..21e552d1ac71 100644
32213 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
32214 +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
32215 @@ -488,8 +488,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
32216                 }
32217                 bo->base.pages = pages;
32218                 bo->base.pages_use_count = 1;
32219 -       } else
32220 +       } else {
32221                 pages = bo->base.pages;
32222 +               if (pages[page_offset]) {
32223 +                       /* Pages are already mapped, bail out. */
32224 +                       mutex_unlock(&bo->base.pages_lock);
32225 +                       goto out;
32226 +               }
32227 +       }
32229         mapping = bo->base.base.filp->f_mapping;
32230         mapping_set_unevictable(mapping);
32231 @@ -522,6 +528,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
32233         dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
32235 +out:
32236         panfrost_gem_mapping_put(bomapping);
32238         return 0;
32239 @@ -593,6 +600,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
32240                 access_type = (fault_status >> 8) & 0x3;
32241                 source_id = (fault_status >> 16);
32243 +               mmu_write(pfdev, MMU_INT_CLEAR, mask);
32245                 /* Page fault only */
32246                 ret = -1;
32247                 if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
32248 @@ -616,8 +625,6 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
32249                                 access_type, access_type_name(pfdev, fault_status),
32250                                 source_id);
32252 -               mmu_write(pfdev, MMU_INT_CLEAR, mask);
32254                 status &= ~mask;
32255         }
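
Moving the MMU_INT_CLEAR write ahead of the handling closes a window where a
fault raised while the handler runs would be acked afterwards without ever
being served. A schematic of the ordering with stubbed helpers (the bit
layout is an assumption):

#include <stdio.h>

static void write_int_clear(unsigned mask) { printf("ack 0x%x\n", mask); }
static void service_fault(unsigned as)     { printf("handle AS%u\n", as); }

static void handle_faults(unsigned status)
{
        for (unsigned i = 0; i < 16; i++) {
                unsigned mask = (1u << i) | (1u << (i + 16));

                if (!(status & mask))
                        continue;

                write_int_clear(mask);  /* ack first ... */
                service_fault(i);       /* ... so a new fault re-latches */
                status &= ~mask;
        }
}
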
32257 diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
32258 index 54e3c3a97440..741cc983daf1 100644
32259 --- a/drivers/gpu/drm/qxl/qxl_cmd.c
32260 +++ b/drivers/gpu/drm/qxl/qxl_cmd.c
32261 @@ -268,7 +268,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev,
32262         int ret;
32264         ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
32265 -                           false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
32266 +                           false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
32267         if (ret) {
32268                 DRM_ERROR("failed to allocate VRAM BO\n");
32269                 return ret;
32270 diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
32271 index 10738e04c09b..3f432ec8e771 100644
32272 --- a/drivers/gpu/drm/qxl/qxl_display.c
32273 +++ b/drivers/gpu/drm/qxl/qxl_display.c
32274 @@ -798,8 +798,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
32275                                 qdev->dumb_shadow_bo = NULL;
32276                         }
32277                         qxl_bo_create(qdev, surf.height * surf.stride,
32278 -                                     true, true, QXL_GEM_DOMAIN_SURFACE, &surf,
32279 -                                     &qdev->dumb_shadow_bo);
32280 +                                     true, true, QXL_GEM_DOMAIN_SURFACE, 0,
32281 +                                     &surf, &qdev->dumb_shadow_bo);
32282                 }
32283                 if (user_bo->shadow != qdev->dumb_shadow_bo) {
32284                         if (user_bo->shadow) {
32285 @@ -1228,6 +1228,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
32287  void qxl_modeset_fini(struct qxl_device *qdev)
32289 +       if (qdev->dumb_shadow_bo) {
32290 +               drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
32291 +               qdev->dumb_shadow_bo = NULL;
32292 +       }
32293         qxl_destroy_monitors_object(qdev);
32294         drm_mode_config_cleanup(&qdev->ddev);
32296 diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
32297 index 48e096285b4c..a08da0bd9098 100644
32298 --- a/drivers/gpu/drm/qxl/qxl_gem.c
32299 +++ b/drivers/gpu/drm/qxl/qxl_gem.c
32300 @@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
32301         /* At least align on page size */
32302         if (alignment < PAGE_SIZE)
32303                 alignment = PAGE_SIZE;
32304 -       r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
32305 +       r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);
32306         if (r) {
32307                 if (r != -ERESTARTSYS)
32308                         DRM_ERROR(
32309 diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
32310 index ceebc5881f68..a5806667697a 100644
32311 --- a/drivers/gpu/drm/qxl/qxl_object.c
32312 +++ b/drivers/gpu/drm/qxl/qxl_object.c
32313 @@ -103,8 +103,8 @@ static const struct drm_gem_object_funcs qxl_object_funcs = {
32314         .print_info = drm_gem_ttm_print_info,
32315  };
32317 -int qxl_bo_create(struct qxl_device *qdev,
32318 -                 unsigned long size, bool kernel, bool pinned, u32 domain,
32319 +int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
32320 +                 bool kernel, bool pinned, u32 domain, u32 priority,
32321                   struct qxl_surface *surf,
32322                   struct qxl_bo **bo_ptr)
32324 @@ -137,6 +137,7 @@ int qxl_bo_create(struct qxl_device *qdev,
32326         qxl_ttm_placement_from_domain(bo, domain);
32328 +       bo->tbo.priority = priority;
32329         r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
32330                                  &bo->placement, 0, &ctx, size,
32331                                  NULL, NULL, &qxl_ttm_bo_destroy);
32332 diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
32333 index e60a8f88e226..dc1659e717f1 100644
32334 --- a/drivers/gpu/drm/qxl/qxl_object.h
32335 +++ b/drivers/gpu/drm/qxl/qxl_object.h
32336 @@ -61,6 +61,7 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
32337  extern int qxl_bo_create(struct qxl_device *qdev,
32338                          unsigned long size,
32339                          bool kernel, bool pinned, u32 domain,
32340 +                        u32 priority,
32341                          struct qxl_surface *surf,
32342                          struct qxl_bo **bo_ptr);
32343  extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map);
32344 diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
32345 index b372455e2729..801ce77b1dac 100644
32346 --- a/drivers/gpu/drm/qxl/qxl_release.c
32347 +++ b/drivers/gpu/drm/qxl/qxl_release.c
32348 @@ -199,11 +199,12 @@ qxl_release_free(struct qxl_device *qdev,
32351  static int qxl_release_bo_alloc(struct qxl_device *qdev,
32352 -                               struct qxl_bo **bo)
32353 +                               struct qxl_bo **bo,
32354 +                               u32 priority)
32356         /* pin releases bo's; they are too messy to evict */
32357         return qxl_bo_create(qdev, PAGE_SIZE, false, true,
32358 -                            QXL_GEM_DOMAIN_VRAM, NULL, bo);
32359 +                            QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
32362  int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
32363 @@ -326,13 +327,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
32364         int ret = 0;
32365         union qxl_release_info *info;
32366         int cur_idx;
32367 +       u32 priority;
32369 -       if (type == QXL_RELEASE_DRAWABLE)
32370 +       if (type == QXL_RELEASE_DRAWABLE) {
32371                 cur_idx = 0;
32372 -       else if (type == QXL_RELEASE_SURFACE_CMD)
32373 +               priority = 0;
32374 +       } else if (type == QXL_RELEASE_SURFACE_CMD) {
32375                 cur_idx = 1;
32376 -       else if (type == QXL_RELEASE_CURSOR_CMD)
32377 +               priority = 1;
32378 +       } else if (type == QXL_RELEASE_CURSOR_CMD) {
32379                 cur_idx = 2;
32380 +               priority = 1;
32381 +       }
32382         else {
32383                 DRM_ERROR("got illegal type: %d\n", type);
32384                 return -EINVAL;
32385 @@ -352,7 +358,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
32386                 qdev->current_release_bo[cur_idx] = NULL;
32387         }
32388         if (!qdev->current_release_bo[cur_idx]) {
32389 -               ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
32390 +               ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
32391                 if (ret) {
32392                         mutex_unlock(&qdev->release_mutex);
32393                         if (free_bo) {
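
The qxl hunks thread a priority value down to ttm_bo_init_reserved() via bo->tbo.priority, so surface-command and cursor-command release BOs (priority 1) sit on a later LRU bucket than drawables (priority 0) and are evicted last. A rough standalone illustration of priority-bucketed eviction order; this is a simplification, not TTM code:

#include <stdio.h>

#define NPRIO 2

struct bo { const char *name; int prio; };

int main(void)
{
        struct bo bos[] = {
                { "drawable",    0 },
                { "surface-cmd", 1 },
                { "cursor-cmd",  1 },
        };

        /* evict lower-priority buckets first, FIFO inside a bucket */
        for (int p = 0; p < NPRIO; p++)
                for (unsigned i = 0; i < sizeof(bos) / sizeof(bos[0]); i++)
                        if (bos[i].prio == p)
                                printf("evict %s (prio %d)\n", bos[i].name, p);
        return 0;
}
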
32394 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
32395 index 3effc8c71494..ea44423376c4 100644
32396 --- a/drivers/gpu/drm/radeon/radeon.h
32397 +++ b/drivers/gpu/drm/radeon/radeon.h
32398 @@ -1558,6 +1558,7 @@ struct radeon_dpm {
32399         void                    *priv;
32400         u32                     new_active_crtcs;
32401         int                     new_active_crtc_count;
32402 +       int                     high_pixelclock_count;
32403         u32                     current_active_crtcs;
32404         int                     current_active_crtc_count;
32405         bool single_display;
32406 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
32407 index 42301b4e56f5..28c4413f4dc8 100644
32408 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
32409 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
32410 @@ -2120,11 +2120,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
32411                 return state_index;
32412         /* last mode is usually default, array is low to high */
32413         for (i = 0; i < num_modes; i++) {
32414 -               rdev->pm.power_state[state_index].clock_info =
32415 -                       kcalloc(1, sizeof(struct radeon_pm_clock_info),
32416 -                               GFP_KERNEL);
32417 +               /* Avoid memory leaks from invalid modes or unknown frev. */
32418 +               if (!rdev->pm.power_state[state_index].clock_info) {
32419 +                       rdev->pm.power_state[state_index].clock_info =
32420 +                               kzalloc(sizeof(struct radeon_pm_clock_info),
32421 +                                       GFP_KERNEL);
32422 +               }
32423                 if (!rdev->pm.power_state[state_index].clock_info)
32424 -                       return state_index;
32425 +                       goto out;
32426                 rdev->pm.power_state[state_index].num_clock_modes = 1;
32427                 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
32428                 switch (frev) {
32429 @@ -2243,17 +2246,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
32430                         break;
32431                 }
32432         }
32433 +out:
32434 +       /* free any unused clock_info allocation. */
32435 +       if (state_index && state_index < num_modes) {
32436 +               kfree(rdev->pm.power_state[state_index].clock_info);
32437 +               rdev->pm.power_state[state_index].clock_info = NULL;
32438 +       }
32440         /* last mode is usually default */
32441 -       if (rdev->pm.default_power_state_index == -1) {
32442 +       if (state_index && rdev->pm.default_power_state_index == -1) {
32443                 rdev->pm.power_state[state_index - 1].type =
32444                         POWER_STATE_TYPE_DEFAULT;
32445                 rdev->pm.default_power_state_index = state_index - 1;
32446                 rdev->pm.power_state[state_index - 1].default_clock_mode =
32447                         &rdev->pm.power_state[state_index - 1].clock_info[0];
32448 -               rdev->pm.power_state[state_index].flags &=
32449 +               rdev->pm.power_state[state_index - 1].flags &=
32450                         ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
32451 -               rdev->pm.power_state[state_index].misc = 0;
32452 -               rdev->pm.power_state[state_index].misc2 = 0;
32453 +               rdev->pm.power_state[state_index - 1].misc = 0;
32454 +               rdev->pm.power_state[state_index - 1].misc2 = 0;
32455         }
32456         return state_index;
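
Three fixes land in the atombios hunk: clock_info is only allocated while still NULL (the old per-pass kcalloc leaked the previous buffer), a trailing allocation for a mode that never materialized is freed, and the default-state fixups index state_index - 1 like the surrounding lines instead of the one-past-the-end state_index. The leak pattern in isolation, as standalone C with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct state { int *clock_info; };

int main(void)
{
        struct state s = { NULL };

        for (int pass = 0; pass < 3; pass++) {
                /* old code called calloc() on every pass, leaking the prior buffer */
                if (!s.clock_info)
                        s.clock_info = calloc(1, sizeof(int));
                if (!s.clock_info)
                        return 1;
        }
        printf("one allocation survives all passes: %p\n", (void *)s.clock_info);
        free(s.clock_info);
        return 0;
}
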
32458 diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
32459 index 2c32186c4acd..4e4c937c36c6 100644
32460 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
32461 +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
32462 @@ -242,6 +242,9 @@ radeon_dp_mst_detect(struct drm_connector *connector,
32463                 to_radeon_connector(connector);
32464         struct radeon_connector *master = radeon_connector->mst_port;
32466 +       if (drm_connector_is_unregistered(connector))
32467 +               return connector_status_disconnected;
32469         return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
32470                                       radeon_connector->port);
32472 diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
32473 index 3808a753127b..04109a2a6fd7 100644
32474 --- a/drivers/gpu/drm/radeon/radeon_gart.c
32475 +++ b/drivers/gpu/drm/radeon/radeon_gart.c
32476 @@ -301,7 +301,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
32477         p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
32479         for (i = 0; i < pages; i++, p++) {
32480 -               rdev->gart.pages[p] = pagelist[i];
32481 +               rdev->gart.pages[p] = pagelist ? pagelist[i] :
32482 +                       rdev->dummy_page.page;
32483                 page_base = dma_addr[i];
32484                 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
32485                         page_entry = radeon_gart_get_page_entry(page_base, flags);
32486 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
32487 index 2479d6ab7a36..58876bb4ef2a 100644
32488 --- a/drivers/gpu/drm/radeon/radeon_kms.c
32489 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
32490 @@ -518,6 +518,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
32491                         *value = rdev->config.si.backend_enable_mask;
32492                 } else {
32493                         DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
32494 +                       return -EINVAL;
32495                 }
32496                 break;
32497         case RADEON_INFO_MAX_SCLK:
32498 diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
32499 index 9b81786782de..499ce55e34cc 100644
32500 --- a/drivers/gpu/drm/radeon/radeon_object.c
32501 +++ b/drivers/gpu/drm/radeon/radeon_object.c
32502 @@ -384,6 +384,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
32503         }
32504  #endif
32505         man = ttm_manager_type(bdev, TTM_PL_VRAM);
32506 +       if (!man)
32507 +               return 0;
32508         return ttm_resource_manager_evict_all(bdev, man);
32511 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
32512 index 1995dad59dd0..2db4a8b1542d 100644
32513 --- a/drivers/gpu/drm/radeon/radeon_pm.c
32514 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
32515 @@ -1775,6 +1775,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
32516         struct drm_device *ddev = rdev->ddev;
32517         struct drm_crtc *crtc;
32518         struct radeon_crtc *radeon_crtc;
32519 +       struct radeon_connector *radeon_connector;
32521         if (!rdev->pm.dpm_enabled)
32522                 return;
32523 @@ -1784,6 +1785,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
32524         /* update active crtc counts */
32525         rdev->pm.dpm.new_active_crtcs = 0;
32526         rdev->pm.dpm.new_active_crtc_count = 0;
32527 +       rdev->pm.dpm.high_pixelclock_count = 0;
32528         if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
32529                 list_for_each_entry(crtc,
32530                                     &ddev->mode_config.crtc_list, head) {
32531 @@ -1791,6 +1793,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
32532                         if (crtc->enabled) {
32533                                 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
32534                                 rdev->pm.dpm.new_active_crtc_count++;
32535 +                               if (!radeon_crtc->connector)
32536 +                                       continue;
32538 +                               radeon_connector = to_radeon_connector(radeon_crtc->connector);
32539 +                               if (radeon_connector->pixelclock_for_modeset > 297000)
32540 +                                       rdev->pm.dpm.high_pixelclock_count++;
32541                         }
32542                 }
32543         }
32544 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
32545 index 78893bea85ae..c0258d213a72 100644
32546 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
32547 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
32548 @@ -485,13 +485,14 @@ static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt
32549         struct radeon_ttm_tt *gtt = (void *)ttm;
32550         struct radeon_device *rdev = radeon_get_rdev(bdev);
32552 +       if (gtt->userptr)
32553 +               radeon_ttm_tt_unpin_userptr(bdev, ttm);
32555         if (!gtt->bound)
32556                 return;
32558         radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
32560 -       if (gtt->userptr)
32561 -               radeon_ttm_tt_unpin_userptr(bdev, ttm);
32562         gtt->bound = false;
32565 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
32566 index 91bfc4762767..43b63705d073 100644
32567 --- a/drivers/gpu/drm/radeon/si_dpm.c
32568 +++ b/drivers/gpu/drm/radeon/si_dpm.c
32569 @@ -2979,6 +2979,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
32570                     (rdev->pdev->device == 0x6605)) {
32571                         max_sclk = 75000;
32572                 }
32574 +               if (rdev->pm.dpm.high_pixelclock_count > 1)
32575 +                       disable_sclk_switching = true;
32576         }
32578         if (rps->vce_active) {
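
Taken together, the radeon_pm.c and si_dpm.c hunks count active CRTCs whose connector needs a pixel clock above 297 MHz and, when more than one is present, pin the engine clock (no sclk switching) to avoid disturbances on multi-head high-resolution setups. The decision in miniature; threshold and counter name follow the patch, everything else is simplified:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        int pixelclock_khz[] = { 148500, 594000, 594000 }; /* 1080p + two 4K heads */
        int high_pixelclock_count = 0;

        for (unsigned i = 0; i < 3; i++)
                if (pixelclock_khz[i] > 297000)
                        high_pixelclock_count++;

        bool disable_sclk_switching = high_pixelclock_count > 1;
        printf("high=%d -> disable_sclk_switching=%d\n",
               high_pixelclock_count, disable_sclk_switching);
        return 0;
}
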
32579 diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
32580 index 7812094f93d6..6f3b523e16e8 100644
32581 --- a/drivers/gpu/drm/stm/ltdc.c
32582 +++ b/drivers/gpu/drm/stm/ltdc.c
32583 @@ -525,13 +525,42 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
32585         struct ltdc_device *ldev = crtc_to_ltdc(crtc);
32586         struct drm_device *ddev = crtc->dev;
32587 +       struct drm_connector_list_iter iter;
32588 +       struct drm_connector *connector = NULL;
32589 +       struct drm_encoder *encoder = NULL;
32590 +       struct drm_bridge *bridge = NULL;
32591         struct drm_display_mode *mode = &crtc->state->adjusted_mode;
32592         struct videomode vm;
32593         u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
32594         u32 total_width, total_height;
32595 +       u32 bus_flags = 0;
32596         u32 val;
32597         int ret;
32599 +       /* get encoder from crtc */
32600 +       drm_for_each_encoder(encoder, ddev)
32601 +               if (encoder->crtc == crtc)
32602 +                       break;
32604 +       if (encoder) {
32605 +               /* get bridge from encoder */
32606 +               list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
32607 +                       if (bridge->encoder == encoder)
32608 +                               break;
32610 +               /* get connector from encoder */
32611 +               drm_connector_list_iter_begin(ddev, &iter);
32612 +               drm_for_each_connector_iter(connector, &iter)
32613 +                       if (connector->encoder == encoder)
32614 +                               break;
32615 +               drm_connector_list_iter_end(&iter);
32616 +       }
32618 +       if (bridge && bridge->timings)
32619 +               bus_flags = bridge->timings->input_bus_flags;
32620 +       else if (connector)
32621 +               bus_flags = connector->display_info.bus_flags;
32623         if (!pm_runtime_active(ddev->dev)) {
32624                 ret = pm_runtime_get_sync(ddev->dev);
32625                 if (ret) {
32626 @@ -567,10 +596,10 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
32627         if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
32628                 val |= GCR_VSPOL;
32630 -       if (vm.flags & DISPLAY_FLAGS_DE_LOW)
32631 +       if (bus_flags & DRM_BUS_FLAG_DE_LOW)
32632                 val |= GCR_DEPOL;
32634 -       if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
32635 +       if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
32636                 val |= GCR_PCPOL;
32638         reg_update_bits(ldev->regs, LTDC_GCR,
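
The ltdc hunk stops deriving data-enable and pixel-clock polarity from videomode flags (which describe the mode, not the wiring) and instead asks the bridge attached to the encoder for its input_bus_flags, falling back to the connector's display_info.bus_flags. The fallback order as a standalone sketch; names mirror the patch, types are simplified:

#include <stdio.h>

#define DE_LOW      (1u << 0)
#define PIX_NEGEDGE (1u << 1)

struct bridge    { const unsigned *timings_flags; };
struct connector { unsigned bus_flags; };

static unsigned pick_bus_flags(const struct bridge *b, const struct connector *c)
{
        if (b && b->timings_flags)
                return *b->timings_flags;   /* bridge knows its input wiring */
        if (c)
                return c->bus_flags;        /* otherwise trust the connector */
        return 0;
}

int main(void)
{
        unsigned bt = DE_LOW;
        struct bridge b = { &bt };
        struct connector c = { PIX_NEGEDGE };

        printf("with bridge timings: %#x\n", pick_bus_flags(&b, &c));
        printf("connector fallback : %#x\n", pick_bus_flags(NULL, &c));
        return 0;
}
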
32639 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
32640 index 30213708fc99..d99afd19ca08 100644
32641 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
32642 +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
32643 @@ -515,6 +515,15 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
32645         drm_crtc_vblank_off(crtc);
32647 +       spin_lock_irq(&crtc->dev->event_lock);
32649 +       if (crtc->state->event) {
32650 +               drm_crtc_send_vblank_event(crtc, crtc->state->event);
32651 +               crtc->state->event = NULL;
32652 +       }
32654 +       spin_unlock_irq(&crtc->dev->event_lock);
32656         tilcdc_crtc_disable_irqs(dev);
32658         pm_runtime_put_sync(dev->dev);
32659 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
32660 index 101a68dc615b..799ec7a7caa4 100644
32661 --- a/drivers/gpu/drm/ttm/ttm_bo.c
32662 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
32663 @@ -153,6 +153,8 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
32665                 swap = &ttm_bo_glob.swap_lru[bo->priority];
32666                 list_move_tail(&bo->swap, swap);
32667 +       } else {
32668 +               list_del_init(&bo->swap);
32669         }
32671         if (bdev->driver->del_from_lru_notify)
32672 diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
32673 index 23eb6d772e40..669f2ee39515 100644
32674 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
32675 +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
32676 @@ -174,7 +174,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
32677                 if (!sync_file) {
32678                         dma_fence_put(&out_fence->f);
32679                         ret = -ENOMEM;
32680 -                       goto out_memdup;
32681 +                       goto out_unresv;
32682                 }
32684                 exbuf->fence_fd = out_fence_fd;
32685 diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
32686 index d69a5b6da553..4ff1ec28e630 100644
32687 --- a/drivers/gpu/drm/virtio/virtgpu_object.c
32688 +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
32689 @@ -248,6 +248,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
32691         ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
32692         if (ret != 0) {
32693 +               virtio_gpu_array_put_free(objs);
32694                 virtio_gpu_free_object(&shmem_obj->base);
32695                 return ret;
32696         }
32697 diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
32698 index 0443b7deeaef..758d8a98d96b 100644
32699 --- a/drivers/gpu/drm/vkms/vkms_crtc.c
32700 +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
32701 @@ -18,7 +18,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
32703         ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
32704                                           output->period_ns);
32705 -       WARN_ON(ret_overrun != 1);
32706 +       if (ret_overrun != 1)
32707 +               pr_warn("%s: vblank timer overrun\n", __func__);
32709         spin_lock(&output->lock);
32710         ret = drm_crtc_handle_vblank(crtc);
32711 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
32712 index 6c2a569f1fcb..8d7feeb0d7ab 100644
32713 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
32714 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
32715 @@ -201,7 +201,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
32716                         break;
32717                 }
32718                 if (lazy)
32719 -                       schedule_timeout(1);
32720 +                       schedule_min_hrtimeout();
32721                 else if ((++count & 0x0F) == 0) {
32722                         /**
32723                          * FIXME: Use schedule_hr_timeout here for
32724 diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
32725 index 99158ee67d02..59d1fb017da0 100644
32726 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
32727 +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
32728 @@ -866,7 +866,7 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp)
32729                 return ret;
32731         zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1);
32732 -       memset(dp->train_set, 0, 4);
32733 +       memset(dp->train_set, 0, sizeof(dp->train_set));
32734         ret = zynqmp_dp_link_train_cr(dp);
32735         if (ret)
32736                 return ret;
32737 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
32738 index 67fd8a2f5aba..ba338973e968 100644
32739 --- a/drivers/hid/hid-ids.h
32740 +++ b/drivers/hid/hid-ids.h
32741 @@ -946,6 +946,7 @@
32742  #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S   0x8003
32744  #define USB_VENDOR_ID_PLANTRONICS      0x047f
32745 +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES        0xc056
32747  #define USB_VENDOR_ID_PANASONIC                0x04da
32748  #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
32749 diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
32750 index c6c8e20f3e8d..0ff03fed9770 100644
32751 --- a/drivers/hid/hid-lenovo.c
32752 +++ b/drivers/hid/hid-lenovo.c
32753 @@ -33,6 +33,9 @@
32755  #include "hid-ids.h"
32757 +/* Userspace expects F20 for mic-mute; KEY_MICMUTE does not work */
32758 +#define LENOVO_KEY_MICMUTE KEY_F20
32760  struct lenovo_drvdata {
32761         u8 led_report[3]; /* Must be first for proper alignment */
32762         int led_state;
32763 @@ -62,8 +65,8 @@ struct lenovo_drvdata {
32764  #define TP10UBKBD_LED_OFF              1
32765  #define TP10UBKBD_LED_ON               2
32767 -static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
32768 -                                    enum led_brightness value)
32769 +static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
32770 +                                   enum led_brightness value)
32772         struct lenovo_drvdata *data = hid_get_drvdata(hdev);
32773         int ret;
32774 @@ -75,10 +78,18 @@ static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
32775         data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF;
32776         ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3,
32777                                  HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
32778 -       if (ret)
32779 -               hid_err(hdev, "Set LED output report error: %d\n", ret);
32780 +       if (ret != 3) {
32781 +               if (ret != -ENODEV)
32782 +                       hid_err(hdev, "Set LED output report error: %d\n", ret);
32784 +               ret = ret < 0 ? ret : -EIO;
32785 +       } else {
32786 +               ret = 0;
32787 +       }
32789         mutex_unlock(&data->led_report_mutex);
32791 +       return ret;
32794  static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work)
32795 @@ -126,7 +137,7 @@ static int lenovo_input_mapping_tpkbd(struct hid_device *hdev,
32796         if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
32797                 /* This sub-device contains trackpoint, mark it */
32798                 hid_set_drvdata(hdev, (void *)1);
32799 -               map_key_clear(KEY_MICMUTE);
32800 +               map_key_clear(LENOVO_KEY_MICMUTE);
32801                 return 1;
32802         }
32803         return 0;
32804 @@ -141,7 +152,7 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
32805             (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
32806                 switch (usage->hid & HID_USAGE) {
32807                 case 0x00f1: /* Fn-F4: Mic mute */
32808 -                       map_key_clear(KEY_MICMUTE);
32809 +                       map_key_clear(LENOVO_KEY_MICMUTE);
32810                         return 1;
32811                 case 0x00f2: /* Fn-F5: Brightness down */
32812                         map_key_clear(KEY_BRIGHTNESSDOWN);
32813 @@ -231,7 +242,7 @@ static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
32814                         map_key_clear(KEY_FN_ESC);
32815                         return 1;
32816                 case 9: /* Fn-F4: Mic mute */
32817 -                       map_key_clear(KEY_MICMUTE);
32818 +                       map_key_clear(LENOVO_KEY_MICMUTE);
32819                         return 1;
32820                 case 10: /* Fn-F7: Control panel */
32821                         map_key_clear(KEY_CONFIG);
32822 @@ -349,7 +360,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
32824         struct hid_device *hdev = to_hid_device(dev);
32825         struct lenovo_drvdata *data = hid_get_drvdata(hdev);
32826 -       int value;
32827 +       int value, ret;
32829         if (kstrtoint(buf, 10, &value))
32830                 return -EINVAL;
32831 @@ -364,7 +375,9 @@ static ssize_t attr_fn_lock_store(struct device *dev,
32832                 lenovo_features_set_cptkbd(hdev);
32833                 break;
32834         case USB_DEVICE_ID_LENOVO_TP10UBKBD:
32835 -               lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
32836 +               ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
32837 +               if (ret)
32838 +                       return ret;
32839                 break;
32840         }
32842 @@ -498,6 +511,9 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
32843  static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
32844                 struct hid_usage *usage, __s32 value)
32846 +       if (!hid_get_drvdata(hdev))
32847 +               return 0;
32849         switch (hdev->product) {
32850         case USB_DEVICE_ID_LENOVO_CUSBKBD:
32851         case USB_DEVICE_ID_LENOVO_CBTKBD:
32852 @@ -777,7 +793,7 @@ static enum led_brightness lenovo_led_brightness_get(
32853                                 : LED_OFF;
32856 -static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
32857 +static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
32858                         enum led_brightness value)
32860         struct device *dev = led_cdev->dev->parent;
32861 @@ -785,6 +801,7 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
32862         struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
32863         u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
32864         int led_nr = 0;
32865 +       int ret = 0;
32867         if (led_cdev == &data_pointer->led_micmute)
32868                 led_nr = 1;
32869 @@ -799,9 +816,11 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
32870                 lenovo_led_set_tpkbd(hdev);
32871                 break;
32872         case USB_DEVICE_ID_LENOVO_TP10UBKBD:
32873 -               lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
32874 +               ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
32875                 break;
32876         }
32878 +       return ret;
32881  static int lenovo_register_leds(struct hid_device *hdev)
32882 @@ -822,7 +841,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
32884         data->led_mute.name = name_mute;
32885         data->led_mute.brightness_get = lenovo_led_brightness_get;
32886 -       data->led_mute.brightness_set = lenovo_led_brightness_set;
32887 +       data->led_mute.brightness_set_blocking = lenovo_led_brightness_set;
32888 +       data->led_mute.flags = LED_HW_PLUGGABLE;
32889         data->led_mute.dev = &hdev->dev;
32890         ret = led_classdev_register(&hdev->dev, &data->led_mute);
32891         if (ret < 0)
32892 @@ -830,7 +850,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
32894         data->led_micmute.name = name_micm;
32895         data->led_micmute.brightness_get = lenovo_led_brightness_get;
32896 -       data->led_micmute.brightness_set = lenovo_led_brightness_set;
32897 +       data->led_micmute.brightness_set_blocking = lenovo_led_brightness_set;
32898 +       data->led_micmute.flags = LED_HW_PLUGGABLE;
32899         data->led_micmute.dev = &hdev->dev;
32900         ret = led_classdev_register(&hdev->dev, &data->led_micmute);
32901         if (ret < 0) {
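
The lenovo hunks convert the LED setters from void to int and register them as brightness_set_blocking with LED_HW_PLUGGABLE, so a failed or short hid_hw_raw_request() (which returns bytes transferred, or a negative errno) propagates to the LED core instead of being logged and lost. The return-value normalization on its own, as standalone C with errno values hard-coded for the demo:

#include <stdio.h>

#define EIO    5
#define ENODEV 19

/* hid_hw_raw_request() returns bytes sent on success or -errno */
static int normalize(int ret, int expected)
{
        if (ret == expected)
                return 0;              /* full report went out */
        return ret < 0 ? ret : -EIO;   /* short write counts as an I/O error */
}

int main(void)
{
        printf("%d %d %d\n",
               normalize(3, 3),        /*   0: ok          */
               normalize(2, 3),        /*  -5: short write */
               normalize(-ENODEV, 3)); /* -19: device gone */
        return 0;
}
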
32902 diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
32903 index 85b685efc12f..e81b7cec2d12 100644
32904 --- a/drivers/hid/hid-plantronics.c
32905 +++ b/drivers/hid/hid-plantronics.c
32906 @@ -13,6 +13,7 @@
32908  #include <linux/hid.h>
32909  #include <linux/module.h>
32910 +#include <linux/jiffies.h>
32912  #define PLT_HID_1_0_PAGE       0xffa00000
32913  #define PLT_HID_2_0_PAGE       0xffa20000
32914 @@ -36,6 +37,16 @@
32915  #define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
32916                             (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
32918 +#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
32920 +#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
32922 +struct plt_drv_data {
32923 +       unsigned long device_type;
32924 +       unsigned long last_volume_key_ts;
32925 +       u32 quirks;
32928  static int plantronics_input_mapping(struct hid_device *hdev,
32929                                      struct hid_input *hi,
32930                                      struct hid_field *field,
32931 @@ -43,7 +54,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
32932                                      unsigned long **bit, int *max)
32934         unsigned short mapped_key;
32935 -       unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
32936 +       struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
32937 +       unsigned long plt_type = drv_data->device_type;
32939         /* special case for PTT products */
32940         if (field->application == HID_GD_JOYSTICK)
32941 @@ -105,6 +117,30 @@ static int plantronics_input_mapping(struct hid_device *hdev,
32942         return 1;
32945 +static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
32946 +                            struct hid_usage *usage, __s32 value)
32948 +       struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
32950 +       if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
32951 +               unsigned long prev_ts, cur_ts;
32953 +               /* Usages are filtered in plantronics_usages. */
32955 +               if (!value) /* Handle key presses only. */
32956 +                       return 0;
32958 +               prev_ts = drv_data->last_volume_key_ts;
32959 +               cur_ts = jiffies;
32960 +               if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
32961 +                       return 1; /* Ignore the repeated key. */
32963 +               drv_data->last_volume_key_ts = cur_ts;
32964 +       }
32966 +       return 0;
32969  static unsigned long plantronics_device_type(struct hid_device *hdev)
32971         unsigned i, col_page;
32972 @@ -133,15 +169,24 @@ static unsigned long plantronics_device_type(struct hid_device *hdev)
32973  static int plantronics_probe(struct hid_device *hdev,
32974                              const struct hid_device_id *id)
32976 +       struct plt_drv_data *drv_data;
32977         int ret;
32979 +       drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
32980 +       if (!drv_data)
32981 +               return -ENOMEM;
32983         ret = hid_parse(hdev);
32984         if (ret) {
32985                 hid_err(hdev, "parse failed\n");
32986                 goto err;
32987         }
32989 -       hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev));
32990 +       drv_data->device_type = plantronics_device_type(hdev);
32991 +       drv_data->quirks = id->driver_data;
32992 +       drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
32994 +       hid_set_drvdata(hdev, drv_data);
32996         ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
32997                 HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE);
32998 @@ -153,15 +198,26 @@ static int plantronics_probe(struct hid_device *hdev,
33001  static const struct hid_device_id plantronics_devices[] = {
33002 +       { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
33003 +                                        USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
33004 +               .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
33005         { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
33006         { }
33007  };
33008  MODULE_DEVICE_TABLE(hid, plantronics_devices);
33010 +static const struct hid_usage_id plantronics_usages[] = {
33011 +       { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
33012 +       { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
33013 +       { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
33016  static struct hid_driver plantronics_driver = {
33017         .name = "plantronics",
33018         .id_table = plantronics_devices,
33019 +       .usage_table = plantronics_usages,
33020         .input_mapping = plantronics_input_mapping,
33021 +       .event = plantronics_event,
33022         .probe = plantronics_probe,
33023  };
33024  module_hid_driver(plantronics_driver);
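
The Plantronics quirk works because .event sees every usage listed in plantronics_usages: a volume key press arriving within PLT_DOUBLE_KEY_TIMEOUT (5 ms) of the previous one is treated as the firmware's duplicate and swallowed by returning 1. A standalone debounce sketch with a fake millisecond clock standing in for jiffies; illustrative only:

#include <stdio.h>

#define DOUBLE_KEY_TIMEOUT_MS 5

static unsigned long now_ms;                   /* stands in for jiffies */
static unsigned long last_ts;

static int filter_press(void)
{
        if (now_ms - last_ts <= DOUBLE_KEY_TIMEOUT_MS)
                return 1;                      /* duplicate: eat it */
        last_ts = now_ms;
        return 0;                              /* genuine press */
}

int main(void)
{
        /* primed "one timeout in the past" so the first press is never filtered */
        last_ts = (unsigned long)-DOUBLE_KEY_TIMEOUT_MS;

        now_ms = 100; printf("t=100 -> %d\n", filter_press()); /* 0: genuine   */
        now_ms = 103; printf("t=103 -> %d\n", filter_press()); /* 1: duplicate */
        now_ms = 200; printf("t=200 -> %d\n", filter_press()); /* 0: genuine   */
        return 0;
}
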
33025 diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
33026 index c3fb5beb846e..ec90713564e3 100644
33027 --- a/drivers/hsi/hsi_core.c
33028 +++ b/drivers/hsi/hsi_core.c
33029 @@ -210,8 +210,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
33030         if (err)
33031                 goto err;
33033 -       dev_set_name(&cl->device, "%s", name);
33035         err = hsi_of_property_parse_mode(client, "hsi-mode", &mode);
33036         if (err) {
33037                 err = hsi_of_property_parse_mode(client, "hsi-rx-mode",
33038 @@ -293,6 +291,7 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
33039         cl->device.release = hsi_client_release;
33040         cl->device.of_node = client;
33042 +       dev_set_name(&cl->device, "%s", name);
33043         if (device_register(&cl->device) < 0) {
33044                 pr_err("hsi: failed to register client: %s\n", name);
33045                 put_device(&cl->device);
33046 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
33047 index 0bd202de7960..945e41f5e3a8 100644
33048 --- a/drivers/hv/channel.c
33049 +++ b/drivers/hv/channel.c
33050 @@ -653,7 +653,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
33052         if (newchannel->rescind) {
33053                 err = -ENODEV;
33054 -               goto error_free_info;
33055 +               goto error_clean_msglist;
33056         }
33058         err = vmbus_post_msg(open_msg,
33059 diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
33060 index f0ed730e2e4e..ecebf1235fd5 100644
33061 --- a/drivers/hv/channel_mgmt.c
33062 +++ b/drivers/hv/channel_mgmt.c
33063 @@ -756,6 +756,12 @@ static void init_vp_index(struct vmbus_channel *channel)
33064         free_cpumask_var(available_mask);
33067 +#define UNLOAD_DELAY_UNIT_MS   10              /* 10 milliseconds */
33068 +#define UNLOAD_WAIT_MS         (100*1000)      /* 100 seconds */
33069 +#define UNLOAD_WAIT_LOOPS      (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
33070 +#define UNLOAD_MSG_MS          (5*1000)        /* Every 5 seconds */
33071 +#define UNLOAD_MSG_LOOPS       (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
33073  static void vmbus_wait_for_unload(void)
33075         int cpu;
33076 @@ -773,12 +779,17 @@ static void vmbus_wait_for_unload(void)
33077          * vmbus_connection.unload_event. If not, the last thing we can do is
33078          * read message pages for all CPUs directly.
33079          *
33080 -        * Wait no more than 10 seconds so that the panic path can't get
33081 -        * hung forever in case the response message isn't seen.
33082 +        * Wait up to 100 seconds since an Azure host must write back any dirty
33083 +        * data in its disk cache before the VMbus UNLOAD request will
33084 +        * complete. This flushing has been empirically observed to take up
33085 +        * to 50 seconds in cases with a lot of dirty data, so allow additional
33086 +        * leeway and for inaccuracies in mdelay(). But eventually time out so
33087 +        * that the panic path can't get hung forever in case the response
33088 +        * message isn't seen.
33089          */
33090 -       for (i = 0; i < 1000; i++) {
33091 +       for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
33092                 if (completion_done(&vmbus_connection.unload_event))
33093 -                       break;
33094 +                       goto completed;
33096                 for_each_online_cpu(cpu) {
33097                         struct hv_per_cpu_context *hv_cpu
33098 @@ -801,9 +812,18 @@ static void vmbus_wait_for_unload(void)
33099                         vmbus_signal_eom(msg, message_type);
33100                 }
33102 -               mdelay(10);
33103 +               /*
33104 +                * Give a notice periodically so someone watching the
33105 +                * serial output won't think it is completely hung.
33106 +                */
33107 +               if (!(i % UNLOAD_MSG_LOOPS))
33108 +                       pr_notice("Waiting for VMBus UNLOAD to complete\n");
33110 +               mdelay(UNLOAD_DELAY_UNIT_MS);
33111         }
33112 +       pr_err("Continuing even though VMBus UNLOAD did not complete\n");
33114 +completed:
33115         /*
33116          * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
33117          * maybe-pending messages on all CPUs to be able to receive new
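
The vmbus change stretches the panic-path UNLOAD wait from 10 s to 100 s (Azure hosts may spend tens of seconds flushing dirty disk cache first), prints a notice every 5 s so the wait is visibly alive, and still falls through with an error rather than hanging forever. The loop skeleton in standalone form, with counts shortened for the demo:

#include <stdio.h>

#define WAIT_LOOPS 10                 /* patch: 100 s / 10 ms per pass */
#define MSG_LOOPS  4                  /* patch: a notice every 5 s     */

static int completion_done(int i) { return i == 7; } /* completes on pass 7 */

int main(void)
{
        int i;

        for (i = 1; i <= WAIT_LOOPS; i++) {
                if (completion_done(i)) {
                        printf("completed on pass %d\n", i);
                        goto completed;
                }
                if (!(i % MSG_LOOPS))
                        printf("still waiting...\n");
                /* mdelay(UNLOAD_DELAY_UNIT_MS) in the real code */
        }
        printf("continuing even though unload did not complete\n");
completed:
        return 0;
}
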
33118 diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
33119 index 35833d4d1a1d..ecd82ebfd5bc 100644
33120 --- a/drivers/hv/ring_buffer.c
33121 +++ b/drivers/hv/ring_buffer.c
33122 @@ -313,7 +313,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
33123                 rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
33124                 if (rqst_id == VMBUS_RQST_ERROR) {
33125                         spin_unlock_irqrestore(&outring_info->ring_lock, flags);
33126 -                       pr_err("No request id available\n");
33127                         return -EAGAIN;
33128                 }
33129         }
33130 diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
33131 index 29f5fed28c2a..974cb08c7aa7 100644
33132 --- a/drivers/hwmon/fam15h_power.c
33133 +++ b/drivers/hwmon/fam15h_power.c
33134 @@ -221,7 +221,7 @@ static ssize_t power1_average_show(struct device *dev,
33135                 prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
33136         }
33138 -       leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
33139 +       leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
33140         if (leftover)
33141                 return 0;
33143 diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
33144 index ac4adb44b224..97ab491d2922 100644
33145 --- a/drivers/hwmon/lm80.c
33146 +++ b/drivers/hwmon/lm80.c
33147 @@ -596,7 +596,6 @@ static int lm80_probe(struct i2c_client *client)
33148         struct device *dev = &client->dev;
33149         struct device *hwmon_dev;
33150         struct lm80_data *data;
33151 -       int rv;
33153         data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
33154         if (!data)
33155 @@ -609,14 +608,8 @@ static int lm80_probe(struct i2c_client *client)
33156         lm80_init_client(client);
33158         /* A few vars need to be filled upon startup */
33159 -       rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
33160 -       if (rv < 0)
33161 -               return rv;
33162 -       data->fan[f_min][0] = rv;
33163 -       rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
33164 -       if (rv < 0)
33165 -               return rv;
33166 -       data->fan[f_min][1] = rv;
33167 +       data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
33168 +       data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
33170         hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
33171                                                            data, lm80_groups);
33172 diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
33173 index 4382105bf142..2a4bed0ab226 100644
33174 --- a/drivers/hwmon/ltc2992.c
33175 +++ b/drivers/hwmon/ltc2992.c
33176 @@ -900,11 +900,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
33178         fwnode_for_each_available_child_node(fwnode, child) {
33179                 ret = fwnode_property_read_u32(child, "reg", &addr);
33180 -               if (ret < 0)
33181 +               if (ret < 0) {
33182 +                       fwnode_handle_put(child);
33183                         return ret;
33184 +               }
33186 -               if (addr > 1)
33187 +               if (addr > 1) {
33188 +                       fwnode_handle_put(child);
33189                         return -EINVAL;
33190 +               }
33192                 ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
33193                 if (!ret)
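
The ltc2992 fix matters because fwnode_for_each_available_child_node() holds a reference on the child it yields for each pass; returning early without fwnode_handle_put(child) leaks that reference, so both error exits now drop it. A toy refcount model of the pattern; the real iterator's bookkeeping differs in detail:

#include <stdio.h>

static int refs;
static int get_next(int i) { if (i < 3) { refs++; return 1; } return 0; }
static void put(void)      { refs--; }

int main(void)
{
        for (int i = 0; get_next(i); i++) {
                int err = (i == 1) ? -22 : 0;   /* parsing fails on child 1 */
                if (err) {
                        put();                  /* the fix: drop the ref on bail */
                        break;
                }
                put();                          /* normal end-of-pass drop */
        }
        printf("leaked refs: %d\n", refs);      /* 0 with the fix, 1 without */
        return 0;
}
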
33194 diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
33195 index 7a5e539b567b..580e63d7daa0 100644
33196 --- a/drivers/hwmon/occ/common.c
33197 +++ b/drivers/hwmon/occ/common.c
33198 @@ -217,9 +217,9 @@ int occ_update_response(struct occ *occ)
33199                 return rc;
33201         /* limit the maximum rate of polling the OCC */
33202 -       if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
33203 +       if (time_after(jiffies, occ->next_update)) {
33204                 rc = occ_poll(occ);
33205 -               occ->last_update = jiffies;
33206 +               occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
33207         } else {
33208                 rc = occ->last_error;
33209         }
33210 @@ -1164,6 +1164,7 @@ int occ_setup(struct occ *occ, const char *name)
33211                 return rc;
33212         }
33214 +       occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
33215         occ_parse_poll_response(occ);
33217         rc = occ_setup_sensor_attrs(occ);
33218 diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
33219 index 67e6968b8978..e6df719770e8 100644
33220 --- a/drivers/hwmon/occ/common.h
33221 +++ b/drivers/hwmon/occ/common.h
33222 @@ -99,7 +99,7 @@ struct occ {
33223         u8 poll_cmd_data;               /* to perform OCC poll command */
33224         int (*send_cmd)(struct occ *occ, u8 *cmd);
33226 -       unsigned long last_update;
33227 +       unsigned long next_update;
33228         struct mutex lock;              /* lock OCC access */
33230         struct device *hwmon;
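
Storing the next poll deadline (next_update = jiffies + OCC_UPDATE_FREQUENCY) instead of the last poll time lets occ_setup() prime the limiter, so reads shortly after probe serve the response already parsed at setup rather than polling the OCC again. Deadline-based rate limiting in standalone form; the (long) cast approximates the kernel's wrap-safe time_after():

#include <stdio.h>

#define PERIOD 10                       /* ticks between hardware polls */

static unsigned long now;               /* stands in for jiffies */
static unsigned long next_update;

static int maybe_poll(void)
{
        if ((long)(now - next_update) >= 0) {   /* deadline reached */
                next_update = now + PERIOD;
                return 1;                       /* polled hardware */
        }
        return 0;                               /* served cached data */
}

int main(void)
{
        next_update = now + PERIOD;             /* primed in setup */
        for (now = 0; now <= 25; now += 5)
                printf("t=%2lu poll=%d\n", now, maybe_poll());
        return 0;
}
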
33231 diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
33232 index da27ce34ee3f..eb4a06003b7f 100644
33233 --- a/drivers/hwmon/pmbus/pxe1610.c
33234 +++ b/drivers/hwmon/pmbus/pxe1610.c
33235 @@ -41,6 +41,15 @@ static int pxe1610_identify(struct i2c_client *client,
33236                                 info->vrm_version[i] = vr13;
33237                                 break;
33238                         default:
33239 +                               /*
33240 +                                * If prior pages are available, limit operation
33241 +                                * to them
33242 +                                */
33243 +                               if (i != 0) {
33244 +                                       info->pages = i;
33245 +                                       return 0;
33246 +                               }
33248                                 return -ENODEV;
33249                         }
33250                 }
33251 diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
33252 index 0f603b4094f2..a706ba11b93e 100644
33253 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c
33254 +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
33255 @@ -52,7 +52,7 @@ static ssize_t format_attr_contextid_show(struct device *dev,
33257         int pid_fmt = ETM_OPT_CTXTID;
33259 -#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X)
33260 +#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
33261         pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
33262  #endif
33263         return sprintf(page, "config:%d\n", pid_fmt);
33264 diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
33265 index 3629b7885aca..c594f45319fc 100644
33266 --- a/drivers/hwtracing/coresight/coresight-platform.c
33267 +++ b/drivers/hwtracing/coresight/coresight-platform.c
33268 @@ -90,6 +90,12 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
33269         struct of_endpoint endpoint;
33270         int in = 0, out = 0;
33272 +       /*
33273 +        * Avoid warnings in of_graph_get_next_endpoint()
33274 +        * if the device doesn't have any graph connections
33275 +        */
33276 +       if (!of_graph_is_present(node))
33277 +               return;
33278         do {
33279                 ep = of_graph_get_next_endpoint(node, ep);
33280                 if (!ep)
33281 diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
33282 index f72803a02391..28509b02a0b5 100644
33283 --- a/drivers/hwtracing/intel_th/gth.c
33284 +++ b/drivers/hwtracing/intel_th/gth.c
33285 @@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
33286         output->active = false;
33288         for_each_set_bit(master, gth->output[output->port].master,
33289 -                        TH_CONFIGURABLE_MASTERS) {
33290 +                        TH_CONFIGURABLE_MASTERS + 1) {
33291                 gth_master_set(gth, master, -1);
33292         }
33293         spin_unlock(&gth->gth_lock);
33294 @@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
33295         othdev->output.port = -1;
33296         othdev->output.active = false;
33297         gth->output[port].output = NULL;
33298 -       for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
33299 +       for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
33300                 if (gth->master[master] == port)
33301                         gth->master[master] = -1;
33302         spin_unlock(&gth->gth_lock);
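
Both gth.c hunks account for master IDs running from 0 to TH_CONFIGURABLE_MASTERS inclusive: for_each_set_bit() takes an exclusive size, so passing the bare constant never visited the last master's bit. A standalone model of the exclusive-size pitfall, with a small bit count for the demo:

#include <stdio.h>

#define LAST_VALID 8                    /* valid bit indices: 0..8 inclusive */

static int scan(unsigned mask, int size)
{
        int hits = 0;

        for (int b = 0; b < size; b++)  /* size is exclusive, like for_each_set_bit */
                if (mask & (1u << b))
                        hits++;
        return hits;
}

int main(void)
{
        unsigned mask = 1u << LAST_VALID;       /* only the last master is set */

        printf("size=N  : %d hit(s)\n", scan(mask, LAST_VALID));     /* 0: missed */
        printf("size=N+1: %d hit(s)\n", scan(mask, LAST_VALID + 1)); /* 1: found  */
        return 0;
}
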
33303 diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
33304 index 251e75c9ba9d..817cdb29bbd8 100644
33305 --- a/drivers/hwtracing/intel_th/pci.c
33306 +++ b/drivers/hwtracing/intel_th/pci.c
33307 @@ -273,11 +273,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
33308                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
33309                 .driver_data = (kernel_ulong_t)&intel_th_2x,
33310         },
33311 +       {
33312 +               /* Alder Lake-M */
33313 +               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
33314 +               .driver_data = (kernel_ulong_t)&intel_th_2x,
33315 +       },
33316         {
33317                 /* Alder Lake CPU */
33318                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
33319                 .driver_data = (kernel_ulong_t)&intel_th_2x,
33320         },
33321 +       {
33322 +               /* Rocket Lake CPU */
33323 +               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
33324 +               .driver_data = (kernel_ulong_t)&intel_th_2x,
33325 +       },
33326         { 0 },
33327  };
33329 diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
33330 index e4b7f2a951ad..c1bbc4caeb5c 100644
33331 --- a/drivers/i2c/busses/i2c-cadence.c
33332 +++ b/drivers/i2c/busses/i2c-cadence.c
33333 @@ -789,7 +789,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
33334         bool change_role = false;
33335  #endif
33337 -       ret = pm_runtime_get_sync(id->dev);
33338 +       ret = pm_runtime_resume_and_get(id->dev);
33339         if (ret < 0)
33340                 return ret;
33342 @@ -911,7 +911,7 @@ static int cdns_reg_slave(struct i2c_client *slave)
33343         if (slave->flags & I2C_CLIENT_TEN)
33344                 return -EAFNOSUPPORT;
33346 -       ret = pm_runtime_get_sync(id->dev);
33347 +       ret = pm_runtime_resume_and_get(id->dev);
33348         if (ret < 0)
33349                 return ret;
33351 @@ -1200,7 +1200,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
33352         if (IS_ERR(id->membase))
33353                 return PTR_ERR(id->membase);
33355 -       id->irq = platform_get_irq(pdev, 0);
33356 +       ret = platform_get_irq(pdev, 0);
33357 +       if (ret < 0)
33358 +               return ret;
33359 +       id->irq = ret;
33361         id->adap.owner = THIS_MODULE;
33362         id->adap.dev.of_node = pdev->dev.of_node;
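
The i2c hunks from here on repeat two mechanical fixes. First, pm_runtime_get_sync() is replaced with pm_runtime_resume_and_get(), because the former bumps the device usage counter even when resume fails, so bailing out with a bare return leaked a reference; the latter drops it on failure itself. Second, platform_get_irq() results are checked for a negative errno before being stored. A toy model of the refcount difference; this is standalone C, not the real runtime-PM implementation:

#include <stdio.h>

static int usage_count;
static int resume_ok;

static int pm_get_sync(void)            /* models pm_runtime_get_sync() */
{
        usage_count++;                  /* counted even when resume fails */
        return resume_ok ? 0 : -13;
}

static int pm_resume_and_get(void)      /* models pm_runtime_resume_and_get() */
{
        int ret = pm_get_sync();

        if (ret < 0)
                usage_count--;          /* drops the reference on failure */
        return ret;
}

int main(void)
{
        resume_ok = 0;

        (void)pm_get_sync();
        printf("get_sync failure leaves count=%d\n", usage_count);        /* 1 */

        usage_count = 0;
        (void)pm_resume_and_get();
        printf("resume_and_get failure leaves count=%d\n", usage_count);  /* 0 */
        return 0;
}
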
33363 diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
33364 index a08554c1a570..bdff0e6345d9 100644
33365 --- a/drivers/i2c/busses/i2c-emev2.c
33366 +++ b/drivers/i2c/busses/i2c-emev2.c
33367 @@ -395,7 +395,10 @@ static int em_i2c_probe(struct platform_device *pdev)
33369         em_i2c_reset(&priv->adap);
33371 -       priv->irq = platform_get_irq(pdev, 0);
33372 +       ret = platform_get_irq(pdev, 0);
33373 +       if (ret < 0)
33374 +               goto err_clk;
33375 +       priv->irq = ret;
33376         ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
33377                                 "em_i2c", priv);
33378         if (ret)
33379 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
33380 index 4acee6f9e5a3..99d446763530 100644
33381 --- a/drivers/i2c/busses/i2c-i801.c
33382 +++ b/drivers/i2c/busses/i2c-i801.c
33383 @@ -73,6 +73,7 @@
33384   * Comet Lake-V (PCH)          0xa3a3  32      hard    yes     yes     yes
33385   * Alder Lake-S (PCH)          0x7aa3  32      hard    yes     yes     yes
33386   * Alder Lake-P (PCH)          0x51a3  32      hard    yes     yes     yes
33387 + * Alder Lake-M (PCH)          0x54a3  32      hard    yes     yes     yes
33388   *
33389   * Features supported by this driver:
33390   * Software PEC                                no
33391 @@ -230,6 +231,7 @@
33392  #define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS         0x4b23
33393  #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS          0x4da3
33394  #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS         0x51a3
33395 +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS         0x54a3
33396  #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS              0x5ad4
33397  #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS         0x7aa3
33398  #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS            0x8c22
33399 @@ -1087,6 +1089,7 @@ static const struct pci_device_id i801_ids[] = {
33400         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
33401         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
33402         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) },
33403 +       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS) },
33404         { 0, }
33405  };
33407 @@ -1771,6 +1774,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
33408         case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
33409         case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
33410         case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS:
33411 +       case PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS:
33412                 priv->features |= FEATURE_BLOCK_PROC;
33413                 priv->features |= FEATURE_I2C_BLOCK_READ;
33414                 priv->features |= FEATURE_IRQ;
33415 diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
33416 index 98a89301ed2a..8e987945ed45 100644
33417 --- a/drivers/i2c/busses/i2c-img-scb.c
33418 +++ b/drivers/i2c/busses/i2c-img-scb.c
33419 @@ -1057,7 +1057,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
33420                         atomic = true;
33421         }
33423 -       ret = pm_runtime_get_sync(adap->dev.parent);
33424 +       ret = pm_runtime_resume_and_get(adap->dev.parent);
33425         if (ret < 0)
33426                 return ret;
33428 @@ -1158,7 +1158,7 @@ static int img_i2c_init(struct img_i2c *i2c)
33429         u32 rev;
33430         int ret;
33432 -       ret = pm_runtime_get_sync(i2c->adap.dev.parent);
33433 +       ret = pm_runtime_resume_and_get(i2c->adap.dev.parent);
33434         if (ret < 0)
33435                 return ret;
33437 diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
33438 index 9db6ccded5e9..8b9ba055c418 100644
33439 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c
33440 +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
33441 @@ -259,7 +259,7 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
33442         unsigned int temp;
33443         int ret;
33445 -       ret = pm_runtime_get_sync(lpi2c_imx->adapter.dev.parent);
33446 +       ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
33447         if (ret < 0)
33448                 return ret;
33450 diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
33451 index b80fdc1f0092..dc5ca71906db 100644
33452 --- a/drivers/i2c/busses/i2c-imx.c
33453 +++ b/drivers/i2c/busses/i2c-imx.c
33454 @@ -801,7 +801,7 @@ static int i2c_imx_reg_slave(struct i2c_client *client)
33455         i2c_imx->last_slave_event = I2C_SLAVE_STOP;
33457         /* Resume */
33458 -       ret = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
33459 +       ret = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
33460         if (ret < 0) {
33461                 dev_err(&i2c_imx->adapter.dev, "failed to resume i2c controller");
33462                 return ret;
33463 @@ -1253,7 +1253,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
33464         struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
33465         int result;
33467 -       result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
33468 +       result = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
33469         if (result < 0)
33470                 return result;
33472 @@ -1496,7 +1496,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
33473         struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
33474         int irq, ret;
33476 -       ret = pm_runtime_get_sync(&pdev->dev);
33477 +       ret = pm_runtime_resume_and_get(&pdev->dev);
33478         if (ret < 0)
33479                 return ret;
33481 diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
33482 index 55177eb21d7b..baa7319eee53 100644
33483 --- a/drivers/i2c/busses/i2c-jz4780.c
33484 +++ b/drivers/i2c/busses/i2c-jz4780.c
33485 @@ -825,7 +825,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
33487         jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
33489 -       i2c->irq = platform_get_irq(pdev, 0);
33490 +       ret = platform_get_irq(pdev, 0);
33491 +       if (ret < 0)
33492 +               goto err;
33493 +       i2c->irq = ret;
33494         ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
33495                                dev_name(&pdev->dev), i2c);
33496         if (ret)
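The same conversion recurs in the mlxbf, rcar and sh7760 hunks below: platform_get_irq() returns a negative errno (typically -EPROBE_DEFER or -ENXIO) when the interrupt cannot be resolved, and that value must not be handed to devm_request_irq() as if it were an IRQ number. A minimal sketch of the corrected pattern, with handler_fn standing in for the driver's real handler:

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;		/* propagate -EPROBE_DEFER and friends */
	i2c->irq = ret;			/* only now is this a valid IRQ number */
	ret = devm_request_irq(&pdev->dev, i2c->irq, handler_fn, 0,
			       dev_name(&pdev->dev), i2c);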
33497 diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
33498 index 2fb0532d8a16..ab261d762dea 100644
33499 --- a/drivers/i2c/busses/i2c-mlxbf.c
33500 +++ b/drivers/i2c/busses/i2c-mlxbf.c
33501 @@ -2376,6 +2376,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
33502         mlxbf_i2c_init_slave(pdev, priv);
33504         irq = platform_get_irq(pdev, 0);
33505 +       if (irq < 0)
33506 +               return irq;
33507         ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
33508                                IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
33509                                dev_name(dev), priv);
33510 diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
33511 index 2ffd2f354d0a..bf25acba2ed5 100644
33512 --- a/drivers/i2c/busses/i2c-mt65xx.c
33513 +++ b/drivers/i2c/busses/i2c-mt65xx.c
33514 @@ -479,7 +479,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
33516         u16 control_reg;
33518 -       if (i2c->dev_comp->dma_sync) {
33519 +       if (i2c->dev_comp->apdma_sync) {
33520                 writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
33521                 udelay(10);
33522                 writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
33523 @@ -564,7 +564,7 @@ static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
33525  static int mtk_i2c_max_step_cnt(unsigned int target_speed)
33527 -       if (target_speed > I2C_MAX_FAST_MODE_FREQ)
33528 +       if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
33529                 return MAX_HS_STEP_CNT_DIV;
33530         else
33531                 return MAX_STEP_CNT_DIV;
33532 @@ -635,7 +635,7 @@ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c,
33533         if (sda_min > sda_max)
33534                 return -3;
33536 -       if (check_speed > I2C_MAX_FAST_MODE_FREQ) {
33537 +       if (check_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
33538                 if (i2c->dev_comp->ltiming_adjust) {
33539                         i2c->ac_timing.hs = I2C_TIME_DEFAULT_VALUE |
33540                                 (sample_cnt << 12) | (high_cnt << 8);
33541 @@ -850,7 +850,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
33543         control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
33544                         ~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
33545 -       if ((i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ) || (left_num >= 1))
33546 +       if ((i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) || (left_num >= 1))
33547                 control_reg |= I2C_CONTROL_RS;
33549         if (i2c->op == I2C_MASTER_WRRD)
33550 @@ -1067,7 +1067,8 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
33551                 }
33552         }
33554 -       if (i2c->auto_restart && num >= 2 && i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ)
33555 +       if (i2c->auto_restart && num >= 2 &&
33556 +               i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)
33557                 /* ignore the first restart irq after the master code,
33558                  * otherwise the first transfer will be discarded.
33559                  */
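The mt65xx changes above all hinge on the standard bus-speed thresholds from include/linux/i2c.h: the driver treated everything above plain fast mode as high-speed, which wrongly pushed fast-mode-plus (1 MHz) buses into the Hs-mode code paths (master code handling, step-count limits, repeated-start forcing). For reference, the relevant constants as defined around v5.12:

	#define I2C_MAX_STANDARD_MODE_FREQ	100000
	#define I2C_MAX_FAST_MODE_FREQ		400000
	#define I2C_MAX_FAST_MODE_PLUS_FREQ	1000000
	#define I2C_MAX_HIGH_SPEED_MODE_FREQ	3400000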
33560 diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
33561 index 12ac4212aded..d4f6c6d60683 100644
33562 --- a/drivers/i2c/busses/i2c-omap.c
33563 +++ b/drivers/i2c/busses/i2c-omap.c
33564 @@ -1404,9 +1404,9 @@ omap_i2c_probe(struct platform_device *pdev)
33565         pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT);
33566         pm_runtime_use_autosuspend(omap->dev);
33568 -       r = pm_runtime_get_sync(omap->dev);
33569 +       r = pm_runtime_resume_and_get(omap->dev);
33570         if (r < 0)
33571 -               goto err_free_mem;
33572 +               goto err_disable_pm;
33574         /*
33575          * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
33576 @@ -1513,8 +1513,8 @@ omap_i2c_probe(struct platform_device *pdev)
33577         omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
33578         pm_runtime_dont_use_autosuspend(omap->dev);
33579         pm_runtime_put_sync(omap->dev);
33580 +err_disable_pm:
33581         pm_runtime_disable(&pdev->dev);
33582 -err_free_mem:
33584         return r;
33586 @@ -1525,7 +1525,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
33587         int ret;
33589         i2c_del_adapter(&omap->adapter);
33590 -       ret = pm_runtime_get_sync(&pdev->dev);
33591 +       ret = pm_runtime_resume_and_get(&pdev->dev);
33592         if (ret < 0)
33593                 return ret;
33595 diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
33596 index 12f6d452c0f7..8722ca23f889 100644
33597 --- a/drivers/i2c/busses/i2c-rcar.c
33598 +++ b/drivers/i2c/busses/i2c-rcar.c
33599 @@ -1027,7 +1027,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
33600         if (of_property_read_bool(dev->of_node, "smbus"))
33601                 priv->flags |= ID_P_HOST_NOTIFY;
33603 -       priv->irq = platform_get_irq(pdev, 0);
33604 +       ret = platform_get_irq(pdev, 0);
33605 +       if (ret < 0)
33606 +               goto out_pm_disable;
33607 +       priv->irq = ret;
33608         ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
33609         if (ret < 0) {
33610                 dev_err(dev, "cannot get irq %d\n", priv->irq);
33611 diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
33612 index c2005c789d2b..319d1fa617c8 100644
33613 --- a/drivers/i2c/busses/i2c-sh7760.c
33614 +++ b/drivers/i2c/busses/i2c-sh7760.c
33615 @@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
33616                 goto out2;
33617         }
33619 -       id->irq = platform_get_irq(pdev, 0);
33620 +       ret = platform_get_irq(pdev, 0);
33621 +       if (ret < 0)
33622 +               goto out3;
33623 +       id->irq = ret;
33625         id->adap.nr = pdev->id;
33626         id->adap.algo = &sh7760_i2c_algo;
33627 diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
33628 index 2917fecf6c80..8ead7e021008 100644
33629 --- a/drivers/i2c/busses/i2c-sprd.c
33630 +++ b/drivers/i2c/busses/i2c-sprd.c
33631 @@ -290,7 +290,7 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
33632         struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
33633         int im, ret;
33635 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33636 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33637         if (ret < 0)
33638                 return ret;
33640 @@ -576,7 +576,7 @@ static int sprd_i2c_remove(struct platform_device *pdev)
33641         struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
33642         int ret;
33644 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33645 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33646         if (ret < 0)
33647                 return ret;
33649 diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
33650 index c62c815b88eb..318abfa7926b 100644
33651 --- a/drivers/i2c/busses/i2c-stm32f7.c
33652 +++ b/drivers/i2c/busses/i2c-stm32f7.c
33653 @@ -1652,7 +1652,7 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
33654         i2c_dev->msg_id = 0;
33655         f7_msg->smbus = false;
33657 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33658 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33659         if (ret < 0)
33660                 return ret;
33662 @@ -1698,7 +1698,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
33663         f7_msg->read_write = read_write;
33664         f7_msg->smbus = true;
33666 -       ret = pm_runtime_get_sync(dev);
33667 +       ret = pm_runtime_resume_and_get(dev);
33668         if (ret < 0)
33669                 return ret;
33671 @@ -1799,7 +1799,7 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
33672         if (ret)
33673                 return ret;
33675 -       ret = pm_runtime_get_sync(dev);
33676 +       ret = pm_runtime_resume_and_get(dev);
33677         if (ret < 0)
33678                 return ret;
33680 @@ -1880,7 +1880,7 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
33682         WARN_ON(!i2c_dev->slave[id]);
33684 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33685 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33686         if (ret < 0)
33687                 return ret;
33689 @@ -2273,7 +2273,7 @@ static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
33690         int ret;
33691         struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
33693 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33694 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33695         if (ret < 0)
33696                 return ret;
33698 @@ -2295,7 +2295,7 @@ static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
33699         int ret;
33700         struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
33702 -       ret = pm_runtime_get_sync(i2c_dev->dev);
33703 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
33704         if (ret < 0)
33705                 return ret;
33707 diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
33708 index 087b2951942e..2a8568b97c14 100644
33709 --- a/drivers/i2c/busses/i2c-xiic.c
33710 +++ b/drivers/i2c/busses/i2c-xiic.c
33711 @@ -706,7 +706,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
33712         dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
33713                 xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
33715 -       err = pm_runtime_get_sync(i2c->dev);
33716 +       err = pm_runtime_resume_and_get(i2c->dev);
33717         if (err < 0)
33718                 return err;
33720 @@ -873,7 +873,7 @@ static int xiic_i2c_remove(struct platform_device *pdev)
33721         /* remove adapter & data */
33722         i2c_del_adapter(&i2c->adap);
33724 -       ret = pm_runtime_get_sync(i2c->dev);
33725 +       ret = pm_runtime_resume_and_get(i2c->dev);
33726         if (ret < 0)
33727                 return ret;
33729 diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
33730 index 6ceb11cc4be1..6ef38a8ee95c 100644
33731 --- a/drivers/i2c/i2c-dev.c
33732 +++ b/drivers/i2c/i2c-dev.c
33733 @@ -440,8 +440,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33734                                    sizeof(rdwr_arg)))
33735                         return -EFAULT;
33737 -               /* Put an arbitrary limit on the number of messages that can
33738 -                * be sent at once */
33739 +               if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
33740 +                       return -EINVAL;
33742 +               /*
33743 +                * Put an arbitrary limit on the number of messages that can
33744 +                * be sent at once
33745 +                */
33746                 if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
33747                         return -EINVAL;
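For context, I2C_RDWR is the user-space entry point being hardened here; a NULL msgs pointer or an nmsgs of zero previously slipped past the upper-bound check into the allocation and copy path. A hypothetical well-formed caller, using only the UAPI headers:

	#include <sys/ioctl.h>
	#include <linux/i2c.h>
	#include <linux/i2c-dev.h>

	/* Write a register address, then read one byte back, as a single
	 * combined transaction on an already-open /dev/i2c-N descriptor. */
	int i2c_read_byte(int fd, __u16 addr, __u8 reg, __u8 *out)
	{
		struct i2c_msg msgs[2] = {
			{ .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
			{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = out  },
		};
		struct i2c_rdwr_ioctl_data data = { .msgs = msgs, .nmsgs = 2 };

		return ioctl(fd, I2C_RDWR, &data); /* now fails -EINVAL for NULL msgs / zero nmsgs */
	}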
33749 diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
33750 index f8e9b7305c13..e2e12a5585e5 100644
33751 --- a/drivers/i3c/master.c
33752 +++ b/drivers/i3c/master.c
33753 @@ -2535,7 +2535,7 @@ int i3c_master_register(struct i3c_master_controller *master,
33755         ret = i3c_master_bus_init(master);
33756         if (ret)
33757 -               goto err_destroy_wq;
33758 +               goto err_put_dev;
33760         ret = device_add(&master->dev);
33761         if (ret)
33762 @@ -2566,9 +2566,6 @@ int i3c_master_register(struct i3c_master_controller *master,
33763  err_cleanup_bus:
33764         i3c_master_bus_cleanup(master);
33766 -err_destroy_wq:
33767 -       destroy_workqueue(master->wq);
33769  err_put_dev:
33770         put_device(&master->dev);
33772 diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
33773 index 2e0c62c39155..8acf277b8b25 100644
33774 --- a/drivers/iio/accel/Kconfig
33775 +++ b/drivers/iio/accel/Kconfig
33776 @@ -211,7 +211,6 @@ config DMARD10
33777  config HID_SENSOR_ACCEL_3D
33778         depends on HID_SENSOR_HUB
33779         select IIO_BUFFER
33780 -       select IIO_TRIGGERED_BUFFER
33781         select HID_SENSOR_IIO_COMMON
33782         select HID_SENSOR_IIO_TRIGGER
33783         tristate "HID Accelerometers 3D"
33784 diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
33785 index 3633a4e302c6..fe225990de24 100644
33786 --- a/drivers/iio/accel/adis16201.c
33787 +++ b/drivers/iio/accel/adis16201.c
33788 @@ -215,7 +215,7 @@ static const struct iio_chan_spec adis16201_channels[] = {
33789         ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
33790         ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
33791                         BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
33792 -       ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
33793 +       ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
33794                         BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
33795         IIO_CHAN_SOFT_TIMESTAMP(7)
33796  };
33797 diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
33798 index e0667c4b3c08..91958da22dcf 100644
33799 --- a/drivers/iio/adc/Kconfig
33800 +++ b/drivers/iio/adc/Kconfig
33801 @@ -249,7 +249,7 @@ config AD799X
33802  config AD9467
33803         tristate "Analog Devices AD9467 High Speed ADC driver"
33804         depends on SPI
33805 -       select ADI_AXI_ADC
33806 +       depends on ADI_AXI_ADC
33807         help
33808           Say yes here to build support for Analog Devices:
33809           * AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
33810 @@ -266,8 +266,6 @@ config ADI_AXI_ADC
33811         select IIO_BUFFER
33812         select IIO_BUFFER_HW_CONSUMER
33813         select IIO_BUFFER_DMAENGINE
33814 -       depends on HAS_IOMEM
33815 -       depends on OF
33816         help
33817           Say yes here to build support for Analog Devices Generic
33818           AXI ADC IP core. The IP core is used for interfacing with
33819 diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
33820 index 17402714b387..9e9ff07cf972 100644
33821 --- a/drivers/iio/adc/ad7476.c
33822 +++ b/drivers/iio/adc/ad7476.c
33823 @@ -321,25 +321,15 @@ static int ad7476_probe(struct spi_device *spi)
33824         spi_message_init(&st->msg);
33825         spi_message_add_tail(&st->xfer, &st->msg);
33827 -       ret = iio_triggered_buffer_setup(indio_dev, NULL,
33828 -                       &ad7476_trigger_handler, NULL);
33829 +       ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
33830 +                                             &ad7476_trigger_handler, NULL);
33831         if (ret)
33832 -               goto error_disable_reg;
33833 +               return ret;
33835         if (st->chip_info->reset)
33836                 st->chip_info->reset(st);
33838 -       ret = iio_device_register(indio_dev);
33839 -       if (ret)
33840 -               goto error_ring_unregister;
33841 -       return 0;
33843 -error_ring_unregister:
33844 -       iio_triggered_buffer_cleanup(indio_dev);
33845 -error_disable_reg:
33846 -       regulator_disable(st->reg);
33848 -       return ret;
33849 +       return devm_iio_device_register(&spi->dev, indio_dev);
33852  static const struct spi_device_id ad7476_id[] = {
33853 diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
33854 index 24d492567336..2a3dd3b907be 100644
33855 --- a/drivers/iio/common/hid-sensors/Kconfig
33856 +++ b/drivers/iio/common/hid-sensors/Kconfig
33857 @@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
33858         tristate "Common module (trigger) for all HID Sensor IIO drivers"
33859         depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
33860         select IIO_TRIGGER
33861 +       select IIO_TRIGGERED_BUFFER
33862         help
33863           Say yes here to build trigger support for HID sensors.
33864           Triggers will be send if all requested attributes were read.
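This hunk is the other half of the many "select IIO_TRIGGERED_BUFFER" removals in the accel, gyro, humidity, light, magnetometer, orientation, pressure and temperature Kconfig hunks of this patch: rather than each HID sensor driver selecting triggered-buffer support itself (which could leave HID_SENSOR_IIO_TRIGGER enabled without it in randconfig builds), the shared HID_SENSOR_IIO_TRIGGER option now pulls it in once. Schematically:

	config HID_SENSOR_IIO_TRIGGER
		select IIO_TRIGGER
		select IIO_TRIGGERED_BUFFER	# selected here, once

	config HID_SENSOR_ACCEL_3D
		select IIO_BUFFER
		select HID_SENSOR_IIO_TRIGGER	# implies IIO_TRIGGERED_BUFFER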
33865 diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
33866 index 5824f2edf975..20b5ac7ab66a 100644
33867 --- a/drivers/iio/gyro/Kconfig
33868 +++ b/drivers/iio/gyro/Kconfig
33869 @@ -111,7 +111,6 @@ config FXAS21002C_SPI
33870  config HID_SENSOR_GYRO_3D
33871         depends on HID_SENSOR_HUB
33872         select IIO_BUFFER
33873 -       select IIO_TRIGGERED_BUFFER
33874         select HID_SENSOR_IIO_COMMON
33875         select HID_SENSOR_IIO_TRIGGER
33876         tristate "HID Gyroscope 3D"
33877 diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
33878 index ac90be03332a..f17a93519535 100644
33879 --- a/drivers/iio/gyro/mpu3050-core.c
33880 +++ b/drivers/iio/gyro/mpu3050-core.c
33881 @@ -272,7 +272,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
33882         case IIO_CHAN_INFO_OFFSET:
33883                 switch (chan->type) {
33884                 case IIO_TEMP:
33885 -                       /* The temperature scaling is (x+23000)/280 Celsius */
33886 +                       /*
33887 +                        * The temperature scaling is (x+23000)/280 Celsius
33888 +                        * for the "best fit straight line" temperature range
33889 +                        * of -30C..85C.  The 23000 includes room temperature
33890 +                        * offset of +35C, 280 is the precision scale and x is
33891 +                        * the 16-bit signed integer reported by hardware.
33892 +                        *
33893 +                        * Temperature value itself represents temperature of
33894 +                        * the sensor die.
33895 +                        */
33896                         *val = 23000;
33897                         return IIO_VAL_INT;
33898                 default:
33899 @@ -329,7 +338,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
33900                                 goto out_read_raw_unlock;
33901                         }
33903 -                       *val = be16_to_cpu(raw_val);
33904 +                       *val = (s16)be16_to_cpu(raw_val);
33905                         ret = IIO_VAL_INT;
33907                         goto out_read_raw_unlock;
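The (s16) cast above is the entire fix: be16_to_cpu() yields an unsigned u16, and assigning that straight into the int *val zero-extends it, so negative angular-rate samples surfaced as large positive values. A minimal illustration:

	__be16 raw = cpu_to_be16(0xff38);	/* -200 as a signed 16-bit value */
	int wrong = be16_to_cpu(raw);		/* 65336: zero-extended */
	int right = (s16)be16_to_cpu(raw);	/* -200: sign-extended */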
33908 diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
33909 index 6549fcf6db69..2de5494e7c22 100644
33910 --- a/drivers/iio/humidity/Kconfig
33911 +++ b/drivers/iio/humidity/Kconfig
33912 @@ -52,7 +52,6 @@ config HID_SENSOR_HUMIDITY
33913         tristate "HID Environmental humidity sensor"
33914         depends on HID_SENSOR_HUB
33915         select IIO_BUFFER
33916 -       select IIO_TRIGGERED_BUFFER
33917         select HID_SENSOR_IIO_COMMON
33918         select HID_SENSOR_IIO_TRIGGER
33919         help
33920 diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
33921 index dfe86c589325..c41b8ef1e250 100644
33922 --- a/drivers/iio/imu/adis16480.c
33923 +++ b/drivers/iio/imu/adis16480.c
33924 @@ -10,6 +10,7 @@
33925  #include <linux/of_irq.h>
33926  #include <linux/interrupt.h>
33927  #include <linux/delay.h>
33928 +#include <linux/math.h>
33929  #include <linux/mutex.h>
33930  #include <linux/device.h>
33931  #include <linux/kernel.h>
33932 @@ -17,6 +18,7 @@
33933  #include <linux/slab.h>
33934  #include <linux/sysfs.h>
33935  #include <linux/module.h>
33936 +#include <linux/lcm.h>
33938  #include <linux/iio/iio.h>
33939  #include <linux/iio/sysfs.h>
33940 @@ -170,6 +172,11 @@ static const char * const adis16480_int_pin_names[4] = {
33941         [ADIS16480_PIN_DIO4] = "DIO4",
33942  };
33944 +static bool low_rate_allow;
33945 +module_param(low_rate_allow, bool, 0444);
33946 +MODULE_PARM_DESC(low_rate_allow,
33947 +                "Allow IMU rates below the minimum advisable when external clk is used in PPS mode (default: N)");
33949  #ifdef CONFIG_DEBUG_FS
33951  static ssize_t adis16480_show_firmware_revision(struct file *file,
33952 @@ -312,7 +319,8 @@ static int adis16480_debugfs_init(struct iio_dev *indio_dev)
33953  static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
33955         struct adis16480 *st = iio_priv(indio_dev);
33956 -       unsigned int t, reg;
33957 +       unsigned int t, sample_rate = st->clk_freq;
33958 +       int ret;
33960         if (val < 0 || val2 < 0)
33961                 return -EINVAL;
33962 @@ -321,28 +329,65 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
33963         if (t == 0)
33964                 return -EINVAL;
33966 +       mutex_lock(&st->adis.state_lock);
33967         /*
33968 -        * When using PPS mode, the rate of data collection is equal to the
33969 -        * product of the external clock frequency and the scale factor in the
33970 -        * SYNC_SCALE register.
33971 -        * When using sync mode, or internal clock, the output data rate is
33972 -        * equal with  the clock frequency divided by DEC_RATE + 1.
33973 +        * When using PPS mode, the input clock needs to be scaled so that we have an IMU
33974 +        * sample rate between (optimally) 4000 and 4250. After this, we can use the
33975 +        * decimation filter to lower the sampling rate in order to get what the user wants.
33976 +        * Optimally, the user sample rate is a multiple of both the IMU sample rate and
33977 +        * the input clock. Hence, calculating the sync_scale dynamically gives us better
33978 +        * chances of achieving a perfect/integer value for DEC_RATE. The math here is:
33979 +        *      1. lcm of the input clock and the desired output rate.
33980 +        *      2. get the highest multiple of the previous result lower than the adis max rate.
33981 +        *      3. The last result becomes the IMU sample rate. Use that to calculate SYNC_SCALE
33982 +        *         and DEC_RATE (to get the user output rate)
33983          */
33984         if (st->clk_mode == ADIS16480_CLK_PPS) {
33985 -               t = t / st->clk_freq;
33986 -               reg = ADIS16495_REG_SYNC_SCALE;
33987 -       } else {
33988 -               t = st->clk_freq / t;
33989 -               reg = ADIS16480_REG_DEC_RATE;
33990 +               unsigned long scaled_rate = lcm(st->clk_freq, t);
33991 +               int sync_scale;
33993 +               /*
33994 +                * If lcm is bigger than the IMU maximum sampling rate there's no perfect
33995 +                * solution. In this case, we get the highest multiple of the input clock
33996 +                * lower than the IMU max sample rate.
33997 +                */
33998 +               if (scaled_rate > st->chip_info->int_clk)
33999 +                       scaled_rate = st->chip_info->int_clk / st->clk_freq * st->clk_freq;
34000 +               else
34001 +                       scaled_rate = st->chip_info->int_clk / scaled_rate * scaled_rate;
34003 +               /*
34004 +                * This is not a hard requirement, but it is not advised to run the IMU
34005 +                * with a sample rate lower than 4000Hz due to possible undersampling
34006 +                * issues. However, there are users that might really want to take the risk.
34007 +                * Hence, we provide a module parameter for them. If set, we allow sample
34008 +                * rates lower than 4KHz. By default, we won't allow this and we just round up
34009 +                * the rate to the next multiple of the input clock bigger than 4KHz. This
34010 +                * is done because in some cases (when DEC_RATE is 0) it might give
34011 +                * us the closest value to the one desired by the user...
34012 +                */
34013 +               if (scaled_rate < 4000000 && !low_rate_allow)
34014 +                       scaled_rate = roundup(4000000, st->clk_freq);
34016 +               sync_scale = scaled_rate / st->clk_freq;
34017 +               ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
34018 +               if (ret)
34019 +                       goto error;
34021 +               sample_rate = scaled_rate;
34022         }
34024 +       t = DIV_ROUND_CLOSEST(sample_rate, t);
34025 +       if (t)
34026 +               t--;
34028         if (t > st->chip_info->max_dec_rate)
34029                 t = st->chip_info->max_dec_rate;
34031 -       if ((t != 0) && (st->clk_mode != ADIS16480_CLK_PPS))
34032 -               t--;
34034 -       return adis_write_reg_16(&st->adis, reg, t);
34035 +       ret = __adis_write_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, t);
34036 +error:
34037 +       mutex_unlock(&st->adis.state_lock);
34038 +       return ret;
34041  static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
34042 @@ -350,34 +395,35 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
34043         struct adis16480 *st = iio_priv(indio_dev);
34044         uint16_t t;
34045         int ret;
34046 -       unsigned int freq;
34047 -       unsigned int reg;
34048 +       unsigned int freq, sample_rate = st->clk_freq;
34050 -       if (st->clk_mode == ADIS16480_CLK_PPS)
34051 -               reg = ADIS16495_REG_SYNC_SCALE;
34052 -       else
34053 -               reg = ADIS16480_REG_DEC_RATE;
34054 +       mutex_lock(&st->adis.state_lock);
34056 +       if (st->clk_mode == ADIS16480_CLK_PPS) {
34057 +               u16 sync_scale;
34059 +               ret = __adis_read_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, &sync_scale);
34060 +               if (ret)
34061 +                       goto error;
34063 -       ret = adis_read_reg_16(&st->adis, reg, &t);
34064 +               sample_rate = st->clk_freq * sync_scale;
34065 +       }
34067 +       ret = __adis_read_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, &t);
34068         if (ret)
34069 -               return ret;
34070 +               goto error;
34072 -       /*
34073 -        * When using PPS mode, the rate of data collection is equal to the
34074 -        * product of the external clock frequency and the scale factor in the
34075 -        * SYNC_SCALE register.
34076 -        * When using sync mode, or internal clock, the output data rate is
34077 -        * equal with  the clock frequency divided by DEC_RATE + 1.
34078 -        */
34079 -       if (st->clk_mode == ADIS16480_CLK_PPS)
34080 -               freq = st->clk_freq * t;
34081 -       else
34082 -               freq = st->clk_freq / (t + 1);
34083 +       mutex_unlock(&st->adis.state_lock);
34085 +       freq = DIV_ROUND_CLOSEST(sample_rate, (t + 1));
34087         *val = freq / 1000;
34088         *val2 = (freq % 1000) * 1000;
34090         return IIO_VAL_INT_PLUS_MICRO;
34091 +error:
34092 +       mutex_unlock(&st->adis.state_lock);
34093 +       return ret;
34096  enum {
34097 @@ -1278,6 +1324,20 @@ static int adis16480_probe(struct spi_device *spi)
34099                 st->clk_freq = clk_get_rate(st->ext_clk);
34100                 st->clk_freq *= 1000; /* micro */
34101 +               if (st->clk_mode == ADIS16480_CLK_PPS) {
34102 +                       u16 sync_scale;
34104 +                       /*
34105 +                        * In PPS mode, the IMU sample rate is the clk_freq * sync_scale. Hence,
34106 +                        * default the IMU sample rate to the highest multiple of the input clock
34107 +                        * lower than the IMU max sample rate. The internal sample rate is the
34108 +                        * max...
34109 +                        */
34110 +                       sync_scale = st->chip_info->int_clk / st->clk_freq;
34111 +                       ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
34112 +                       if (ret)
34113 +                               return ret;
34114 +               }
34115         } else {
34116                 st->clk_freq = st->chip_info->int_clk;
34117         }
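A worked example of the PPS-mode math introduced above, with illustrative numbers (the driver itself keeps all rates scaled by 1000, hence the 4000000 in the code for the 4 kHz advisory minimum): external clock 1000 Hz, requested output rate 500 Hz, internal maximum 4250 Hz.

	/*
	 * lcm(1000, 500)                     = 1000
	 * highest multiple of 1000 <= 4250   = 4000 -> IMU sample rate
	 *   (>= 4000, so the low_rate_allow path is not needed)
	 * SYNC_SCALE = 4000 / 1000           = 4
	 * DEC_RATE   = round(4000 / 500) - 1 = 7
	 * output     = 4000 / (7 + 1)        = 500 Hz, exactly as requested
	 */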
34118 diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
34119 index 453c51c79655..69ab94ab7297 100644
34120 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
34121 +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
34122 @@ -731,12 +731,16 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
34123         }
34126 -static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
34127 +static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val,
34128 +                                       int val2)
34130         int result, i;
34132 +       if (val != 0)
34133 +               return -EINVAL;
34135         for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
34136 -               if (gyro_scale_6050[i] == val) {
34137 +               if (gyro_scale_6050[i] == val2) {
34138                         result = inv_mpu6050_set_gyro_fsr(st, i);
34139                         if (result)
34140                                 return result;
34141 @@ -767,13 +771,17 @@ static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
34142         return -EINVAL;
34145 -static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
34146 +static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val,
34147 +                                        int val2)
34149         int result, i;
34150         u8 d;
34152 +       if (val != 0)
34153 +               return -EINVAL;
34155         for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
34156 -               if (accel_scale[i] == val) {
34157 +               if (accel_scale[i] == val2) {
34158                         d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
34159                         result = regmap_write(st->map, st->reg->accl_config, d);
34160                         if (result)
34161 @@ -814,10 +822,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
34162         case IIO_CHAN_INFO_SCALE:
34163                 switch (chan->type) {
34164                 case IIO_ANGL_VEL:
34165 -                       result = inv_mpu6050_write_gyro_scale(st, val2);
34166 +                       result = inv_mpu6050_write_gyro_scale(st, val, val2);
34167                         break;
34168                 case IIO_ACCEL:
34169 -                       result = inv_mpu6050_write_accel_scale(st, val2);
34170 +                       result = inv_mpu6050_write_accel_scale(st, val, val2);
34171                         break;
34172                 default:
34173                         result = -EINVAL;
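The extra val argument matters because of how the IIO core splits fixed-point sysfs writes: a scale written as "0.000133090" arrives as val = 0 (integer part) and val2 = 133090 (fractional part, in nano units here). Matching on val2 alone meant a bogus "1.000133090" was accepted as if it were the valid scale. A sketch of the corrected shape, with hypothetical names (chip_state, scale_table, set_fsr):

	static int write_scale(struct chip_state *st, int val, int val2)
	{
		int i;

		if (val != 0)	/* every supported scale is below 1.0 */
			return -EINVAL;

		for (i = 0; i < ARRAY_SIZE(scale_table); i++)
			if (scale_table[i] == val2)
				return set_fsr(st, i);

		return -EINVAL;
	}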
34174 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
34175 index 7db761afa578..36f3a900878d 100644
34176 --- a/drivers/iio/industrialio-core.c
34177 +++ b/drivers/iio/industrialio-core.c
34178 @@ -1734,7 +1734,6 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34179         if (!indio_dev->info)
34180                 goto out_unlock;
34182 -       ret = -EINVAL;
34183         list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
34184                 ret = h->ioctl(indio_dev, filp, cmd, arg);
34185                 if (ret != IIO_IOCTL_UNHANDLED)
34186 @@ -1742,7 +1741,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34187         }
34189         if (ret == IIO_IOCTL_UNHANDLED)
34190 -               ret = -EINVAL;
34191 +               ret = -ENODEV;
34193  out_unlock:
34194         mutex_unlock(&indio_dev->info_exist_lock);
34195 @@ -1864,9 +1863,6 @@ EXPORT_SYMBOL(__iio_device_register);
34196   **/
34197  void iio_device_unregister(struct iio_dev *indio_dev)
34199 -       struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
34200 -       struct iio_ioctl_handler *h, *t;
34202         cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
34204         mutex_lock(&indio_dev->info_exist_lock);
34205 @@ -1877,9 +1873,6 @@ void iio_device_unregister(struct iio_dev *indio_dev)
34207         indio_dev->info = NULL;
34209 -       list_for_each_entry_safe(h, t, &iio_dev_opaque->ioctl_handlers, entry)
34210 -               list_del(&h->entry);
34212         iio_device_wakeup_eventset(indio_dev);
34213         iio_buffer_wakeup_poll(indio_dev);
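Two small semantic points in the industrialio-core hunks above: IIO_IOCTL_UNHANDLED is a positive in-kernel sentinel each registered handler returns to say "not mine", and with the pre-seeded -EINVAL gone, an ioctl that no handler claims now reports -ENODEV, letting callers distinguish "this device has no such interface" from "bad argument". The dispatch is a plain chain of responsibility:

	list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
		ret = h->ioctl(indio_dev, filp, cmd, arg);
		if (ret != IIO_IOCTL_UNHANDLED)
			break;			/* a handler claimed the cmd */
	}
	if (ret == IIO_IOCTL_UNHANDLED)
		ret = -ENODEV;			/* nobody recognised it */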
34215 diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
34216 index 33ad4dd0b5c7..917f9becf9c7 100644
34217 --- a/drivers/iio/light/Kconfig
34218 +++ b/drivers/iio/light/Kconfig
34219 @@ -256,7 +256,6 @@ config ISL29125
34220  config HID_SENSOR_ALS
34221         depends on HID_SENSOR_HUB
34222         select IIO_BUFFER
34223 -       select IIO_TRIGGERED_BUFFER
34224         select HID_SENSOR_IIO_COMMON
34225         select HID_SENSOR_IIO_TRIGGER
34226         tristate "HID ALS"
34227 @@ -270,7 +269,6 @@ config HID_SENSOR_ALS
34228  config HID_SENSOR_PROX
34229         depends on HID_SENSOR_HUB
34230         select IIO_BUFFER
34231 -       select IIO_TRIGGERED_BUFFER
34232         select HID_SENSOR_IIO_COMMON
34233         select HID_SENSOR_IIO_TRIGGER
34234         tristate "HID PROX"
34235 diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
34236 index 7ba7aa59437c..040d8429a6e0 100644
34237 --- a/drivers/iio/light/gp2ap002.c
34238 +++ b/drivers/iio/light/gp2ap002.c
34239 @@ -583,7 +583,7 @@ static int gp2ap002_probe(struct i2c_client *client,
34240                                         "gp2ap002", indio_dev);
34241         if (ret) {
34242                 dev_err(dev, "unable to request IRQ\n");
34243 -               goto out_disable_vio;
34244 +               goto out_put_pm;
34245         }
34246         gp2ap002->irq = client->irq;
34248 @@ -613,8 +613,9 @@ static int gp2ap002_probe(struct i2c_client *client,
34250         return 0;
34252 -out_disable_pm:
34253 +out_put_pm:
34254         pm_runtime_put_noidle(dev);
34255 +out_disable_pm:
34256         pm_runtime_disable(dev);
34257  out_disable_vio:
34258         regulator_disable(gp2ap002->vio);
34259 diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
34260 index 5bf2bfbc5379..6ce37819fb73 100644
34261 --- a/drivers/iio/light/tsl2563.c
34262 +++ b/drivers/iio/light/tsl2563.c
34263 @@ -271,11 +271,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
34264         default:
34265                 delay = 402;
34266         }
34267 -       /*
34268 -        * TODO: Make sure that we wait at least required delay but why we
34269 -        * have to extend it one tick more?
34270 -        */
34271 -       schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
34272 +       schedule_msec_hrtimeout_interruptible(delay + 1);
34275  static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
34276 diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
34277 index 0f787bfc88fc..c9d8f07a6fcd 100644
34278 --- a/drivers/iio/light/tsl2583.c
34279 +++ b/drivers/iio/light/tsl2583.c
34280 @@ -341,6 +341,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
34281                 return lux_val;
34282         }
34284 +       /* Avoid division by zero of lux_value later on */
34285 +       if (lux_val == 0) {
34286 +               dev_err(&chip->client->dev,
34287 +                       "%s: lux_val of 0 will produce out of range trim_value\n",
34288 +                       __func__);
34289 +               return -ENODATA;
34290 +       }
34292         gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
34293                         * chip->als_settings.als_gain_trim) / lux_val);
34294         if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
34295 diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
34296 index 5d4ffd66032e..74ad5701c6c2 100644
34297 --- a/drivers/iio/magnetometer/Kconfig
34298 +++ b/drivers/iio/magnetometer/Kconfig
34299 @@ -95,7 +95,6 @@ config MAG3110
34300  config HID_SENSOR_MAGNETOMETER_3D
34301         depends on HID_SENSOR_HUB
34302         select IIO_BUFFER
34303 -       select IIO_TRIGGERED_BUFFER
34304         select HID_SENSOR_IIO_COMMON
34305         select HID_SENSOR_IIO_TRIGGER
34306         tristate "HID Magenetometer 3D"
34307 diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
34308 index d46f23d82b3d..2f2f8cb3c26c 100644
34309 --- a/drivers/iio/magnetometer/yamaha-yas530.c
34310 +++ b/drivers/iio/magnetometer/yamaha-yas530.c
34311 @@ -32,13 +32,14 @@
34312  #include <linux/regmap.h>
34313  #include <linux/regulator/consumer.h>
34314  #include <linux/random.h>
34315 -#include <linux/unaligned/be_byteshift.h>
34317  #include <linux/iio/buffer.h>
34318  #include <linux/iio/iio.h>
34319  #include <linux/iio/trigger_consumer.h>
34320  #include <linux/iio/triggered_buffer.h>
34322 +#include <asm/unaligned.h>
34324  /* This register map covers YAS530 and YAS532 but differs in YAS 537 and YAS539 */
34325  #define YAS5XX_DEVICE_ID               0x80
34326  #define YAS5XX_ACTUATE_INIT_COIL       0x81
34327 @@ -887,6 +888,7 @@ static int yas5xx_probe(struct i2c_client *i2c,
34328                 strncpy(yas5xx->name, "yas532", sizeof(yas5xx->name));
34329                 break;
34330         default:
34331 +               ret = -ENODEV;
34332                 dev_err(dev, "unhandled device ID %02x\n", yas5xx->devid);
34333                 goto assert_reset;
34334         }
34335 diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
34336 index a505583cc2fd..396cbbb867f4 100644
34337 --- a/drivers/iio/orientation/Kconfig
34338 +++ b/drivers/iio/orientation/Kconfig
34339 @@ -9,7 +9,6 @@ menu "Inclinometer sensors"
34340  config HID_SENSOR_INCLINOMETER_3D
34341         depends on HID_SENSOR_HUB
34342         select IIO_BUFFER
34343 -       select IIO_TRIGGERED_BUFFER
34344         select HID_SENSOR_IIO_COMMON
34345         select HID_SENSOR_IIO_TRIGGER
34346         tristate "HID Inclinometer 3D"
34347 @@ -20,7 +19,6 @@ config HID_SENSOR_INCLINOMETER_3D
34348  config HID_SENSOR_DEVICE_ROTATION
34349         depends on HID_SENSOR_HUB
34350         select IIO_BUFFER
34351 -       select IIO_TRIGGERED_BUFFER
34352         select HID_SENSOR_IIO_COMMON
34353         select HID_SENSOR_IIO_TRIGGER
34354         tristate "HID Device Rotation"
34355 diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
34356 index 18e4ef060096..c087d8f72a54 100644
34357 --- a/drivers/iio/orientation/hid-sensor-rotation.c
34358 +++ b/drivers/iio/orientation/hid-sensor-rotation.c
34359 @@ -21,7 +21,7 @@ struct dev_rot_state {
34360         struct hid_sensor_common common_attributes;
34361         struct hid_sensor_hub_attribute_info quaternion;
34362         struct {
34363 -               u32 sampled_vals[4] __aligned(16);
34364 +               s32 sampled_vals[4] __aligned(16);
34365                 u64 timestamp __aligned(8);
34366         } scan;
34367         int scale_pre_decml;
34368 @@ -170,8 +170,15 @@ static int dev_rot_capture_sample(struct hid_sensor_hub_device *hsdev,
34369         struct dev_rot_state *rot_state = iio_priv(indio_dev);
34371         if (usage_id == HID_USAGE_SENSOR_ORIENT_QUATERNION) {
34372 -               memcpy(&rot_state->scan.sampled_vals, raw_data,
34373 -                      sizeof(rot_state->scan.sampled_vals));
34374 +               if (raw_len / 4 == sizeof(s16)) {
34375 +                       rot_state->scan.sampled_vals[0] = ((s16 *)raw_data)[0];
34376 +                       rot_state->scan.sampled_vals[1] = ((s16 *)raw_data)[1];
34377 +                       rot_state->scan.sampled_vals[2] = ((s16 *)raw_data)[2];
34378 +                       rot_state->scan.sampled_vals[3] = ((s16 *)raw_data)[3];
34379 +               } else {
34380 +                       memcpy(&rot_state->scan.sampled_vals, raw_data,
34381 +                              sizeof(rot_state->scan.sampled_vals));
34382 +               }
34384                 dev_dbg(&indio_dev->dev, "Recd Quat len:%zu::%zu\n", raw_len,
34385                         sizeof(rot_state->scan.sampled_vals));
34386 diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
34387 index 689b978db4f9..fc0d3cfca418 100644
34388 --- a/drivers/iio/pressure/Kconfig
34389 +++ b/drivers/iio/pressure/Kconfig
34390 @@ -79,7 +79,6 @@ config DPS310
34391  config HID_SENSOR_PRESS
34392         depends on HID_SENSOR_HUB
34393         select IIO_BUFFER
34394 -       select IIO_TRIGGERED_BUFFER
34395         select HID_SENSOR_IIO_COMMON
34396         select HID_SENSOR_IIO_TRIGGER
34397         tristate "HID PRESS"
34398 diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
34399 index c685f10b5ae4..cc206bfa09c7 100644
34400 --- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
34401 +++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
34402 @@ -160,6 +160,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
34403         ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
34404         if (ret < 0) {
34405                 dev_err(&client->dev, "cannot send start measurement command");
34406 +               pm_runtime_put_noidle(&client->dev);
34407                 return ret;
34408         }
34410 diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
34411 index 37fd0b65a014..ea82cfaf7f42 100644
34412 --- a/drivers/iio/proximity/sx9310.c
34413 +++ b/drivers/iio/proximity/sx9310.c
34414 @@ -763,7 +763,11 @@ static int sx9310_write_far_debounce(struct sx9310_data *data, int val)
34415         int ret;
34416         unsigned int regval;
34418 -       val = ilog2(val);
34419 +       if (val > 0)
34420 +               val = ilog2(val);
34421 +       if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val))
34422 +               return -EINVAL;
34424         regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val);
34426         mutex_lock(&data->mutex);
34427 @@ -780,7 +784,11 @@ static int sx9310_write_close_debounce(struct sx9310_data *data, int val)
34428         int ret;
34429         unsigned int regval;
34431 -       val = ilog2(val);
34432 +       if (val > 0)
34433 +               val = ilog2(val);
34434 +       if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val))
34435 +               return -EINVAL;
34437         regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val);
34439         mutex_lock(&data->mutex);
34440 @@ -1213,17 +1221,17 @@ static int sx9310_init_compensation(struct iio_dev *indio_dev)
34443  static const struct sx9310_reg_default *
34444 -sx9310_get_default_reg(struct sx9310_data *data, int i,
34445 +sx9310_get_default_reg(struct sx9310_data *data, int idx,
34446                        struct sx9310_reg_default *reg_def)
34448 -       int ret;
34449         const struct device_node *np = data->client->dev.of_node;
34450 -       u32 combined[SX9310_NUM_CHANNELS] = { 4, 4, 4, 4 };
34451 +       u32 combined[SX9310_NUM_CHANNELS];
34452 +       u32 start = 0, raw = 0, pos = 0;
34453         unsigned long comb_mask = 0;
34454 +       int ret, i, count;
34455         const char *res;
34456 -       u32 start = 0, raw = 0, pos = 0;
34458 -       memcpy(reg_def, &sx9310_default_regs[i], sizeof(*reg_def));
34459 +       memcpy(reg_def, &sx9310_default_regs[idx], sizeof(*reg_def));
34460         if (!np)
34461                 return reg_def;
34463 @@ -1234,15 +1242,31 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
34464                         reg_def->def |= SX9310_REG_PROX_CTRL2_SHIELDEN_GROUND;
34465                 }
34467 -               reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
34468 -               of_property_read_u32_array(np, "semtech,combined-sensors",
34469 -                                          combined, ARRAY_SIZE(combined));
34470 -               for (i = 0; i < ARRAY_SIZE(combined); i++) {
34471 -                       if (combined[i] <= SX9310_NUM_CHANNELS)
34472 -                               comb_mask |= BIT(combined[i]);
34473 +               count = of_property_count_elems_of_size(np, "semtech,combined-sensors",
34474 +                                                       sizeof(u32));
34475 +               if (count > 0 && count <= ARRAY_SIZE(combined)) {
34476 +                       ret = of_property_read_u32_array(np, "semtech,combined-sensors",
34477 +                                                        combined, count);
34478 +                       if (ret)
34479 +                               break;
34480 +               } else {
34481 +                       /*
34482 +                        * Either the property does not exist in the DT or the
34483 +                        * number of entries is incorrect.
34484 +                        */
34485 +                       break;
34486                 }
34487 +               for (i = 0; i < count; i++) {
34488 +                       if (combined[i] >= SX9310_NUM_CHANNELS) {
34489 +                               /* Invalid sensor (invalid DT). */
34490 +                               break;
34491 +                       }
34492 +                       comb_mask |= BIT(combined[i]);
34493 +               }
34494 +               if (i < count)
34495 +                       break;
34497 -               comb_mask &= 0xf;
34498 +               reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
34499                 if (comb_mask == (BIT(3) | BIT(2) | BIT(1) | BIT(0)))
34500                         reg_def->def |= SX9310_REG_PROX_CTRL2_COMBMODE_CS0_CS1_CS2_CS3;
34501                 else if (comb_mask == (BIT(1) | BIT(2)))
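The debounce fixes earlier in this file (the ilog2/FIELD_FIT hunks) rest on two details: ilog2(0) is undefined (it yields a meaningless value rather than trapping, hence the val > 0 guard), and FIELD_FIT()/FIELD_PREP() from linux/bitfield.h validate and place a value within a register field. Roughly, with a hypothetical 2-bit field:

	#include <linux/bits.h>
	#include <linux/bitfield.h>
	#include <linux/log2.h>

	#define DEBOUNCE_MASK	GENMASK(3, 2)	/* hypothetical 2-bit field */

	/* val is a sample count; the register wants log2(val), 0 = off */
	if (val > 0)
		val = ilog2(val);		/* never evaluate ilog2(0) */
	if (!FIELD_FIT(DEBOUNCE_MASK, val))	/* does the log2 fit in 2 bits? */
		return -EINVAL;
	regval = FIELD_PREP(DEBOUNCE_MASK, val); /* shift into bits 3:2 */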
34502 diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
34503 index f1f2a1499c9e..4df60082c1fa 100644
34504 --- a/drivers/iio/temperature/Kconfig
34505 +++ b/drivers/iio/temperature/Kconfig
34506 @@ -45,7 +45,6 @@ config HID_SENSOR_TEMP
34507         tristate "HID Environmental temperature sensor"
34508         depends on HID_SENSOR_HUB
34509         select IIO_BUFFER
34510 -       select IIO_TRIGGERED_BUFFER
34511         select HID_SENSOR_IIO_COMMON
34512         select HID_SENSOR_IIO_TRIGGER
34513         help
34514 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34515 index 3d194bb60840..6adbaea358ae 100644
34516 --- a/drivers/infiniband/core/cm.c
34517 +++ b/drivers/infiniband/core/cm.c
34518 @@ -2138,7 +2138,8 @@ static int cm_req_handler(struct cm_work *work)
34519                 goto destroy;
34520         }
34522 -       cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
34523 +       if (cm_id_priv->av.ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE)
34524 +               cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
34526         memset(&work->path[0], 0, sizeof(work->path[0]));
34527         if (cm_req_has_alt_path(req_msg))
34528 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
34529 index 94096511599f..5b9022a8c9ec 100644
34530 --- a/drivers/infiniband/core/cma.c
34531 +++ b/drivers/infiniband/core/cma.c
34532 @@ -463,7 +463,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
34533         id_priv->id.route.addr.dev_addr.transport =
34534                 rdma_node_get_transport(cma_dev->device->node_type);
34535         list_add_tail(&id_priv->list, &cma_dev->id_list);
34536 -       rdma_restrack_add(&id_priv->res);
34538         trace_cm_id_attach(id_priv, cma_dev->device);
34540 @@ -483,6 +482,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
34541         list_del(&id_priv->list);
34542         cma_dev_put(id_priv->cma_dev);
34543         id_priv->cma_dev = NULL;
34544 +       id_priv->id.device = NULL;
34545         if (id_priv->id.route.addr.dev_addr.sgid_attr) {
34546                 rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
34547                 id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
34548 @@ -700,6 +700,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
34549         mutex_lock(&lock);
34550         cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
34551         mutex_unlock(&lock);
34552 +       rdma_restrack_add(&id_priv->res);
34553         return 0;
34556 @@ -754,8 +755,10 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
34557         }
34559  out:
34560 -       if (!ret)
34561 +       if (!ret) {
34562                 cma_attach_to_dev(id_priv, cma_dev);
34563 +               rdma_restrack_add(&id_priv->res);
34564 +       }
34566         mutex_unlock(&lock);
34567         return ret;
34568 @@ -816,6 +819,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
34570  found:
34571         cma_attach_to_dev(id_priv, cma_dev);
34572 +       rdma_restrack_add(&id_priv->res);
34573         mutex_unlock(&lock);
34574         addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
34575         memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
34576 @@ -1861,6 +1865,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
34577                                 iw_destroy_cm_id(id_priv->cm_id.iw);
34578                 }
34579                 cma_leave_mc_groups(id_priv);
34580 +               rdma_restrack_del(&id_priv->res);
34581                 cma_release_dev(id_priv);
34582         }
34584 @@ -1874,7 +1879,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
34585         kfree(id_priv->id.route.path_rec);
34587         put_net(id_priv->id.route.addr.dev_addr.net);
34588 -       rdma_restrack_del(&id_priv->res);
34589         kfree(id_priv);
34592 @@ -2529,6 +2533,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
34593                rdma_addr_size(cma_src_addr(id_priv)));
34595         _cma_attach_to_dev(dev_id_priv, cma_dev);
34596 +       rdma_restrack_add(&dev_id_priv->res);
34597         cma_id_get(id_priv);
34598         dev_id_priv->internal_id = 1;
34599         dev_id_priv->afonly = id_priv->afonly;
34600 @@ -3169,6 +3174,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
34601         ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
34602         id_priv->id.port_num = p;
34603         cma_attach_to_dev(id_priv, cma_dev);
34604 +       rdma_restrack_add(&id_priv->res);
34605         cma_set_loopback(cma_src_addr(id_priv));
34606  out:
34607         mutex_unlock(&lock);
34608 @@ -3201,6 +3207,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
34609                 if (status)
34610                         pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
34611                                              status);
34612 +               rdma_restrack_add(&id_priv->res);
34613         } else if (status) {
34614                 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
34615         }
34616 @@ -3734,7 +3741,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
34617         }
34619         id_priv->backlog = backlog;
34620 -       if (id->device) {
34621 +       if (id_priv->cma_dev) {
34622                 if (rdma_cap_ib_cm(id->device, 1)) {
34623                         ret = cma_ib_listen(id_priv);
34624                         if (ret)
34625 @@ -3812,6 +3819,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
34626         if (ret)
34627                 goto err2;
34629 +       if (!cma_any_addr(addr))
34630 +               rdma_restrack_add(&id_priv->res);
34631         return 0;
34632  err2:
34633         if (id_priv->cma_dev)
34634 diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
34635 index 9ec6971056fa..049684880ae0 100644
34636 --- a/drivers/infiniband/core/uverbs_std_types_device.c
34637 +++ b/drivers/infiniband/core/uverbs_std_types_device.c
34638 @@ -117,8 +117,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
34639                 return ret;
34641         uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
34642 -       if (!uapi_object)
34643 -               return -EINVAL;
34644 +       if (IS_ERR(uapi_object))
34645 +               return PTR_ERR(uapi_object);
34647         handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
34648                                         out_len, &total);
34649 @@ -331,6 +331,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
34650         if (ret)
34651                 return ret;
34653 +       if (!user_entry_size)
34654 +               return -EINVAL;
34656         max_entries = uverbs_attr_ptr_get_array_size(
34657                 attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
34658                 user_entry_size);
34659 diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
34660 index 995d4633b0a1..d4d4959c2434 100644
34661 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
34662 +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
34663 @@ -2784,6 +2784,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
34664                 dev_err(&cq->hwq.pdev->dev,
34665                         "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
34666                         cqe_cons, rq->max_wqe);
34667 +               rc = -EINVAL;
34668                 goto done;
34669         }
34671 diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
34672 index fa7878336100..3ca47004b752 100644
34673 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
34674 +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
34675 @@ -854,6 +854,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
34677  unmap_io:
34678         pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
34679 +       dpit->dbr_bar_reg_iomem = NULL;
34680         return -ENOMEM;
34683 diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
34684 index 5c95c789f302..e800e8e8bed5 100644
34685 --- a/drivers/infiniband/hw/cxgb4/resource.c
34686 +++ b/drivers/infiniband/hw/cxgb4/resource.c
34687 @@ -216,7 +216,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
34688                         goto out;
34689                 entry->qid = qid;
34690                 list_add_tail(&entry->entry, &uctx->cqids);
34691 -               for (i = qid; i & rdev->qpmask; i++) {
34692 +               for (i = qid + 1; i & rdev->qpmask; i++) {
34693                         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
34694                         if (!entry)
34695                                 goto out;
34696 diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
34697 index 0e83d4b61e46..2cf102b5abd4 100644
34698 --- a/drivers/infiniband/hw/hfi1/firmware.c
34699 +++ b/drivers/infiniband/hw/hfi1/firmware.c
34700 @@ -1916,6 +1916,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
34701                         dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
34702                                    __func__, (ptr -
34703                                    (u32 *)dd->platform_config.data));
34704 +                       ret = -EINVAL;
34705                         goto bail;
34706                 }
34707                 /* Jump the CRC DWORD */
34708 diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
34709 index f650cac9d424..d30c23b6527a 100644
34710 --- a/drivers/infiniband/hw/hfi1/ipoib.h
34711 +++ b/drivers/infiniband/hw/hfi1/ipoib.h
34712 @@ -52,8 +52,9 @@ union hfi1_ipoib_flow {
34713   * @producer_lock: producer sync lock
34714   * @consumer_lock: consumer sync lock
34715   */
34716 +struct ipoib_txreq;
34717  struct hfi1_ipoib_circ_buf {
34718 -       void **items;
34719 +       struct ipoib_txreq **items;
34720         unsigned long head;
34721         unsigned long tail;
34722         unsigned long max_items;
34723 diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
34724 index edd4eeac8dd1..cdc26ee3cf52 100644
34725 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
34726 +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
34727 @@ -702,14 +702,14 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
34729         priv->tx_napis = kcalloc_node(dev->num_tx_queues,
34730                                       sizeof(struct napi_struct),
34731 -                                     GFP_ATOMIC,
34732 +                                     GFP_KERNEL,
34733                                       priv->dd->node);
34734         if (!priv->tx_napis)
34735                 goto free_txreq_cache;
34737         priv->txqs = kcalloc_node(dev->num_tx_queues,
34738                                   sizeof(struct hfi1_ipoib_txq),
34739 -                                 GFP_ATOMIC,
34740 +                                 GFP_KERNEL,
34741                                   priv->dd->node);
34742         if (!priv->txqs)
34743                 goto free_tx_napis;
34744 @@ -741,9 +741,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
34745                                              priv->dd->node);
34747                 txq->tx_ring.items =
34748 -                       vzalloc_node(array_size(tx_ring_size,
34749 -                                               sizeof(struct ipoib_txreq)),
34750 -                                    priv->dd->node);
34751 +                       kcalloc_node(tx_ring_size,
34752 +                                    sizeof(struct ipoib_txreq *),
34753 +                                    GFP_KERNEL, priv->dd->node);
34754                 if (!txq->tx_ring.items)
34755                         goto free_txqs;
34757 @@ -764,7 +764,7 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
34758                 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
34760                 netif_napi_del(txq->napi);
34761 -               vfree(txq->tx_ring.items);
34762 +               kfree(txq->tx_ring.items);
34763         }
34765         kfree(priv->txqs);
34766 @@ -817,7 +817,7 @@ void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
34767                 hfi1_ipoib_drain_tx_list(txq);
34768                 netif_napi_del(txq->napi);
34769                 (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
34770 -               vfree(txq->tx_ring.items);
34771 +               kfree(txq->tx_ring.items);
34772         }
34774         kfree(priv->txqs);
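
Two things change in the ipoib_tx.c hunks above: the allocations move from GFP_ATOMIC to GFP_KERNEL (this init path runs in a context that may sleep, so there is no reason to drain atomic reserves), and the ring is sized as an array of struct ipoib_txreq pointers rather than of the structs themselves, matching the items type change in ipoib.h. A userspace sketch of the sizing detail, with plain calloc() standing in for kcalloc_node():

#include <stdlib.h>

struct txreq {
        int sdma_status;
        /* ... the real struct is much larger ... */
};

struct circ_buf {
        struct txreq **items;   /* was void **items before the type change */
        unsigned long head, tail, max_items;
};

static int ring_init(struct circ_buf *r, unsigned long n)
{
        /* size each slot as a pointer, not as a whole struct txreq */
        r->items = calloc(n, sizeof(r->items[0]));
        if (!r->items)
                return -1;
        r->head = r->tail = 0;
        r->max_items = n;
        return 0;
}

int main(void)
{
        struct circ_buf ring;

        if (ring_init(&ring, 64))
                return 1;
        free(ring.items);
        return 0;
}
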
34775 diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
34776 index f3fb28e3d5d7..d213f65d4cdd 100644
34777 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c
34778 +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
34779 @@ -89,7 +89,7 @@ int hfi1_mmu_rb_register(void *ops_arg,
34780         struct mmu_rb_handler *h;
34781         int ret;
34783 -       h = kmalloc(sizeof(*h), GFP_KERNEL);
34784 +       h = kzalloc(sizeof(*h), GFP_KERNEL);
34785         if (!h)
34786                 return -ENOMEM;
34788 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
34789 index ce26f97b2ca2..ad3cee54140e 100644
34790 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
34791 +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
34792 @@ -5068,6 +5068,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
34793         qp_attr->cur_qp_state = qp_attr->qp_state;
34794         qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
34795         qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
34796 +       qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
34798         if (!ibqp->uobject) {
34799                 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
34800 diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
34801 index 53e5cd1a2bd6..146a4148219b 100644
34802 --- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
34803 +++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
34804 @@ -393,12 +393,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
34805         i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
34806                     pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
34807         pble_rsrc->unallocated_pble -= (chunk->size >> 3);
34808 -       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
34809         sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
34810                         sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
34811 -       if (sd_entry->valid)
34812 -               return 0;
34813 -       if (dev->is_pf) {
34814 +       if (dev->is_pf && !sd_entry->valid) {
34815                 ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
34816                                             sd_reg_val, idx->sd_idx,
34817                                             sd_entry->entry_type, true);
34818 @@ -409,6 +406,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
34819         }
34821         sd_entry->valid = true;
34822 +       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
34823         return 0;
34824   error:
34825         kfree(chunk);
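
The i40iw hunk above reorders add_pble_pool() so the chunk is linked onto pinfo.clist only after the SD entry has been programmed; previously a failure in i40iw_hmc_sd_one() left an already-listed chunk that the error path then kfree()d. The general publish-last shape, sketched in plain C under that reading of the change:

#include <stdlib.h>

struct chunk { struct chunk *next; };

static struct chunk *clist;     /* stands in for pinfo.clist */

static int program_hw(void) { return 0; /* imagine this can fail */ }

static int add_chunk(void)
{
        struct chunk *c = malloc(sizeof(*c));

        if (!c)
                return -1;
        if (program_hw()) {
                free(c);        /* safe: c was never published */
                return -1;
        }
        c->next = clist;        /* publish only after all fallible steps */
        clist = c;
        return 0;
}

int main(void)
{
        return add_chunk();
}
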
34826 diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
34827 index 07b8350929cd..81276b4247f8 100644
34828 --- a/drivers/infiniband/hw/mlx5/devx.c
34829 +++ b/drivers/infiniband/hw/mlx5/devx.c
34830 @@ -630,9 +630,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
34831         case UVERBS_OBJECT_QP:
34832         {
34833                 struct mlx5_ib_qp *qp = to_mqp(uobj->object);
34834 -               enum ib_qp_type qp_type = qp->ibqp.qp_type;
34836 -               if (qp_type == IB_QPT_RAW_PACKET ||
34837 +               if (qp->type == IB_QPT_RAW_PACKET ||
34838                     (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
34839                         struct mlx5_ib_raw_packet_qp *raw_packet_qp =
34840                                                          &qp->raw_packet_qp;
34841 @@ -649,10 +648,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
34842                                                sq->tisn) == obj_id);
34843                 }
34845 -               if (qp_type == MLX5_IB_QPT_DCT)
34846 +               if (qp->type == MLX5_IB_QPT_DCT)
34847                         return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
34848                                               qp->dct.mdct.mqp.qpn) == obj_id;
34850                 return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
34851                                       qp->ibqp.qp_num) == obj_id;
34852         }
34853 diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
34854 index 25da0b05b4e2..f0af3f1ae039 100644
34855 --- a/drivers/infiniband/hw/mlx5/fs.c
34856 +++ b/drivers/infiniband/hw/mlx5/fs.c
34857 @@ -1528,8 +1528,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
34858                 dst_num++;
34859         }
34861 -       handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
34862 -                                       flow_context, flow_act,
34863 +       handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
34864 +                                       fs_matcher, flow_context, flow_act,
34865                                         cmd_in, inlen, dst_num);
34867         if (IS_ERR(handler)) {
34868 @@ -1885,8 +1885,9 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
34869                 else
34870                         *dest_id = mqp->raw_packet_qp.rq.tirn;
34871                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
34872 -       } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
34873 -                  fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
34874 +       } else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
34875 +                   fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
34876 +                  !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
34877                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
34878         }
34880 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
34881 index 0d69a697d75f..59ffbbdda317 100644
34882 --- a/drivers/infiniband/hw/mlx5/main.c
34883 +++ b/drivers/infiniband/hw/mlx5/main.c
34884 @@ -499,7 +499,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
34885         translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
34886                                  &props->active_width, ext);
34888 -       if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) {
34889 +       if (!dev->is_rep && dev->mdev->roce.roce_en) {
34890                 u16 qkey_viol_cntr;
34892                 props->port_cap_flags |= IB_PORT_CM_SUP;
34893 @@ -4174,7 +4174,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
34895                 /* Register only for native ports */
34896                 err = mlx5_add_netdev_notifier(dev, port_num);
34897 -               if (err || dev->is_rep || !mlx5_is_roce_enabled(mdev))
34898 +               if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev))
34899                         /*
34900                          * We don't enable ETH interface for
34901                          * 1. IB representors
34902 @@ -4655,6 +4655,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
34904                 if (bound) {
34905                         rdma_roce_rescan_device(&dev->ib_dev);
34906 +                       mpi->ibdev->ib_active = true;
34907                         break;
34908                 }
34909         }
34910 @@ -4711,7 +4712,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
34911         dev->mdev = mdev;
34912         dev->num_ports = num_ports;
34914 -       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
34915 +       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
34916                 profile = &raw_eth_profile;
34917         else
34918                 profile = &pf_profile;
34919 diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
34920 index 88cc26e008fc..b085c02b53d0 100644
34921 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
34922 +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
34923 @@ -547,11 +547,6 @@ static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
34924         return container_of(wr, struct mlx5_umr_wr, wr);
34927 -struct mlx5_shared_mr_info {
34928 -       int mr_id;
34929 -       struct ib_umem          *umem;
34932  enum mlx5_ib_cq_pr_flags {
34933         MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
34934  };
34935 @@ -654,47 +649,69 @@ struct mlx5_ib_dm {
34936         atomic64_add(value, &((mr)->odp_stats.counter_name))
34938  struct mlx5_ib_mr {
34939 -       struct ib_mr            ibmr;
34940 -       void                    *descs;
34941 -       dma_addr_t              desc_map;
34942 -       int                     ndescs;
34943 -       int                     data_length;
34944 -       int                     meta_ndescs;
34945 -       int                     meta_length;
34946 -       int                     max_descs;
34947 -       int                     desc_size;
34948 -       int                     access_mode;
34949 -       unsigned int            page_shift;
34950 -       struct mlx5_core_mkey   mmkey;
34951 -       struct ib_umem         *umem;
34952 -       struct mlx5_shared_mr_info      *smr_info;
34953 -       struct list_head        list;
34954 -       struct mlx5_cache_ent  *cache_ent;
34955 -       u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
34956 -       struct mlx5_core_sig_ctx    *sig;
34957 -       void                    *descs_alloc;
34958 -       int                     access_flags; /* Needed for rereg MR */
34960 -       struct mlx5_ib_mr      *parent;
34961 -       /* Needed for IB_MR_TYPE_INTEGRITY */
34962 -       struct mlx5_ib_mr      *pi_mr;
34963 -       struct mlx5_ib_mr      *klm_mr;
34964 -       struct mlx5_ib_mr      *mtt_mr;
34965 -       u64                     data_iova;
34966 -       u64                     pi_iova;
34968 -       /* For ODP and implicit */
34969 -       struct xarray           implicit_children;
34970 -       union {
34971 -               struct list_head elm;
34972 -               struct work_struct work;
34973 -       } odp_destroy;
34974 -       struct ib_odp_counters  odp_stats;
34975 -       bool                    is_odp_implicit;
34976 +       struct ib_mr ibmr;
34977 +       struct mlx5_core_mkey mmkey;
34979 -       struct mlx5_async_work  cb_work;
34980 +       /* User MR data */
34981 +       struct mlx5_cache_ent *cache_ent;
34982 +       struct ib_umem *umem;
34984 +       /* This is zero'd when the MR is allocated */
34985 +       struct {
34986 +               /* Used only while the MR is in the cache */
34987 +               struct {
34988 +                       u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
34989 +                       struct mlx5_async_work cb_work;
34990 +                       /* Cache list element */
34991 +                       struct list_head list;
34992 +               };
34994 +               /* Used only by kernel MRs (umem == NULL) */
34995 +               struct {
34996 +                       void *descs;
34997 +                       void *descs_alloc;
34998 +                       dma_addr_t desc_map;
34999 +                       int max_descs;
35000 +                       int ndescs;
35001 +                       int desc_size;
35002 +                       int access_mode;
35004 +                       /* For Kernel IB_MR_TYPE_INTEGRITY */
35005 +                       struct mlx5_core_sig_ctx *sig;
35006 +                       struct mlx5_ib_mr *pi_mr;
35007 +                       struct mlx5_ib_mr *klm_mr;
35008 +                       struct mlx5_ib_mr *mtt_mr;
35009 +                       u64 data_iova;
35010 +                       u64 pi_iova;
35011 +                       int meta_ndescs;
35012 +                       int meta_length;
35013 +                       int data_length;
35014 +               };
35016 +               /* Used only by User MRs (umem != NULL) */
35017 +               struct {
35018 +                       unsigned int page_shift;
35019 +                       /* Current access_flags */
35020 +                       int access_flags;
35022 +                       /* For User ODP */
35023 +                       struct mlx5_ib_mr *parent;
35024 +                       struct xarray implicit_children;
35025 +                       union {
35026 +                               struct work_struct work;
35027 +                       } odp_destroy;
35028 +                       struct ib_odp_counters odp_stats;
35029 +                       bool is_odp_implicit;
35030 +               };
35031 +       };
35032  };
35034 +/* Zero the fields in the mr that are variant depending on usage */
35035 +static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
35037 +       memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
35040  static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
35042         return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
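
The mlx5_ib_mr rework above groups the usage-dependent fields into an anonymous union of structs placed after the invariant members (ibmr, mmkey, cache_ent, umem), so mlx5_clear_mr() can wipe the whole variant region with a single memset starting at the first union member. A compact model of the idiom, with illustrative field names rather than the mlx5 layout:

#include <stddef.h>
#include <string.h>
#include <assert.h>

struct mr {
        int ibmr_like;          /* invariant: survives reuse */
        int mmkey_like;         /* invariant */
        /* everything from here down is zeroed between uses */
        int out;                /* first variant member */
        int scratch[4];
};

static void clear_mr(struct mr *mr)
{
        memset(&mr->out, 0, sizeof(*mr) - offsetof(struct mr, out));
}

int main(void)
{
        struct mr mr = { .ibmr_like = 1, .mmkey_like = 2,
                         .out = 3, .scratch = { 4, 5, 6, 7 } };

        clear_mr(&mr);
        assert(mr.ibmr_like == 1 && mr.mmkey_like == 2);
        assert(mr.out == 0 && mr.scratch[3] == 0);
        return 0;
}
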
35043 diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
35044 index db05b0e0a8d7..ea8f068a6da3 100644
35045 --- a/drivers/infiniband/hw/mlx5/mr.c
35046 +++ b/drivers/infiniband/hw/mlx5/mr.c
35047 @@ -590,6 +590,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
35048                 ent->available_mrs--;
35049                 queue_adjust_cache_locked(ent);
35050                 spin_unlock_irq(&ent->lock);
35052 +               mlx5_clear_mr(mr);
35053         }
35054         mr->access_flags = access_flags;
35055         return mr;
35056 @@ -615,16 +617,14 @@ static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
35057                         ent->available_mrs--;
35058                         queue_adjust_cache_locked(ent);
35059                         spin_unlock_irq(&ent->lock);
35060 -                       break;
35061 +                       mlx5_clear_mr(mr);
35062 +                       return mr;
35063                 }
35064                 queue_adjust_cache_locked(ent);
35065                 spin_unlock_irq(&ent->lock);
35066         }
35068 -       if (!mr)
35069 -               req_ent->miss++;
35071 -       return mr;
35072 +       req_ent->miss++;
35073 +       return NULL;
35076  static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
35077 @@ -993,8 +993,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
35079         mr->ibmr.pd = pd;
35080         mr->umem = umem;
35081 -       mr->access_flags = access_flags;
35082 -       mr->desc_size = sizeof(struct mlx5_mtt);
35083         mr->mmkey.iova = iova;
35084         mr->mmkey.size = umem->length;
35085         mr->mmkey.pd = to_mpd(pd)->pdn;
35086 diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
35087 index b103555b1f5d..d98755e78362 100644
35088 --- a/drivers/infiniband/hw/mlx5/odp.c
35089 +++ b/drivers/infiniband/hw/mlx5/odp.c
35090 @@ -227,7 +227,6 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
35092         dma_fence_odp_mr(mr);
35094 -       mr->parent = NULL;
35095         mlx5_mr_cache_free(mr_to_mdev(mr), mr);
35096         ib_umem_odp_release(odp);
35098 diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
35099 index f5a52a6fae43..843f9e7fe96f 100644
35100 --- a/drivers/infiniband/hw/mlx5/qp.c
35101 +++ b/drivers/infiniband/hw/mlx5/qp.c
35102 @@ -3146,6 +3146,19 @@ enum {
35103         MLX5_PATH_FLAG_COUNTER  = 1 << 2,
35104  };
35106 +static int mlx5_to_ib_rate_map(u8 rate)
35108 +       static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
35109 +                                    IB_RATE_25_GBPS,      IB_RATE_100_GBPS,
35110 +                                    IB_RATE_200_GBPS,     IB_RATE_50_GBPS,
35111 +                                    IB_RATE_400_GBPS };
35113 +       if (rate < ARRAY_SIZE(rates))
35114 +               return rates[rate];
35116 +       return rate - MLX5_STAT_RATE_OFFSET;
35119  static int ib_to_mlx5_rate_map(u8 rate)
35121         switch (rate) {
35122 @@ -4485,7 +4498,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
35123         rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
35125         static_rate = MLX5_GET(ads, path, stat_rate);
35126 -       rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
35127 +       rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
35128         if (MLX5_GET(ads, path, grh) ||
35129             ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
35130                 rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
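
mlx5_to_ib_rate_map() above replaces the old "static_rate ? static_rate - 5 : 0" translation, which mis-decoded the low hardware encodings that do not follow the fixed offset: low values now go through a lookup table and everything past the table keeps the legacy MLX5_STAT_RATE_OFFSET arithmetic. A standalone sketch with placeholder constants in place of the IB_RATE_* enum:

#include <stdio.h>

#define MLX5_STAT_RATE_OFFSET 5

enum { RATE_PORT_CURRENT, RATE_56G, RATE_25G, RATE_100G,
       RATE_200G, RATE_50G, RATE_400G };

static int mlx5_to_ib_rate(unsigned int rate)
{
        static const int rates[] = { RATE_PORT_CURRENT, RATE_56G, RATE_25G,
                                     RATE_100G, RATE_200G, RATE_50G,
                                     RATE_400G };

        if (rate < sizeof(rates) / sizeof(rates[0]))
                return rates[rate];             /* table-driven low encodings */
        return rate - MLX5_STAT_RATE_OFFSET;    /* legacy fixed offset */
}

int main(void)
{
        printf("%d %d\n", mlx5_to_ib_rate(1), mlx5_to_ib_rate(9));
        return 0;
}
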
35131 diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
35132 index c4bc58736e48..1715fbe0719d 100644
35133 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
35134 +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
35135 @@ -636,8 +636,10 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35136         memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
35138         if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
35139 -                            &qp->iwarp_cm_flags))
35140 +                            &qp->iwarp_cm_flags)) {
35141 +               rc = -ENODEV;
35142                 goto err; /* QP already being destroyed */
35143 +       }
35145         rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
35146         if (rc) {
35147 diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
35148 index df0d173d6acb..da2e867a1ed9 100644
35149 --- a/drivers/infiniband/sw/rxe/rxe_av.c
35150 +++ b/drivers/infiniband/sw/rxe/rxe_av.c
35151 @@ -88,7 +88,7 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
35152                 type = RXE_NETWORK_TYPE_IPV4;
35153                 break;
35154         case RDMA_NETWORK_IPV6:
35155 -               type = RXE_NETWORK_TYPE_IPV4;
35156 +               type = RXE_NETWORK_TYPE_IPV6;
35157                 break;
35158         default:
35159                 /* not reached - checked in rxe_av_chk_attr */
35160 diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
35161 index 17a361b8dbb1..06b556169867 100644
35162 --- a/drivers/infiniband/sw/rxe/rxe_comp.c
35163 +++ b/drivers/infiniband/sw/rxe/rxe_comp.c
35164 @@ -345,14 +345,16 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
35166         ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
35167                         &wqe->dma, payload_addr(pkt),
35168 -                       payload_size(pkt), to_mem_obj, NULL);
35169 -       if (ret)
35170 +                       payload_size(pkt), to_mr_obj, NULL);
35171 +       if (ret) {
35172 +               wqe->status = IB_WC_LOC_PROT_ERR;
35173                 return COMPST_ERROR;
35174 +       }
35176         if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
35177                 return COMPST_COMP_ACK;
35178 -       else
35179 -               return COMPST_UPDATE_COMP;
35181 +       return COMPST_UPDATE_COMP;
35184  static inline enum comp_state do_atomic(struct rxe_qp *qp,
35185 @@ -365,11 +367,13 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
35187         ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
35188                         &wqe->dma, &atomic_orig,
35189 -                       sizeof(u64), to_mem_obj, NULL);
35190 -       if (ret)
35191 +                       sizeof(u64), to_mr_obj, NULL);
35192 +       if (ret) {
35193 +               wqe->status = IB_WC_LOC_PROT_ERR;
35194                 return COMPST_ERROR;
35195 -       else
35196 -               return COMPST_COMP_ACK;
35197 +       }
35199 +       return COMPST_COMP_ACK;
35202  static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
35203 diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
35204 index 0d758760b9ae..08e21fa9ec97 100644
35205 --- a/drivers/infiniband/sw/rxe/rxe_loc.h
35206 +++ b/drivers/infiniband/sw/rxe/rxe_loc.h
35207 @@ -72,40 +72,37 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
35209  /* rxe_mr.c */
35210  enum copy_direction {
35211 -       to_mem_obj,
35212 -       from_mem_obj,
35213 +       to_mr_obj,
35214 +       from_mr_obj,
35215  };
35217 -void rxe_mem_init_dma(struct rxe_pd *pd,
35218 -                     int access, struct rxe_mem *mem);
35219 +void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
35221 -int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
35222 -                     u64 length, u64 iova, int access, struct ib_udata *udata,
35223 -                     struct rxe_mem *mr);
35224 +int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
35225 +                    int access, struct ib_udata *udata, struct rxe_mr *mr);
35227 -int rxe_mem_init_fast(struct rxe_pd *pd,
35228 -                     int max_pages, struct rxe_mem *mem);
35229 +int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
35231 -int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
35232 -                int length, enum copy_direction dir, u32 *crcp);
35233 +int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
35234 +               enum copy_direction dir, u32 *crcp);
35236  int copy_data(struct rxe_pd *pd, int access,
35237               struct rxe_dma_info *dma, void *addr, int length,
35238               enum copy_direction dir, u32 *crcp);
35240 -void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
35241 +void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
35243  enum lookup_type {
35244         lookup_local,
35245         lookup_remote,
35246  };
35248 -struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
35249 -                          enum lookup_type type);
35250 +struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
35251 +                        enum lookup_type type);
35253 -int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
35254 +int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
35256 -void rxe_mem_cleanup(struct rxe_pool_entry *arg);
35257 +void rxe_mr_cleanup(struct rxe_pool_entry *arg);
35259  int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
35261 diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
35262 index 6e8c41567ba0..9f63947bab12 100644
35263 --- a/drivers/infiniband/sw/rxe/rxe_mr.c
35264 +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
35265 @@ -24,16 +24,15 @@ static u8 rxe_get_key(void)
35266         return key;
35269 -int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
35270 +int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
35272 -       switch (mem->type) {
35273 -       case RXE_MEM_TYPE_DMA:
35274 +       switch (mr->type) {
35275 +       case RXE_MR_TYPE_DMA:
35276                 return 0;
35278 -       case RXE_MEM_TYPE_MR:
35279 -               if (iova < mem->iova ||
35280 -                   length > mem->length ||
35281 -                   iova > mem->iova + mem->length - length)
35282 +       case RXE_MR_TYPE_MR:
35283 +               if (iova < mr->iova || length > mr->length ||
35284 +                   iova > mr->iova + mr->length - length)
35285                         return -EFAULT;
35286                 return 0;
35288 @@ -46,85 +45,83 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
35289                                 | IB_ACCESS_REMOTE_WRITE        \
35290                                 | IB_ACCESS_REMOTE_ATOMIC)
35292 -static void rxe_mem_init(int access, struct rxe_mem *mem)
35293 +static void rxe_mr_init(int access, struct rxe_mr *mr)
35295 -       u32 lkey = mem->pelem.index << 8 | rxe_get_key();
35296 +       u32 lkey = mr->pelem.index << 8 | rxe_get_key();
35297         u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
35299 -       mem->ibmr.lkey          = lkey;
35300 -       mem->ibmr.rkey          = rkey;
35301 -       mem->state              = RXE_MEM_STATE_INVALID;
35302 -       mem->type               = RXE_MEM_TYPE_NONE;
35303 -       mem->map_shift          = ilog2(RXE_BUF_PER_MAP);
35304 +       mr->ibmr.lkey = lkey;
35305 +       mr->ibmr.rkey = rkey;
35306 +       mr->state = RXE_MR_STATE_INVALID;
35307 +       mr->type = RXE_MR_TYPE_NONE;
35308 +       mr->map_shift = ilog2(RXE_BUF_PER_MAP);
35311 -void rxe_mem_cleanup(struct rxe_pool_entry *arg)
35312 +void rxe_mr_cleanup(struct rxe_pool_entry *arg)
35314 -       struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
35315 +       struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
35316         int i;
35318 -       ib_umem_release(mem->umem);
35319 +       ib_umem_release(mr->umem);
35321 -       if (mem->map) {
35322 -               for (i = 0; i < mem->num_map; i++)
35323 -                       kfree(mem->map[i]);
35324 +       if (mr->map) {
35325 +               for (i = 0; i < mr->num_map; i++)
35326 +                       kfree(mr->map[i]);
35328 -               kfree(mem->map);
35329 +               kfree(mr->map);
35330         }
35333 -static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
35334 +static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
35336         int i;
35337         int num_map;
35338 -       struct rxe_map **map = mem->map;
35339 +       struct rxe_map **map = mr->map;
35341         num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
35343 -       mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
35344 -       if (!mem->map)
35345 +       mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
35346 +       if (!mr->map)
35347                 goto err1;
35349         for (i = 0; i < num_map; i++) {
35350 -               mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
35351 -               if (!mem->map[i])
35352 +               mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
35353 +               if (!mr->map[i])
35354                         goto err2;
35355         }
35357         BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
35359 -       mem->map_shift  = ilog2(RXE_BUF_PER_MAP);
35360 -       mem->map_mask   = RXE_BUF_PER_MAP - 1;
35361 +       mr->map_shift = ilog2(RXE_BUF_PER_MAP);
35362 +       mr->map_mask = RXE_BUF_PER_MAP - 1;
35364 -       mem->num_buf = num_buf;
35365 -       mem->num_map = num_map;
35366 -       mem->max_buf = num_map * RXE_BUF_PER_MAP;
35367 +       mr->num_buf = num_buf;
35368 +       mr->num_map = num_map;
35369 +       mr->max_buf = num_map * RXE_BUF_PER_MAP;
35371         return 0;
35373  err2:
35374         for (i--; i >= 0; i--)
35375 -               kfree(mem->map[i]);
35376 +               kfree(mr->map[i]);
35378 -       kfree(mem->map);
35379 +       kfree(mr->map);
35380  err1:
35381         return -ENOMEM;
35384 -void rxe_mem_init_dma(struct rxe_pd *pd,
35385 -                     int access, struct rxe_mem *mem)
35386 +void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
35388 -       rxe_mem_init(access, mem);
35389 +       rxe_mr_init(access, mr);
35391 -       mem->ibmr.pd            = &pd->ibpd;
35392 -       mem->access             = access;
35393 -       mem->state              = RXE_MEM_STATE_VALID;
35394 -       mem->type               = RXE_MEM_TYPE_DMA;
35395 +       mr->ibmr.pd = &pd->ibpd;
35396 +       mr->access = access;
35397 +       mr->state = RXE_MR_STATE_VALID;
35398 +       mr->type = RXE_MR_TYPE_DMA;
35401 -int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
35402 -                     u64 length, u64 iova, int access, struct ib_udata *udata,
35403 -                     struct rxe_mem *mem)
35404 +int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
35405 +                    int access, struct ib_udata *udata, struct rxe_mr *mr)
35407         struct rxe_map          **map;
35408         struct rxe_phys_buf     *buf = NULL;
35409 @@ -142,23 +139,23 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
35410                 goto err1;
35411         }
35413 -       mem->umem = umem;
35414 +       mr->umem = umem;
35415         num_buf = ib_umem_num_pages(umem);
35417 -       rxe_mem_init(access, mem);
35418 +       rxe_mr_init(access, mr);
35420 -       err = rxe_mem_alloc(mem, num_buf);
35421 +       err = rxe_mr_alloc(mr, num_buf);
35422         if (err) {
35423 -               pr_warn("err %d from rxe_mem_alloc\n", err);
35424 +               pr_warn("err %d from rxe_mr_alloc\n", err);
35425                 ib_umem_release(umem);
35426                 goto err1;
35427         }
35429 -       mem->page_shift         = PAGE_SHIFT;
35430 -       mem->page_mask = PAGE_SIZE - 1;
35431 +       mr->page_shift = PAGE_SHIFT;
35432 +       mr->page_mask = PAGE_SIZE - 1;
35434         num_buf                 = 0;
35435 -       map                     = mem->map;
35436 +       map = mr->map;
35437         if (length > 0) {
35438                 buf = map[0]->buf;
35440 @@ -185,15 +182,15 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
35441                 }
35442         }
35444 -       mem->ibmr.pd            = &pd->ibpd;
35445 -       mem->umem               = umem;
35446 -       mem->access             = access;
35447 -       mem->length             = length;
35448 -       mem->iova               = iova;
35449 -       mem->va                 = start;
35450 -       mem->offset             = ib_umem_offset(umem);
35451 -       mem->state              = RXE_MEM_STATE_VALID;
35452 -       mem->type               = RXE_MEM_TYPE_MR;
35453 +       mr->ibmr.pd = &pd->ibpd;
35454 +       mr->umem = umem;
35455 +       mr->access = access;
35456 +       mr->length = length;
35457 +       mr->iova = iova;
35458 +       mr->va = start;
35459 +       mr->offset = ib_umem_offset(umem);
35460 +       mr->state = RXE_MR_STATE_VALID;
35461 +       mr->type = RXE_MR_TYPE_MR;
35463         return 0;
35465 @@ -201,24 +198,23 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
35466         return err;
35469 -int rxe_mem_init_fast(struct rxe_pd *pd,
35470 -                     int max_pages, struct rxe_mem *mem)
35471 +int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
35473         int err;
35475 -       rxe_mem_init(0, mem);
35476 +       rxe_mr_init(0, mr);
35478         /* In fastreg, we also set the rkey */
35479 -       mem->ibmr.rkey = mem->ibmr.lkey;
35480 +       mr->ibmr.rkey = mr->ibmr.lkey;
35482 -       err = rxe_mem_alloc(mem, max_pages);
35483 +       err = rxe_mr_alloc(mr, max_pages);
35484         if (err)
35485                 goto err1;
35487 -       mem->ibmr.pd            = &pd->ibpd;
35488 -       mem->max_buf            = max_pages;
35489 -       mem->state              = RXE_MEM_STATE_FREE;
35490 -       mem->type               = RXE_MEM_TYPE_MR;
35491 +       mr->ibmr.pd = &pd->ibpd;
35492 +       mr->max_buf = max_pages;
35493 +       mr->state = RXE_MR_STATE_FREE;
35494 +       mr->type = RXE_MR_TYPE_MR;
35496         return 0;
35498 @@ -226,28 +222,24 @@ int rxe_mem_init_fast(struct rxe_pd *pd,
35499         return err;
35502 -static void lookup_iova(
35503 -       struct rxe_mem  *mem,
35504 -       u64                     iova,
35505 -       int                     *m_out,
35506 -       int                     *n_out,
35507 -       size_t                  *offset_out)
35508 +static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
35509 +                       size_t *offset_out)
35511 -       size_t                  offset = iova - mem->iova + mem->offset;
35512 +       size_t offset = iova - mr->iova + mr->offset;
35513         int                     map_index;
35514         int                     buf_index;
35515         u64                     length;
35517 -       if (likely(mem->page_shift)) {
35518 -               *offset_out = offset & mem->page_mask;
35519 -               offset >>= mem->page_shift;
35520 -               *n_out = offset & mem->map_mask;
35521 -               *m_out = offset >> mem->map_shift;
35522 +       if (likely(mr->page_shift)) {
35523 +               *offset_out = offset & mr->page_mask;
35524 +               offset >>= mr->page_shift;
35525 +               *n_out = offset & mr->map_mask;
35526 +               *m_out = offset >> mr->map_shift;
35527         } else {
35528                 map_index = 0;
35529                 buf_index = 0;
35531 -               length = mem->map[map_index]->buf[buf_index].size;
35532 +               length = mr->map[map_index]->buf[buf_index].size;
35534                 while (offset >= length) {
35535                         offset -= length;
35536 @@ -257,7 +249,7 @@ static void lookup_iova(
35537                                 map_index++;
35538                                 buf_index = 0;
35539                         }
35540 -                       length = mem->map[map_index]->buf[buf_index].size;
35541 +                       length = mr->map[map_index]->buf[buf_index].size;
35542                 }
35544                 *m_out = map_index;
35545 @@ -266,49 +258,49 @@ static void lookup_iova(
35546         }
35549 -void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
35550 +void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
35552         size_t offset;
35553         int m, n;
35554         void *addr;
35556 -       if (mem->state != RXE_MEM_STATE_VALID) {
35557 -               pr_warn("mem not in valid state\n");
35558 +       if (mr->state != RXE_MR_STATE_VALID) {
35559 +               pr_warn("mr not in valid state\n");
35560                 addr = NULL;
35561                 goto out;
35562         }
35564 -       if (!mem->map) {
35565 +       if (!mr->map) {
35566                 addr = (void *)(uintptr_t)iova;
35567                 goto out;
35568         }
35570 -       if (mem_check_range(mem, iova, length)) {
35571 +       if (mr_check_range(mr, iova, length)) {
35572                 pr_warn("range violation\n");
35573                 addr = NULL;
35574                 goto out;
35575         }
35577 -       lookup_iova(mem, iova, &m, &n, &offset);
35578 +       lookup_iova(mr, iova, &m, &n, &offset);
35580 -       if (offset + length > mem->map[m]->buf[n].size) {
35581 +       if (offset + length > mr->map[m]->buf[n].size) {
35582                 pr_warn("crosses page boundary\n");
35583                 addr = NULL;
35584                 goto out;
35585         }
35587 -       addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
35588 +       addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
35590  out:
35591         return addr;
35594  /* copy data from a range (vaddr, vaddr+length-1) to or from
35595 - * a mem object starting at iova. Compute incremental value of
35596 - * crc32 if crcp is not zero. caller must hold a reference to mem
35597 + * a mr object starting at iova. Compute incremental value of
35598 + * crc32 if crcp is not zero. caller must hold a reference to mr
35599   */
35600 -int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
35601 -                enum copy_direction dir, u32 *crcp)
35602 +int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
35603 +               enum copy_direction dir, u32 *crcp)
35605         int                     err;
35606         int                     bytes;
35607 @@ -323,43 +315,41 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
35608         if (length == 0)
35609                 return 0;
35611 -       if (mem->type == RXE_MEM_TYPE_DMA) {
35612 +       if (mr->type == RXE_MR_TYPE_DMA) {
35613                 u8 *src, *dest;
35615 -               src  = (dir == to_mem_obj) ?
35616 -                       addr : ((void *)(uintptr_t)iova);
35617 +               src = (dir == to_mr_obj) ? addr : ((void *)(uintptr_t)iova);
35619 -               dest = (dir == to_mem_obj) ?
35620 -                       ((void *)(uintptr_t)iova) : addr;
35621 +               dest = (dir == to_mr_obj) ? ((void *)(uintptr_t)iova) : addr;
35623                 memcpy(dest, src, length);
35625                 if (crcp)
35626 -                       *crcp = rxe_crc32(to_rdev(mem->ibmr.device),
35627 -                                       *crcp, dest, length);
35628 +                       *crcp = rxe_crc32(to_rdev(mr->ibmr.device), *crcp, dest,
35629 +                                         length);
35631                 return 0;
35632         }
35634 -       WARN_ON_ONCE(!mem->map);
35635 +       WARN_ON_ONCE(!mr->map);
35637 -       err = mem_check_range(mem, iova, length);
35638 +       err = mr_check_range(mr, iova, length);
35639         if (err) {
35640                 err = -EFAULT;
35641                 goto err1;
35642         }
35644 -       lookup_iova(mem, iova, &m, &i, &offset);
35645 +       lookup_iova(mr, iova, &m, &i, &offset);
35647 -       map     = mem->map + m;
35648 +       map = mr->map + m;
35649         buf     = map[0]->buf + i;
35651         while (length > 0) {
35652                 u8 *src, *dest;
35654                 va      = (u8 *)(uintptr_t)buf->addr + offset;
35655 -               src  = (dir == to_mem_obj) ? addr : va;
35656 -               dest = (dir == to_mem_obj) ? va : addr;
35657 +               src = (dir == to_mr_obj) ? addr : va;
35658 +               dest = (dir == to_mr_obj) ? va : addr;
35660                 bytes   = buf->size - offset;
35662 @@ -369,8 +359,8 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
35663                 memcpy(dest, src, bytes);
35665                 if (crcp)
35666 -                       crc = rxe_crc32(to_rdev(mem->ibmr.device),
35667 -                                       crc, dest, bytes);
35668 +                       crc = rxe_crc32(to_rdev(mr->ibmr.device), crc, dest,
35669 +                                       bytes);
35671                 length  -= bytes;
35672                 addr    += bytes;
35673 @@ -411,7 +401,7 @@ int copy_data(
35674         struct rxe_sge          *sge    = &dma->sge[dma->cur_sge];
35675         int                     offset  = dma->sge_offset;
35676         int                     resid   = dma->resid;
35677 -       struct rxe_mem          *mem    = NULL;
35678 +       struct rxe_mr           *mr     = NULL;
35679         u64                     iova;
35680         int                     err;
35682 @@ -424,8 +414,8 @@ int copy_data(
35683         }
35685         if (sge->length && (offset < sge->length)) {
35686 -               mem = lookup_mem(pd, access, sge->lkey, lookup_local);
35687 -               if (!mem) {
35688 +               mr = lookup_mr(pd, access, sge->lkey, lookup_local);
35689 +               if (!mr) {
35690                         err = -EINVAL;
35691                         goto err1;
35692                 }
35693 @@ -435,9 +425,9 @@ int copy_data(
35694                 bytes = length;
35696                 if (offset >= sge->length) {
35697 -                       if (mem) {
35698 -                               rxe_drop_ref(mem);
35699 -                               mem = NULL;
35700 +                       if (mr) {
35701 +                               rxe_drop_ref(mr);
35702 +                               mr = NULL;
35703                         }
35704                         sge++;
35705                         dma->cur_sge++;
35706 @@ -449,9 +439,9 @@ int copy_data(
35707                         }
35709                         if (sge->length) {
35710 -                               mem = lookup_mem(pd, access, sge->lkey,
35711 -                                                lookup_local);
35712 -                               if (!mem) {
35713 +                               mr = lookup_mr(pd, access, sge->lkey,
35714 +                                              lookup_local);
35715 +                               if (!mr) {
35716                                         err = -EINVAL;
35717                                         goto err1;
35718                                 }
35719 @@ -466,7 +456,7 @@ int copy_data(
35720                 if (bytes > 0) {
35721                         iova = sge->addr + offset;
35723 -                       err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
35724 +                       err = rxe_mr_copy(mr, iova, addr, bytes, dir, crcp);
35725                         if (err)
35726                                 goto err2;
35728 @@ -480,14 +470,14 @@ int copy_data(
35729         dma->sge_offset = offset;
35730         dma->resid      = resid;
35732 -       if (mem)
35733 -               rxe_drop_ref(mem);
35734 +       if (mr)
35735 +               rxe_drop_ref(mr);
35737         return 0;
35739  err2:
35740 -       if (mem)
35741 -               rxe_drop_ref(mem);
35742 +       if (mr)
35743 +               rxe_drop_ref(mr);
35744  err1:
35745         return err;
35747 @@ -525,31 +515,30 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
35748         return 0;
35751 -/* (1) find the mem (mr or mw) corresponding to lkey/rkey
35752 +/* (1) find the mr corresponding to lkey/rkey
35753   *     depending on lookup_type
35754 - * (2) verify that the (qp) pd matches the mem pd
35755 - * (3) verify that the mem can support the requested access
35756 - * (4) verify that mem state is valid
35757 + * (2) verify that the (qp) pd matches the mr pd
35758 + * (3) verify that the mr can support the requested access
35759 + * (4) verify that mr state is valid
35760   */
35761 -struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
35762 -                          enum lookup_type type)
35763 +struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
35764 +                        enum lookup_type type)
35766 -       struct rxe_mem *mem;
35767 +       struct rxe_mr *mr;
35768         struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
35769         int index = key >> 8;
35771 -       mem = rxe_pool_get_index(&rxe->mr_pool, index);
35772 -       if (!mem)
35773 +       mr = rxe_pool_get_index(&rxe->mr_pool, index);
35774 +       if (!mr)
35775                 return NULL;
35777 -       if (unlikely((type == lookup_local && mr_lkey(mem) != key) ||
35778 -                    (type == lookup_remote && mr_rkey(mem) != key) ||
35779 -                    mr_pd(mem) != pd ||
35780 -                    (access && !(access & mem->access)) ||
35781 -                    mem->state != RXE_MEM_STATE_VALID)) {
35782 -               rxe_drop_ref(mem);
35783 -               mem = NULL;
35784 +       if (unlikely((type == lookup_local && mr_lkey(mr) != key) ||
35785 +                    (type == lookup_remote && mr_rkey(mr) != key) ||
35786 +                    mr_pd(mr) != pd || (access && !(access & mr->access)) ||
35787 +                    mr->state != RXE_MR_STATE_VALID)) {
35788 +               rxe_drop_ref(mr);
35789 +               mr = NULL;
35790         }
35792 -       return mem;
35793 +       return mr;
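
Beyond the mechanical rxe_mem to rxe_mr rename, lookup_mr() above depends on rxe's key layout: rxe_mr_init() builds the lkey as pool index << 8 | key byte, so the pool lookup recovers the index with key >> 8 and the subsequent mr_lkey()/mr_rkey() comparison rejects a stale low byte. A few lines to make the layout concrete:

#include <stdio.h>
#include <stdint.h>

static uint32_t make_lkey(uint32_t index, uint8_t key)
{
        return index << 8 | key;        /* mirrors rxe_mr_init() */
}

int main(void)
{
        uint32_t lkey = make_lkey(42, 0x7f);
        uint32_t index = lkey >> 8;     /* what lookup_mr() feeds the pool */

        printf("index=%u key=0x%02x\n",
               (unsigned int)index, (unsigned int)(lkey & 0xff));
        return index == 42 ? 0 : 1;
}
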
35795 diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
35796 index 307d8986e7c9..d24901f2af3f 100644
35797 --- a/drivers/infiniband/sw/rxe/rxe_pool.c
35798 +++ b/drivers/infiniband/sw/rxe/rxe_pool.c
35799 @@ -8,8 +8,6 @@
35800  #include "rxe_loc.h"
35802  /* info about object pools
35803 - * note that mr and mw share a single index space
35804 - * so that one can map an lkey to the correct type of object
35805   */
35806  struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
35807         [RXE_TYPE_UC] = {
35808 @@ -56,18 +54,18 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
35809         },
35810         [RXE_TYPE_MR] = {
35811                 .name           = "rxe-mr",
35812 -               .size           = sizeof(struct rxe_mem),
35813 -               .elem_offset    = offsetof(struct rxe_mem, pelem),
35814 -               .cleanup        = rxe_mem_cleanup,
35815 +               .size           = sizeof(struct rxe_mr),
35816 +               .elem_offset    = offsetof(struct rxe_mr, pelem),
35817 +               .cleanup        = rxe_mr_cleanup,
35818                 .flags          = RXE_POOL_INDEX,
35819                 .max_index      = RXE_MAX_MR_INDEX,
35820                 .min_index      = RXE_MIN_MR_INDEX,
35821         },
35822         [RXE_TYPE_MW] = {
35823                 .name           = "rxe-mw",
35824 -               .size           = sizeof(struct rxe_mem),
35825 -               .elem_offset    = offsetof(struct rxe_mem, pelem),
35826 -               .flags          = RXE_POOL_INDEX,
35827 +               .size           = sizeof(struct rxe_mw),
35828 +               .elem_offset    = offsetof(struct rxe_mw, pelem),
35829 +               .flags          = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
35830                 .max_index      = RXE_MAX_MW_INDEX,
35831                 .min_index      = RXE_MIN_MW_INDEX,
35832         },
35833 diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
35834 index 34ae957a315c..b0f350d674fd 100644
35835 --- a/drivers/infiniband/sw/rxe/rxe_qp.c
35836 +++ b/drivers/infiniband/sw/rxe/rxe_qp.c
35837 @@ -242,6 +242,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
35838         if (err) {
35839                 vfree(qp->sq.queue->buf);
35840                 kfree(qp->sq.queue);
35841 +               qp->sq.queue = NULL;
35842                 return err;
35843         }
35845 @@ -295,6 +296,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
35846                 if (err) {
35847                         vfree(qp->rq.queue->buf);
35848                         kfree(qp->rq.queue);
35849 +                       qp->rq.queue = NULL;
35850                         return err;
35851                 }
35852         }
35853 @@ -355,6 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
35854  err2:
35855         rxe_queue_cleanup(qp->sq.queue);
35856  err1:
35857 +       qp->pd = NULL;
35858 +       qp->rcq = NULL;
35859 +       qp->scq = NULL;
35860 +       qp->srq = NULL;
35862         if (srq)
35863                 rxe_drop_ref(srq);
35864         rxe_drop_ref(scq);
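
The rxe_qp.c additions above clear qp->sq.queue / qp->rq.queue after freeing, and NULL the pd/rcq/scq/srq pointers on the error path, presumably because the later common cleanup walks the same fields and would otherwise free or drop references through dangling pointers when QP setup fails partway. The defensive shape in miniature:

#include <stdlib.h>

struct queue { void *buf; };

static void destroy_queue(struct queue **q)
{
        if (!*q)
                return;         /* already torn down: nothing to do */
        free((*q)->buf);
        free(*q);
        *q = NULL;              /* the added lines' effect */
}

int main(void)
{
        struct queue *q = calloc(1, sizeof(*q));

        if (!q)
                return 1;
        q->buf = malloc(16);
        destroy_queue(&q);      /* error-path teardown */
        destroy_queue(&q);      /* later common cleanup: now harmless */
        return 0;
}
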
35865 diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
35866 index 889290793d75..3664cdae7e1f 100644
35867 --- a/drivers/infiniband/sw/rxe/rxe_req.c
35868 +++ b/drivers/infiniband/sw/rxe/rxe_req.c
35869 @@ -464,7 +464,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
35870                 } else {
35871                         err = copy_data(qp->pd, 0, &wqe->dma,
35872                                         payload_addr(pkt), paylen,
35873 -                                       from_mem_obj,
35874 +                                       from_mr_obj,
35875                                         &crc);
35876                         if (err)
35877                                 return err;
35878 @@ -596,7 +596,7 @@ int rxe_requester(void *arg)
35879         if (wqe->mask & WR_REG_MASK) {
35880                 if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
35881                         struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
35882 -                       struct rxe_mem *rmr;
35883 +                       struct rxe_mr *rmr;
35885                         rmr = rxe_pool_get_index(&rxe->mr_pool,
35886                                                  wqe->wr.ex.invalidate_rkey >> 8);
35887 @@ -607,14 +607,14 @@ int rxe_requester(void *arg)
35888                                 wqe->status = IB_WC_MW_BIND_ERR;
35889                                 goto exit;
35890                         }
35891 -                       rmr->state = RXE_MEM_STATE_FREE;
35892 +                       rmr->state = RXE_MR_STATE_FREE;
35893                         rxe_drop_ref(rmr);
35894                         wqe->state = wqe_state_done;
35895                         wqe->status = IB_WC_SUCCESS;
35896                 } else if (wqe->wr.opcode == IB_WR_REG_MR) {
35897 -                       struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
35898 +                       struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
35900 -                       rmr->state = RXE_MEM_STATE_VALID;
35901 +                       rmr->state = RXE_MR_STATE_VALID;
35902                         rmr->access = wqe->wr.wr.reg.access;
35903                         rmr->ibmr.lkey = wqe->wr.wr.reg.key;
35904                         rmr->ibmr.rkey = wqe->wr.wr.reg.key;
35905 diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
35906 index 142f3d8014d8..8e237b623b31 100644
35907 --- a/drivers/infiniband/sw/rxe/rxe_resp.c
35908 +++ b/drivers/infiniband/sw/rxe/rxe_resp.c
35909 @@ -391,7 +391,7 @@ static enum resp_states check_length(struct rxe_qp *qp,
35910  static enum resp_states check_rkey(struct rxe_qp *qp,
35911                                    struct rxe_pkt_info *pkt)
35913 -       struct rxe_mem *mem = NULL;
35914 +       struct rxe_mr *mr = NULL;
35915         u64 va;
35916         u32 rkey;
35917         u32 resid;
35918 @@ -430,18 +430,18 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
35919         resid   = qp->resp.resid;
35920         pktlen  = payload_size(pkt);
35922 -       mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
35923 -       if (!mem) {
35924 +       mr = lookup_mr(qp->pd, access, rkey, lookup_remote);
35925 +       if (!mr) {
35926                 state = RESPST_ERR_RKEY_VIOLATION;
35927                 goto err;
35928         }
35930 -       if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
35931 +       if (unlikely(mr->state == RXE_MR_STATE_FREE)) {
35932                 state = RESPST_ERR_RKEY_VIOLATION;
35933                 goto err;
35934         }
35936 -       if (mem_check_range(mem, va, resid)) {
35937 +       if (mr_check_range(mr, va, resid)) {
35938                 state = RESPST_ERR_RKEY_VIOLATION;
35939                 goto err;
35940         }
35941 @@ -469,12 +469,12 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
35943         WARN_ON_ONCE(qp->resp.mr);
35945 -       qp->resp.mr = mem;
35946 +       qp->resp.mr = mr;
35947         return RESPST_EXECUTE;
35949  err:
35950 -       if (mem)
35951 -               rxe_drop_ref(mem);
35952 +       if (mr)
35953 +               rxe_drop_ref(mr);
35954         return state;
35957 @@ -484,7 +484,7 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
35958         int err;
35960         err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
35961 -                       data_addr, data_len, to_mem_obj, NULL);
35962 +                       data_addr, data_len, to_mr_obj, NULL);
35963         if (unlikely(err))
35964                 return (err == -ENOSPC) ? RESPST_ERR_LENGTH
35965                                         : RESPST_ERR_MALFORMED_WQE;
35966 @@ -499,8 +499,8 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
35967         int     err;
35968         int data_len = payload_size(pkt);
35970 -       err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
35971 -                          data_len, to_mem_obj, NULL);
35972 +       err = rxe_mr_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt), data_len,
35973 +                         to_mr_obj, NULL);
35974         if (err) {
35975                 rc = RESPST_ERR_RKEY_VIOLATION;
35976                 goto out;
35977 @@ -522,9 +522,9 @@ static enum resp_states process_atomic(struct rxe_qp *qp,
35978         u64 iova = atmeth_va(pkt);
35979         u64 *vaddr;
35980         enum resp_states ret;
35981 -       struct rxe_mem *mr = qp->resp.mr;
35982 +       struct rxe_mr *mr = qp->resp.mr;
35984 -       if (mr->state != RXE_MEM_STATE_VALID) {
35985 +       if (mr->state != RXE_MR_STATE_VALID) {
35986                 ret = RESPST_ERR_RKEY_VIOLATION;
35987                 goto out;
35988         }
35989 @@ -700,8 +700,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
35990         if (!skb)
35991                 return RESPST_ERR_RNR;
35993 -       err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
35994 -                          payload, from_mem_obj, &icrc);
35995 +       err = rxe_mr_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
35996 +                         payload, from_mr_obj, &icrc);
35997         if (err)
35998                 pr_err("Failed copying memory\n");
36000 @@ -883,7 +883,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
36001                         }
36003                         if (pkt->mask & RXE_IETH_MASK) {
36004 -                               struct rxe_mem *rmr;
36005 +                               struct rxe_mr *rmr;
36007                                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
36008                                 wc->ex.invalidate_rkey = ieth_rkey(pkt);
36009 @@ -895,7 +895,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
36010                                                wc->ex.invalidate_rkey);
36011                                         return RESPST_ERROR;
36012                                 }
36013 -                               rmr->state = RXE_MEM_STATE_FREE;
36014 +                               rmr->state = RXE_MR_STATE_FREE;
36015                                 rxe_drop_ref(rmr);
36016                         }
36018 diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
36019 index dee5e0e919d2..38249c1a76a8 100644
36020 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
36021 +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
36022 @@ -865,7 +865,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
36024         struct rxe_dev *rxe = to_rdev(ibpd->device);
36025         struct rxe_pd *pd = to_rpd(ibpd);
36026 -       struct rxe_mem *mr;
36027 +       struct rxe_mr *mr;
36029         mr = rxe_alloc(&rxe->mr_pool);
36030         if (!mr)
36031 @@ -873,7 +873,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
36033         rxe_add_index(mr);
36034         rxe_add_ref(pd);
36035 -       rxe_mem_init_dma(pd, access, mr);
36036 +       rxe_mr_init_dma(pd, access, mr);
36038         return &mr->ibmr;
36040 @@ -887,7 +887,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
36041         int err;
36042         struct rxe_dev *rxe = to_rdev(ibpd->device);
36043         struct rxe_pd *pd = to_rpd(ibpd);
36044 -       struct rxe_mem *mr;
36045 +       struct rxe_mr *mr;
36047         mr = rxe_alloc(&rxe->mr_pool);
36048         if (!mr) {
36049 @@ -899,8 +899,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
36051         rxe_add_ref(pd);
36053 -       err = rxe_mem_init_user(pd, start, length, iova,
36054 -                               access, udata, mr);
36055 +       err = rxe_mr_init_user(pd, start, length, iova, access, udata, mr);
36056         if (err)
36057                 goto err3;
36059 @@ -916,9 +915,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
36061  static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
36063 -       struct rxe_mem *mr = to_rmr(ibmr);
36064 +       struct rxe_mr *mr = to_rmr(ibmr);
36066 -       mr->state = RXE_MEM_STATE_ZOMBIE;
36067 +       mr->state = RXE_MR_STATE_ZOMBIE;
36068         rxe_drop_ref(mr_pd(mr));
36069         rxe_drop_index(mr);
36070         rxe_drop_ref(mr);
36071 @@ -930,7 +929,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
36073         struct rxe_dev *rxe = to_rdev(ibpd->device);
36074         struct rxe_pd *pd = to_rpd(ibpd);
36075 -       struct rxe_mem *mr;
36076 +       struct rxe_mr *mr;
36077         int err;
36079         if (mr_type != IB_MR_TYPE_MEM_REG)
36080 @@ -946,7 +945,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
36082         rxe_add_ref(pd);
36084 -       err = rxe_mem_init_fast(pd, max_num_sg, mr);
36085 +       err = rxe_mr_init_fast(pd, max_num_sg, mr);
36086         if (err)
36087                 goto err2;
36089 @@ -962,7 +961,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
36091  static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
36093 -       struct rxe_mem *mr = to_rmr(ibmr);
36094 +       struct rxe_mr *mr = to_rmr(ibmr);
36095         struct rxe_map *map;
36096         struct rxe_phys_buf *buf;
36098 @@ -982,7 +981,7 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
36099  static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
36100                          int sg_nents, unsigned int *sg_offset)
36102 -       struct rxe_mem *mr = to_rmr(ibmr);
36103 +       struct rxe_mr *mr = to_rmr(ibmr);
36104         int n;
36106         mr->nbuf = 0;
36107 @@ -1110,6 +1109,7 @@ static const struct ib_device_ops rxe_dev_ops = {
36108         INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
36109         INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
36110         INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
36111 +       INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
36112  };
36114  int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
36115 diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
36116 index 79e0a5a878da..11eba7a3ba8f 100644
36117 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h
36118 +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
36119 @@ -156,7 +156,7 @@ struct resp_res {
36120                         struct sk_buff  *skb;
36121                 } atomic;
36122                 struct {
36123 -                       struct rxe_mem  *mr;
36124 +                       struct rxe_mr   *mr;
36125                         u64             va_org;
36126                         u32             rkey;
36127                         u32             length;
36128 @@ -183,7 +183,7 @@ struct rxe_resp_info {
36130         /* RDMA read / atomic only */
36131         u64                     va;
36132 -       struct rxe_mem          *mr;
36133 +       struct rxe_mr           *mr;
36134         u32                     resid;
36135         u32                     rkey;
36136         u32                     length;
36137 @@ -262,18 +262,18 @@ struct rxe_qp {
36138         struct execute_work     cleanup_work;
36139  };
36141 -enum rxe_mem_state {
36142 -       RXE_MEM_STATE_ZOMBIE,
36143 -       RXE_MEM_STATE_INVALID,
36144 -       RXE_MEM_STATE_FREE,
36145 -       RXE_MEM_STATE_VALID,
36146 +enum rxe_mr_state {
36147 +       RXE_MR_STATE_ZOMBIE,
36148 +       RXE_MR_STATE_INVALID,
36149 +       RXE_MR_STATE_FREE,
36150 +       RXE_MR_STATE_VALID,
36151  };
36153 -enum rxe_mem_type {
36154 -       RXE_MEM_TYPE_NONE,
36155 -       RXE_MEM_TYPE_DMA,
36156 -       RXE_MEM_TYPE_MR,
36157 -       RXE_MEM_TYPE_MW,
36158 +enum rxe_mr_type {
36159 +       RXE_MR_TYPE_NONE,
36160 +       RXE_MR_TYPE_DMA,
36161 +       RXE_MR_TYPE_MR,
36162 +       RXE_MR_TYPE_MW,
36163  };
36165  #define RXE_BUF_PER_MAP                (PAGE_SIZE / sizeof(struct rxe_phys_buf))
36166 @@ -287,17 +287,14 @@ struct rxe_map {
36167         struct rxe_phys_buf     buf[RXE_BUF_PER_MAP];
36168  };
36170 -struct rxe_mem {
36171 +struct rxe_mr {
36172         struct rxe_pool_entry   pelem;
36173 -       union {
36174 -               struct ib_mr            ibmr;
36175 -               struct ib_mw            ibmw;
36176 -       };
36177 +       struct ib_mr            ibmr;
36179         struct ib_umem          *umem;
36181 -       enum rxe_mem_state      state;
36182 -       enum rxe_mem_type       type;
36183 +       enum rxe_mr_state       state;
36184 +       enum rxe_mr_type        type;
36185         u64                     va;
36186         u64                     iova;
36187         size_t                  length;
36188 @@ -318,6 +315,17 @@ struct rxe_mem {
36189         struct rxe_map          **map;
36190  };
36192 +enum rxe_mw_state {
36193 +       RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
36194 +       RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
36195 +       RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
36198 +struct rxe_mw {
36199 +       struct ib_mw ibmw;
36200 +       struct rxe_pool_entry pelem;
36203  struct rxe_mc_grp {
36204         struct rxe_pool_entry   pelem;
36205         spinlock_t              mcg_lock; /* guard group */
36206 @@ -422,27 +430,27 @@ static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
36207         return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
36210 -static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
36211 +static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
36213 -       return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
36214 +       return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
36217 -static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
36218 +static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
36220 -       return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
36221 +       return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
36224 -static inline struct rxe_pd *mr_pd(struct rxe_mem *mr)
36225 +static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
36227         return to_rpd(mr->ibmr.pd);
36230 -static inline u32 mr_lkey(struct rxe_mem *mr)
36231 +static inline u32 mr_lkey(struct rxe_mr *mr)
36233         return mr->ibmr.lkey;
36236 -static inline u32 mr_rkey(struct rxe_mem *mr)
36237 +static inline u32 mr_rkey(struct rxe_mr *mr)
36239         return mr->ibmr.rkey;
36241 diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
36242 index 34a910cf0edb..61c17db70d65 100644
36243 --- a/drivers/infiniband/sw/siw/siw_mem.c
36244 +++ b/drivers/infiniband/sw/siw/siw_mem.c
36245 @@ -106,8 +106,6 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
36246         mem->perms = rights & IWARP_ACCESS_MASK;
36247         kref_init(&mem->ref);
36249 -       mr->mem = mem;
36251         get_random_bytes(&next, 4);
36252         next &= 0x00ffffff;
36254 @@ -116,6 +114,8 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
36255                 kfree(mem);
36256                 return -ENOMEM;
36257         }
36259 +       mr->mem = mem;
36260         /* Set the STag index part */
36261         mem->stag = id << 8;
36262         mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;
36263 diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
36264 index e389d44e5591..8a00c06e5f56 100644
36265 --- a/drivers/infiniband/sw/siw/siw_verbs.c
36266 +++ b/drivers/infiniband/sw/siw/siw_verbs.c
36267 @@ -300,7 +300,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
36268         struct siw_ucontext *uctx =
36269                 rdma_udata_to_drv_context(udata, struct siw_ucontext,
36270                                           base_ucontext);
36271 -       struct siw_cq *scq = NULL, *rcq = NULL;
36272         unsigned long flags;
36273         int num_sqe, num_rqe, rv = 0;
36274         size_t length;
36275 @@ -343,10 +342,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
36276                 rv = -EINVAL;
36277                 goto err_out;
36278         }
36279 -       scq = to_siw_cq(attrs->send_cq);
36280 -       rcq = to_siw_cq(attrs->recv_cq);
36282 -       if (!scq || (!rcq && !attrs->srq)) {
36283 +       if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
36284                 siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
36285                 rv = -EINVAL;
36286                 goto err_out;
36287 @@ -378,7 +375,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
36288         else {
36289                 /* Zero sized SQ is not supported */
36290                 rv = -EINVAL;
36291 -               goto err_out;
36292 +               goto err_out_xa;
36293         }
36294         if (num_rqe)
36295                 num_rqe = roundup_pow_of_two(num_rqe);
36296 @@ -401,8 +398,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
36297                 }
36298         }
36299         qp->pd = pd;
36300 -       qp->scq = scq;
36301 -       qp->rcq = rcq;
36302 +       qp->scq = to_siw_cq(attrs->send_cq);
36303 +       qp->rcq = to_siw_cq(attrs->recv_cq);
36305         if (attrs->srq) {
36306                 /*
36307 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
36308 index 7305ed8976c2..18266f07c58d 100644
36309 --- a/drivers/infiniband/ulp/isert/ib_isert.c
36310 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
36311 @@ -438,23 +438,23 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
36312         isert_init_conn(isert_conn);
36313         isert_conn->cm_id = cma_id;
36315 -       ret = isert_alloc_login_buf(isert_conn, cma_id->device);
36316 -       if (ret)
36317 -               goto out;
36319         device = isert_device_get(cma_id);
36320         if (IS_ERR(device)) {
36321                 ret = PTR_ERR(device);
36322 -               goto out_rsp_dma_map;
36323 +               goto out;
36324         }
36325         isert_conn->device = device;
36327 +       ret = isert_alloc_login_buf(isert_conn, cma_id->device);
36328 +       if (ret)
36329 +               goto out_conn_dev;
36331         isert_set_nego_params(isert_conn, &event->param.conn);
36333         isert_conn->qp = isert_create_qp(isert_conn, cma_id);
36334         if (IS_ERR(isert_conn->qp)) {
36335                 ret = PTR_ERR(isert_conn->qp);
36336 -               goto out_conn_dev;
36337 +               goto out_rsp_dma_map;
36338         }
36340         ret = isert_login_post_recv(isert_conn);
36341 @@ -473,10 +473,10 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
36343  out_destroy_qp:
36344         isert_destroy_qp(isert_conn);
36345 -out_conn_dev:
36346 -       isert_device_put(device);
36347  out_rsp_dma_map:
36348         isert_free_login_buf(isert_conn);
36349 +out_conn_dev:
36350 +       isert_device_put(device);
36351  out:
36352         kfree(isert_conn);
36353         rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
36354 diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
36355 index 6734329cca33..959ba0462ef0 100644
36356 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
36357 +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
36358 @@ -2784,8 +2784,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
36359         } while (!changed && old_state != RTRS_CLT_DEAD);
36361         if (likely(changed)) {
36362 -               rtrs_clt_destroy_sess_files(sess, sysfs_self);
36363                 rtrs_clt_remove_path_from_arr(sess);
36364 +               rtrs_clt_destroy_sess_files(sess, sysfs_self);
36365                 kobject_put(&sess->kobj);
36366         }
36368 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
36369 index 6be60aa5ffe2..7f0420ad9057 100644
36370 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
36371 +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
36372 @@ -2378,6 +2378,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
36373                 pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
36374                         dev_name(&sdev->device->dev), port_num);
36375                 mutex_unlock(&sport->mutex);
36376 +               ret = -EINVAL;
36377                 goto reject;
36378         }
36380 diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
36381 index 5f7706febcb0..17540bdb1eaf 100644
36382 --- a/drivers/input/touchscreen/elants_i2c.c
36383 +++ b/drivers/input/touchscreen/elants_i2c.c
36384 @@ -38,6 +38,7 @@
36385  #include <linux/of.h>
36386  #include <linux/gpio/consumer.h>
36387  #include <linux/regulator/consumer.h>
36388 +#include <linux/uuid.h>
36389  #include <asm/unaligned.h>
36391  /* Device, Driver information */
36392 @@ -1334,6 +1335,40 @@ static void elants_i2c_power_off(void *_data)
36393         }
36396 +#ifdef CONFIG_ACPI
36397 +static const struct acpi_device_id i2c_hid_ids[] = {
36398 +       {"ACPI0C50", 0 },
36399 +       {"PNP0C50", 0 },
36400 +       { },
36403 +static const guid_t i2c_hid_guid =
36404 +       GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
36405 +                 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
36407 +static bool elants_acpi_is_hid_device(struct device *dev)
36409 +       acpi_handle handle = ACPI_HANDLE(dev);
36410 +       union acpi_object *obj;
36412 +       if (acpi_match_device_ids(ACPI_COMPANION(dev), i2c_hid_ids))
36413 +               return false;
36415 +       obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL, ACPI_TYPE_INTEGER);
36416 +       if (obj) {
36417 +               ACPI_FREE(obj);
36418 +               return true;
36419 +       }
36421 +       return false;
36423 +#else
36424 +static bool elants_acpi_is_hid_device(struct device *dev)
36426 +       return false;
36428 +#endif
36430  static int elants_i2c_probe(struct i2c_client *client,
36431                             const struct i2c_device_id *id)
36433 @@ -1342,9 +1377,14 @@ static int elants_i2c_probe(struct i2c_client *client,
36434         unsigned long irqflags;
36435         int error;
36437 +       /* Don't bind to i2c-hid compatible devices; these are handled by the i2c-hid driver. */
36438 +       if (elants_acpi_is_hid_device(&client->dev)) {
36439 +               dev_warn(&client->dev, "This device appears to be an I2C-HID device, not binding\n");
36440 +               return -ENODEV;
36441 +       }
36443         if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
36444 -               dev_err(&client->dev,
36445 -                       "%s: i2c check functionality error\n", DEVICE_NAME);
36446 +               dev_err(&client->dev, "I2C check functionality error\n");
36447                 return -ENXIO;
36448         }
36450 diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
36451 index d8fccf048bf4..30576a5f2f04 100644
36452 --- a/drivers/input/touchscreen/ili210x.c
36453 +++ b/drivers/input/touchscreen/ili210x.c
36454 @@ -87,7 +87,7 @@ static bool ili210x_touchdata_to_coords(const u8 *touchdata,
36455                                         unsigned int *x, unsigned int *y,
36456                                         unsigned int *z)
36458 -       if (touchdata[0] & BIT(finger))
36459 +       if (!(touchdata[0] & BIT(finger)))
36460                 return false;
36462         *x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
36463 diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
36464 index 8fa2f3b7cfd8..e8b6c3137420 100644
36465 --- a/drivers/input/touchscreen/silead.c
36466 +++ b/drivers/input/touchscreen/silead.c
36467 @@ -20,6 +20,7 @@
36468  #include <linux/input/mt.h>
36469  #include <linux/input/touchscreen.h>
36470  #include <linux/pm.h>
36471 +#include <linux/pm_runtime.h>
36472  #include <linux/irq.h>
36473  #include <linux/regulator/consumer.h>
36475 @@ -335,10 +336,8 @@ static int silead_ts_get_id(struct i2c_client *client)
36477         error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_ID,
36478                                               sizeof(chip_id), (u8 *)&chip_id);
36479 -       if (error < 0) {
36480 -               dev_err(&client->dev, "Chip ID read error %d\n", error);
36481 +       if (error < 0)
36482                 return error;
36483 -       }
36485         data->chip_id = le32_to_cpu(chip_id);
36486         dev_info(&client->dev, "Silead chip ID: 0x%8X", data->chip_id);
36487 @@ -351,12 +350,49 @@ static int silead_ts_setup(struct i2c_client *client)
36488         int error;
36489         u32 status;
36491 +       /*
36492 +        * Some buggy BIOS-es bring up the chip in a stuck state where it
36493 +        * blocks the I2C bus. The following steps are necessary to
36494 +        * un-stick the chip / bus:
36495 +        * 1. Turn off the Silead chip.
36496 +        * 2. Try to do an I2C transfer with the chip; this will fail, in
36497 +        *    response to which the I2C bus driver will call
36498 +        *    i2c_recover_bus(), which un-sticks the I2C bus. Note that
36499 +        *    un-sticking the bus only works if we first drop the chip
36500 +        *    off the bus by turning it off.
36501 +        * 3. Turn the chip back on.
36502 +        *
36503 +        * On the x86/ACPI systems where this problem is seen, steps 1 and
36504 +        * 3 require making ACPI calls and dealing with ACPI Power
36505 +        * Resources. The workaround below runtime-suspends the chip to
36506 +        * turn it off, leaving it up to the ACPI subsystem to deal with
36507 +        * this.
36508 +        */
36510 +       if (device_property_read_bool(&client->dev,
36511 +                                     "silead,stuck-controller-bug")) {
36512 +               pm_runtime_set_active(&client->dev);
36513 +               pm_runtime_enable(&client->dev);
36514 +               pm_runtime_allow(&client->dev);
36516 +               pm_runtime_suspend(&client->dev);
36518 +               dev_warn(&client->dev, FW_BUG "Stuck I2C bus: please ignore the next 'controller timed out' error\n");
36519 +               silead_ts_get_id(client);
36521 +               /* The forbid will also resume the device */
36522 +               pm_runtime_forbid(&client->dev);
36523 +               pm_runtime_disable(&client->dev);
36524 +       }
36526         silead_ts_set_power(client, SILEAD_POWER_OFF);
36527         silead_ts_set_power(client, SILEAD_POWER_ON);
36529         error = silead_ts_get_id(client);
36530 -       if (error)
36531 +       if (error) {
36532 +               dev_err(&client->dev, "Chip ID read error %d\n", error);
36533                 return error;
36534 +       }
36536         error = silead_ts_init(client);
36537         if (error)
36538 diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
36539 index 321f5906e6ed..df7b19ff0a9e 100644
36540 --- a/drivers/iommu/amd/init.c
36541 +++ b/drivers/iommu/amd/init.c
36542 @@ -12,7 +12,6 @@
36543  #include <linux/acpi.h>
36544  #include <linux/list.h>
36545  #include <linux/bitmap.h>
36546 -#include <linux/delay.h>
36547  #include <linux/slab.h>
36548  #include <linux/syscore_ops.h>
36549  #include <linux/interrupt.h>
36550 @@ -257,8 +256,6 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
36551  static int amd_iommu_enable_interrupts(void);
36552  static int __init iommu_go_to_state(enum iommu_init_state state);
36553  static void init_device_table_dma(void);
36554 -static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
36555 -                               u8 fxn, u64 *value, bool is_write);
36557  static bool amd_iommu_pre_enabled = true;
36559 @@ -1717,53 +1714,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
36560         return 0;
36563 -static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
36564 +static void init_iommu_perf_ctr(struct amd_iommu *iommu)
36566 -       int retry;
36567 +       u64 val;
36568         struct pci_dev *pdev = iommu->dev;
36569 -       u64 val = 0xabcd, val2 = 0, save_reg, save_src;
36571         if (!iommu_feature(iommu, FEATURE_PC))
36572                 return;
36574         amd_iommu_pc_present = true;
36576 -       /* save the value to restore, if writable */
36577 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
36578 -           iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
36579 -               goto pc_false;
36581 -       /*
36582 -        * Disable power gating by programing the performance counter
36583 -        * source to 20 (i.e. counts the reads and writes from/to IOMMU
36584 -        * Reserved Register [MMIO Offset 1FF8h] that are ignored.),
36585 -        * which never get incremented during this init phase.
36586 -        * (Note: The event is also deprecated.)
36587 -        */
36588 -       val = 20;
36589 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
36590 -               goto pc_false;
36592 -       /* Check if the performance counters can be written to */
36593 -       val = 0xabcd;
36594 -       for (retry = 5; retry; retry--) {
36595 -               if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
36596 -                   iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
36597 -                   val2)
36598 -                       break;
36600 -               /* Wait about 20 msec for power gating to disable and retry. */
36601 -               msleep(20);
36602 -       }
36604 -       /* restore */
36605 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
36606 -           iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
36607 -               goto pc_false;
36609 -       if (val != val2)
36610 -               goto pc_false;
36612         pci_info(pdev, "IOMMU performance counters supported\n");
36614         val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
36615 @@ -1771,11 +1731,6 @@ static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
36616         iommu->max_counters = (u8) ((val >> 7) & 0xf);
36618         return;
36620 -pc_false:
36621 -       pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
36622 -       amd_iommu_pc_present = false;
36623 -       return;
36626  static ssize_t amd_iommu_show_cap(struct device *dev,
36627 @@ -1837,7 +1792,7 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
36628          * IVHD and MMIO conflict.
36629          */
36630         if (features != iommu->features)
36631 -               pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx\n).",
36632 +               pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
36633                         features, iommu->features);
36636 diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
36637 index 8594b4a83043..941ba5484731 100644
36638 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
36639 +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
36640 @@ -2305,6 +2305,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
36642         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
36644 +       if (!gather->pgsize)
36645 +               return;
36647         arm_smmu_tlb_inv_range_domain(gather->start,
36648                                       gather->end - gather->start + 1,
36649                                       gather->pgsize, true, smmu_domain);
36650 diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
36651 index f985817c967a..230b6f6b3901 100644
36652 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
36653 +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
36654 @@ -115,7 +115,7 @@
36655  #define GERROR_PRIQ_ABT_ERR            (1 << 3)
36656  #define GERROR_EVTQ_ABT_ERR            (1 << 2)
36657  #define GERROR_CMDQ_ERR                        (1 << 0)
36658 -#define GERROR_ERR_MASK                        0xfd
36659 +#define GERROR_ERR_MASK                        0x1fd
36661  #define ARM_SMMU_GERRORN               0x64
36663 diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
36664 index af765c813cc8..fdd095e1fa52 100644
36665 --- a/drivers/iommu/dma-iommu.c
36666 +++ b/drivers/iommu/dma-iommu.c
36667 @@ -52,6 +52,17 @@ struct iommu_dma_cookie {
36668  };
36670  static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
36671 +bool iommu_dma_forcedac __read_mostly;
36673 +static int __init iommu_dma_forcedac_setup(char *str)
36675 +       int ret = kstrtobool(str, &iommu_dma_forcedac);
36677 +       if (!ret && iommu_dma_forcedac)
36678 +               pr_info("Forcing DAC for PCI devices\n");
36679 +       return ret;
36681 +early_param("iommu.forcedac", iommu_dma_forcedac_setup);
36683  void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
36684                 struct iommu_domain *domain)
36685 @@ -444,7 +455,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
36686                 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
36688         /* Try to get PCI devices a SAC address */
36689 -       if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
36690 +       if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
36691                 iova = alloc_iova_fast(iovad, iova_len,
36692                                        DMA_BIT_MASK(32) >> shift, false);
36694 diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
36695 index ee0932307d64..7e551da6c1fb 100644
36696 --- a/drivers/iommu/intel/iommu.c
36697 +++ b/drivers/iommu/intel/iommu.c
36698 @@ -360,7 +360,6 @@ int intel_iommu_enabled = 0;
36699  EXPORT_SYMBOL_GPL(intel_iommu_enabled);
36701  static int dmar_map_gfx = 1;
36702 -static int dmar_forcedac;
36703  static int intel_iommu_strict;
36704  static int intel_iommu_superpage = 1;
36705  static int iommu_identity_mapping;
36706 @@ -451,8 +450,8 @@ static int __init intel_iommu_setup(char *str)
36707                         dmar_map_gfx = 0;
36708                         pr_info("Disable GFX device mapping\n");
36709                 } else if (!strncmp(str, "forcedac", 8)) {
36710 -                       pr_info("Forcing DAC for PCI devices\n");
36711 -                       dmar_forcedac = 1;
36712 +                       pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
36713 +                       iommu_dma_forcedac = true;
36714                 } else if (!strncmp(str, "strict", 6)) {
36715                         pr_info("Disable batched IOTLB flush\n");
36716                         intel_iommu_strict = 1;
36717 @@ -658,7 +657,14 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
36718         rcu_read_lock();
36719         for_each_active_iommu(iommu, drhd) {
36720                 if (iommu != skip) {
36721 -                       if (!ecap_sc_support(iommu->ecap)) {
36722 +                       /*
36723 +                        * If the hardware is operating in scalable mode,
36724 +                        * snooping control is always supported since we
36725 +                        * always set the PASID-table-entry.PGSNP bit if the
36726 +                        * domain is managed outside (UNMANAGED).
36727 +                        */
36728 +                       if (!sm_supported(iommu) &&
36729 +                           !ecap_sc_support(iommu->ecap)) {
36730                                 ret = 0;
36731                                 break;
36732                         }
36733 @@ -1340,6 +1346,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
36734                       readl, (sts & DMA_GSTS_RTPS), sts);
36736         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
36738 +       iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
36739 +       if (sm_supported(iommu))
36740 +               qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
36741 +       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
36744  void iommu_flush_write_buffer(struct intel_iommu *iommu)
36745 @@ -2289,6 +2300,41 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
36746         return level;
36750 + * Ensure that old small page tables are removed to make room for superpage(s).
36751 + * We're going to add new large pages, so make sure we don't remove their parent
36752 + * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
36753 + */
36754 +static void switch_to_super_page(struct dmar_domain *domain,
36755 +                                unsigned long start_pfn,
36756 +                                unsigned long end_pfn, int level)
36758 +       unsigned long lvl_pages = lvl_to_nr_pages(level);
36759 +       struct dma_pte *pte = NULL;
36760 +       int i;
36762 +       while (start_pfn <= end_pfn) {
36763 +               if (!pte)
36764 +                       pte = pfn_to_dma_pte(domain, start_pfn, &level);
36766 +               if (dma_pte_present(pte)) {
36767 +                       dma_pte_free_pagetable(domain, start_pfn,
36768 +                                              start_pfn + lvl_pages - 1,
36769 +                                              level + 1);
36771 +                       for_each_domain_iommu(i, domain)
36772 +                               iommu_flush_iotlb_psi(g_iommus[i], domain,
36773 +                                                     start_pfn, lvl_pages,
36774 +                                                     0, 0);
36775 +               }
36777 +               pte++;
36778 +               start_pfn += lvl_pages;
36779 +               if (first_pte_in_page(pte))
36780 +                       pte = NULL;
36781 +       }
36784  static int
36785  __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
36786                  unsigned long phys_pfn, unsigned long nr_pages, int prot)
36787 @@ -2305,8 +2351,9 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
36788                 return -EINVAL;
36790         attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
36791 +       attr |= DMA_FL_PTE_PRESENT;
36792         if (domain_use_first_level(domain)) {
36793 -               attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
36794 +               attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
36796                 if (domain->domain.type == IOMMU_DOMAIN_DMA) {
36797                         attr |= DMA_FL_PTE_ACCESS;
36798 @@ -2329,22 +2376,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
36799                                 return -ENOMEM;
36800                         /* It is large page*/
36801                         if (largepage_lvl > 1) {
36802 -                               unsigned long nr_superpages, end_pfn;
36803 +                               unsigned long end_pfn;
36805                                 pteval |= DMA_PTE_LARGE_PAGE;
36806 -                               lvl_pages = lvl_to_nr_pages(largepage_lvl);
36808 -                               nr_superpages = nr_pages / lvl_pages;
36809 -                               end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
36811 -                               /*
36812 -                                * Ensure that old small page tables are
36813 -                                * removed to make room for superpage(s).
36814 -                                * We're adding new large pages, so make sure
36815 -                                * we don't remove their parent tables.
36816 -                                */
36817 -                               dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
36818 -                                                      largepage_lvl + 1);
36819 +                               end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
36820 +                               switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
36821                         } else {
36822                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
36823                         }
36824 @@ -2422,6 +2458,10 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
36825                                    (((u16)bus) << 8) | devfn,
36826                                    DMA_CCMD_MASK_NOBIT,
36827                                    DMA_CCMD_DEVICE_INVL);
36829 +       if (sm_supported(iommu))
36830 +               qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
36832         iommu->flush.flush_iotlb(iommu,
36833                                  did_old,
36834                                  0,
36835 @@ -2505,6 +2545,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
36837         flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
36839 +       if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
36840 +               flags |= PASID_FLAG_PAGE_SNOOP;
36842         return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
36843                                              domain->iommu_did[iommu->seq_id],
36844                                              flags);
36845 @@ -3267,8 +3310,6 @@ static int __init init_dmars(void)
36846                 register_pasid_allocator(iommu);
36847  #endif
36848                 iommu_set_root_entry(iommu);
36849 -               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
36850 -               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
36851         }
36853  #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
36854 @@ -3458,12 +3499,7 @@ static int init_iommu_hw(void)
36855                 }
36857                 iommu_flush_write_buffer(iommu);
36859                 iommu_set_root_entry(iommu);
36861 -               iommu->flush.flush_context(iommu, 0, 0, 0,
36862 -                                          DMA_CCMD_GLOBAL_INVL);
36863 -               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
36864                 iommu_enable_translation(iommu);
36865                 iommu_disable_protect_mem_regions(iommu);
36866         }
36867 @@ -3846,8 +3882,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
36868                 goto disable_iommu;
36870         iommu_set_root_entry(iommu);
36871 -       iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
36872 -       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
36873         iommu_enable_translation(iommu);
36875         iommu_disable_protect_mem_regions(iommu);
36876 diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
36877 index 611ef5243cb6..5c16ebe037a1 100644
36878 --- a/drivers/iommu/intel/irq_remapping.c
36879 +++ b/drivers/iommu/intel/irq_remapping.c
36880 @@ -736,7 +736,7 @@ static int __init intel_prepare_irq_remapping(void)
36881                 return -ENODEV;
36883         if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
36884 -               goto error;
36885 +               return -ENODEV;
36887         if (!dmar_ir_support())
36888                 return -ENODEV;
36889 diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
36890 index f26cb6195b2c..5093d317ff1a 100644
36891 --- a/drivers/iommu/intel/pasid.c
36892 +++ b/drivers/iommu/intel/pasid.c
36893 @@ -411,6 +411,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
36894         pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
36898 + * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
36899 + * PASID entry.
36900 + */
36901 +static inline void
36902 +pasid_set_pgsnp(struct pasid_entry *pe)
36904 +       pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
36907  /*
36908   * Setup the First Level Page table Pointer field (Bit 140~191)
36909   * of a scalable mode PASID entry.
36910 @@ -565,6 +575,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
36911                 }
36912         }
36914 +       if (flags & PASID_FLAG_PAGE_SNOOP)
36915 +               pasid_set_pgsnp(pte);
36917         pasid_set_domain_id(pte, did);
36918         pasid_set_address_width(pte, iommu->agaw);
36919         pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
36920 @@ -643,6 +656,9 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
36921         pasid_set_fault_enable(pte);
36922         pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
36924 +       if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
36925 +               pasid_set_pgsnp(pte);
36927         /*
36928          * Since it is a second level only translation setup, we should
36929          * set SRE bit as well (addresses are expected to be GPAs).
36930 diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
36931 index 444c0bec221a..086ebd697319 100644
36932 --- a/drivers/iommu/intel/pasid.h
36933 +++ b/drivers/iommu/intel/pasid.h
36934 @@ -48,6 +48,7 @@
36935   */
36936  #define PASID_FLAG_SUPERVISOR_MODE     BIT(0)
36937  #define PASID_FLAG_NESTED              BIT(1)
36938 +#define PASID_FLAG_PAGE_SNOOP          BIT(2)
36940  /*
36941   * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
36942 diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
36943 index 574a7e657a9a..ecb6314fdd5c 100644
36944 --- a/drivers/iommu/intel/svm.c
36945 +++ b/drivers/iommu/intel/svm.c
36946 @@ -862,7 +862,7 @@ intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
36947         /* Fill in event data for device specific processing */
36948         memset(&event, 0, sizeof(struct iommu_fault_event));
36949         event.fault.type = IOMMU_FAULT_PAGE_REQ;
36950 -       event.fault.prm.addr = desc->addr;
36951 +       event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
36952         event.fault.prm.pasid = desc->pasid;
36953         event.fault.prm.grpid = desc->prg_index;
36954         event.fault.prm.perm = prq_to_iommu_prot(desc);
36955 @@ -920,7 +920,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
36956                                ((unsigned long long *)req)[1]);
36957                         goto no_pasid;
36958                 }
36960 +               /* We shall not receive a page request for supervisor SVM. */
36961 +               if (req->pm_req && (req->rd_req | req->wr_req)) {
36962 +                       pr_err("Unexpected page request in Privilege Mode\n");
36963 +                       /* No need to find the matching sdev as for bad_req */
36964 +                       goto no_pasid;
36965 +               }
36966 +               /* DMA read with exec request is not supported. */
36967 +               if (req->exe_req && req->rd_req) {
36968 +                       pr_err("Execution request not supported\n");
36969 +                       goto no_pasid;
36970 +               }
36971                 if (!svm || svm->pasid != req->pasid) {
36972                         rcu_read_lock();
36973                         svm = ioasid_find(NULL, req->pasid, NULL);
36974 @@ -1021,12 +1031,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)
36975                                 QI_PGRP_RESP_TYPE;
36976                         resp.qw1 = QI_PGRP_IDX(req->prg_index) |
36977                                 QI_PGRP_LPIG(req->lpig);
36978 +                       resp.qw2 = 0;
36979 +                       resp.qw3 = 0;
36981                         if (req->priv_data_present)
36982                                 memcpy(&resp.qw2, req->priv_data,
36983                                        sizeof(req->priv_data));
36984 -                       resp.qw2 = 0;
36985 -                       resp.qw3 = 0;
36986                         qi_submit_sync(iommu, &resp, 1, 0);
36987                 }
36988  prq_advance:
36989 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
36990 index d0b0a15dba84..e10cfa99057c 100644
36991 --- a/drivers/iommu/iommu.c
36992 +++ b/drivers/iommu/iommu.c
36993 @@ -2878,10 +2878,12 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
36994   */
36995  int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
36997 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
36998 +       if (dev->iommu && dev->iommu->iommu_dev) {
36999 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
37001 -       if (ops && ops->dev_enable_feat)
37002 -               return ops->dev_enable_feat(dev, feat);
37003 +               if (ops->dev_enable_feat)
37004 +                       return ops->dev_enable_feat(dev, feat);
37005 +       }
37007         return -ENODEV;
37009 @@ -2894,10 +2896,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
37010   */
37011  int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
37013 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
37014 +       if (dev->iommu && dev->iommu->iommu_dev) {
37015 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
37017 -       if (ops && ops->dev_disable_feat)
37018 -               return ops->dev_disable_feat(dev, feat);
37019 +               if (ops->dev_disable_feat)
37020 +                       return ops->dev_disable_feat(dev, feat);
37021 +       }
37023         return -EBUSY;
37025 @@ -2905,10 +2909,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
37027  bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
37029 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
37030 +       if (dev->iommu && dev->iommu->iommu_dev) {
37031 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
37033 -       if (ops && ops->dev_feat_enabled)
37034 -               return ops->dev_feat_enabled(dev, feat);
37035 +               if (ops->dev_feat_enabled)
37036 +                       return ops->dev_feat_enabled(dev, feat);
37037 +       }
37039         return false;
37041 diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
37042 index 6ecc007f07cd..e168a682806a 100644
37043 --- a/drivers/iommu/mtk_iommu.c
37044 +++ b/drivers/iommu/mtk_iommu.c
37045 @@ -688,13 +688,6 @@ static const struct iommu_ops mtk_iommu_ops = {
37046  static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
37048         u32 regval;
37049 -       int ret;
37051 -       ret = clk_prepare_enable(data->bclk);
37052 -       if (ret) {
37053 -               dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
37054 -               return ret;
37055 -       }
37057         if (data->plat_data->m4u_plat == M4U_MT8173) {
37058                 regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
37059 @@ -760,7 +753,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
37060         if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
37061                              dev_name(data->dev), (void *)data)) {
37062                 writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
37063 -               clk_disable_unprepare(data->bclk);
37064                 dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
37065                 return -ENODEV;
37066         }
37067 @@ -977,14 +969,19 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
37068         void __iomem *base = data->base;
37069         int ret;
37071 -       /* Avoid first resume to affect the default value of registers below. */
37072 -       if (!m4u_dom)
37073 -               return 0;
37074         ret = clk_prepare_enable(data->bclk);
37075         if (ret) {
37076                 dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
37077                 return ret;
37078         }
37080 +       /*
37081 +        * Upon first resume, only enable the clk and return, since the values of the
37082 +        * registers are not yet set.
37083 +        */
37084 +       if (!m4u_dom)
37085 +               return 0;
37087         writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
37088         writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
37089         writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
37090 diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
37091 index 563a9b366294..e81e89a81cb5 100644
37092 --- a/drivers/irqchip/irq-gic-v3-mbi.c
37093 +++ b/drivers/irqchip/irq-gic-v3-mbi.c
37094 @@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
37095         reg = of_get_property(np, "mbi-alias", NULL);
37096         if (reg) {
37097                 mbi_phys_base = of_translate_address(np, reg);
37098 -               if (mbi_phys_base == OF_BAD_ADDR) {
37099 +               if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
37100                         ret = -ENXIO;
37101                         goto err_free_mbi;
37102                 }
37103 diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
37104 index eb0ee356a629..00404024d7cd 100644
37105 --- a/drivers/irqchip/irq-gic-v3.c
37106 +++ b/drivers/irqchip/irq-gic-v3.c
37107 @@ -648,6 +648,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
37109         irqnr = gic_read_iar();
37111 +       /* Check for special IDs first */
37112 +       if ((irqnr >= 1020 && irqnr <= 1023))
37113 +               return;
37115         if (gic_supports_nmi() &&
37116             unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
37117                 gic_handle_nmi(irqnr, regs);
37118 @@ -659,10 +663,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
37119                 gic_arch_enable_irqs();
37120         }
37122 -       /* Check for special IDs first */
37123 -       if ((irqnr >= 1020 && irqnr <= 1023))
37124 -               return;
37126         if (static_branch_likely(&supports_deactivate_key))
37127                 gic_write_eoir(irqnr);
37128         else
37129 diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
37130 index 265b53476a80..6dedc58c47b3 100644
37131 --- a/drivers/leds/blink/Kconfig
37132 +++ b/drivers/leds/blink/Kconfig
37133 @@ -9,6 +9,7 @@ if LEDS_BLINK
37135  config LEDS_BLINK_LGM
37136         tristate "LED support for Intel LGM SoC series"
37137 +       depends on GPIOLIB
37138         depends on LEDS_CLASS
37139         depends on MFD_SYSCON
37140         depends on OF
37141 diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
37142 index fc433e63b1dc..b1590cb4a188 100644
37143 --- a/drivers/leds/leds-lp5523.c
37144 +++ b/drivers/leds/leds-lp5523.c
37145 @@ -307,7 +307,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
37146         usleep_range(3000, 6000);
37147         ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
37148         if (ret)
37149 -               return ret;
37150 +               goto out;
37151         status &= LP5523_ENG_STATUS_MASK;
37153         if (status != LP5523_ENG_STATUS_MASK) {
37154 diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
37155 index 4c325301a2fe..94d9067dc8d0 100644
37156 --- a/drivers/mailbox/sprd-mailbox.c
37157 +++ b/drivers/mailbox/sprd-mailbox.c
37158 @@ -60,6 +60,8 @@ struct sprd_mbox_priv {
37159         struct clk              *clk;
37160         u32                     outbox_fifo_depth;
37162 +       struct mutex            lock;
37163 +       u32                     refcnt;
37164         struct mbox_chan        chan[SPRD_MBOX_CHAN_MAX];
37165  };
37167 @@ -115,7 +117,11 @@ static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
37168                 id = readl(priv->outbox_base + SPRD_MBOX_ID);
37170                 chan = &priv->chan[id];
37171 -               mbox_chan_received_data(chan, (void *)msg);
37172 +               if (chan->cl)
37173 +                       mbox_chan_received_data(chan, (void *)msg);
37174 +               else
37175 +                       dev_warn_ratelimited(priv->dev,
37176 +                                   "message has been dropped at ch[%d]\n", id);
37178                 /* Trigger to update outbox FIFO pointer */
37179                 writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
37180 @@ -215,18 +221,22 @@ static int sprd_mbox_startup(struct mbox_chan *chan)
37181         struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
37182         u32 val;
37184 -       /* Select outbox FIFO mode and reset the outbox FIFO status */
37185 -       writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
37186 +       mutex_lock(&priv->lock);
37187 +       if (priv->refcnt++ == 0) {
37188 +               /* Select outbox FIFO mode and reset the outbox FIFO status */
37189 +               writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
37191 -       /* Enable inbox FIFO overflow and delivery interrupt */
37192 -       val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37193 -       val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
37194 -       writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37195 +               /* Enable inbox FIFO overflow and delivery interrupt */
37196 +               val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37197 +               val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
37198 +               writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37200 -       /* Enable outbox FIFO not empty interrupt */
37201 -       val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37202 -       val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
37203 -       writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37204 +               /* Enable outbox FIFO not empty interrupt */
37205 +               val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37206 +               val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
37207 +               writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37208 +       }
37209 +       mutex_unlock(&priv->lock);
37211         return 0;
37213 @@ -235,9 +245,13 @@ static void sprd_mbox_shutdown(struct mbox_chan *chan)
37215         struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
37217 -       /* Disable inbox & outbox interrupt */
37218 -       writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37219 -       writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37220 +       mutex_lock(&priv->lock);
37221 +       if (--priv->refcnt == 0) {
37222 +               /* Disable inbox & outbox interrupt */
37223 +               writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
37224 +               writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
37225 +       }
37226 +       mutex_unlock(&priv->lock);
37229  static const struct mbox_chan_ops sprd_mbox_ops = {
37230 @@ -266,6 +280,7 @@ static int sprd_mbox_probe(struct platform_device *pdev)
37231                 return -ENOMEM;
37233         priv->dev = dev;
37234 +       mutex_init(&priv->lock);
37236         /*
37237          * The Spreadtrum mailbox uses an inbox to send messages to the target
37238 diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
37239 index 82d4e0880a99..4fb635c0baa0 100644
37240 --- a/drivers/md/bcache/writeback.c
37241 +++ b/drivers/md/bcache/writeback.c
37242 @@ -110,13 +110,13 @@ static void __update_writeback_rate(struct cached_dev *dc)
37243                 int64_t fps;
37245                 if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
37246 -                       fp_term = dc->writeback_rate_fp_term_low *
37247 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_low *
37248                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
37249                 } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
37250 -                       fp_term = dc->writeback_rate_fp_term_mid *
37251 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_mid *
37252                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
37253                 } else {
37254 -                       fp_term = dc->writeback_rate_fp_term_high *
37255 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_high *
37256                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
37257                 }
37258                 fps = div_s64(dirty, dirty_buckets) * fp_term;
37259 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
37260 index 46b5d542b8fe..362c887d33b3 100644
37261 --- a/drivers/md/dm-integrity.c
37262 +++ b/drivers/md/dm-integrity.c
37263 @@ -4039,6 +4039,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
37264                         if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
37265                                 r = -EINVAL;
37266                                 ti->error = "Invalid bitmap_flush_interval argument";
37267 +                               goto bad;
37268                         }
37269                         ic->bitmap_flush_interval = msecs_to_jiffies(val);
37270                 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
37271 diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
37272 index cab12b2251ba..91461b6904c1 100644
37273 --- a/drivers/md/dm-raid.c
37274 +++ b/drivers/md/dm-raid.c
37275 @@ -1868,6 +1868,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
37276         return rs->md.new_level != rs->md.level;
37279 +/* True if layout is set to reshape. */
37280 +static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
37282 +       return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
37283 +              rs->md.new_layout != rs->md.layout ||
37284 +              rs->md.new_chunk_sectors != rs->md.chunk_sectors;
37287  /* True if @rs is requested to reshape by ctr */
37288  static bool rs_reshape_requested(struct raid_set *rs)
37290 @@ -1880,9 +1888,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
37291         if (rs_is_raid0(rs))
37292                 return false;
37294 -       change = mddev->new_layout != mddev->layout ||
37295 -                mddev->new_chunk_sectors != mddev->chunk_sectors ||
37296 -                rs->delta_disks;
37297 +       change = rs_is_layout_change(rs, false);
37299         /* Historical case to support raid1 reshape without delta disks */
37300         if (rs_is_raid1(rs)) {
37301 @@ -2817,7 +2823,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
37304  /*
37305 - *
37306 + * Reshape:
37307   * - change raid layout
37308   * - change chunk size
37309   * - add disks
37310 @@ -2926,6 +2932,20 @@ static int rs_setup_reshape(struct raid_set *rs)
37311         return r;
37315 + * If the md resync thread has updated the superblock with the max reshape
37316 + * position at the end of a reshape but has not (yet) reset the layout
37317 + * configuration changes, reset the latter.
37318 + */
37319 +static void rs_reset_inconclusive_reshape(struct raid_set *rs)
37321 +       if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
37322 +               rs_set_cur(rs);
37323 +               rs->md.delta_disks = 0;
37324 +               rs->md.reshape_backwards = 0;
37325 +       }
37328  /*
37329   * Enable/disable discard support on RAID set depending on
37330   * RAID level and discard properties of underlying RAID members.
37331 @@ -3212,11 +3232,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37332         if (r)
37333                 goto bad;
37335 +       /* Catch any inconclusive reshape superblock content. */
37336 +       rs_reset_inconclusive_reshape(rs);
37338         /* Start raid set read-only and assumed clean to change in raid_resume() */
37339         rs->md.ro = 1;
37340         rs->md.in_sync = 1;
37342 -       /* Keep array frozen */
37343 +       /* Keep array frozen until resume. */
37344         set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
37346         /* Has to be held on running the array */
37347 @@ -3230,7 +3253,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37348         }
37350         r = md_start(&rs->md);
37352         if (r) {
37353                 ti->error = "Failed to start raid array";
37354                 mddev_unlock(&rs->md);
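
The dm-raid hunks above fold a condition that rs_reshape_requested() used to open-code into rs_is_layout_change(), whose bool selects whether delta_disks is read from the live mddev or from the value staged by the constructor; the new rs_reset_inconclusive_reshape() can then evaluate the superblock-backed view of the same predicate. A toy model of the selector-flag pattern (struct layout and its field names are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct layout {
    int delta_disks;
    int layout, new_layout;
    int chunk_sectors, new_chunk_sectors;
};

static bool layout_change(const struct layout *live,
                          const struct layout *staged, bool use_live)
{
    return (use_live ? live->delta_disks : staged->delta_disks) ||
           live->new_layout != live->layout ||
           live->new_chunk_sectors != live->chunk_sectors;
}

int main(void)
{
    struct layout live   = { 0, 2, 2, 128, 128 }; /* superblock view    */
    struct layout staged = { 1, 0, 0, 0, 0 };     /* ctr staged +1 disk */

    printf("live view: %d, staged view: %d\n",
           layout_change(&live, &staged, true),
           layout_change(&live, &staged, false));
    return 0;
}
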
37355 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
37356 index 13b4385f4d5a..9c3bc3711b33 100644
37357 --- a/drivers/md/dm-rq.c
37358 +++ b/drivers/md/dm-rq.c
37359 @@ -569,6 +569,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
37360         blk_mq_free_tag_set(md->tag_set);
37361  out_kfree_tag_set:
37362         kfree(md->tag_set);
37363 +       md->tag_set = NULL;
37365         return err;
37367 @@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
37368         if (md->tag_set) {
37369                 blk_mq_free_tag_set(md->tag_set);
37370                 kfree(md->tag_set);
37371 +               md->tag_set = NULL;
37372         }
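
Clearing md->tag_set after kfree() is what makes the two hunks above cooperate: dm_mq_cleanup_mapped_device() guards its teardown on the pointer, so NULLing it on the init error path turns a would-be double free into a no-op. The idiom in miniature:

#include <stdlib.h>

struct mapped { void *tag_set; };

static void cleanup(struct mapped *md)
{
    if (md->tag_set) {          /* NULL check guards the teardown */
        free(md->tag_set);
        md->tag_set = NULL;     /* safe to call cleanup() again   */
    }
}

int main(void)
{
    struct mapped md = { malloc(32) };

    cleanup(&md);   /* frees and clears            */
    cleanup(&md);   /* second call is now harmless */
    return 0;
}
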
37375 diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
37376 index 11890db71f3f..962f7df0691e 100644
37377 --- a/drivers/md/dm-snap.c
37378 +++ b/drivers/md/dm-snap.c
37379 @@ -1408,6 +1408,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37381         if (!s->store->chunk_size) {
37382                 ti->error = "Chunk size not set";
37383 +               r = -EINVAL;
37384                 goto bad_read_metadata;
37385         }
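
Before this hunk, snapshot_ctr() jumped to bad_read_metadata with r still holding 0 from the last successful step, so a snapshot store with an unset chunk size reported success. A tiny model of why the assignment has to precede the goto:

#include <errno.h>
#include <stdio.h>

static int ctr(unsigned chunk_size)
{
    int r = 0; /* left over from earlier successful steps */

    if (!chunk_size) {
        r = -EINVAL; /* the fix: set the code before jumping */
        goto bad;
    }
    return 0;
bad:
    return r; /* without the assignment this returned 0 */
}

int main(void)
{
    printf("ctr(0) = %d\n", ctr(0)); /* -22, not 0 */
    return 0;
}
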
37387 diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
37388 index 200c5d0f08bf..ea3130e11680 100644
37389 --- a/drivers/md/md-bitmap.c
37390 +++ b/drivers/md/md-bitmap.c
37391 @@ -1722,6 +1722,8 @@ void md_bitmap_flush(struct mddev *mddev)
37392         md_bitmap_daemon_work(mddev);
37393         bitmap->daemon_lastrun -= sleep;
37394         md_bitmap_daemon_work(mddev);
37395 +       if (mddev->bitmap_info.external)
37396 +               md_super_wait(mddev);
37397         md_bitmap_update_sb(bitmap);
37400 diff --git a/drivers/md/md.c b/drivers/md/md.c
37401 index 21da0c48f6c2..2a9553efc2d1 100644
37402 --- a/drivers/md/md.c
37403 +++ b/drivers/md/md.c
37404 @@ -734,7 +734,34 @@ void mddev_init(struct mddev *mddev)
37406  EXPORT_SYMBOL_GPL(mddev_init);
37408 +static struct mddev *mddev_find_locked(dev_t unit)
37410 +       struct mddev *mddev;
37412 +       list_for_each_entry(mddev, &all_mddevs, all_mddevs)
37413 +               if (mddev->unit == unit)
37414 +                       return mddev;
37416 +       return NULL;
37419  static struct mddev *mddev_find(dev_t unit)
37421 +       struct mddev *mddev;
37423 +       if (MAJOR(unit) != MD_MAJOR)
37424 +               unit &= ~((1 << MdpMinorShift) - 1);
37426 +       spin_lock(&all_mddevs_lock);
37427 +       mddev = mddev_find_locked(unit);
37428 +       if (mddev)
37429 +               mddev_get(mddev);
37430 +       spin_unlock(&all_mddevs_lock);
37432 +       return mddev;
37435 +static struct mddev *mddev_find_or_alloc(dev_t unit)
37437         struct mddev *mddev, *new = NULL;
37439 @@ -745,13 +772,13 @@ static struct mddev *mddev_find(dev_t unit)
37440         spin_lock(&all_mddevs_lock);
37442         if (unit) {
37443 -               list_for_each_entry(mddev, &all_mddevs, all_mddevs)
37444 -                       if (mddev->unit == unit) {
37445 -                               mddev_get(mddev);
37446 -                               spin_unlock(&all_mddevs_lock);
37447 -                               kfree(new);
37448 -                               return mddev;
37449 -                       }
37450 +               mddev = mddev_find_locked(unit);
37451 +               if (mddev) {
37452 +                       mddev_get(mddev);
37453 +                       spin_unlock(&all_mddevs_lock);
37454 +                       kfree(new);
37455 +                       return mddev;
37456 +               }
37458                 if (new) {
37459                         list_add(&new->all_mddevs, &all_mddevs);
37460 @@ -777,12 +804,7 @@ static struct mddev *mddev_find(dev_t unit)
37461                                 return NULL;
37462                         }
37464 -                       is_free = 1;
37465 -                       list_for_each_entry(mddev, &all_mddevs, all_mddevs)
37466 -                               if (mddev->unit == dev) {
37467 -                                       is_free = 0;
37468 -                                       break;
37469 -                               }
37470 +                       is_free = !mddev_find_locked(dev);
37471                 }
37472                 new->unit = dev;
37473                 new->md_minor = MINOR(dev);
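
The refactor above splits the previously duplicated list walk into mddev_find_locked(), which must be called with all_mddevs_lock held and returns a borrowed pointer, and mddev_find(), which takes the lock and a reference so the result stays valid after unlock; mddev_find_or_alloc() keeps the allocation logic. A userspace sketch of the lookup-and-get split (struct node, find_locked() and find_get() are illustrative names):

#include <pthread.h>
#include <stddef.h>

struct node {
    struct node *next;
    unsigned unit;
    int refs;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Caller must hold list_lock; the returned pointer is borrowed. */
static struct node *find_locked(unsigned unit)
{
    for (struct node *n = head; n; n = n->next)
        if (n->unit == unit)
            return n;
    return NULL;
}

/* Takes the lock and a reference, so the pointer stays valid after
 * the lock drops -- the shape of the new mddev_find(). */
static struct node *find_get(unsigned unit)
{
    struct node *n;

    pthread_mutex_lock(&list_lock);
    n = find_locked(unit);
    if (n)
        n->refs++;
    pthread_mutex_unlock(&list_lock);
    return n;
}

int main(void)
{
    return find_get(1) != NULL; /* empty list: nothing found */
}
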
37474 @@ -5644,7 +5666,7 @@ static int md_alloc(dev_t dev, char *name)
37475          * writing to /sys/module/md_mod/parameters/new_array.
37476          */
37477         static DEFINE_MUTEX(disks_mutex);
37478 -       struct mddev *mddev = mddev_find(dev);
37479 +       struct mddev *mddev = mddev_find_or_alloc(dev);
37480         struct gendisk *disk;
37481         int partitioned;
37482         int shift;
37483 @@ -6524,11 +6546,9 @@ static void autorun_devices(int part)
37485                 md_probe(dev);
37486                 mddev = mddev_find(dev);
37487 -               if (!mddev || !mddev->gendisk) {
37488 -                       if (mddev)
37489 -                               mddev_put(mddev);
37490 +               if (!mddev)
37491                         break;
37492 -               }
37494                 if (mddev_lock(mddev))
37495                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
37496                 else if (mddev->raid_disks || mddev->major_version
37497 @@ -7821,8 +7841,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
37498                 /* Wait until bdev->bd_disk is definitely gone */
37499                 if (work_pending(&mddev->del_work))
37500                         flush_workqueue(md_misc_wq);
37501 -               /* Then retry the open from the top */
37502 -               return -ERESTARTSYS;
37503 +               return -EBUSY;
37504         }
37505         BUG_ON(mddev != bdev->bd_disk->private_data);
37507 @@ -8153,7 +8172,11 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
37508         loff_t l = *pos;
37509         struct mddev *mddev;
37511 -       if (l >= 0x10000)
37512 +       if (l == 0x10000) {
37513 +               ++*pos;
37514 +               return (void *)2;
37515 +       }
37516 +       if (l > 0x10000)
37517                 return NULL;
37518         if (!l--)
37519                 /* header */
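
md_seq_start() never returns real object pointers for the frame rows of /proc/mdstat: it hands back the small cookie 1 for the header and, with this fix, 2 for a one-shot footer once *pos walks past the device range, so the show callback can emit the trailing status line exactly once (the upstream intent, as the hunk suggests, is restoring that footer row). A standalone model of the cookie pattern (ITER_HEADER/ITER_FOOTER and the items table are invented):

#include <stdio.h>

#define ITER_HEADER ((void *)1)
#define ITER_FOOTER ((void *)2)

static const char *items[] = { "md0", "md1" };
#define NITEMS 2

/* Returns a cookie for position pos, NULL when iteration is done. */
static void *seq_start(long pos)
{
    if (pos == 0)
        return ITER_HEADER;
    if (pos <= NITEMS)
        return (void *)&items[pos - 1];
    if (pos == NITEMS + 1)
        return ITER_FOOTER; /* emitted exactly once at the end */
    return NULL;
}

static void seq_show(void *v)
{
    if (v == ITER_HEADER)
        puts("Personalities : ...");
    else if (v == ITER_FOOTER)
        puts("unused devices: <none>");
    else
        puts(*(const char **)v);
}

int main(void)
{
    void *v;

    for (long pos = 0; (v = seq_start(pos)) != NULL; pos++)
        seq_show(v);
    return 0;
}
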
37520 @@ -9251,11 +9274,11 @@ void md_check_recovery(struct mddev *mddev)
37521                 }
37523                 if (mddev_is_clustered(mddev)) {
37524 -                       struct md_rdev *rdev;
37525 +                       struct md_rdev *rdev, *tmp;
37526                         /* kick the device if another node issued a
37527                          * remove disk.
37528                          */
37529 -                       rdev_for_each(rdev, mddev) {
37530 +                       rdev_for_each_safe(rdev, tmp, mddev) {
37531                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
37532                                                 rdev->raid_disk < 0)
37533                                         md_kick_rdev_from_array(rdev);
37534 @@ -9569,7 +9592,7 @@ static int __init md_init(void)
37535  static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
37537         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
37538 -       struct md_rdev *rdev2;
37539 +       struct md_rdev *rdev2, *tmp;
37540         int role, ret;
37541         char b[BDEVNAME_SIZE];
37543 @@ -9586,7 +9609,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
37544         }
37546         /* Check for change of roles in the active devices */
37547 -       rdev_for_each(rdev2, mddev) {
37548 +       rdev_for_each_safe(rdev2, tmp, mddev) {
37549                 if (test_bit(Faulty, &rdev2->flags))
37550                         continue;
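
Both md.c hunks above switch to the _safe iterator because md_kick_rdev_from_array() unlinks (and can free) the entry the loop is standing on; the safe variant caches the successor before the current node can disappear. The same trick on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int remove; };

int main(void)
{
    struct node *head = NULL;

    for (int i = 0; i < 4; i++) {
        struct node *n = malloc(sizeof(*n));
        n->remove = (i & 1);
        n->next = head;
        head = n;
    }

    /* Safe variant: cache n->next in 'tmp' before n may be freed,
     * the same trick rdev_for_each_safe() uses.  Reading n->next
     * after free(n) would be a use-after-free. */
    struct node **pp = &head;
    for (struct node *n = head, *tmp; n; n = tmp) {
        tmp = n->next;
        if (n->remove) {
            *pp = tmp;  /* unlink */
            free(n);
        } else {
            pp = &n->next;
        }
    }

    int kept = 0;
    for (struct node *n = head; n; n = n->next)
        kept++;
    printf("kept %d nodes\n", kept);
    return 0;
}
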
37552 diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
37553 index fe073d92f01e..70cfdea27efd 100644
37554 --- a/drivers/md/persistent-data/dm-btree-internal.h
37555 +++ b/drivers/md/persistent-data/dm-btree-internal.h
37556 @@ -34,12 +34,12 @@ struct node_header {
37557         __le32 max_entries;
37558         __le32 value_size;
37559         __le32 padding;
37560 -} __packed;
37561 +} __attribute__((packed, aligned(8)));
37563  struct btree_node {
37564         struct node_header header;
37565         __le64 keys[];
37566 -} __packed;
37567 +} __attribute__((packed, aligned(8)));
37570  /*
37571 diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
37572 index d8b4125e338c..a213bf11738f 100644
37573 --- a/drivers/md/persistent-data/dm-space-map-common.c
37574 +++ b/drivers/md/persistent-data/dm-space-map-common.c
37575 @@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
37576          */
37577         begin = do_div(index_begin, ll->entries_per_block);
37578         end = do_div(end, ll->entries_per_block);
37579 +       if (end == 0)
37580 +               end = ll->entries_per_block;
37582         for (i = index_begin; i < index_end; i++, begin = 0) {
37583                 struct dm_block *blk;
37584 diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
37585 index 8de63ce39bdd..87e17909ef52 100644
37586 --- a/drivers/md/persistent-data/dm-space-map-common.h
37587 +++ b/drivers/md/persistent-data/dm-space-map-common.h
37588 @@ -33,7 +33,7 @@ struct disk_index_entry {
37589         __le64 blocknr;
37590         __le32 nr_free;
37591         __le32 none_free_before;
37592 -} __packed;
37593 +} __attribute__ ((packed, aligned(8)));
37596  #define MAX_METADATA_BITMAPS 255
37597 @@ -43,7 +43,7 @@ struct disk_metadata_index {
37598         __le64 blocknr;
37600         struct disk_index_entry index[MAX_METADATA_BITMAPS];
37601 -} __packed;
37602 +} __attribute__ ((packed, aligned(8)));
37604  struct ll_disk;
37606 @@ -86,7 +86,7 @@ struct disk_sm_root {
37607         __le64 nr_allocated;
37608         __le64 bitmap_root;
37609         __le64 ref_count_root;
37610 -} __packed;
37611 +} __attribute__ ((packed, aligned(8)));
37613  #define ENTRIES_PER_BYTE 4
37615 @@ -94,7 +94,7 @@ struct disk_bitmap_header {
37616         __le32 csum;
37617         __le32 not_used;
37618         __le64 blocknr;
37619 -} __packed;
37620 +} __attribute__ ((packed, aligned(8)));
37622  enum allocation_event {
37623         SM_NONE,
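
The __packed to __attribute__((packed, aligned(8))) changes across these persistent-data headers keep the on-disk byte layout identical (the members here are already naturally packed) while restoring an 8-byte alignment contract, which changes what the compiler may assume about pointers to these structs and the member-access code it emits. A quick demonstration of size versus alignment (the struct names are invented):

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

struct hdr_packed {
    uint32_t csum;
    uint32_t flags;
    uint64_t blocknr;
} __attribute__((packed));

struct hdr_packed_aligned {
    uint32_t csum;
    uint32_t flags;
    uint64_t blocknr;
} __attribute__((packed, aligned(8)));

int main(void)
{
    /* Same byte layout, different alignment contract. */
    printf("packed:         size=%zu align=%zu\n",
           sizeof(struct hdr_packed), alignof(struct hdr_packed));
    printf("packed+aligned: size=%zu align=%zu\n",
           sizeof(struct hdr_packed_aligned),
           alignof(struct hdr_packed_aligned));
    return 0;
}
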
37624 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
37625 index d2378765dc15..ced076ba560e 100644
37626 --- a/drivers/md/raid1.c
37627 +++ b/drivers/md/raid1.c
37628 @@ -478,6 +478,8 @@ static void raid1_end_write_request(struct bio *bio)
37629                 if (!test_bit(Faulty, &rdev->flags))
37630                         set_bit(R1BIO_WriteError, &r1_bio->state);
37631                 else {
37632 +                       /* Fail the request */
37633 +                       set_bit(R1BIO_Degraded, &r1_bio->state);
37634                         /* Finished with this branch */
37635                         r1_bio->bios[mirror] = NULL;
37636                         to_put = bio;
37637 diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
37638 index f2d13b71416c..e50fa0ff7c5d 100644
37639 --- a/drivers/media/common/saa7146/saa7146_core.c
37640 +++ b/drivers/media/common/saa7146/saa7146_core.c
37641 @@ -253,7 +253,7 @@ int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt
37642                          i, sg_dma_address(list), sg_dma_len(list),
37643                          list->offset);
37644  */
37645 -               for (p = 0; p * 4096 < list->length; p++, ptr++) {
37646 +               for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++) {
37647                         *ptr = cpu_to_le32(sg_dma_address(list) + p * 4096);
37648                         nr_pages++;
37649                 }
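
The loop bound changes from list->length to sg_dma_len(list) because, after DMA mapping, an IOMMU may coalesce scatterlist entries: the mapped length of an element can differ from the CPU-side length, and the page-table fill has to cover what was actually mapped. The same correction recurs in the saa7146_video and saa7134 hunks below. A toy model carrying both lengths (struct sg_entry and fill_pt() are invented):

#include <stdio.h>

/* cpu_len is what was submitted, dma_len is what the (hypothetical)
 * IOMMU mapped -- they differ when neighbouring buffers are merged. */
struct sg_entry {
    unsigned long dma_addr;
    unsigned cpu_len;
    unsigned dma_len;
};

static unsigned fill_pt(const struct sg_entry *sg, int use_dma_len)
{
    unsigned len = use_dma_len ? sg->dma_len : sg->cpu_len;
    unsigned pages = 0;

    for (unsigned p = 0; p * 4096 < len; p++)
        pages++; /* one page-table entry per 4 KiB */
    return pages;
}

int main(void)
{
    struct sg_entry sg = { 0x100000, 4096, 16384 }; /* merged by IOMMU */

    printf("entries from ->length: %u (too few)\n", fill_pt(&sg, 0));
    printf("entries from dma len : %u\n", fill_pt(&sg, 1));
    return 0;
}
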
37650 diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
37651 index 7b8795eca589..66215d9106a4 100644
37652 --- a/drivers/media/common/saa7146/saa7146_video.c
37653 +++ b/drivers/media/common/saa7146/saa7146_video.c
37654 @@ -247,9 +247,8 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu
37656                 /* walk all pages, copy all page addresses to ptr1 */
37657                 for (i = 0; i < length; i++, list++) {
37658 -                       for (p = 0; p * 4096 < list->length; p++, ptr1++) {
37659 +                       for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr1++)
37660                                 *ptr1 = cpu_to_le32(sg_dma_address(list) - list->offset);
37661 -                       }
37662                 }
37663  /*
37664                 ptr1 = pt1->cpu;
37665 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
37666 index 5ff7bedee247..3862ddc86ec4 100644
37667 --- a/drivers/media/dvb-core/dvbdev.c
37668 +++ b/drivers/media/dvb-core/dvbdev.c
37669 @@ -241,6 +241,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev)
37671         if (dvbdev->adapter->conn) {
37672                 media_device_unregister_entity(dvbdev->adapter->conn);
37673 +               kfree(dvbdev->adapter->conn);
37674                 dvbdev->adapter->conn = NULL;
37675                 kfree(dvbdev->adapter->conn_pads);
37676                 dvbdev->adapter->conn_pads = NULL;
37677 diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
37678 index cfa4cdde99d8..02e8aa11e36e 100644
37679 --- a/drivers/media/dvb-frontends/m88ds3103.c
37680 +++ b/drivers/media/dvb-frontends/m88ds3103.c
37681 @@ -1904,8 +1904,8 @@ static int m88ds3103_probe(struct i2c_client *client,
37683                 dev->dt_client = i2c_new_dummy_device(client->adapter,
37684                                                       dev->dt_addr);
37685 -               if (!dev->dt_client) {
37686 -                       ret = -ENODEV;
37687 +               if (IS_ERR(dev->dt_client)) {
37688 +                       ret = PTR_ERR(dev->dt_client);
37689                         goto err_kfree;
37690                 }
37691         }
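
i2c_new_dummy_device() reports failure as an ERR_PTR-encoded errno rather than NULL, so the old NULL test could never fire; the hunk switches to IS_ERR()/PTR_ERR(). A userspace model of the convention (the helpers below imitate the kernel's, which reserves the top MAX_ERRNO = 4095 addresses for error codes):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *new_dummy_device(int fail)
{
    return fail ? ERR_PTR(-ENODEV) : (void *)(uintptr_t)0x1000;
}

int main(void)
{
    void *client = new_dummy_device(1);

    if (!client)
        puts("NULL check: never triggers"); /* the old bug */
    if (IS_ERR(client))
        printf("IS_ERR check: errno %ld\n", PTR_ERR(client));
    return 0;
}
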
37692 diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
37693 index a3161d709015..ab7883cff8b2 100644
37694 --- a/drivers/media/i2c/adv7511-v4l2.c
37695 +++ b/drivers/media/i2c/adv7511-v4l2.c
37696 @@ -1964,7 +1964,7 @@ static int adv7511_remove(struct i2c_client *client)
37698         adv7511_set_isr(sd, false);
37699         adv7511_init_setup(sd);
37700 -       cancel_delayed_work(&state->edid_handler);
37701 +       cancel_delayed_work_sync(&state->edid_handler);
37702         i2c_unregister_device(state->i2c_edid);
37703         i2c_unregister_device(state->i2c_cec);
37704         i2c_unregister_device(state->i2c_pktmem);
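
This and the following adv7604/adv7842/tc358743/tda1997x hunks all make the same fix: cancel_delayed_work() only dequeues a pending item, so a handler that is already executing keeps running while remove() tears down the state it touches; the _sync variant also waits for a running handler to finish. A pthread analogy of "signal stop, then wait before freeing" (the worker and state names are illustrative, not the driver's code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct state { int busy; };

static void *worker(void *arg)
{
    struct state *s = arg;

    usleep(10000); /* the "handler" is mid-flight */
    s->busy = 0;   /* use-after-free if s was already freed */
    return NULL;
}

int main(void)
{
    struct state *s = malloc(sizeof(*s));
    pthread_t t;

    s->busy = 1;
    pthread_create(&t, NULL, worker, s);

    /* The _sync fix corresponds to joining before freeing: wait
     * until the in-flight handler can no longer touch s. */
    pthread_join(t, NULL);
    free(s); /* now safe: nothing can still run */
    puts("teardown done");
    return 0;
}
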
37705 diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
37706 index 09004d928d11..d1f58795794f 100644
37707 --- a/drivers/media/i2c/adv7604.c
37708 +++ b/drivers/media/i2c/adv7604.c
37709 @@ -3616,7 +3616,7 @@ static int adv76xx_remove(struct i2c_client *client)
37710         io_write(sd, 0x6e, 0);
37711         io_write(sd, 0x73, 0);
37713 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
37714 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
37715         v4l2_async_unregister_subdev(sd);
37716         media_entity_cleanup(&sd->entity);
37717         adv76xx_unregister_clients(to_state(sd));
37718 diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
37719 index 0855f648416d..f7d2b6cd3008 100644
37720 --- a/drivers/media/i2c/adv7842.c
37721 +++ b/drivers/media/i2c/adv7842.c
37722 @@ -3586,7 +3586,7 @@ static int adv7842_remove(struct i2c_client *client)
37723         struct adv7842_state *state = to_state(sd);
37725         adv7842_irq_enable(sd, false);
37726 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
37727 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
37728         v4l2_device_unregister_subdev(sd);
37729         media_entity_cleanup(&sd->entity);
37730         adv7842_unregister_clients(sd);
37731 diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
37732 index 15afbb4f5b31..4505594996bd 100644
37733 --- a/drivers/media/i2c/ccs/ccs-core.c
37734 +++ b/drivers/media/i2c/ccs/ccs-core.c
37735 @@ -3522,11 +3522,11 @@ static int ccs_probe(struct i2c_client *client)
37736         sensor->pll.scale_n = CCS_LIM(sensor, SCALER_N_MIN);
37738         ccs_create_subdev(sensor, sensor->scaler, " scaler", 2,
37739 -                         MEDIA_ENT_F_CAM_SENSOR);
37740 +                         MEDIA_ENT_F_PROC_VIDEO_SCALER);
37741         ccs_create_subdev(sensor, sensor->binner, " binner", 2,
37742                           MEDIA_ENT_F_PROC_VIDEO_SCALER);
37743         ccs_create_subdev(sensor, sensor->pixel_array, " pixel_array", 1,
37744 -                         MEDIA_ENT_F_PROC_VIDEO_SCALER);
37745 +                         MEDIA_ENT_F_CAM_SENSOR);
37747         rval = ccs_init_controls(sensor);
37748         if (rval < 0)
37749 diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
37750 index 6e3382b85a90..49ba39418360 100644
37751 --- a/drivers/media/i2c/imx219.c
37752 +++ b/drivers/media/i2c/imx219.c
37753 @@ -1035,29 +1035,47 @@ static int imx219_start_streaming(struct imx219 *imx219)
37754         const struct imx219_reg_list *reg_list;
37755         int ret;
37757 +       ret = pm_runtime_get_sync(&client->dev);
37758 +       if (ret < 0) {
37759 +               pm_runtime_put_noidle(&client->dev);
37760 +               return ret;
37761 +       }
37763         /* Apply default values of current mode */
37764         reg_list = &imx219->mode->reg_list;
37765         ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
37766         if (ret) {
37767                 dev_err(&client->dev, "%s failed to set mode\n", __func__);
37768 -               return ret;
37769 +               goto err_rpm_put;
37770         }
37772         ret = imx219_set_framefmt(imx219);
37773         if (ret) {
37774                 dev_err(&client->dev, "%s failed to set frame format: %d\n",
37775                         __func__, ret);
37776 -               return ret;
37777 +               goto err_rpm_put;
37778         }
37780         /* Apply customized values from user */
37781         ret =  __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
37782         if (ret)
37783 -               return ret;
37784 +               goto err_rpm_put;
37786         /* set stream on register */
37787 -       return imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
37788 -                               IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
37789 +       ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
37790 +                              IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
37791 +       if (ret)
37792 +               goto err_rpm_put;
37794 +       /* vflip and hflip cannot change during streaming */
37795 +       __v4l2_ctrl_grab(imx219->vflip, true);
37796 +       __v4l2_ctrl_grab(imx219->hflip, true);
37798 +       return 0;
37800 +err_rpm_put:
37801 +       pm_runtime_put(&client->dev);
37802 +       return ret;
37805  static void imx219_stop_streaming(struct imx219 *imx219)
37806 @@ -1070,12 +1088,16 @@ static void imx219_stop_streaming(struct imx219 *imx219)
37807                                IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
37808         if (ret)
37809                 dev_err(&client->dev, "%s failed to set stream\n", __func__);
37811 +       __v4l2_ctrl_grab(imx219->vflip, false);
37812 +       __v4l2_ctrl_grab(imx219->hflip, false);
37814 +       pm_runtime_put(&client->dev);
37817  static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
37819         struct imx219 *imx219 = to_imx219(sd);
37820 -       struct i2c_client *client = v4l2_get_subdevdata(sd);
37821         int ret = 0;
37823         mutex_lock(&imx219->mutex);
37824 @@ -1085,36 +1107,23 @@ static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
37825         }
37827         if (enable) {
37828 -               ret = pm_runtime_get_sync(&client->dev);
37829 -               if (ret < 0) {
37830 -                       pm_runtime_put_noidle(&client->dev);
37831 -                       goto err_unlock;
37832 -               }
37834                 /*
37835                  * Apply default & customized values
37836                  * and then start streaming.
37837                  */
37838                 ret = imx219_start_streaming(imx219);
37839                 if (ret)
37840 -                       goto err_rpm_put;
37841 +                       goto err_unlock;
37842         } else {
37843                 imx219_stop_streaming(imx219);
37844 -               pm_runtime_put(&client->dev);
37845         }
37847         imx219->streaming = enable;
37849 -       /* vflip and hflip cannot change during streaming */
37850 -       __v4l2_ctrl_grab(imx219->vflip, enable);
37851 -       __v4l2_ctrl_grab(imx219->hflip, enable);
37853         mutex_unlock(&imx219->mutex);
37855         return ret;
37857 -err_rpm_put:
37858 -       pm_runtime_put(&client->dev);
37859  err_unlock:
37860         mutex_unlock(&imx219->mutex);
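
The imx219 rework above moves the runtime-PM reference into the start/stop streaming pair so every exit from imx219_start_streaming() balances the get it performed (note pm_runtime_get_sync() raises the usage count even when it fails, hence the put in the error branch), and imx219_set_stream() no longer mirrors the bookkeeping. A compact model of the balanced shape (rpm_get_sync()/rpm_put() are simplified stand-ins that fold the kernel's put/put_noidle distinction into one helper):

#include <errno.h>
#include <stdio.h>

static int usage_count;

/* Models pm_runtime_get_sync(): bumps the count even when it fails. */
static int rpm_get_sync(int fail) { usage_count++; return fail ? -EIO : 0; }
static void rpm_put(void) { usage_count--; }

static int start_streaming(int fail_get, int fail_later)
{
    int ret = rpm_get_sync(fail_get);

    if (ret < 0) {
        rpm_put(); /* drop the count the failed get took */
        return ret;
    }
    if (fail_later) {
        ret = -EINVAL;
        goto err_rpm_put;
    }
    return 0; /* stop_streaming() will do the matching put */

err_rpm_put:
    rpm_put();
    return ret;
}

static void stop_streaming(void) { rpm_put(); }

int main(void)
{
    if (!start_streaming(0, 0))
        stop_streaming();
    start_streaming(0, 1); /* late failure path */
    start_streaming(1, 0); /* failed get        */
    printf("usage_count=%d (balanced)\n", usage_count);
    return 0;
}
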
37862 diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
37863 index 39530d43590e..a7caf2eb5771 100644
37864 --- a/drivers/media/i2c/msp3400-driver.c
37865 +++ b/drivers/media/i2c/msp3400-driver.c
37866 @@ -170,7 +170,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
37867                         break;
37868                 dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
37869                        dev, addr);
37870 -               schedule_timeout_interruptible(msecs_to_jiffies(10));
37871 +               schedule_msec_hrtimeout_interruptible((10));
37872         }
37873         if (err == 3) {
37874                 dev_warn(&client->dev, "resetting chip, sound will go off.\n");
37875 @@ -211,7 +211,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
37876                         break;
37877                 dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
37878                        dev, addr);
37879 -               schedule_timeout_interruptible(msecs_to_jiffies(10));
37880 +               schedule_msec_hrtimeout_interruptible((10));
37881         }
37882         if (err == 3) {
37883                 dev_warn(&client->dev, "resetting chip, sound will go off.\n");
37884 diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
37885 index dcc21515e5a4..179d107f494c 100644
37886 --- a/drivers/media/i2c/rdacm21.c
37887 +++ b/drivers/media/i2c/rdacm21.c
37888 @@ -345,7 +345,7 @@ static int ov10640_initialize(struct rdacm21_device *dev)
37889         /* Read OV10640 ID to test communications. */
37890         ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR, OV490_SCCB_SLAVE_READ);
37891         ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH, OV10640_CHIP_ID >> 8);
37892 -       ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, (u8)OV10640_CHIP_ID);
37893 +       ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, OV10640_CHIP_ID & 0xff);
37895         /* Trigger SCCB slave transaction and give it some time to complete. */
37896         ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
37897 diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
37898 index 831b5b54fd78..1b309bb743c7 100644
37899 --- a/drivers/media/i2c/tc358743.c
37900 +++ b/drivers/media/i2c/tc358743.c
37901 @@ -2193,7 +2193,7 @@ static int tc358743_remove(struct i2c_client *client)
37902                 del_timer_sync(&state->timer);
37903                 flush_work(&state->work_i2c_poll);
37904         }
37905 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
37906 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
37907         cec_unregister_adapter(state->cec_adap);
37908         v4l2_async_unregister_subdev(sd);
37909         v4l2_device_unregister_subdev(sd);
37910 diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
37911 index a09bf0a39d05..89bb7e6dc7a4 100644
37912 --- a/drivers/media/i2c/tda1997x.c
37913 +++ b/drivers/media/i2c/tda1997x.c
37914 @@ -2804,7 +2804,7 @@ static int tda1997x_remove(struct i2c_client *client)
37915         media_entity_cleanup(&sd->entity);
37916         v4l2_ctrl_handler_free(&state->hdl);
37917         regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies);
37918 -       cancel_delayed_work(&state->delayed_work_enable_hpd);
37919 +       cancel_delayed_work_sync(&state->delayed_work_enable_hpd);
37920         mutex_destroy(&state->page_lock);
37921         mutex_destroy(&state->lock);
37923 diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
37924 index cf7cfda94107..f63e17489547 100644
37925 --- a/drivers/media/pci/cx18/cx18-gpio.c
37926 +++ b/drivers/media/pci/cx18/cx18-gpio.c
37927 @@ -81,11 +81,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
37929         /* Assert */
37930         gpio_update(cx, mask, ~active_lo);
37931 -       schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
37932 +       schedule_msec_hrtimeout_uninterruptible((assert_msecs));
37934         /* Deassert */
37935         gpio_update(cx, mask, ~active_hi);
37936 -       schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
37937 +       schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
37940  /*
37941 diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
37942 index 22f55a7840a6..d0ca260ecf70 100644
37943 --- a/drivers/media/pci/cx23885/cx23885-core.c
37944 +++ b/drivers/media/pci/cx23885/cx23885-core.c
37945 @@ -2077,6 +2077,15 @@ static struct {
37946          * 0x1423 is the PCI ID for the IOMMU found on Kaveri
37947          */
37948         { PCI_VENDOR_ID_AMD, 0x1423 },
37949 +       /* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
37950 +        */
37951 +       { PCI_VENDOR_ID_AMD, 0x1481 },
37952 +       /* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
37953 +        */
37954 +       { PCI_VENDOR_ID_AMD, 0x1419 },
37955 +       /* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
37956 +        */
37957 +       { PCI_VENDOR_ID_ATI, 0x5a23 },
37958  };
37960  static bool cx23885_does_need_dma_reset(void)
37961 diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
37962 index 6e8c0c230e11..fecef85bd62e 100644
37963 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
37964 +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
37965 @@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
37966         if (!q->sensor)
37967                 return -ENODEV;
37969 -       freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
37970 +       freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
37971         if (freq < 0) {
37972                 dev_err(dev, "error %lld, invalid link_freq\n", freq);
37973                 return freq;
37974 diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
37975 index 856e7ab7f33e..766a26251337 100644
37976 --- a/drivers/media/pci/ivtv/ivtv-gpio.c
37977 +++ b/drivers/media/pci/ivtv/ivtv-gpio.c
37978 @@ -105,7 +105,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
37979         curout = (curout & ~0xF) | 1;
37980         write_reg(curout, IVTV_REG_GPIO_OUT);
37981         /* We could use something else for smaller time */
37982 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
37983 +       schedule_msec_hrtimeout_interruptible((1));
37984         curout |= 2;
37985         write_reg(curout, IVTV_REG_GPIO_OUT);
37986         curdir &= ~0x80;
37987 @@ -125,11 +125,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
37988         curout = read_reg(IVTV_REG_GPIO_OUT);
37989         curout &= ~(1 << itv->card->xceive_pin);
37990         write_reg(curout, IVTV_REG_GPIO_OUT);
37991 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
37992 +       schedule_msec_hrtimeout_interruptible((1));
37994         curout |= 1 << itv->card->xceive_pin;
37995         write_reg(curout, IVTV_REG_GPIO_OUT);
37996 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
37997 +       schedule_msec_hrtimeout_interruptible((1));
37998         return 0;
38001 diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
38002 index 35dccb31174c..8181cd65e876 100644
38003 --- a/drivers/media/pci/ivtv/ivtv-ioctl.c
38004 +++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
38005 @@ -1139,7 +1139,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
38006                                 TASK_UNINTERRUPTIBLE);
38007                 if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
38008                         break;
38009 -               schedule_timeout(msecs_to_jiffies(25));
38010 +               schedule_msec_hrtimeout((25));
38011         }
38012         finish_wait(&itv->vsync_waitq, &wait);
38013         mutex_lock(&itv->serialize_lock);
38014 diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
38015 index f04ee84bab5f..c4469b4b8f99 100644
38016 --- a/drivers/media/pci/ivtv/ivtv-streams.c
38017 +++ b/drivers/media/pci/ivtv/ivtv-streams.c
38018 @@ -849,7 +849,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
38019                         while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
38020                                 time_before(jiffies,
38021                                             then + msecs_to_jiffies(2000))) {
38022 -                               schedule_timeout(msecs_to_jiffies(10));
38023 +                               schedule_msec_hrtimeout((10));
38024                         }
38026                         /* To convert jiffies to ms, we must multiply by 1000
38027 diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
38028 index 391572a6ec76..efb757d5168a 100644
38029 --- a/drivers/media/pci/saa7134/saa7134-core.c
38030 +++ b/drivers/media/pci/saa7134/saa7134-core.c
38031 @@ -243,7 +243,7 @@ int saa7134_pgtable_build(struct pci_dev *pci, struct saa7134_pgtable *pt,
38033         ptr = pt->cpu + startpage;
38034         for (i = 0; i < length; i++, list = sg_next(list)) {
38035 -               for (p = 0; p * 4096 < list->length; p++, ptr++)
38036 +               for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++)
38037                         *ptr = cpu_to_le32(sg_dma_address(list) +
38038                                                 list->offset + p * 4096);
38039         }
38040 diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
38041 index 11e1eb6a6809..1d1d32e043f1 100644
38042 --- a/drivers/media/pci/saa7164/saa7164-encoder.c
38043 +++ b/drivers/media/pci/saa7164/saa7164-encoder.c
38044 @@ -1008,7 +1008,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
38045                 printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
38046                         __func__, result);
38047                 result = -ENOMEM;
38048 -               goto failed;
38049 +               goto fail_pci;
38050         }
38052         /* Establish encoder defaults here */
38053 @@ -1062,7 +1062,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
38054                           100000, ENCODER_DEF_BITRATE);
38055         if (hdl->error) {
38056                 result = hdl->error;
38057 -               goto failed;
38058 +               goto fail_hdl;
38059         }
38061         port->std = V4L2_STD_NTSC_M;
38062 @@ -1080,7 +1080,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
38063                 printk(KERN_INFO "%s: can't allocate mpeg device\n",
38064                         dev->name);
38065                 result = -ENOMEM;
38066 -               goto failed;
38067 +               goto fail_hdl;
38068         }
38070         port->v4l_device->ctrl_handler = hdl;
38071 @@ -1091,10 +1091,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
38072         if (result < 0) {
38073                 printk(KERN_INFO "%s: can't register mpeg device\n",
38074                         dev->name);
38075 -               /* TODO: We're going to leak here if we don't dealloc
38076 -                The buffers above. The unreg function can't deal wit it.
38077 -               */
38078 -               goto failed;
38079 +               goto fail_reg;
38080         }
38082         printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
38083 @@ -1116,9 +1113,14 @@ int saa7164_encoder_register(struct saa7164_port *port)
38085         saa7164_api_set_encoder(port);
38086         saa7164_api_get_encoder(port);
38087 +       return 0;
38089 -       result = 0;
38090 -failed:
38091 +fail_reg:
38092 +       video_device_release(port->v4l_device);
38093 +       port->v4l_device = NULL;
38094 +fail_hdl:
38095 +       v4l2_ctrl_handler_free(hdl);
38096 +fail_pci:
38097         return result;
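
The saa7164 hunks replace one catch-all failed: label, which the removed TODO admits leaked the allocated video device and control handler, with a cascade of labels unwound in reverse order of acquisition. The canonical shape (get_a/get_b/get_c stand in for the PCI-config, handler and device-registration steps):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *get_a(void) { return malloc(1); }
static void *get_b(void) { return malloc(1); }
static void *get_c(void) { return NULL; } /* force a failure */

static int register_all(void)
{
    int result = 0;
    void *a, *b, *c;

    a = get_a();
    if (!a) { result = -ENOMEM; goto fail_a; }
    b = get_b();
    if (!b) { result = -ENOMEM; goto fail_b; }
    c = get_c();
    if (!c) { result = -ENOMEM; goto fail_c; }

    return 0; /* success keeps everything */

fail_c: /* unwind strictly in reverse order of acquisition */
    free(b);
fail_b:
    free(a);
fail_a:
    return result;
}

int main(void)
{
    printf("register_all() = %d\n", register_all());
    return 0;
}
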
38100 diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
38101 index 4dd98f94a91e..27bb78513631 100644
38102 --- a/drivers/media/pci/sta2x11/Kconfig
38103 +++ b/drivers/media/pci/sta2x11/Kconfig
38104 @@ -3,6 +3,7 @@ config STA2X11_VIP
38105         tristate "STA2X11 VIP Video For Linux"
38106         depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS && I2C
38107         depends on STA2X11 || COMPILE_TEST
38108 +       select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
38109         select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
38110         select VIDEOBUF2_DMA_CONTIG
38111         select MEDIA_CONTROLLER
38112 diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
38113 index fd1831e97b22..1ddb5d6354cf 100644
38114 --- a/drivers/media/platform/Kconfig
38115 +++ b/drivers/media/platform/Kconfig
38116 @@ -244,6 +244,7 @@ config VIDEO_MEDIATEK_JPEG
38117         depends on MTK_IOMMU_V1 || MTK_IOMMU || COMPILE_TEST
38118         depends on VIDEO_DEV && VIDEO_V4L2
38119         depends on ARCH_MEDIATEK || COMPILE_TEST
38120 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
38121         select VIDEOBUF2_DMA_CONTIG
38122         select V4L2_MEM2MEM_DEV
38123         help
38124 @@ -271,6 +272,7 @@ config VIDEO_MEDIATEK_MDP
38125         depends on MTK_IOMMU || COMPILE_TEST
38126         depends on VIDEO_DEV && VIDEO_V4L2
38127         depends on ARCH_MEDIATEK || COMPILE_TEST
38128 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
38129         select VIDEOBUF2_DMA_CONTIG
38130         select V4L2_MEM2MEM_DEV
38131         select VIDEO_MEDIATEK_VPU
38132 @@ -291,6 +293,7 @@ config VIDEO_MEDIATEK_VCODEC
38133         # our dependencies, to avoid missing symbols during link.
38134         depends on VIDEO_MEDIATEK_VPU || !VIDEO_MEDIATEK_VPU
38135         depends on MTK_SCP || !MTK_SCP
38136 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
38137         select VIDEOBUF2_DMA_CONTIG
38138         select V4L2_MEM2MEM_DEV
38139         select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
38140 diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
38141 index f2c4dadd6a0e..7bb6babdcade 100644
38142 --- a/drivers/media/platform/aspeed-video.c
38143 +++ b/drivers/media/platform/aspeed-video.c
38144 @@ -514,8 +514,8 @@ static void aspeed_video_off(struct aspeed_video *video)
38145         aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff);
38147         /* Turn off the relevant clocks */
38148 -       clk_disable(video->vclk);
38149         clk_disable(video->eclk);
38150 +       clk_disable(video->vclk);
38152         clear_bit(VIDEO_CLOCKS_ON, &video->flags);
38154 @@ -526,8 +526,8 @@ static void aspeed_video_on(struct aspeed_video *video)
38155                 return;
38157         /* Turn on the relevant clocks */
38158 -       clk_enable(video->eclk);
38159         clk_enable(video->vclk);
38160 +       clk_enable(video->eclk);
38162         set_bit(VIDEO_CLOCKS_ON, &video->flags);
38164 @@ -1719,8 +1719,11 @@ static int aspeed_video_probe(struct platform_device *pdev)
38165                 return rc;
38167         rc = aspeed_video_setup_video(video);
38168 -       if (rc)
38169 +       if (rc) {
38170 +               clk_unprepare(video->vclk);
38171 +               clk_unprepare(video->eclk);
38172                 return rc;
38173 +       }
38175         return 0;
38177 diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
38178 index 995e95272e51..e600764dce96 100644
38179 --- a/drivers/media/platform/coda/coda-common.c
38180 +++ b/drivers/media/platform/coda/coda-common.c
38181 @@ -2062,7 +2062,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
38182         if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
38183                 ctx->params.gop_size = 1;
38184         ctx->gopcounter = ctx->params.gop_size - 1;
38185 -       v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
38186 +       /* Only decoders have this control */
38187 +       if (ctx->mb_err_cnt_ctrl)
38188 +               v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
38190         ret = ctx->ops->start_streaming(ctx);
38191         if (ctx->inst_type == CODA_INST_DECODER) {
38192 diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c
38193 index 153612ca96fc..a1393fefa8ae 100644
38194 --- a/drivers/media/platform/meson/ge2d/ge2d.c
38195 +++ b/drivers/media/platform/meson/ge2d/ge2d.c
38196 @@ -757,7 +757,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
38198                 if (ctrl->val == 90) {
38199                         ctx->hflip = 0;
38200 -                       ctx->vflip = 0;
38201 +                       ctx->vflip = 1;
38202                         ctx->xy_swap = 1;
38203                 } else if (ctrl->val == 180) {
38204                         ctx->hflip = 1;
38205 @@ -765,7 +765,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
38206                         ctx->xy_swap = 0;
38207                 } else if (ctrl->val == 270) {
38208                         ctx->hflip = 1;
38209 -                       ctx->vflip = 1;
38210 +                       ctx->vflip = 0;
38211                         ctx->xy_swap = 1;
38212                 } else {
38213                         ctx->hflip = 0;
38214 diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
38215 index f9896c121fd8..ae374bb2a48f 100644
38216 --- a/drivers/media/platform/qcom/venus/core.c
38217 +++ b/drivers/media/platform/qcom/venus/core.c
38218 @@ -218,18 +218,17 @@ static int venus_probe(struct platform_device *pdev)
38219                 return -ENOMEM;
38221         core->dev = dev;
38222 -       platform_set_drvdata(pdev, core);
38224         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
38225         core->base = devm_ioremap_resource(dev, r);
38226         if (IS_ERR(core->base))
38227                 return PTR_ERR(core->base);
38229 -       core->video_path = of_icc_get(dev, "video-mem");
38230 +       core->video_path = devm_of_icc_get(dev, "video-mem");
38231         if (IS_ERR(core->video_path))
38232                 return PTR_ERR(core->video_path);
38234 -       core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
38235 +       core->cpucfg_path = devm_of_icc_get(dev, "cpu-cfg");
38236         if (IS_ERR(core->cpucfg_path))
38237                 return PTR_ERR(core->cpucfg_path);
38239 @@ -248,7 +247,7 @@ static int venus_probe(struct platform_device *pdev)
38240                 return -ENODEV;
38242         if (core->pm_ops->core_get) {
38243 -               ret = core->pm_ops->core_get(dev);
38244 +               ret = core->pm_ops->core_get(core);
38245                 if (ret)
38246                         return ret;
38247         }
38248 @@ -273,6 +272,12 @@ static int venus_probe(struct platform_device *pdev)
38249         if (ret)
38250                 goto err_core_put;
38252 +       ret = v4l2_device_register(dev, &core->v4l2_dev);
38253 +       if (ret)
38254 +               goto err_core_deinit;
38256 +       platform_set_drvdata(pdev, core);
38258         pm_runtime_enable(dev);
38260         ret = pm_runtime_get_sync(dev);
38261 @@ -307,10 +312,6 @@ static int venus_probe(struct platform_device *pdev)
38262         if (ret)
38263                 goto err_venus_shutdown;
38265 -       ret = v4l2_device_register(dev, &core->v4l2_dev);
38266 -       if (ret)
38267 -               goto err_core_deinit;
38269         ret = pm_runtime_put_sync(dev);
38270         if (ret) {
38271                 pm_runtime_get_noresume(dev);
38272 @@ -323,8 +324,6 @@ static int venus_probe(struct platform_device *pdev)
38274  err_dev_unregister:
38275         v4l2_device_unregister(&core->v4l2_dev);
38276 -err_core_deinit:
38277 -       hfi_core_deinit(core, false);
38278  err_venus_shutdown:
38279         venus_shutdown(core);
38280  err_runtime_disable:
38281 @@ -332,9 +331,11 @@ static int venus_probe(struct platform_device *pdev)
38282         pm_runtime_set_suspended(dev);
38283         pm_runtime_disable(dev);
38284         hfi_destroy(core);
38285 +err_core_deinit:
38286 +       hfi_core_deinit(core, false);
38287  err_core_put:
38288         if (core->pm_ops->core_put)
38289 -               core->pm_ops->core_put(dev);
38290 +               core->pm_ops->core_put(core);
38291         return ret;
38294 @@ -360,14 +361,14 @@ static int venus_remove(struct platform_device *pdev)
38295         pm_runtime_disable(dev);
38297         if (pm_ops->core_put)
38298 -               pm_ops->core_put(dev);
38299 +               pm_ops->core_put(core);
38301 -       hfi_destroy(core);
38302 +       v4l2_device_unregister(&core->v4l2_dev);
38304 -       icc_put(core->video_path);
38305 -       icc_put(core->cpucfg_path);
38306 +       hfi_destroy(core);
38308         v4l2_device_unregister(&core->v4l2_dev);
38310         mutex_destroy(&core->pm_lock);
38311         mutex_destroy(&core->lock);
38312         venus_dbgfs_deinit(core);
38313 @@ -396,7 +397,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
38314                 return ret;
38316         if (pm_ops->core_power) {
38317 -               ret = pm_ops->core_power(dev, POWER_OFF);
38318 +               ret = pm_ops->core_power(core, POWER_OFF);
38319                 if (ret)
38320                         return ret;
38321         }
38322 @@ -414,7 +415,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
38323  err_video_path:
38324         icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0);
38325  err_cpucfg_path:
38326 -       pm_ops->core_power(dev, POWER_ON);
38327 +       pm_ops->core_power(core, POWER_ON);
38329         return ret;
38331 @@ -434,7 +435,7 @@ static __maybe_unused int venus_runtime_resume(struct device *dev)
38332                 return ret;
38334         if (pm_ops->core_power) {
38335 -               ret = pm_ops->core_power(dev, POWER_ON);
38336 +               ret = pm_ops->core_power(core, POWER_ON);
38337                 if (ret)
38338                         return ret;
38339         }
38340 diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
38341 index 4f7565834469..558510a8dfc8 100644
38342 --- a/drivers/media/platform/qcom/venus/hfi_cmds.c
38343 +++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
38344 @@ -1039,6 +1039,18 @@ static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
38345                 pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hierp);
38346                 break;
38347         }
38348 +       case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
38349 +               struct hfi_uncompressed_plane_actual_info *in = pdata;
38350 +               struct hfi_uncompressed_plane_actual_info *info = prop_data;
38352 +               info->buffer_type = in->buffer_type;
38353 +               info->num_planes = in->num_planes;
38354 +               info->plane_format[0] = in->plane_format[0];
38355 +               if (in->num_planes > 1)
38356 +                       info->plane_format[1] = in->plane_format[1];
38357 +               pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
38358 +               break;
38359 +       }
38361         /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
38362         case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
38363 @@ -1205,18 +1217,6 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
38364                 pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
38365                 break;
38366         }
38367 -       case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
38368 -               struct hfi_uncompressed_plane_actual_info *in = pdata;
38369 -               struct hfi_uncompressed_plane_actual_info *info = prop_data;
38371 -               info->buffer_type = in->buffer_type;
38372 -               info->num_planes = in->num_planes;
38373 -               info->plane_format[0] = in->plane_format[0];
38374 -               if (in->num_planes > 1)
38375 -                       info->plane_format[1] = in->plane_format[1];
38376 -               pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
38377 -               break;
38378 -       }
38379         case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
38380         case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
38381         case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE:
38382 diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
38383 index 7263c0c32695..5b8389b98299 100644
38384 --- a/drivers/media/platform/qcom/venus/hfi_parser.c
38385 +++ b/drivers/media/platform/qcom/venus/hfi_parser.c
38386 @@ -235,13 +235,13 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
38387         u32 enc_codecs, dec_codecs, count = 0;
38388         unsigned int entries;
38390 -       if (inst)
38391 -               return 0;
38393         plat = hfi_platform_get(core->res->hfi_version);
38394         if (!plat)
38395                 return -EINVAL;
38397 +       if (inst)
38398 +               return 0;
38400         if (plat->codecs)
38401                 plat->codecs(&enc_codecs, &dec_codecs, &count);
38403 @@ -277,8 +277,10 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
38405         parser_init(inst, &codecs, &domain);
38407 -       core->codecs_count = 0;
38408 -       memset(core->caps, 0, sizeof(core->caps));
38409 +       if (core->res->hfi_version > HFI_VERSION_1XX) {
38410 +               core->codecs_count = 0;
38411 +               memset(core->caps, 0, sizeof(core->caps));
38412 +       }
38414         while (words_count) {
38415                 data = word + 1;
38416 diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
38417 index 43c4e3d9e281..95b4d40ff6a5 100644
38418 --- a/drivers/media/platform/qcom/venus/pm_helpers.c
38419 +++ b/drivers/media/platform/qcom/venus/pm_helpers.c
38420 @@ -277,16 +277,28 @@ static int load_scale_v1(struct venus_inst *inst)
38421         return 0;
38424 -static int core_get_v1(struct device *dev)
38425 +static int core_get_v1(struct venus_core *core)
38427 -       struct venus_core *core = dev_get_drvdata(dev);
38428 +       int ret;
38430 +       ret = core_clks_get(core);
38431 +       if (ret)
38432 +               return ret;
38434 -       return core_clks_get(core);
38435 +       core->opp_table = dev_pm_opp_set_clkname(core->dev, "core");
38436 +       if (IS_ERR(core->opp_table))
38437 +               return PTR_ERR(core->opp_table);
38439 +       return 0;
38442 -static int core_power_v1(struct device *dev, int on)
38443 +static void core_put_v1(struct venus_core *core)
38445 +       dev_pm_opp_put_clkname(core->opp_table);
38448 +static int core_power_v1(struct venus_core *core, int on)
38450 -       struct venus_core *core = dev_get_drvdata(dev);
38451         int ret = 0;
38453         if (on == POWER_ON)
38454 @@ -299,6 +311,7 @@ static int core_power_v1(struct device *dev, int on)
38456  static const struct venus_pm_ops pm_ops_v1 = {
38457         .core_get = core_get_v1,
38458 +       .core_put = core_put_v1,
38459         .core_power = core_power_v1,
38460         .load_scale = load_scale_v1,
38461  };
38462 @@ -371,6 +384,7 @@ static int venc_power_v3(struct device *dev, int on)
38464  static const struct venus_pm_ops pm_ops_v3 = {
38465         .core_get = core_get_v1,
38466 +       .core_put = core_put_v1,
38467         .core_power = core_power_v1,
38468         .vdec_get = vdec_get_v3,
38469         .vdec_power = vdec_power_v3,
38470 @@ -753,12 +767,12 @@ static int venc_power_v4(struct device *dev, int on)
38471         return ret;
38474 -static int vcodec_domains_get(struct device *dev)
38475 +static int vcodec_domains_get(struct venus_core *core)
38477         int ret;
38478         struct opp_table *opp_table;
38479         struct device **opp_virt_dev;
38480 -       struct venus_core *core = dev_get_drvdata(dev);
38481 +       struct device *dev = core->dev;
38482         const struct venus_resources *res = core->res;
38483         struct device *pd;
38484         unsigned int i;
38485 @@ -809,9 +823,8 @@ static int vcodec_domains_get(struct device *dev)
38486         return ret;
38489 -static void vcodec_domains_put(struct device *dev)
38490 +static void vcodec_domains_put(struct venus_core *core)
38492 -       struct venus_core *core = dev_get_drvdata(dev);
38493         const struct venus_resources *res = core->res;
38494         unsigned int i;
38496 @@ -834,9 +847,9 @@ static void vcodec_domains_put(struct device *dev)
38497         dev_pm_opp_detach_genpd(core->opp_table);
38500 -static int core_get_v4(struct device *dev)
38501 +static int core_get_v4(struct venus_core *core)
38503 -       struct venus_core *core = dev_get_drvdata(dev);
38504 +       struct device *dev = core->dev;
38505         const struct venus_resources *res = core->res;
38506         int ret;
38508 @@ -875,7 +888,7 @@ static int core_get_v4(struct device *dev)
38509                 }
38510         }
38512 -       ret = vcodec_domains_get(dev);
38513 +       ret = vcodec_domains_get(core);
38514         if (ret) {
38515                 if (core->has_opp_table)
38516                         dev_pm_opp_of_remove_table(dev);
38517 @@ -886,14 +899,14 @@ static int core_get_v4(struct device *dev)
38518         return 0;
38521 -static void core_put_v4(struct device *dev)
38522 +static void core_put_v4(struct venus_core *core)
38524 -       struct venus_core *core = dev_get_drvdata(dev);
38525 +       struct device *dev = core->dev;
38527         if (legacy_binding)
38528                 return;
38530 -       vcodec_domains_put(dev);
38531 +       vcodec_domains_put(core);
38533         if (core->has_opp_table)
38534                 dev_pm_opp_of_remove_table(dev);
38535 @@ -901,9 +914,9 @@ static void core_put_v4(struct device *dev)
38539 -static int core_power_v4(struct device *dev, int on)
38540 +static int core_power_v4(struct venus_core *core, int on)
38542 -       struct venus_core *core = dev_get_drvdata(dev);
38543 +       struct device *dev = core->dev;
38544         struct device *pmctrl = core->pmdomains[0];
38545         int ret = 0;
38547 diff --git a/drivers/media/platform/qcom/venus/pm_helpers.h b/drivers/media/platform/qcom/venus/pm_helpers.h
38548 index aa2f6afa2354..a492c50c5543 100644
38549 --- a/drivers/media/platform/qcom/venus/pm_helpers.h
38550 +++ b/drivers/media/platform/qcom/venus/pm_helpers.h
38551 @@ -4,14 +4,15 @@
38552  #define __VENUS_PM_HELPERS_H__
38554  struct device;
38555 +struct venus_core;
38557  #define POWER_ON       1
38558  #define POWER_OFF      0
38560  struct venus_pm_ops {
38561 -       int (*core_get)(struct device *dev);
38562 -       void (*core_put)(struct device *dev);
38563 -       int (*core_power)(struct device *dev, int on);
38564 +       int (*core_get)(struct venus_core *core);
38565 +       void (*core_put)(struct venus_core *core);
38566 +       int (*core_power)(struct venus_core *core, int on);
38568         int (*vdec_get)(struct device *dev);
38569         void (*vdec_put)(struct device *dev);
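
The venus series above rewires the venus_pm_ops core callbacks to take the venus_core itself instead of a struct device that every implementation converted straight back with dev_get_drvdata(); that also lets venus_probe() defer platform_set_drvdata() without breaking the callbacks. A sketch of the ops-take-context pattern (struct core and pm_ops here are illustrative, not the driver's types):

#include <stdio.h>

struct core { int id; };

/* Before: callbacks received an opaque handle and re-derived the
 * context.  After: the context itself is the parameter -- no
 * drvdata round-trip, no ordering dependency on when it is set. */
struct pm_ops {
    int (*core_get)(struct core *core);
    void (*core_put)(struct core *core);
};

static int core_get_v1(struct core *core)
{
    printf("get core %d\n", core->id);
    return 0;
}

static void core_put_v1(struct core *core)
{
    printf("put core %d\n", core->id);
}

static const struct pm_ops ops = { core_get_v1, core_put_v1 };

int main(void)
{
    struct core c = { 4 };

    if (!ops.core_get(&c))
        ops.core_put(&c);
    return 0;
}
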
38570 diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
38571 index a52b80055173..abef0037bf55 100644
38572 --- a/drivers/media/platform/qcom/venus/venc_ctrls.c
38573 +++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
38574 @@ -359,7 +359,7 @@ int venc_ctrl_init(struct venus_inst *inst)
38575                 V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
38576                 ~((1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
38577                 (1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)),
38578 -               V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
38579 +               V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
38581         v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
38582                 V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
38583 diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
38584 index 83bd9a412a56..1e3b68a8743a 100644
38585 --- a/drivers/media/platform/rcar_drif.c
38586 +++ b/drivers/media/platform/rcar_drif.c
38587 @@ -915,7 +915,6 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
38589         struct rcar_drif_sdr *sdr = video_drvdata(file);
38591 -       memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
38592         f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
38593         f->fmt.sdr.buffersize = sdr->fmt->buffersize;
38595 diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
38596 index 813670ed9577..79deed8adcea 100644
38597 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
38598 +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
38599 @@ -520,14 +520,15 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
38600                                    struct v4l2_mbus_framefmt *format,
38601                                    unsigned int which)
38603 -       const struct rkisp1_isp_mbus_info *mbus_info;
38604 -       struct v4l2_mbus_framefmt *src_fmt;
38605 +       const struct rkisp1_isp_mbus_info *sink_mbus_info;
38606 +       struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
38608 +       sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
38609         src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
38610 -       mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
38611 +       sink_mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
38613         /* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
38614 -       if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
38615 +       if (sink_mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
38616             rkisp1_rsz_get_yuv_mbus_info(format->code))
38617                 src_fmt->code = format->code;
38619 diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
38620 index 2b270093009c..a27f638df11c 100644
38621 --- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
38622 +++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
38623 @@ -480,7 +480,7 @@ static int regs_show(struct seq_file *s, void *data)
38624         int ret;
38625         unsigned int i;
38627 -       ret = pm_runtime_get_sync(bdisp->dev);
38628 +       ret = pm_runtime_resume_and_get(bdisp->dev);
38629         if (ret < 0) {
38630                 seq_puts(s, "Cannot wake up IP\n");
38631                 return 0;
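
The bdisp-debug hunk above, like the sun8i-di and arizona-irq hunks later in this patch, swaps pm_runtime_get_sync() for pm_runtime_resume_and_get(). The older helper bumps the usage counter even when the resume fails, so a bail-out path that never reaches pm_runtime_put() leaks a reference. A minimal sketch of the pattern, with a hypothetical foo_access_hw():

	#include <linux/pm_runtime.h>

	static int foo_access_hw(struct device *dev)
	{
		/* Unlike pm_runtime_get_sync(), this drops the usage
		 * count again if the resume fails. */
		int ret = pm_runtime_resume_and_get(dev);

		if (ret < 0)
			return ret;	/* no pm_runtime_put() needed */

		/* ... talk to the hardware ... */

		pm_runtime_put(dev);
		return 0;
	}
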
38632 diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
38633 index b55de9ab64d8..3181d0781b61 100644
38634 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
38635 +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
38636 @@ -151,8 +151,10 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
38637         }
38639         subdev = sun6i_video_remote_subdev(video, NULL);
38640 -       if (!subdev)
38641 +       if (!subdev) {
38642 +               ret = -EINVAL;
38643                 goto stop_media_pipeline;
38644 +       }
38646         config.pixelformat = video->fmt.fmt.pix.pixelformat;
38647         config.code = video->mbus_code;
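
The sun6i-csi hunk above fixes a silent-success bug: on the !subdev path the code jumped to the cleanup label with ret still holding 0 from an earlier call, so starting the stream reported success right after undoing its own setup. The general shape of bug and fix, with hypothetical helpers:

	static int start_streaming(struct foo_video *video)
	{
		int ret = setup_pipeline(video);	/* 0 on success */

		if (ret)
			return ret;

		if (!lookup_remote_subdev(video)) {
			ret = -EINVAL;	/* without this the caller sees 0 */
			goto stop_pipeline;
		}
		return 0;

	stop_pipeline:
		teardown_pipeline(video);
		return ret;
	}
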
38648 diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
38649 index ed863bf5ea80..671e4a928993 100644
38650 --- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
38651 +++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
38652 @@ -589,7 +589,7 @@ static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count)
38653         int ret;
38655         if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
38656 -               ret = pm_runtime_get_sync(dev);
38657 +               ret = pm_runtime_resume_and_get(dev);
38658                 if (ret < 0) {
38659                         dev_err(dev, "Failed to enable module\n");
38661 diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
38662 index cb0437b4c331..163fffc0e1d4 100644
38663 --- a/drivers/media/radio/radio-mr800.c
38664 +++ b/drivers/media/radio/radio-mr800.c
38665 @@ -366,7 +366,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
38666                         retval = -ENODATA;
38667                         break;
38668                 }
38669 -               if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
38670 +               if (schedule_msec_hrtimeout_interruptible((10))) {
38671                         retval = -ERESTARTSYS;
38672                         break;
38673                 }
38674 diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
38675 index fb9de7bbcd19..e53cf45e7f3f 100644
38676 --- a/drivers/media/radio/radio-tea5777.c
38677 +++ b/drivers/media/radio/radio-tea5777.c
38678 @@ -235,7 +235,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
38679         }
38681         if (wait) {
38682 -               if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
38683 +               if (schedule_msec_hrtimeout_interruptible((wait)))
38684                         return -ERESTARTSYS;
38685         }
38687 diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
38688 index c37315226c42..e73e6393403c 100644
38689 --- a/drivers/media/radio/tea575x.c
38690 +++ b/drivers/media/radio/tea575x.c
38691 @@ -401,7 +401,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
38692         for (;;) {
38693                 if (time_after(jiffies, timeout))
38694                         break;
38695 -               if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
38696 +               if (schedule_msec_hrtimeout_interruptible((10))) {
38697                         /* some signal arrived, stop search */
38698                         tea->val &= ~TEA575X_BIT_SEARCH;
38699                         snd_tea575x_set_freq(tea);
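
The three radio hunks above (radio-mr800, radio-tea5777, tea575x) replace schedule_timeout_interruptible(msecs_to_jiffies(...)) with schedule_msec_hrtimeout_interruptible(). That helper is not a mainline API; it comes from the hrtimeout patches carried earlier in this series, and it sleeps on an hrtimer so a 10 ms wait lasts roughly 10 ms instead of being rounded up to whole jiffies. The call-site pattern, assuming the in-series helper is present:

	/* before: granularity limited by CONFIG_HZ */
	if (schedule_timeout_interruptible(msecs_to_jiffies(10)))
		return -ERESTARTSYS;	/* woken early by a signal */

	/* after: hrtimer-backed, HZ-independent */
	if (schedule_msec_hrtimeout_interruptible(10))
		return -ERESTARTSYS;
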
38700 diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
38701 index 0c6229592e13..e5c4a6941d26 100644
38702 --- a/drivers/media/rc/ite-cir.c
38703 +++ b/drivers/media/rc/ite-cir.c
38704 @@ -276,8 +276,14 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
38705         /* read the interrupt flags */
38706         iflags = dev->params.get_irq_causes(dev);
38708 +       /* Check for RX overflow */
38709 +       if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) {
38710 +               dev_warn(&dev->rdev->dev, "receive overflow\n");
38711 +               ir_raw_event_reset(dev->rdev);
38712 +       }
38714         /* check for the receive interrupt */
38715 -       if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
38716 +       if (iflags & ITE_IRQ_RX_FIFO) {
38717                 /* read the FIFO bytes */
38718                 rx_bytes =
38719                         dev->params.get_rx_bytes(dev, rx_buf,
38720 diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
38721 index 0dc65ef3aa14..ca0ebf6ad9cc 100644
38722 --- a/drivers/media/test-drivers/vivid/vivid-core.c
38723 +++ b/drivers/media/test-drivers/vivid/vivid-core.c
38724 @@ -205,13 +205,13 @@ static const u8 vivid_hdmi_edid[256] = {
38725         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
38726         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
38728 -       0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f,
38729 +       0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f,
38730         0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
38731         0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
38732         0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
38733         0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
38734         0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
38735 -       0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
38736 +       0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3,
38737         0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
38738         0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
38739         0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
38740 @@ -220,7 +220,7 @@ static const u8 vivid_hdmi_edid[256] = {
38741         0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
38742         0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
38743         0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
38744 -       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
38745 +       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
38746  };
38748  static int vidioc_querycap(struct file *file, void  *priv,
38749 diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
38750 index ac1e981e8342..9f731f085179 100644
38751 --- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
38752 +++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
38753 @@ -1021,7 +1021,7 @@ int vivid_vid_out_s_fbuf(struct file *file, void *fh,
38754                 return -EINVAL;
38755         }
38756         dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags);
38757 -       dev->fbuf_out_flags = a->flags & (chroma_flags | alpha_flags);
38758 +       dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags);
38759         return 0;
38762 diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
38763 index b3505f402476..8647c50b66e5 100644
38764 --- a/drivers/media/tuners/m88rs6000t.c
38765 +++ b/drivers/media/tuners/m88rs6000t.c
38766 @@ -525,7 +525,7 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
38767         PGA2_cri = PGA2_GC >> 2;
38768         PGA2_crf = PGA2_GC & 0x03;
38770 -       for (i = 0; i <= RF_GC; i++)
38771 +       for (i = 0; i <= RF_GC && i < ARRAY_SIZE(RFGS); i++)
38772                 RFG += RFGS[i];
38774         if (RF_GC == 0)
38775 @@ -537,12 +537,12 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
38776         if (RF_GC == 3)
38777                 RFG += 100;
38779 -       for (i = 0; i <= IF_GC; i++)
38780 +       for (i = 0; i <= IF_GC && i < ARRAY_SIZE(IFGS); i++)
38781                 IFG += IFGS[i];
38783         TIAG = TIA_GC * TIA_GS;
38785 -       for (i = 0; i <= BB_GC; i++)
38786 +       for (i = 0; i <= BB_GC && i < ARRAY_SIZE(BBGS); i++)
38787                 BBG += BBGS[i];
38789         PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS;
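
In the m88rs6000t hunks above, RF_GC, IF_GC and BB_GC are gain codes read back from tuner registers, so a misbehaving device could push the loops past the fixed gain tables. Clamping the bound with ARRAY_SIZE() is the stock defence; a minimal sketch with a hypothetical table:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	static const u32 gain_steps[] = { 0, 15, 26, 57, 70 };

	static u32 sum_gain(unsigned int hw_code)	/* register read-back */
	{
		u32 total = 0;
		unsigned int i;

		/* "i <= hw_code" alone overruns once hw_code >= 5 */
		for (i = 0; i <= hw_code && i < ARRAY_SIZE(gain_steps); i++)
			total += gain_steps[i];
		return total;
	}
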
38790 diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
38791 index c1a7634e27b4..28e1fd64dd3c 100644
38792 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
38793 +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
38794 @@ -79,11 +79,17 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
38795                         }
38796                 }
38798 -               if ((ret = dvb_usb_adapter_stream_init(adap)) ||
38799 -                       (ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) ||
38800 -                       (ret = dvb_usb_adapter_frontend_init(adap))) {
38801 +               ret = dvb_usb_adapter_stream_init(adap);
38802 +               if (ret)
38803                         return ret;
38804 -               }
38806 +               ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
38807 +               if (ret)
38808 +                       goto dvb_init_err;
38810 +               ret = dvb_usb_adapter_frontend_init(adap);
38811 +               if (ret)
38812 +                       goto frontend_init_err;
38814                 /* use exclusive FE lock if there is multiple shared FEs */
38815                 if (adap->fe_adap[1].fe)
38816 @@ -103,6 +109,12 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
38817         }
38819         return 0;
38821 +frontend_init_err:
38822 +       dvb_usb_adapter_dvb_exit(adap);
38823 +dvb_init_err:
38824 +       dvb_usb_adapter_stream_exit(adap);
38825 +       return ret;
38828  static int dvb_usb_adapter_exit(struct dvb_usb_device *d)
38829 @@ -158,22 +170,20 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
38831                 if (d->props.priv_init != NULL) {
38832                         ret = d->props.priv_init(d);
38833 -                       if (ret != 0) {
38834 -                               kfree(d->priv);
38835 -                               d->priv = NULL;
38836 -                               return ret;
38837 -                       }
38838 +                       if (ret != 0)
38839 +                               goto err_priv_init;
38840                 }
38841         }
38843         /* check the capabilities and set appropriate variables */
38844         dvb_usb_device_power_ctrl(d, 1);
38846 -       if ((ret = dvb_usb_i2c_init(d)) ||
38847 -               (ret = dvb_usb_adapter_init(d, adapter_nums))) {
38848 -               dvb_usb_exit(d);
38849 -               return ret;
38850 -       }
38851 +       ret = dvb_usb_i2c_init(d);
38852 +       if (ret)
38853 +               goto err_i2c_init;
38854 +       ret = dvb_usb_adapter_init(d, adapter_nums);
38855 +       if (ret)
38856 +               goto err_adapter_init;
38858         if ((ret = dvb_usb_remote_init(d)))
38859                 err("could not initialize remote control.");
38860 @@ -181,6 +191,17 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
38861         dvb_usb_device_power_ctrl(d, 0);
38863         return 0;
38865 +err_adapter_init:
38866 +       dvb_usb_adapter_exit(d);
38867 +err_i2c_init:
38868 +       dvb_usb_i2c_exit(d);
38869 +       if (d->priv && d->props.priv_destroy)
38870 +               d->props.priv_destroy(d);
38871 +err_priv_init:
38872 +       kfree(d->priv);
38873 +       d->priv = NULL;
38874 +       return ret;
38877  /* determine the name and the state of the just found USB device */
38878 @@ -255,41 +276,50 @@ int dvb_usb_device_init(struct usb_interface *intf,
38879         if (du != NULL)
38880                 *du = NULL;
38882 -       if ((desc = dvb_usb_find_device(udev, props, &cold)) == NULL) {
38883 +       d = kzalloc(sizeof(*d), GFP_KERNEL);
38884 +       if (!d) {
38885 +               err("no memory for 'struct dvb_usb_device'");
38886 +               return -ENOMEM;
38887 +       }
38889 +       memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
38891 +       desc = dvb_usb_find_device(udev, &d->props, &cold);
38892 +       if (!desc) {
38893                 deb_err("something went very wrong, device was not found in current device list - let's see what comes next.\n");
38894 -               return -ENODEV;
38895 +               ret = -ENODEV;
38896 +               goto error;
38897         }
38899         if (cold) {
38900                 info("found a '%s' in cold state, will try to load a firmware", desc->name);
38901                 ret = dvb_usb_download_firmware(udev, props);
38902                 if (!props->no_reconnect || ret != 0)
38903 -                       return ret;
38904 +                       goto error;
38905         }
38907         info("found a '%s' in warm state.", desc->name);
38908 -       d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL);
38909 -       if (d == NULL) {
38910 -               err("no memory for 'struct dvb_usb_device'");
38911 -               return -ENOMEM;
38912 -       }
38914         d->udev = udev;
38915 -       memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
38916         d->desc = desc;
38917         d->owner = owner;
38919         usb_set_intfdata(intf, d);
38921 -       if (du != NULL)
38922 +       ret = dvb_usb_init(d, adapter_nums);
38923 +       if (ret) {
38924 +               info("%s error while loading driver (%d)", desc->name, ret);
38925 +               goto error;
38926 +       }
38928 +       if (du)
38929                 *du = d;
38931 -       ret = dvb_usb_init(d, adapter_nums);
38932 +       info("%s successfully initialized and connected.", desc->name);
38933 +       return 0;
38935 -       if (ret == 0)
38936 -               info("%s successfully initialized and connected.", desc->name);
38937 -       else
38938 -               info("%s error while loading driver (%d)", desc->name, ret);
38939 + error:
38940 +       usb_set_intfdata(intf, NULL);
38941 +       kfree(d);
38942         return ret;
38944  EXPORT_SYMBOL(dvb_usb_device_init);
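
The dvb-usb-init hunks above replace chained if ((ret = a()) || (ret = b()) ...) calls, which either unwound nothing or tore down state that was never set up, with the standard kernel unwind idiom: one check per step, and each error label undoes exactly the steps that already succeeded, in reverse order. A generic sketch with hypothetical setup/teardown pairs:

	static int init_all(struct ctx *c)
	{
		int ret;

		ret = setup_a(c);
		if (ret)
			return ret;	/* nothing to unwind yet */

		ret = setup_b(c);
		if (ret)
			goto err_a;

		ret = setup_c(c);
		if (ret)
			goto err_b;

		return 0;

	err_b:
		teardown_b(c);	/* unwind strictly in reverse order */
	err_a:
		teardown_a(c);
		return ret;
	}
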
38945 diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
38946 index 741be0e69447..2b8ad2bde8a4 100644
38947 --- a/drivers/media/usb/dvb-usb/dvb-usb.h
38948 +++ b/drivers/media/usb/dvb-usb/dvb-usb.h
38949 @@ -487,7 +487,7 @@ extern int __must_check
38950  dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
38952  /* commonly used remote control parsing */
38953 -extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
38954 +extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[5], u32 *, int *);
38956  /* commonly used firmware download types and function */
38957  struct hexline {
38958 diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
38959 index 526424279637..471bd74667e3 100644
38960 --- a/drivers/media/usb/em28xx/em28xx-dvb.c
38961 +++ b/drivers/media/usb/em28xx/em28xx-dvb.c
38962 @@ -2010,6 +2010,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
38963         return result;
38965  out_free:
38966 +       em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
38967         kfree(dvb);
38968         dev->dvb = NULL;
38969         goto ret;
38970 diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
38971 index 158c8e28ed2c..47d8f28bfdfc 100644
38972 --- a/drivers/media/usb/gspca/gspca.c
38973 +++ b/drivers/media/usb/gspca/gspca.c
38974 @@ -1576,6 +1576,8 @@ int gspca_dev_probe2(struct usb_interface *intf,
38975  #endif
38976         v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
38977         v4l2_device_unregister(&gspca_dev->v4l2_dev);
38978 +       if (sd_desc->probe_error)
38979 +               sd_desc->probe_error(gspca_dev);
38980         kfree(gspca_dev->usb_buf);
38981         kfree(gspca_dev);
38982         return ret;
38983 diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
38984 index b0ced2e14006..a6554d5e9e1a 100644
38985 --- a/drivers/media/usb/gspca/gspca.h
38986 +++ b/drivers/media/usb/gspca/gspca.h
38987 @@ -105,6 +105,7 @@ struct sd_desc {
38988         cam_cf_op config;       /* called on probe */
38989         cam_op init;            /* called on probe and resume */
38990         cam_op init_controls;   /* called on probe */
38991 +       cam_v_op probe_error;   /* called if probe failed, do cleanup here */
38992         cam_op start;           /* called on stream on after URBs creation */
38993         cam_pkt_op pkt_scan;
38994  /* optional operations */
38995 diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
38996 index 97799cfb832e..949111070971 100644
38997 --- a/drivers/media/usb/gspca/sq905.c
38998 +++ b/drivers/media/usb/gspca/sq905.c
38999 @@ -158,7 +158,7 @@ static int
39000  sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
39002         int ret;
39003 -       int act_len;
39004 +       int act_len = 0;
39006         gspca_dev->usb_buf[0] = '\0';
39007         if (need_lock)
39008 diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
39009 index 95673fc0a99c..d9bc2aacc885 100644
39010 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
39011 +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
39012 @@ -529,12 +529,21 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
39013  static int stv06xx_config(struct gspca_dev *gspca_dev,
39014                           const struct usb_device_id *id);
39016 +static void stv06xx_probe_error(struct gspca_dev *gspca_dev)
39018 +       struct sd *sd = (struct sd *)gspca_dev;
39020 +       kfree(sd->sensor_priv);
39021 +       sd->sensor_priv = NULL;
39024  /* sub-driver description */
39025  static const struct sd_desc sd_desc = {
39026         .name = MODULE_NAME,
39027         .config = stv06xx_config,
39028         .init = stv06xx_init,
39029         .init_controls = stv06xx_init_controls,
39030 +       .probe_error = stv06xx_probe_error,
39031         .start = stv06xx_start,
39032         .stopN = stv06xx_stopN,
39033         .pkt_scan = stv06xx_pkt_scan,
39034 diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
39035 index 30ef2a3110f7..9a791d8ef200 100644
39036 --- a/drivers/media/usb/uvc/uvc_driver.c
39037 +++ b/drivers/media/usb/uvc/uvc_driver.c
39038 @@ -1712,10 +1712,35 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
39039                         if (forward->bNrInPins != 1) {
39040                                 uvc_dbg(chain->dev, DESCR,
39041                                         "Extension unit %d has more than 1 input pin\n",
39042 -                                       entity->id);
39043 +                                       forward->id);
39044                                 return -EINVAL;
39045                         }
39047 +                       /*
39048 +                        * Some devices reference an output terminal as the
39049 +                        * source of extension units. This is incorrect, as
39050 +                        * output terminals only have an input pin, and thus
39051 +                        * can't be connected to any entity in the forward
39052 +                        * direction. The resulting topology would cause issues
39053 +                        * when registering the media controller graph. To
39054 +                        * avoid this problem, connect the extension unit to
39055 +                        * the source of the output terminal instead.
39056 +                        */
39057 +                       if (UVC_ENTITY_IS_OTERM(entity)) {
39058 +                               struct uvc_entity *source;
39060 +                               source = uvc_entity_by_id(chain->dev,
39061 +                                                         entity->baSourceID[0]);
39062 +                               if (!source) {
39063 +                                       uvc_dbg(chain->dev, DESCR,
39064 +                                               "Can't connect extension unit %u in chain\n",
39065 +                                               forward->id);
39066 +                                       break;
39067 +                               }
39069 +                               forward->baSourceID[0] = source->id;
39070 +                       }
39072                         list_add_tail(&forward->chain, &chain->entities);
39073                         if (!found)
39074                                 uvc_dbg_cont(PROBE, " (->");
39075 @@ -1735,6 +1760,13 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
39076                                 return -EINVAL;
39077                         }
39079 +                       if (UVC_ENTITY_IS_OTERM(entity)) {
39080 +                               uvc_dbg(chain->dev, DESCR,
39081 +                                       "Unsupported connection between output terminals %u and %u\n",
39082 +                                       entity->id, forward->id);
39083 +                               break;
39084 +                       }
39086                         list_add_tail(&forward->chain, &chain->entities);
39087                         if (!found)
39088                                 uvc_dbg_cont(PROBE, " (->");
39089 diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
39090 index d29b861367ea..1ef611e08323 100644
39091 --- a/drivers/media/usb/zr364xx/zr364xx.c
39092 +++ b/drivers/media/usb/zr364xx/zr364xx.c
39093 @@ -1430,7 +1430,7 @@ static int zr364xx_probe(struct usb_interface *intf,
39094         if (hdl->error) {
39095                 err = hdl->error;
39096                 dev_err(&udev->dev, "couldn't register control\n");
39097 -               goto unregister;
39098 +               goto free_hdlr_and_unreg_dev;
39099         }
39100         /* save the init method used by this camera */
39101         cam->method = id->driver_info;
39102 @@ -1503,7 +1503,7 @@ static int zr364xx_probe(struct usb_interface *intf,
39103         if (!cam->read_endpoint) {
39104                 err = -ENOMEM;
39105                 dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
39106 -               goto unregister;
39107 +               goto free_hdlr_and_unreg_dev;
39108         }
39110         /* v4l */
39111 @@ -1515,7 +1515,7 @@ static int zr364xx_probe(struct usb_interface *intf,
39112         /* load zr364xx board specific */
39113         err = zr364xx_board_init(cam);
39114         if (err)
39115 -               goto unregister;
39116 +               goto free_hdlr_and_unreg_dev;
39117         err = v4l2_ctrl_handler_setup(hdl);
39118         if (err)
39119                 goto board_uninit;
39120 @@ -1533,7 +1533,7 @@ static int zr364xx_probe(struct usb_interface *intf,
39121         err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
39122         if (err) {
39123                 dev_err(&udev->dev, "video_register_device failed\n");
39124 -               goto free_handler;
39125 +               goto board_uninit;
39126         }
39127         cam->v4l2_dev.release = zr364xx_release;
39129 @@ -1541,11 +1541,10 @@ static int zr364xx_probe(struct usb_interface *intf,
39130                  video_device_node_name(&cam->vdev));
39131         return 0;
39133 -free_handler:
39134 -       v4l2_ctrl_handler_free(hdl);
39135  board_uninit:
39136         zr364xx_board_uninit(cam);
39137 -unregister:
39138 +free_hdlr_and_unreg_dev:
39139 +       v4l2_ctrl_handler_free(hdl);
39140         v4l2_device_unregister(&cam->v4l2_dev);
39141  free_cam:
39142         kfree(cam);
39143 diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
39144 index 016cf6204cbb..6219c8185782 100644
39145 --- a/drivers/media/v4l2-core/v4l2-ctrls.c
39146 +++ b/drivers/media/v4l2-core/v4l2-ctrls.c
39147 @@ -1675,6 +1675,8 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
39148                 p_fwht_params->version = V4L2_FWHT_VERSION;
39149                 p_fwht_params->width = 1280;
39150                 p_fwht_params->height = 720;
39151 +               p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
39152 +                       (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
39153                 break;
39154         }
39156 @@ -2395,7 +2397,16 @@ static void new_to_req(struct v4l2_ctrl_ref *ref)
39157         if (!ref)
39158                 return;
39159         ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
39160 -       ref->req = ref;
39161 +       ref->valid_p_req = true;
39164 +/* Copy the current value to the request value */
39165 +static void cur_to_req(struct v4l2_ctrl_ref *ref)
39167 +       if (!ref)
39168 +               return;
39169 +       ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->p_req);
39170 +       ref->valid_p_req = true;
39173  /* Copy the request value to the new value */
39174 @@ -2403,8 +2414,8 @@ static void req_to_new(struct v4l2_ctrl_ref *ref)
39176         if (!ref)
39177                 return;
39178 -       if (ref->req)
39179 -               ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
39180 +       if (ref->valid_p_req)
39181 +               ptr_to_ptr(ref->ctrl, ref->p_req, ref->ctrl->p_new);
39182         else
39183                 ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
39185 @@ -2541,7 +2552,15 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
39186         if (hdl == NULL || hdl->buckets == NULL)
39187                 return;
39189 -       if (!hdl->req_obj.req && !list_empty(&hdl->requests)) {
39190 +       /*
39191 +        * If the main handler is freed and it is used by handler objects in
39192 +        * outstanding requests, then unbind and put those objects before
39193 +        * freeing the main handler.
39194 +        *
39195 +        * The main handler can be identified by having a NULL ops pointer in
39196 +        * the request object.
39197 +        */
39198 +       if (!hdl->req_obj.ops && !list_empty(&hdl->requests)) {
39199                 struct v4l2_ctrl_handler *req, *next_req;
39201                 list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
39202 @@ -3571,39 +3590,8 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
39203         struct v4l2_ctrl_handler *hdl =
39204                 container_of(obj, struct v4l2_ctrl_handler, req_obj);
39205         struct v4l2_ctrl_handler *main_hdl = obj->priv;
39206 -       struct v4l2_ctrl_handler *prev_hdl = NULL;
39207 -       struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
39209         mutex_lock(main_hdl->lock);
39210 -       if (list_empty(&main_hdl->requests_queued))
39211 -               goto queue;
39213 -       prev_hdl = list_last_entry(&main_hdl->requests_queued,
39214 -                                  struct v4l2_ctrl_handler, requests_queued);
39215 -       /*
39216 -        * Note: prev_hdl and hdl must contain the same list of control
39217 -        * references, so if any differences are detected then that is a
39218 -        * driver bug and the WARN_ON is triggered.
39219 -        */
39220 -       mutex_lock(prev_hdl->lock);
39221 -       ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs,
39222 -                                        struct v4l2_ctrl_ref, node);
39223 -       list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) {
39224 -               if (ref_ctrl->req)
39225 -                       continue;
39226 -               while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
39227 -                       /* Should never happen, but just in case... */
39228 -                       if (list_is_last(&ref_ctrl_prev->node,
39229 -                                        &prev_hdl->ctrl_refs))
39230 -                               break;
39231 -                       ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
39232 -               }
39233 -               if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id))
39234 -                       break;
39235 -               ref_ctrl->req = ref_ctrl_prev->req;
39236 -       }
39237 -       mutex_unlock(prev_hdl->lock);
39238 -queue:
39239         list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
39240         hdl->request_is_queued = true;
39241         mutex_unlock(main_hdl->lock);
39242 @@ -3615,8 +3603,8 @@ static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
39243                 container_of(obj, struct v4l2_ctrl_handler, req_obj);
39244         struct v4l2_ctrl_handler *main_hdl = obj->priv;
39246 -       list_del_init(&hdl->requests);
39247         mutex_lock(main_hdl->lock);
39248 +       list_del_init(&hdl->requests);
39249         if (hdl->request_is_queued) {
39250                 list_del_init(&hdl->requests_queued);
39251                 hdl->request_is_queued = false;
39252 @@ -3660,7 +3648,7 @@ v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
39254         struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
39256 -       return (ref && ref->req == ref) ? ref->ctrl : NULL;
39257 +       return (ref && ref->valid_p_req) ? ref->ctrl : NULL;
39259  EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
39261 @@ -3675,8 +3663,11 @@ static int v4l2_ctrl_request_bind(struct media_request *req,
39262         if (!ret) {
39263                 ret = media_request_object_bind(req, &req_ops,
39264                                                 from, false, &hdl->req_obj);
39265 -               if (!ret)
39266 +               if (!ret) {
39267 +                       mutex_lock(from->lock);
39268                         list_add_tail(&hdl->requests, &from->requests);
39269 +                       mutex_unlock(from->lock);
39270 +               }
39271         }
39272         return ret;
39274 @@ -3846,7 +3837,13 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
39275         return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
39278 -/* Get extended controls. Allocates the helpers array if needed. */
39280 + * Get extended controls. Allocates the helpers array if needed.
39281 + *
39282 + * Note that v4l2_g_ext_ctrls_common() with 'which' set to
39283 + * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
39284 + * completed, and in that case valid_p_req is true for all controls.
39285 + */
39286  static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
39287                                    struct v4l2_ext_controls *cs,
39288                                    struct video_device *vdev)
39289 @@ -3855,9 +3852,10 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
39290         struct v4l2_ctrl_helper *helpers = helper;
39291         int ret;
39292         int i, j;
39293 -       bool def_value;
39294 +       bool is_default, is_request;
39296 -       def_value = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
39297 +       is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
39298 +       is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
39300         cs->error_idx = cs->count;
39301         cs->which = V4L2_CTRL_ID2WHICH(cs->which);
39302 @@ -3883,11 +3881,9 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
39303                         ret = -EACCES;
39305         for (i = 0; !ret && i < cs->count; i++) {
39306 -               int (*ctrl_to_user)(struct v4l2_ext_control *c,
39307 -                                   struct v4l2_ctrl *ctrl);
39308                 struct v4l2_ctrl *master;
39310 -               ctrl_to_user = def_value ? def_to_user : cur_to_user;
39311 +               bool is_volatile = false;
39312 +               u32 idx = i;
39314                 if (helpers[i].mref == NULL)
39315                         continue;
39316 @@ -3897,31 +3893,48 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
39318                 v4l2_ctrl_lock(master);
39320 -               /* g_volatile_ctrl will update the new control values */
39321 -               if (!def_value &&
39322 +               /*
39323 +                * g_volatile_ctrl will update the new control values.
39324 +                * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
39325 +                * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
39326 +                * it is v4l2_ctrl_request_complete() that copies the
39327 +                * volatile controls at the time of request completion
39328 +                * to the request, so you don't want to do that again.
39329 +                */
39330 +               if (!is_default && !is_request &&
39331                     ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
39332                     (master->has_volatiles && !is_cur_manual(master)))) {
39333                         for (j = 0; j < master->ncontrols; j++)
39334                                 cur_to_new(master->cluster[j]);
39335                         ret = call_op(master, g_volatile_ctrl);
39336 -                       ctrl_to_user = new_to_user;
39337 +                       is_volatile = true;
39338                 }
39339 -               /* If OK, then copy the current (for non-volatile controls)
39340 -                  or the new (for volatile controls) control values to the
39341 -                  caller */
39342 -               if (!ret) {
39343 -                       u32 idx = i;
39345 -                       do {
39346 -                               if (helpers[idx].ref->req)
39347 -                                       ret = req_to_user(cs->controls + idx,
39348 -                                               helpers[idx].ref->req);
39349 -                               else
39350 -                                       ret = ctrl_to_user(cs->controls + idx,
39351 -                                               helpers[idx].ref->ctrl);
39352 -                               idx = helpers[idx].next;
39353 -                       } while (!ret && idx);
39354 +               if (ret) {
39355 +                       v4l2_ctrl_unlock(master);
39356 +                       break;
39357                 }
39359 +               /*
39360 +                * Copy the default value (if is_default is true), the
39361 +                * request value (if is_request is true and p_req is valid),
39362 +                * the new volatile value (if is_volatile is true) or the
39363 +                * current value.
39364 +                */
39365 +               do {
39366 +                       struct v4l2_ctrl_ref *ref = helpers[idx].ref;
39368 +                       if (is_default)
39369 +                               ret = def_to_user(cs->controls + idx, ref->ctrl);
39370 +                       else if (is_request && ref->valid_p_req)
39371 +                               ret = req_to_user(cs->controls + idx, ref);
39372 +                       else if (is_volatile)
39373 +                               ret = new_to_user(cs->controls + idx, ref->ctrl);
39374 +                       else
39375 +                               ret = cur_to_user(cs->controls + idx, ref->ctrl);
39376 +                       idx = helpers[idx].next;
39377 +               } while (!ret && idx);
39379                 v4l2_ctrl_unlock(master);
39380         }
39382 @@ -4564,8 +4577,6 @@ void v4l2_ctrl_request_complete(struct media_request *req,
39383                 unsigned int i;
39385                 if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
39386 -                       ref->req = ref;
39388                         v4l2_ctrl_lock(master);
39389                         /* g_volatile_ctrl will update the current control values */
39390                         for (i = 0; i < master->ncontrols; i++)
39391 @@ -4575,21 +4586,12 @@ void v4l2_ctrl_request_complete(struct media_request *req,
39392                         v4l2_ctrl_unlock(master);
39393                         continue;
39394                 }
39395 -               if (ref->req == ref)
39396 +               if (ref->valid_p_req)
39397                         continue;
39399 +               /* Copy the current control value into the request */
39400                 v4l2_ctrl_lock(ctrl);
39401 -               if (ref->req) {
39402 -                       ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
39403 -               } else {
39404 -                       ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
39405 -                       /*
39406 -                        * Set ref->req to ensure that when userspace wants to
39407 -                        * obtain the controls of this request it will take
39408 -                        * this value and not the current value of the control.
39409 -                        */
39410 -                       ref->req = ref;
39411 -               }
39412 +               cur_to_req(ref);
39413                 v4l2_ctrl_unlock(ctrl);
39414         }
39416 @@ -4653,7 +4655,7 @@ int v4l2_ctrl_request_setup(struct media_request *req,
39417                                 struct v4l2_ctrl_ref *r =
39418                                         find_ref(hdl, master->cluster[i]->id);
39420 -                               if (r->req && r == r->req) {
39421 +                               if (r->valid_p_req) {
39422                                         have_new_data = true;
39423                                         break;
39424                                 }
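
Two themes run through the v4l2-ctrls hunks above: the self-referential marker ref->req == ref (meaning "this request carries a value for this control") becomes an explicit valid_p_req flag, and membership in main_hdl->requests is now only changed under main_hdl->lock, since the old v4l2_ctrl_request_unbind() deleted from the list unlocked and could race the list_add_tail() in v4l2_ctrl_request_bind(). The locking rule in isolation, with hypothetical names:

	/* every mutation of owner->requests must hold owner->lock */
	mutex_lock(owner->lock);
	list_add_tail(&item->node, &owner->requests);
	mutex_unlock(owner->lock);

	/* ... and symmetrically on teardown ... */
	mutex_lock(owner->lock);
	list_del_init(&item->node);	/* was unlocked before this fix */
	mutex_unlock(owner->lock);
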
39425 diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
39426 index cfa730cfd145..f80c2ea39ca4 100644
39427 --- a/drivers/memory/omap-gpmc.c
39428 +++ b/drivers/memory/omap-gpmc.c
39429 @@ -1009,8 +1009,8 @@ EXPORT_SYMBOL(gpmc_cs_request);
39431  void gpmc_cs_free(int cs)
39433 -       struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
39434 -       struct resource *res = &gpmc->mem;
39435 +       struct gpmc_cs_data *gpmc;
39436 +       struct resource *res;
39438         spin_lock(&gpmc_mem_lock);
39439         if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
39440 @@ -1018,6 +1018,9 @@ void gpmc_cs_free(int cs)
39441                 spin_unlock(&gpmc_mem_lock);
39442                 return;
39443         }
39444 +       gpmc = &gpmc_cs[cs];
39445 +       res = &gpmc->mem;
39447         gpmc_cs_disable_mem(cs);
39448         if (res->flags)
39449                 release_resource(res);
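
The gpmc_cs_free() change above looks cosmetic but is not: &gpmc_cs[cs] was computed before cs was range-checked, and forming the address of an out-of-range element (a negative cs, say) is undefined behaviour even if the pointer is never dereferenced. The fix derives the pointers only after validation; the shape of it, with hypothetical names:

	void cs_free(int cs)
	{
		struct cs_data *d;	/* do not initialise from table[cs] here */

		if (cs < 0 || cs >= num_cs || !cs_reserved(cs))
			return;		/* reject before any indexing */

		d = &table[cs];		/* safe: cs is validated now */
		release(d);
	}
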
39450 diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
39451 index 3b5b1045edd9..9c0a28416777 100644
39452 --- a/drivers/memory/pl353-smc.c
39453 +++ b/drivers/memory/pl353-smc.c
39454 @@ -63,7 +63,7 @@
39455  /* ECC memory config register specific constants */
39456  #define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC
39457  #define PL353_SMC_ECC_MEMCFG_MODE_SHIFT        2
39458 -#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK       0xC
39459 +#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK       0x3
39461  #define PL353_SMC_DC_UPT_NAND_REGS     ((4 << 23) |    /* CS: NAND chip */ \
39462                                  (2 << 21))     /* UpdateRegs operation */
39463 diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
39464 index 8d36e221def1..45eed659b0c6 100644
39465 --- a/drivers/memory/renesas-rpc-if.c
39466 +++ b/drivers/memory/renesas-rpc-if.c
39467 @@ -192,10 +192,10 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
39468         }
39470         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
39471 -       rpc->size = resource_size(res);
39472         rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
39473         if (IS_ERR(rpc->dirmap))
39474                 rpc->dirmap = NULL;
39475 +       rpc->size = resource_size(res);
39477         rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
39479 diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
39480 index 1dabb509dec3..dee503640e12 100644
39481 --- a/drivers/memory/samsung/exynos5422-dmc.c
39482 +++ b/drivers/memory/samsung/exynos5422-dmc.c
39483 @@ -1298,7 +1298,9 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
39485         dmc->curr_volt = target_volt;
39487 -       clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
39488 +       ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
39489 +       if (ret)
39490 +               return ret;
39492         clk_prepare_enable(dmc->fout_bpll);
39493         clk_prepare_enable(dmc->mout_bpll);
39494 diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
39495 index 077d9ab112b7..d919ae9691e2 100644
39496 --- a/drivers/mfd/arizona-irq.c
39497 +++ b/drivers/mfd/arizona-irq.c
39498 @@ -100,7 +100,7 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
39499         unsigned int val;
39500         int ret;
39502 -       ret = pm_runtime_get_sync(arizona->dev);
39503 +       ret = pm_runtime_resume_and_get(arizona->dev);
39504         if (ret < 0) {
39505                 dev_err(arizona->dev, "Failed to resume device: %d\n", ret);
39506                 return IRQ_NONE;
39507 diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
39508 index 3781d0bb7786..783a14af18e2 100644
39509 --- a/drivers/mfd/da9063-i2c.c
39510 +++ b/drivers/mfd/da9063-i2c.c
39511 @@ -442,6 +442,16 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
39512                 return ret;
39513         }
39515 +       /* If SMBus is not available and only I2C is possible, enter I2C mode */
39516 +       if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) {
39517 +               ret = regmap_clear_bits(da9063->regmap, DA9063_REG_CONFIG_J,
39518 +                                       DA9063_TWOWIRE_TO);
39519 +               if (ret < 0) {
39520 +                       dev_err(da9063->dev, "Failed to set Two-Wire Bus Mode.\n");
39521 +                       return -EIO;
39522 +               }
39523 +       }
39525         return da9063_device_init(da9063, i2c->irq);
39528 diff --git a/drivers/mfd/intel_pmt.c b/drivers/mfd/intel_pmt.c
39529 index 744b230cdcca..65da2b17a204 100644
39530 --- a/drivers/mfd/intel_pmt.c
39531 +++ b/drivers/mfd/intel_pmt.c
39532 @@ -79,19 +79,18 @@ static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
39533         case DVSEC_INTEL_ID_WATCHER:
39534                 if (quirks & PMT_QUIRK_NO_WATCHER) {
39535                         dev_info(dev, "Watcher not supported\n");
39536 -                       return 0;
39537 +                       return -EINVAL;
39538                 }
39539                 name = "pmt_watcher";
39540                 break;
39541         case DVSEC_INTEL_ID_CRASHLOG:
39542                 if (quirks & PMT_QUIRK_NO_CRASHLOG) {
39543                         dev_info(dev, "Crashlog not supported\n");
39544 -                       return 0;
39545 +                       return -EINVAL;
39546                 }
39547                 name = "pmt_crashlog";
39548                 break;
39549         default:
39550 -               dev_err(dev, "Unrecognized PMT capability: %d\n", id);
39551                 return -EINVAL;
39552         }
39554 @@ -174,12 +173,8 @@ static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39555                 header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
39557                 ret = pmt_add_dev(pdev, &header, quirks);
39558 -               if (ret) {
39559 -                       dev_warn(&pdev->dev,
39560 -                                "Failed to add device for DVSEC id %d\n",
39561 -                                header.id);
39562 +               if (ret)
39563                         continue;
39564 -               }
39566                 found_devices = true;
39567         } while (true);
39568 diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
39569 index add603359124..44ed2fce0319 100644
39570 --- a/drivers/mfd/stm32-timers.c
39571 +++ b/drivers/mfd/stm32-timers.c
39572 @@ -158,13 +158,18 @@ static const struct regmap_config stm32_timers_regmap_cfg = {
39574  static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
39576 +       u32 arr;
39578 +       /* Backup ARR to restore it after getting the maximum value */
39579 +       regmap_read(ddata->regmap, TIM_ARR, &arr);
39581         /*
39582          * Only the available bits will be written so when readback
39583          * we get the maximum value of auto reload register
39584          */
39585         regmap_write(ddata->regmap, TIM_ARR, ~0L);
39586         regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
39587 -       regmap_write(ddata->regmap, TIM_ARR, 0x0);
39588 +       regmap_write(ddata->regmap, TIM_ARR, arr);
39591  static int stm32_timers_dma_probe(struct device *dev,
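
stm32_timers_get_arr_size() above discovers the usable width of the auto-reload register by writing all-ones and reading back which bits latched, a common probe trick. The fix saves ARR first and restores it afterwards, instead of zeroing a value an earlier boot stage may have programmed. The probe-and-restore pattern, assuming a regmap-backed device:

	u32 saved, max;

	regmap_read(map, REG_ARR, &saved);	/* keep the current value */

	regmap_write(map, REG_ARR, ~0U);	/* only real bits will stick */
	regmap_read(map, REG_ARR, &max);	/* read-back = register width */

	regmap_write(map, REG_ARR, saved);	/* restore, don't clobber */
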
39592 diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
39593 index 90f3292230c9..1dd39483e7c1 100644
39594 --- a/drivers/mfd/stmpe.c
39595 +++ b/drivers/mfd/stmpe.c
39596 @@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(stmpe_set_altfunc);
39597   * GPIO (all variants)
39598   */
39600 -static const struct resource stmpe_gpio_resources[] = {
39601 +static struct resource stmpe_gpio_resources[] = {
39602         /* Start and end filled dynamically */
39603         {
39604                 .flags  = IORESOURCE_IRQ,
39605 @@ -336,7 +336,8 @@ static const struct mfd_cell stmpe_gpio_cell_noirq = {
39606   * Keypad (1601, 2401, 2403)
39607   */
39609 -static const struct resource stmpe_keypad_resources[] = {
39610 +static struct resource stmpe_keypad_resources[] = {
39611 +       /* Start and end filled dynamically */
39612         {
39613                 .name   = "KEYPAD",
39614                 .flags  = IORESOURCE_IRQ,
39615 @@ -357,7 +358,8 @@ static const struct mfd_cell stmpe_keypad_cell = {
39616  /*
39617   * PWM (1601, 2401, 2403)
39618   */
39619 -static const struct resource stmpe_pwm_resources[] = {
39620 +static struct resource stmpe_pwm_resources[] = {
39621 +       /* Start and end filled dynamically */
39622         {
39623                 .name   = "PWM0",
39624                 .flags  = IORESOURCE_IRQ,
39625 @@ -445,7 +447,8 @@ static struct stmpe_variant_info stmpe801_noirq = {
39626   * Touchscreen (STMPE811 or STMPE610)
39627   */
39629 -static const struct resource stmpe_ts_resources[] = {
39630 +static struct resource stmpe_ts_resources[] = {
39631 +       /* Start and end filled dynamically */
39632         {
39633                 .name   = "TOUCH_DET",
39634                 .flags  = IORESOURCE_IRQ,
39635 @@ -467,7 +470,8 @@ static const struct mfd_cell stmpe_ts_cell = {
39636   * ADC (STMPE811)
39637   */
39639 -static const struct resource stmpe_adc_resources[] = {
39640 +static struct resource stmpe_adc_resources[] = {
39641 +       /* Start and end filled dynamically */
39642         {
39643                 .name   = "STMPE_TEMP_SENS",
39644                 .flags  = IORESOURCE_IRQ,
39645 diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
39646 index b690796d24d4..448b13da62b4 100644
39647 --- a/drivers/mfd/ucb1x00-core.c
39648 +++ b/drivers/mfd/ucb1x00-core.c
39649 @@ -250,7 +250,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
39650                         break;
39651                 /* yield to other processes */
39652                 set_current_state(TASK_INTERRUPTIBLE);
39653 -               schedule_timeout(1);
39654 +               schedule_min_hrtimeout();
39655         }
39657         return UCB_ADC_DAT(val);
39658 diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
39659 index 926408b41270..7a6f01ace78a 100644
39660 --- a/drivers/misc/eeprom/at24.c
39661 +++ b/drivers/misc/eeprom/at24.c
39662 @@ -763,7 +763,8 @@ static int at24_probe(struct i2c_client *client)
39663         at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
39664         if (IS_ERR(at24->nvmem)) {
39665                 pm_runtime_disable(dev);
39666 -               regulator_disable(at24->vcc_reg);
39667 +               if (!pm_runtime_status_suspended(dev))
39668 +                       regulator_disable(at24->vcc_reg);
39669                 return PTR_ERR(at24->nvmem);
39670         }
39672 @@ -774,7 +775,8 @@ static int at24_probe(struct i2c_client *client)
39673         err = at24_read(at24, 0, &test_byte, 1);
39674         if (err) {
39675                 pm_runtime_disable(dev);
39676 -               regulator_disable(at24->vcc_reg);
39677 +               if (!pm_runtime_status_suspended(dev))
39678 +                       regulator_disable(at24->vcc_reg);
39679                 return -ENODEV;
39680         }
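
Both at24_probe() error paths above share one subtlety: with runtime PM active, a runtime suspend may already have executed and disabled vcc_reg itself (the driver's runtime-suspend hook does so), in which case a second regulator_disable() would unbalance the enable count. Hence the pm_runtime_status_suspended() guard; the distilled pattern:

	if (err) {
		pm_runtime_disable(dev);
		/* the runtime-suspend hook already dropped the regulator
		 * if it ran; only disable it when it is still held */
		if (!pm_runtime_status_suspended(dev))
			regulator_disable(at24->vcc_reg);
		return err;
	}
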
39682 diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
39683 index 9152242778f5..ecdedd87f8cc 100644
39684 --- a/drivers/misc/habanalabs/gaudi/gaudi.c
39685 +++ b/drivers/misc/habanalabs/gaudi/gaudi.c
39686 @@ -5546,6 +5546,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
39687         struct hl_cs_job *job;
39688         u32 cb_size, ctl, err_cause;
39689         struct hl_cb *cb;
39690 +       u64 id;
39691         int rc;
39693         cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
39694 @@ -5612,8 +5613,9 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
39695         }
39697  release_cb:
39698 +       id = cb->id;
39699         hl_cb_put(cb);
39700 -       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
39701 +       hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
39703         return rc;
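
The gaudi hunk above is a use-after-free fix: hl_cb_put() can drop the last reference and free cb, after which the old code still read cb->id for the hl_cb_destroy() call. Snapshotting the field while the object is alive is the standard cure:

	u64 id = cb->id;	/* copy out before the reference drop */

	hl_cb_put(cb);		/* may free cb */
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
	/* no cb-> dereference is legal past the put */
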
39705 diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
39706 index 2bdf560ee681..0f9ea75b0b18 100644
39707 --- a/drivers/misc/ics932s401.c
39708 +++ b/drivers/misc/ics932s401.c
39709 @@ -134,7 +134,7 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
39710         for (i = 0; i < NUM_MIRRORED_REGS; i++) {
39711                 temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
39712                 if (temp < 0)
39713 -                       data->regs[regs_to_copy[i]] = 0;
39714 +                       temp = 0;
39715                 data->regs[regs_to_copy[i]] = temp >> 8;
39716         }
39718 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
39719 index 945701bce553..2e081a58da6c 100644
39720 --- a/drivers/misc/kgdbts.c
39721 +++ b/drivers/misc/kgdbts.c
39722 @@ -95,19 +95,19 @@
39724  #include <asm/sections.h>
39726 -#define v1printk(a...) do { \
39727 -       if (verbose) \
39728 -               printk(KERN_INFO a); \
39729 -       } while (0)
39730 -#define v2printk(a...) do { \
39731 -       if (verbose > 1) \
39732 -               printk(KERN_INFO a); \
39733 -               touch_nmi_watchdog();   \
39734 -       } while (0)
39735 -#define eprintk(a...) do { \
39736 -               printk(KERN_ERR a); \
39737 -               WARN_ON(1); \
39738 -       } while (0)
39739 +#define v1printk(a...) do {            \
39740 +       if (verbose)                    \
39741 +               printk(KERN_INFO a);    \
39742 +} while (0)
39743 +#define v2printk(a...) do {            \
39744 +       if (verbose > 1)                \
39745 +               printk(KERN_INFO a);    \
39746 +       touch_nmi_watchdog();           \
39747 +} while (0)
39748 +#define eprintk(a...) do {             \
39749 +       printk(KERN_ERR a);             \
39750 +       WARN_ON(1);                     \
39751 +} while (0)
39752  #define MAX_CONFIG_LEN         40
39754  static struct kgdb_io kgdbts_io_ops;
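
The kgdbts hunk above is mostly re-tabbing, but it also removes a trap: the old v2printk() indented touch_nmi_watchdog() as if it were inside if (verbose > 1) when it actually ran unconditionally, and indented the closing } while (0) one level too deep. The do { ... } while (0) wrapper itself is what keeps a multi-statement macro usable as a single statement; a sketch of why, with a hypothetical log_twice():

	#define log_twice(msg) do {	\
		pr_info(msg);		\
		pr_info(msg);		\
	} while (0)

	/* without the wrapper, only the first pr_info() would sit under
	 * the if, and the macro's trailing ; would break the else */
	if (debug)
		log_twice("hello\n");
	else
		pr_info("quiet\n");
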
39755 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
39756 index dd65cedf3b12..9d14bf444481 100644
39757 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
39758 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
39759 @@ -208,7 +208,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
39760  static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
39762  /* ODR is Output Data Rate */
39763 -static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
39764 +static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3)
39766         u8 ctrl;
39767         int shift;
39768 @@ -216,15 +216,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
39769         lis3->read(lis3, CTRL_REG1, &ctrl);
39770         ctrl &= lis3->odr_mask;
39771         shift = ffs(lis3->odr_mask) - 1;
39772 -       return lis3->odrs[(ctrl >> shift)];
39773 +       return (ctrl >> shift);
39776  static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
39778 -       int div = lis3lv02d_get_odr(lis3);
39779 +       int odr_idx = lis3lv02d_get_odr_index(lis3);
39780 +       int div = lis3->odrs[odr_idx];
39782 -       if (WARN_ONCE(div == 0, "device returned spurious data"))
39783 +       if (div == 0) {
39784 +               if (odr_idx == 0) {
39785 +                       /* Power-down mode: not sampling, no need to sleep */
39786 +                       return 0;
39787 +               }
39789 +               dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx);
39790                 return -ENXIO;
39791 +       }
39793         /* LIS3 power on delay is quite long */
39794         msleep(lis3->pwron_delay / div);
39795 @@ -816,9 +824,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
39796                         struct device_attribute *attr, char *buf)
39798         struct lis3lv02d *lis3 = dev_get_drvdata(dev);
39799 +       int odr_idx;
39801         lis3lv02d_sysfs_poweron(lis3);
39802 -       return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
39804 +       odr_idx = lis3lv02d_get_odr_index(lis3);
39805 +       return sprintf(buf, "%d\n", lis3->odrs[odr_idx]);
39808  static ssize_t lis3lv02d_rate_set(struct device *dev,
39809 diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
39810 index 14be76d4c2e6..cb34925e10f1 100644
39811 --- a/drivers/misc/mei/hw-me-regs.h
39812 +++ b/drivers/misc/mei/hw-me-regs.h
39813 @@ -105,6 +105,7 @@
39815  #define MEI_DEV_ID_ADP_S      0x7AE8  /* Alder Lake Point S */
39816  #define MEI_DEV_ID_ADP_LP     0x7A60  /* Alder Lake Point LP */
39817 +#define MEI_DEV_ID_ADP_P      0x51E0  /* Alder Lake Point P */
39819  /*
39820   * MEI HW Section
39821 diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
39822 index a7e179626b63..c3393b383e59 100644
39823 --- a/drivers/misc/mei/pci-me.c
39824 +++ b/drivers/misc/mei/pci-me.c
39825 @@ -111,6 +111,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
39827         {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
39828         {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
39829 +       {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
39831         /* required last entry */
39832         {0, }
39833 diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
39834 index 8e6607fc8a67..b9ab770bbdb5 100644
39835 --- a/drivers/misc/sgi-xp/xpc_channel.c
39836 +++ b/drivers/misc/sgi-xp/xpc_channel.c
39837 @@ -834,7 +834,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
39839         atomic_inc(&ch->n_on_msg_allocate_wq);
39840         prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
39841 -       ret = schedule_timeout(1);
39842 +       ret = schedule_min_hrtimeout();
39843         finish_wait(&ch->msg_allocate_wq, &wait);
39844         atomic_dec(&ch->n_on_msg_allocate_wq);
39846 diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
39847 index 345addd9306d..fa8a7fce4481 100644
39848 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c
39849 +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
39850 @@ -326,7 +326,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
39851  bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
39853         int result;
39854 -       struct vmci_notify_bm_set_msg bitmap_set_msg;
39855 +       struct vmci_notify_bm_set_msg bitmap_set_msg = { };
39857         bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
39858                                                   VMCI_SET_NOTIFY_BITMAP);
39859 diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
39860 index cc8eeb361fcd..1018dc77269d 100644
39861 --- a/drivers/misc/vmw_vmci/vmci_guest.c
39862 +++ b/drivers/misc/vmw_vmci/vmci_guest.c
39863 @@ -168,7 +168,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
39864                                 VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
39865         struct vmci_datagram *check_msg;
39867 -       check_msg = kmalloc(msg_size, GFP_KERNEL);
39868 +       check_msg = kzalloc(msg_size, GFP_KERNEL);
39869         if (!check_msg) {
39870                 dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
39871                 return -ENOMEM;
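Both vmw_vmci hunks plug the same class of information leak: message buffers handed to the host side carried uninitialized stack or heap bytes. "= { }" zero-fills the automatic struct (padding included, in practice), and kzalloc is the zeroing counterpart of kmalloc. A userspace model, with calloc standing in for kzalloc:

#include <stdio.h>
#include <stdlib.h>

struct msg {
	unsigned char type;     /* compilers insert padding after this */
	unsigned int payload;
};

int main(void)
{
	/* The kernel spelling is "= { }"; "= { 0 }" is the portable C form.
	 * In practice both zero the whole object, padding included. */
	struct msg on_stack = { 0 };
	struct msg *on_heap = calloc(1, sizeof(*on_heap)); /* kzalloc analogue */

	if (!on_heap)
		return 1;
	on_stack.type = 1;
	on_heap->payload = 42;
	/* Nothing uninitialized would escape if these were copied elsewhere. */
	printf("%u %u\n", on_stack.payload, (unsigned)on_heap->type);
	free(on_heap);
	return 0;
}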
39872 diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
39873 index d666e24fbe0e..a4c06ef67394 100644
39874 --- a/drivers/mmc/core/block.c
39875 +++ b/drivers/mmc/core/block.c
39876 @@ -572,6 +572,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
39877                 main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
39878         }
39880 +       /*
39881 +        * Make sure to update CACHE_CTRL in case it was changed. The cache
39882 +        * will get turned back on if the card is re-initialized, e.g.
39883 +        * suspend/resume or hw reset in recovery.
39884 +        */
39885 +       if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
39886 +           (cmd.opcode == MMC_SWITCH)) {
39887 +               u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
39889 +               card->ext_csd.cache_ctrl = value;
39890 +       }
39892         /*
39893          * According to the SD specs, some commands require a delay after
39894          * issuing the command.
39895 @@ -2224,6 +2236,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
39896         case MMC_ISSUE_ASYNC:
39897                 switch (req_op(req)) {
39898                 case REQ_OP_FLUSH:
39899 +                       if (!mmc_cache_enabled(host)) {
39900 +                               blk_mq_end_request(req, BLK_STS_OK);
39901 +                               return MMC_REQ_FINISHED;
39902 +                       }
39903                         ret = mmc_blk_cqe_issue_flush(mq, req);
39904                         break;
39905                 case REQ_OP_READ:
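The two block.c hunks cooperate: the first mirrors a userspace SWITCH to EXT_CSD_CACHE_CTRL into card->ext_csd.cache_ctrl, the second uses that state to complete flush requests immediately when no cache is enabled. The index/value decoding follows the eMMC CMD6 argument layout (index in bits 23:16, value in bits 15:8); a self-contained sketch, macro names modeled on the kernel's:

#include <stdio.h>
#include <stdint.h>

#define EXTRACT_INDEX_FROM_ARG(x) (((x) & 0x00FF0000) >> 16)
#define EXTRACT_VALUE_FROM_ARG(x) (((x) & 0x0000FF00) >> 8)

#define EXT_CSD_CACHE_CTRL 33   /* ext_csd byte index used by the kernel */

int main(void)
{
	/* SWITCH arg: access=0x03 (write byte), index=33, value=1 */
	uint32_t arg = (0x03u << 24) |
		       ((uint32_t)EXT_CSD_CACHE_CTRL << 16) | (1u << 8);
	uint8_t cache_ctrl = 0;

	if (EXTRACT_INDEX_FROM_ARG(arg) == EXT_CSD_CACHE_CTRL)
		cache_ctrl = EXTRACT_VALUE_FROM_ARG(arg) & 1; /* track on/off */

	printf("cache_ctrl = %u\n", cache_ctrl);
	return 0;
}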
39906 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
39907 index 1136b859ddd8..e30c4e88e404 100644
39908 --- a/drivers/mmc/core/core.c
39909 +++ b/drivers/mmc/core/core.c
39910 @@ -1207,7 +1207,7 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
39912         err = mmc_wait_for_cmd(host, &cmd, 0);
39913         if (err)
39914 -               return err;
39915 +               goto power_cycle;
39917         if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
39918                 return -EIO;
39919 @@ -2369,80 +2369,6 @@ void mmc_stop_host(struct mmc_host *host)
39920         mmc_release_host(host);
39923 -#ifdef CONFIG_PM_SLEEP
39924 -/* Do the card removal on suspend if card is assumed removeable
39925 - * Do that in pm notifier while userspace isn't yet frozen, so we will be able
39926 -   to sync the card.
39928 -static int mmc_pm_notify(struct notifier_block *notify_block,
39929 -                       unsigned long mode, void *unused)
39931 -       struct mmc_host *host = container_of(
39932 -               notify_block, struct mmc_host, pm_notify);
39933 -       unsigned long flags;
39934 -       int err = 0;
39936 -       switch (mode) {
39937 -       case PM_HIBERNATION_PREPARE:
39938 -       case PM_SUSPEND_PREPARE:
39939 -       case PM_RESTORE_PREPARE:
39940 -               spin_lock_irqsave(&host->lock, flags);
39941 -               host->rescan_disable = 1;
39942 -               spin_unlock_irqrestore(&host->lock, flags);
39943 -               cancel_delayed_work_sync(&host->detect);
39945 -               if (!host->bus_ops)
39946 -                       break;
39948 -               /* Validate prerequisites for suspend */
39949 -               if (host->bus_ops->pre_suspend)
39950 -                       err = host->bus_ops->pre_suspend(host);
39951 -               if (!err)
39952 -                       break;
39954 -               if (!mmc_card_is_removable(host)) {
39955 -                       dev_warn(mmc_dev(host),
39956 -                                "pre_suspend failed for non-removable host: "
39957 -                                "%d\n", err);
39958 -                       /* Avoid removing non-removable hosts */
39959 -                       break;
39960 -               }
39962 -               /* Calling bus_ops->remove() with a claimed host can deadlock */
39963 -               host->bus_ops->remove(host);
39964 -               mmc_claim_host(host);
39965 -               mmc_detach_bus(host);
39966 -               mmc_power_off(host);
39967 -               mmc_release_host(host);
39968 -               host->pm_flags = 0;
39969 -               break;
39971 -       case PM_POST_SUSPEND:
39972 -       case PM_POST_HIBERNATION:
39973 -       case PM_POST_RESTORE:
39975 -               spin_lock_irqsave(&host->lock, flags);
39976 -               host->rescan_disable = 0;
39977 -               spin_unlock_irqrestore(&host->lock, flags);
39978 -               _mmc_detect_change(host, 0, false);
39980 -       }
39982 -       return 0;
39985 -void mmc_register_pm_notifier(struct mmc_host *host)
39987 -       host->pm_notify.notifier_call = mmc_pm_notify;
39988 -       register_pm_notifier(&host->pm_notify);
39991 -void mmc_unregister_pm_notifier(struct mmc_host *host)
39993 -       unregister_pm_notifier(&host->pm_notify);
39995 -#endif
39997  static int __init mmc_init(void)
39999         int ret;
40000 diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
40001 index 575ac0257af2..db3c9c68875d 100644
40002 --- a/drivers/mmc/core/core.h
40003 +++ b/drivers/mmc/core/core.h
40004 @@ -29,6 +29,7 @@ struct mmc_bus_ops {
40005         int (*shutdown)(struct mmc_host *);
40006         int (*hw_reset)(struct mmc_host *);
40007         int (*sw_reset)(struct mmc_host *);
40008 +       bool (*cache_enabled)(struct mmc_host *);
40009  };
40011  void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
40012 @@ -93,14 +94,6 @@ int mmc_execute_tuning(struct mmc_card *card);
40013  int mmc_hs200_to_hs400(struct mmc_card *card);
40014  int mmc_hs400_to_hs200(struct mmc_card *card);
40016 -#ifdef CONFIG_PM_SLEEP
40017 -void mmc_register_pm_notifier(struct mmc_host *host);
40018 -void mmc_unregister_pm_notifier(struct mmc_host *host);
40019 -#else
40020 -static inline void mmc_register_pm_notifier(struct mmc_host *host) { }
40021 -static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
40022 -#endif
40024  void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
40025  bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
40027 @@ -171,4 +164,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
40028                 host->ops->post_req(host, mrq, err);
40031 +static inline bool mmc_cache_enabled(struct mmc_host *host)
40033 +       if (host->bus_ops->cache_enabled)
40034 +               return host->bus_ops->cache_enabled(host);
40036 +       return false;
40039  #endif
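mmc_cache_enabled() becomes the single source of truth for "does this card have its write cache on": an optional bus_ops callback with a conservative false default, so bus types that never set .cache_enabled (SD and SDIO in this patch) fall through safely. The pattern in isolation, as a runnable sketch:

#include <stdbool.h>
#include <stdio.h>

struct host;

struct bus_ops {
	bool (*cache_enabled)(struct host *);  /* optional: NULL = no cache */
};

struct host {
	const struct bus_ops *bus_ops;
	bool cache_on;
};

static bool cache_enabled(struct host *h)
{
	if (h->bus_ops->cache_enabled)
		return h->bus_ops->cache_enabled(h);
	return false;                 /* safe default for SD/SDIO-like buses */
}

static bool emmc_cache_enabled(struct host *h)
{
	return h->cache_on;           /* models ext_csd cache_size/cache_ctrl */
}

static const struct bus_ops emmc_ops = { .cache_enabled = emmc_cache_enabled };
static const struct bus_ops sd_ops = { .cache_enabled = NULL };

int main(void)
{
	struct host emmc = { &emmc_ops, true };
	struct host sd = { &sd_ops, true };

	printf("emmc: %d, sd: %d\n", cache_enabled(&emmc), cache_enabled(&sd));
	return 0;
}

Defaulting to false is the design choice that lets the flush fast-path above end requests early without ever risking a skipped flush on a cache-capable card.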
40040 diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
40041 index 9b89a91b6b47..fe05b3645fe9 100644
40042 --- a/drivers/mmc/core/host.c
40043 +++ b/drivers/mmc/core/host.c
40044 @@ -35,6 +35,42 @@
40046  static DEFINE_IDA(mmc_host_ida);
40048 +#ifdef CONFIG_PM_SLEEP
40049 +static int mmc_host_class_prepare(struct device *dev)
40051 +       struct mmc_host *host = cls_dev_to_mmc_host(dev);
40053 +       /*
40054 +        * It's safe to access the bus_ops pointer, as both userspace and the
40055 +        * workqueue for detecting cards are frozen at this point.
40056 +        */
40057 +       if (!host->bus_ops)
40058 +               return 0;
40060 +       /* Validate conditions for system suspend. */
40061 +       if (host->bus_ops->pre_suspend)
40062 +               return host->bus_ops->pre_suspend(host);
40064 +       return 0;
40067 +static void mmc_host_class_complete(struct device *dev)
40069 +       struct mmc_host *host = cls_dev_to_mmc_host(dev);
40071 +       _mmc_detect_change(host, 0, false);
40074 +static const struct dev_pm_ops mmc_host_class_dev_pm_ops = {
40075 +       .prepare = mmc_host_class_prepare,
40076 +       .complete = mmc_host_class_complete,
40079 +#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops)
40080 +#else
40081 +#define MMC_HOST_CLASS_DEV_PM_OPS NULL
40082 +#endif
40084  static void mmc_host_classdev_release(struct device *dev)
40086         struct mmc_host *host = cls_dev_to_mmc_host(dev);
40087 @@ -46,6 +82,7 @@ static void mmc_host_classdev_release(struct device *dev)
40088  static struct class mmc_host_class = {
40089         .name           = "mmc_host",
40090         .dev_release    = mmc_host_classdev_release,
40091 +       .pm             = MMC_HOST_CLASS_DEV_PM_OPS,
40092  };
40094  int mmc_register_host_class(void)
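Together with the core.c deletion above, this replaces the hand-rolled PM notifier with the driver model's prepare/complete hooks on the mmc_host class: prepare runs once userspace and the card-detect work are frozen (and can veto suspend by returning an error), complete re-triggers card detection on resume. A standalone model of that dispatch, names hypothetical:

#include <stdio.h>

struct device;

struct dev_pm_ops {
	int  (*prepare)(struct device *);
	void (*complete)(struct device *);
};

struct device {
	const char *name;
	const struct dev_pm_ops *pm;
};

static int suspend_one(struct device *dev)
{
	if (dev->pm && dev->pm->prepare) {
		int err = dev->pm->prepare(dev);
		if (err)
			return err;      /* veto: suspend is aborted */
	}
	printf("%s suspended\n", dev->name);
	return 0;
}

static void resume_one(struct device *dev)
{
	printf("%s resumed\n", dev->name);
	if (dev->pm && dev->pm->complete)
		dev->pm->complete(dev);  /* e.g. re-run card detection */
}

static int host_prepare(struct device *d)
{
	printf("%s: pre_suspend checks\n", d->name);
	return 0;
}

static void host_complete(struct device *d)
{
	printf("%s: detect change\n", d->name);
}

static const struct dev_pm_ops host_pm = { host_prepare, host_complete };

int main(void)
{
	struct device host = { "mmc_host", &host_pm };

	if (!suspend_one(&host))
		resume_one(&host);
	return 0;
}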
40095 @@ -538,8 +575,6 @@ int mmc_add_host(struct mmc_host *host)
40096  #endif
40098         mmc_start_host(host);
40099 -       mmc_register_pm_notifier(host);
40101         return 0;
40104 @@ -555,7 +590,6 @@ EXPORT_SYMBOL(mmc_add_host);
40105   */
40106  void mmc_remove_host(struct mmc_host *host)
40108 -       mmc_unregister_pm_notifier(host);
40109         mmc_stop_host(host);
40111  #ifdef CONFIG_DEBUG_FS
40112 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
40113 index 8741271d3971..4d2b4b0da93c 100644
40114 --- a/drivers/mmc/core/mmc.c
40115 +++ b/drivers/mmc/core/mmc.c
40116 @@ -2029,6 +2029,12 @@ static void mmc_detect(struct mmc_host *host)
40117         }
40120 +static bool _mmc_cache_enabled(struct mmc_host *host)
40122 +       return host->card->ext_csd.cache_size > 0 &&
40123 +              host->card->ext_csd.cache_ctrl & 1;
40126  static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
40128         int err = 0;
40129 @@ -2208,6 +2214,7 @@ static const struct mmc_bus_ops mmc_ops = {
40130         .alive = mmc_alive,
40131         .shutdown = mmc_shutdown,
40132         .hw_reset = _mmc_hw_reset,
40133 +       .cache_enabled = _mmc_cache_enabled,
40134  };
40136  /*
40137 diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
40138 index 265d95ec82ce..c458f6b626a2 100644
40139 --- a/drivers/mmc/core/mmc_ops.c
40140 +++ b/drivers/mmc/core/mmc_ops.c
40141 @@ -988,9 +988,7 @@ int mmc_flush_cache(struct mmc_card *card)
40143         int err = 0;
40145 -       if (mmc_card_mmc(card) &&
40146 -                       (card->ext_csd.cache_size > 0) &&
40147 -                       (card->ext_csd.cache_ctrl & 1)) {
40148 +       if (mmc_cache_enabled(card->host)) {
40149                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
40150                                  EXT_CSD_FLUSH_CACHE, 1,
40151                                  MMC_CACHE_FLUSH_TIMEOUT_MS);
40152 diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
40153 index 6fa51a6ed058..2c48d6504101 100644
40154 --- a/drivers/mmc/core/sd.c
40155 +++ b/drivers/mmc/core/sd.c
40156 @@ -135,6 +135,9 @@ static int mmc_decode_csd(struct mmc_card *card)
40157                         csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
40158                         csd->erase_size <<= csd->write_blkbits - 9;
40159                 }
40161 +               if (UNSTUFF_BITS(resp, 13, 1))
40162 +                       mmc_card_set_readonly(card);
40163                 break;
40164         case 1:
40165                 /*
40166 @@ -169,6 +172,9 @@ static int mmc_decode_csd(struct mmc_card *card)
40167                 csd->write_blkbits = 9;
40168                 csd->write_partial = 0;
40169                 csd->erase_size = 1;
40171 +               if (UNSTUFF_BITS(resp, 13, 1))
40172 +                       mmc_card_set_readonly(card);
40173                 break;
40174         default:
40175                 pr_err("%s: unrecognised CSD structure version %d\n",
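Both CSD structure versions gain the same check: CSD bit 13 is the card's permanent write-protect flag, and a card advertising it is now marked read-only up front. The kernel's UNSTUFF_BITS pulls a bit field out of the four 32-bit response words (most significant word first, so resp[3] holds bits 31:0); a simplified standalone take:

#include <stdio.h>
#include <stdint.h>

/* Extract 'size' bits starting at bit 'start' from a 128-bit response
 * stored as four 32-bit words, most significant word first. */
static uint32_t unstuff_bits(const uint32_t resp[4], int start, int size)
{
	uint32_t mask = (size < 32 ? (1u << size) : 0u) - 1;
	int off = 3 - start / 32;
	int shft = start & 31;
	uint32_t res = resp[off] >> shft;

	if (size + shft > 32)
		res |= resp[off - 1] << (32 - shft);
	return res & mask;
}

int main(void)
{
	uint32_t resp[4] = { 0, 0, 0, 1u << 13 }; /* PERM_WRITE_PROTECT set */

	if (unstuff_bits(resp, 13, 1))
		printf("permanently write-protected: mark card read-only\n");
	return 0;
}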
40176 diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
40177 index 0fda7784cab2..3eb94ac2712e 100644
40178 --- a/drivers/mmc/core/sdio.c
40179 +++ b/drivers/mmc/core/sdio.c
40180 @@ -985,21 +985,37 @@ static void mmc_sdio_detect(struct mmc_host *host)
40181   */
40182  static int mmc_sdio_pre_suspend(struct mmc_host *host)
40184 -       int i, err = 0;
40185 +       int i;
40187         for (i = 0; i < host->card->sdio_funcs; i++) {
40188                 struct sdio_func *func = host->card->sdio_func[i];
40189                 if (func && sdio_func_present(func) && func->dev.driver) {
40190                         const struct dev_pm_ops *pmops = func->dev.driver->pm;
40191 -                       if (!pmops || !pmops->suspend || !pmops->resume) {
40192 +                       if (!pmops || !pmops->suspend || !pmops->resume)
40193                                 /* force removal of entire card in that case */
40194 -                               err = -ENOSYS;
40195 -                               break;
40196 -                       }
40197 +                               goto remove;
40198                 }
40199         }
40201 -       return err;
40202 +       return 0;
40204 +remove:
40205 +       if (!mmc_card_is_removable(host)) {
40206 +               dev_warn(mmc_dev(host),
40207 +                        "missing suspend/resume ops for non-removable SDIO card\n");
40208 +               /* Don't remove a non-removable card - we can't re-detect it. */
40209 +               return 0;
40210 +       }
40212 +       /* Remove the SDIO card and let it be re-detected later on. */
40213 +       mmc_sdio_remove(host);
40214 +       mmc_claim_host(host);
40215 +       mmc_detach_bus(host);
40216 +       mmc_power_off(host);
40217 +       mmc_release_host(host);
40218 +       host->pm_flags = 0;
40220 +       return 0;
40223  /*
40224 diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
40225 index b8b771b643cc..016a6106151a 100644
40226 --- a/drivers/mmc/host/meson-gx-mmc.c
40227 +++ b/drivers/mmc/host/meson-gx-mmc.c
40228 @@ -236,7 +236,8 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
40229         if (host->dram_access_quirk)
40230                 return;
40232 -       if (data->blocks > 1) {
40233 +       /* SD_IO_RW_EXTENDED (CMD53) can also use block mode under the hood */
40234 +       if (data->blocks > 1 || mrq->cmd->opcode == SD_IO_RW_EXTENDED) {
40235                 /*
40236                  * In block mode DMA descriptor format, "length" field indicates
40237                  * number of blocks and there is no way to pass DMA size that
40238 @@ -258,7 +259,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
40239         for_each_sg(data->sg, sg, data->sg_len, i) {
40240                 /* check for 8 byte alignment */
40241                 if (sg->offset % 8) {
40242 -                       WARN_ONCE(1, "unaligned scatterlist buffer\n");
40243 +                       dev_warn_once(mmc_dev(mmc),
40244 +                                     "unaligned sg offset %u, disabling descriptor DMA for transfer\n",
40245 +                                     sg->offset);
40246                         return;
40247                 }
40248         }
40249 diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
40250 index f9780c65ebe9..f24623aac2db 100644
40251 --- a/drivers/mmc/host/sdhci-brcmstb.c
40252 +++ b/drivers/mmc/host/sdhci-brcmstb.c
40253 @@ -199,7 +199,6 @@ static int sdhci_brcmstb_add_host(struct sdhci_host *host,
40254         if (dma64) {
40255                 dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
40256                 cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
40257 -               cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
40258         }
40260         ret = cqhci_init(cq_host, host->mmc, dma64);
40261 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
40262 index a20459744d21..94327988da91 100644
40263 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
40264 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
40265 @@ -1488,7 +1488,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
40267         mmc_of_parse_voltage(np, &host->ocr_mask);
40269 -       if (esdhc_is_usdhc(imx_data)) {
40270 +       if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pinctrl)) {
40271                 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
40272                                                 ESDHC_PINCTRL_STATE_100MHZ);
40273                 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
40274 diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
40275 index 9552708846ca..bf04a08eeba1 100644
40276 --- a/drivers/mmc/host/sdhci-pci-core.c
40277 +++ b/drivers/mmc/host/sdhci-pci-core.c
40278 @@ -516,6 +516,7 @@ struct intel_host {
40279         int     drv_strength;
40280         bool    d3_retune;
40281         bool    rpm_retune_ok;
40282 +       bool    needs_pwr_off;
40283         u32     glk_rx_ctrl1;
40284         u32     glk_tun_val;
40285         u32     active_ltr;
40286 @@ -643,9 +644,25 @@ static int bxt_get_cd(struct mmc_host *mmc)
40287  static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
40288                                   unsigned short vdd)
40290 +       struct sdhci_pci_slot *slot = sdhci_priv(host);
40291 +       struct intel_host *intel_host = sdhci_pci_priv(slot);
40292         int cntr;
40293         u8 reg;
40295 +       /*
40296 +        * Bus power may control card power, but a full reset still may not
40297 +        * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
40298 +        * That might be needed to initialize correctly, if the card was left
40299 +        * powered on previously.
40300 +        */
40301 +       if (intel_host->needs_pwr_off) {
40302 +               intel_host->needs_pwr_off = false;
40303 +               if (mode != MMC_POWER_OFF) {
40304 +                       sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
40305 +                       usleep_range(10000, 12500);
40306 +               }
40307 +       }
40309         sdhci_set_power(host, mode, vdd);
40311         if (mode == MMC_POWER_OFF)
40312 @@ -1135,6 +1152,14 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
40313         return 0;
40316 +static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
40318 +       struct intel_host *intel_host = sdhci_pci_priv(slot);
40319 +       u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
40321 +       intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
40324  static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
40326         byt_probe_slot(slot);
40327 @@ -1152,6 +1177,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
40328             slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
40329                 slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
40331 +       byt_needs_pwr_off(slot);
40333         return 0;
40336 @@ -1903,6 +1930,8 @@ static const struct pci_device_id pci_ids[] = {
40337         SDHCI_PCI_DEVICE(INTEL, CMLH_SD,   intel_byt_sd),
40338         SDHCI_PCI_DEVICE(INTEL, JSL_EMMC,  intel_glk_emmc),
40339         SDHCI_PCI_DEVICE(INTEL, JSL_SD,    intel_byt_sd),
40340 +       SDHCI_PCI_DEVICE(INTEL, LKF_EMMC,  intel_glk_emmc),
40341 +       SDHCI_PCI_DEVICE(INTEL, LKF_SD,    intel_byt_sd),
40342         SDHCI_PCI_DEVICE(O2, 8120,     o2),
40343         SDHCI_PCI_DEVICE(O2, 8220,     o2),
40344         SDHCI_PCI_DEVICE(O2, 8221,     o2),
40345 diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
40346 index 4a0f69b97a78..757211922506 100644
40347 --- a/drivers/mmc/host/sdhci-pci-gli.c
40348 +++ b/drivers/mmc/host/sdhci-pci-gli.c
40349 @@ -587,8 +587,13 @@ static void sdhci_gli_voltage_switch(struct sdhci_host *host)
40350          *
40351          * Wait 5ms after set 1.8V signal enable in Host Control 2 register
40352          * to ensure 1.8V signal enable bit is set by GL9750/GL9755.
40353 +        *
40354 +        * ...however, the controller in the NUC10i3FNK4 (a 9755) requires
40355 +        * slightly longer than 5ms before the control register reports that
40356 +        * 1.8V is ready, and far longer still before the card will actually
40357 +        * work reliably.
40358          */
40359 -       usleep_range(5000, 5500);
40360 +       usleep_range(100000, 110000);
40363  static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
40364 diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
40365 index d0ed232af0eb..8f90c4163bb5 100644
40366 --- a/drivers/mmc/host/sdhci-pci.h
40367 +++ b/drivers/mmc/host/sdhci-pci.h
40368 @@ -57,6 +57,8 @@
40369  #define PCI_DEVICE_ID_INTEL_CMLH_SD    0x06f5
40370  #define PCI_DEVICE_ID_INTEL_JSL_EMMC   0x4dc4
40371  #define PCI_DEVICE_ID_INTEL_JSL_SD     0x4df8
40372 +#define PCI_DEVICE_ID_INTEL_LKF_EMMC   0x98c4
40373 +#define PCI_DEVICE_ID_INTEL_LKF_SD     0x98f8
40375  #define PCI_DEVICE_ID_SYSKONNECT_8000  0x8000
40376  #define PCI_DEVICE_ID_VIA_95D0         0x95d0
40377 diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
40378 index 41d193fa77bb..8ea9132ebca4 100644
40379 --- a/drivers/mmc/host/sdhci-tegra.c
40380 +++ b/drivers/mmc/host/sdhci-tegra.c
40381 @@ -119,6 +119,10 @@
40382  /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
40383  #define SDHCI_TEGRA_CQE_BASE_ADDR                      0xF000
40385 +#define SDHCI_TEGRA_CQE_TRNS_MODE      (SDHCI_TRNS_MULTI | \
40386 +                                        SDHCI_TRNS_BLK_CNT_EN | \
40387 +                                        SDHCI_TRNS_DMA)
40389  struct sdhci_tegra_soc_data {
40390         const struct sdhci_pltfm_data *pdata;
40391         u64 dma_mask;
40392 @@ -1156,6 +1160,7 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
40393  static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
40395         struct mmc_host *mmc = cq_host->mmc;
40396 +       struct sdhci_host *host = mmc_priv(mmc);
40397         u8 ctrl;
40398         ktime_t timeout;
40399         bool timed_out;
40400 @@ -1170,6 +1175,7 @@ static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
40401          */
40402         if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
40403             cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
40404 +               sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
40405                 sdhci_cqe_enable(mmc);
40406                 writel(val, cq_host->mmio + reg);
40407                 timeout = ktime_add_us(ktime_get(), 50);
40408 @@ -1205,6 +1211,7 @@ static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
40409  static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
40411         struct cqhci_host *cq_host = mmc->cqe_private;
40412 +       struct sdhci_host *host = mmc_priv(mmc);
40413         u32 val;
40415         /*
40416 @@ -1218,6 +1225,7 @@ static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
40417                 if (val & CQHCI_ENABLE)
40418                         cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
40419                                      CQHCI_CFG);
40420 +               sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
40421                 sdhci_cqe_enable(mmc);
40422                 if (val & CQHCI_ENABLE)
40423                         cqhci_writel(cq_host, val, CQHCI_CFG);
40424 @@ -1281,12 +1289,36 @@ static void tegra_sdhci_set_timeout(struct sdhci_host *host,
40425         __sdhci_set_timeout(host, cmd);
40428 +static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
40430 +       struct cqhci_host *cq_host = mmc->cqe_private;
40431 +       u32 reg;
40433 +       reg = cqhci_readl(cq_host, CQHCI_CFG);
40434 +       reg |= CQHCI_ENABLE;
40435 +       cqhci_writel(cq_host, reg, CQHCI_CFG);
40438 +static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
40440 +       struct cqhci_host *cq_host = mmc->cqe_private;
40441 +       struct sdhci_host *host = mmc_priv(mmc);
40442 +       u32 reg;
40444 +       reg = cqhci_readl(cq_host, CQHCI_CFG);
40445 +       reg &= ~CQHCI_ENABLE;
40446 +       cqhci_writel(cq_host, reg, CQHCI_CFG);
40447 +       sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
40450  static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
40451         .write_l    = tegra_cqhci_writel,
40452         .enable = sdhci_tegra_cqe_enable,
40453         .disable = sdhci_cqe_disable,
40454         .dumpregs = sdhci_tegra_dumpregs,
40455         .update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
40456 +       .pre_enable = sdhci_tegra_cqe_pre_enable,
40457 +       .post_disable = sdhci_tegra_cqe_post_disable,
40458  };
40460  static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
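Tegra's command-queue engine wants SDHCI_TRANSFER_MODE pre-loaded with the multi-block DMA bits before CQE is unhalted or enabled, and cleared again after disable; the new pre_enable/post_disable hooks are otherwise plain read-modify-write toggles of the CQHCI_CFG enable bit. That RMW pattern, modeled on a fake register:

#include <stdio.h>
#include <stdint.h>

#define CQHCI_ENABLE (1u << 0)    /* modeled config-register enable bit */

static uint32_t cqhci_cfg;        /* stand-in for the mapped CQHCI_CFG reg */

static uint32_t cqhci_readl(void)        { return cqhci_cfg; }
static void     cqhci_writel(uint32_t v) { cqhci_cfg = v; }

static void cqe_pre_enable(void)
{
	uint32_t reg = cqhci_readl();

	reg |= CQHCI_ENABLE;          /* set enable, preserve other bits */
	cqhci_writel(reg);
}

static void cqe_post_disable(void)
{
	uint32_t reg = cqhci_readl();

	reg &= ~CQHCI_ENABLE;         /* clear enable, preserve other bits */
	cqhci_writel(reg);
	/* the real hook also zeroes SDHCI_TRANSFER_MODE here */
}

int main(void)
{
	cqhci_cfg = 0xf0;             /* pretend other config bits are set */
	cqe_pre_enable();
	printf("after enable:  %#x\n", cqhci_cfg);
	cqe_post_disable();
	printf("after disable: %#x\n", cqhci_cfg);
	return 0;
}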
40461 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
40462 index 2d73407ee52e..a9e20818ff3a 100644
40463 --- a/drivers/mmc/host/sdhci.c
40464 +++ b/drivers/mmc/host/sdhci.c
40465 @@ -2996,6 +2996,37 @@ static bool sdhci_request_done(struct sdhci_host *host)
40466                 return true;
40467         }
40469 +       /*
40470 +        * The controller needs a reset of internal state machines
40471 +        * upon error conditions.
40472 +        */
40473 +       if (sdhci_needs_reset(host, mrq)) {
40474 +               /*
40475 +                * Do not finish until command and data lines are available for
40476 +                * reset. Note there can only be one other mrq, so it cannot
40477 +                * also be in mrqs_done, otherwise host->cmd and host->data_cmd
40478 +                * would both be null.
40479 +                */
40480 +               if (host->cmd || host->data_cmd) {
40481 +                       spin_unlock_irqrestore(&host->lock, flags);
40482 +                       return true;
40483 +               }
40485 +               /* Some controllers need this kick or reset won't work here */
40486 +               if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
40487 +                       /* This is to force an update */
40488 +                       host->ops->set_clock(host, host->clock);
40490 +               /*
40491 +                * Spec says we should do both at the same time, but Ricoh
40492 +                * controllers do not like that.
40493 +                */
40494 +               sdhci_do_reset(host, SDHCI_RESET_CMD);
40495 +               sdhci_do_reset(host, SDHCI_RESET_DATA);
40497 +               host->pending_reset = false;
40498 +       }
40500         /*
40501          * Always unmap the data buffers if they were mapped by
40502          * sdhci_prepare_data() whenever we finish with a request.
40503 @@ -3059,35 +3090,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
40504                 }
40505         }
40507 -       /*
40508 -        * The controller needs a reset of internal state machines
40509 -        * upon error conditions.
40510 -        */
40511 -       if (sdhci_needs_reset(host, mrq)) {
40512 -               /*
40513 -                * Do not finish until command and data lines are available for
40514 -                * reset. Note there can only be one other mrq, so it cannot
40515 -                * also be in mrqs_done, otherwise host->cmd and host->data_cmd
40516 -                * would both be null.
40517 -                */
40518 -               if (host->cmd || host->data_cmd) {
40519 -                       spin_unlock_irqrestore(&host->lock, flags);
40520 -                       return true;
40521 -               }
40523 -               /* Some controllers need this kick or reset won't work here */
40524 -               if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
40525 -                       /* This is to force an update */
40526 -                       host->ops->set_clock(host, host->clock);
40528 -               /* Spec says we should do both at the same time, but Ricoh
40529 -                  controllers do not like that. */
40530 -               sdhci_do_reset(host, SDHCI_RESET_CMD);
40531 -               sdhci_do_reset(host, SDHCI_RESET_DATA);
40533 -               host->pending_reset = false;
40534 -       }
40536         host->mrqs_done[i] = NULL;
40538         spin_unlock_irqrestore(&host->lock, flags);
40539 diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
40540 index 2413b6750cec..ccbf9885a52b 100644
40541 --- a/drivers/mmc/host/uniphier-sd.c
40542 +++ b/drivers/mmc/host/uniphier-sd.c
40543 @@ -635,7 +635,7 @@ static int uniphier_sd_probe(struct platform_device *pdev)
40545         ret = tmio_mmc_host_probe(host);
40546         if (ret)
40547 -               goto free_host;
40548 +               goto disable_clk;
40550         ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
40551                                dev_name(dev), host);
40552 @@ -646,6 +646,8 @@ static int uniphier_sd_probe(struct platform_device *pdev)
40554  remove_host:
40555         tmio_mmc_host_remove(host);
40556 +disable_clk:
40557 +       uniphier_sd_clk_disable(host);
40558  free_host:
40559         tmio_mmc_host_free(host);
40561 @@ -658,6 +660,7 @@ static int uniphier_sd_remove(struct platform_device *pdev)
40563         tmio_mmc_host_remove(host);
40564         uniphier_sd_clk_disable(host);
40565 +       tmio_mmc_host_free(host);
40567         return 0;
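Both uniphier-sd fixes restore symmetric teardown: probe now unwinds through disable_clk before free_host, and remove frees the host it allocated. The underlying goto-unwind idiom, one label per acquired resource, released in reverse order, as a runnable sketch:

#include <stdio.h>

static int acquire(const char *what, int fail)
{
	if (fail) {
		printf("failed to acquire %s\n", what);
		return -1;
	}
	printf("acquired %s\n", what);
	return 0;
}

static void release(const char *what) { printf("released %s\n", what); }

static int probe(int fail_at)
{
	int ret;

	if ((ret = acquire("host", fail_at == 1)))
		return ret;
	if ((ret = acquire("clk", fail_at == 2)))
		goto free_host;
	if ((ret = acquire("irq", fail_at == 3)))
		goto disable_clk;      /* unwind in exact reverse order */
	return 0;

disable_clk:
	release("clk");
free_host:
	release("host");
	return ret;
}

int main(void)
{
	probe(3);
	return 0;
}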
40569 diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
40570 index a35450002284..58782cfaf71c 100644
40571 --- a/drivers/mtd/maps/physmap-bt1-rom.c
40572 +++ b/drivers/mtd/maps/physmap-bt1-rom.c
40573 @@ -79,7 +79,7 @@ static void __xipram bt1_rom_map_copy_from(struct map_info *map,
40574         if (shift) {
40575                 chunk = min_t(ssize_t, 4 - shift, len);
40576                 data = readl_relaxed(src - shift);
40577 -               memcpy(to, &data + shift, chunk);
40578 +               memcpy(to, (char *)&data + shift, chunk);
40579                 src += chunk;
40580                 to += chunk;
40581                 len -= chunk;
40582 diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
40583 index 001ed5deb622..4f63b8430c71 100644
40584 --- a/drivers/mtd/maps/physmap-core.c
40585 +++ b/drivers/mtd/maps/physmap-core.c
40586 @@ -69,8 +69,10 @@ static int physmap_flash_remove(struct platform_device *dev)
40587         int i, err = 0;
40589         info = platform_get_drvdata(dev);
40590 -       if (!info)
40591 +       if (!info) {
40592 +               err = -EINVAL;
40593                 goto out;
40594 +       }
40596         if (info->cmtd) {
40597                 err = mtd_device_unregister(info->cmtd);
40598 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
40599 index 323035d4f2d0..688de663cabf 100644
40600 --- a/drivers/mtd/mtdchar.c
40601 +++ b/drivers/mtd/mtdchar.c
40602 @@ -651,16 +651,12 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
40603         case MEMGETINFO:
40604         case MEMREADOOB:
40605         case MEMREADOOB64:
40606 -       case MEMLOCK:
40607 -       case MEMUNLOCK:
40608         case MEMISLOCKED:
40609         case MEMGETOOBSEL:
40610         case MEMGETBADBLOCK:
40611 -       case MEMSETBADBLOCK:
40612         case OTPSELECT:
40613         case OTPGETREGIONCOUNT:
40614         case OTPGETREGIONINFO:
40615 -       case OTPLOCK:
40616         case ECCGETLAYOUT:
40617         case ECCGETSTATS:
40618         case MTDFILEMODE:
40619 @@ -671,9 +667,13 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
40620         /* "dangerous" commands */
40621         case MEMERASE:
40622         case MEMERASE64:
40623 +       case MEMLOCK:
40624 +       case MEMUNLOCK:
40625 +       case MEMSETBADBLOCK:
40626         case MEMWRITEOOB:
40627         case MEMWRITEOOB64:
40628         case MEMWRITE:
40629 +       case OTPLOCK:
40630                 if (!(file->f_mode & FMODE_WRITE))
40631                         return -EPERM;
40632                 break;
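The mtdchar hunks reclassify rather than reimplement: MEMLOCK, MEMUNLOCK, MEMSETBADBLOCK and OTPLOCK all mutate flash state, so they move from the always-allowed ioctl group into the one requiring a writable file descriptor. The gate in isolation:

#include <stdio.h>
#include <stdbool.h>

enum cmd { MEMGETINFO, MEMISLOCKED, MEMERASE, MEMLOCK, MEMUNLOCK, OTPLOCK };

static int check_ioctl(enum cmd cmd, bool opened_for_write)
{
	switch (cmd) {
	case MEMGETINFO:
	case MEMISLOCKED:          /* read-only queries: always permitted */
		return 0;
	case MEMERASE:
	case MEMLOCK:
	case MEMUNLOCK:
	case OTPLOCK:              /* state-changing: need a writable fd */
		return opened_for_write ? 0 : -1 /* -EPERM */;
	}
	return -1;
}

int main(void)
{
	printf("%d %d\n", check_ioctl(MEMLOCK, false),
	       check_ioctl(MEMLOCK, true));
	return 0;
}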
40633 diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
40634 index 2d6423d89a17..d97ddc65b5d4 100644
40635 --- a/drivers/mtd/mtdcore.c
40636 +++ b/drivers/mtd/mtdcore.c
40637 @@ -820,6 +820,9 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
40639         /* Prefer parsed partitions over driver-provided fallback */
40640         ret = parse_mtd_partitions(mtd, types, parser_data);
40641 +       if (ret == -EPROBE_DEFER)
40642 +               goto out;
40644         if (ret > 0)
40645                 ret = 0;
40646         else if (nr_parts)
40647 diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
40648 index 12ca4f19cb14..665fd9020b76 100644
40649 --- a/drivers/mtd/mtdpart.c
40650 +++ b/drivers/mtd/mtdpart.c
40651 @@ -331,7 +331,7 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
40653         list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
40654                 if (mtd_has_partitions(child))
40655 -                       del_mtd_partitions(child);
40656 +                       __del_mtd_partitions(child);
40658                 pr_info("Deleting %s MTD partition\n", child->name);
40659                 ret = del_mtd_device(child);
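__del_mtd_partitions() is the lock-held worker behind the locking wrapper del_mtd_partitions(); recursing through the wrapper from inside the worker re-takes the partitions lock. The fix keeps the recursion on the unlocked variant, the standard wrapper/worker split (sketch below, compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker: caller must hold 'lock'; safe to call recursively. */
static void __del_partitions(int depth)
{
	if (depth == 0)
		return;
	printf("deleting partition at depth %d\n", depth);
	__del_partitions(depth - 1);   /* recurse on the *unlocked* variant */
}

/* Wrapper: takes the lock exactly once, then delegates. */
static void del_partitions(int depth)
{
	pthread_mutex_lock(&lock);
	__del_partitions(depth);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	del_partitions(3);
	return 0;
}

With a non-recursive mutex, calling del_partitions() from inside __del_partitions() would deadlock on the second lock attempt, which is exactly what the one-line fix avoids.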
40660 diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
40661 index e6ceec8f50dc..8aab1017b460 100644
40662 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c
40663 +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
40664 @@ -883,10 +883,12 @@ static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
40665                                                           NULL, 0,
40666                                                           chip->ecc.strength);
40668 -               if (ret >= 0)
40669 +               if (ret >= 0) {
40670 +                       mtd->ecc_stats.corrected += ret;
40671                         max_bitflips = max(ret, max_bitflips);
40672 -               else
40673 +               } else {
40674                         mtd->ecc_stats.failed++;
40675 +               }
40677                 databuf += chip->ecc.size;
40678                 eccbuf += chip->ecc.bytes;
40679 diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
40680 index 659eaa6f0980..5ff4291380c5 100644
40681 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
40682 +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
40683 @@ -2688,6 +2688,12 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
40685         ret = brcmstb_choose_ecc_layout(host);
40687 +       /* If OOB is written with ECC enabled it will cause ECC errors */
40688 +       if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
40689 +               chip->ecc.write_oob = brcmnand_write_oob_raw;
40690 +               chip->ecc.read_oob = brcmnand_read_oob_raw;
40691 +       }
40693         return ret;
40696 diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
40697 index 0101c0fab50a..a24e2f57fa68 100644
40698 --- a/drivers/mtd/nand/raw/fsmc_nand.c
40699 +++ b/drivers/mtd/nand/raw/fsmc_nand.c
40700 @@ -1077,11 +1077,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
40701                 host->read_dma_chan = dma_request_channel(mask, filter, NULL);
40702                 if (!host->read_dma_chan) {
40703                         dev_err(&pdev->dev, "Unable to get read dma channel\n");
40704 +                       ret = -ENODEV;
40705                         goto disable_clk;
40706                 }
40707                 host->write_dma_chan = dma_request_channel(mask, filter, NULL);
40708                 if (!host->write_dma_chan) {
40709                         dev_err(&pdev->dev, "Unable to get write dma channel\n");
40710 +                       ret = -ENODEV;
40711                         goto release_dma_read_chan;
40712                 }
40713         }
40714 diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
40715 index 3fa8c22d3f36..4d08e4ab5c1b 100644
40716 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
40717 +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
40718 @@ -2449,7 +2449,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
40719         this->bch_geometry.auxiliary_size = 128;
40720         ret = gpmi_alloc_dma_buffer(this);
40721         if (ret)
40722 -               goto err_out;
40723 +               return ret;
40725         nand_controller_init(&this->base);
40726         this->base.ops = &gpmi_nand_controller_ops;
40727 diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
40728 index fd4c318b520f..87c23bb320bf 100644
40729 --- a/drivers/mtd/nand/raw/qcom_nandc.c
40730 +++ b/drivers/mtd/nand/raw/qcom_nandc.c
40731 @@ -2898,7 +2898,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
40732         struct device *dev = nandc->dev;
40733         struct device_node *dn = dev->of_node, *child;
40734         struct qcom_nand_host *host;
40735 -       int ret;
40736 +       int ret = -ENODEV;
40738         for_each_available_child_of_node(dn, child) {
40739                 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
40740 @@ -2916,10 +2916,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
40741                 list_add_tail(&host->node, &nandc->host_list);
40742         }
40744 -       if (list_empty(&nandc->host_list))
40745 -               return -ENODEV;
40747 -       return 0;
40748 +       return ret;
40751  /* parse custom DT properties here */
40752 diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
40753 index 61d932c1b718..17f63f95f4a2 100644
40754 --- a/drivers/mtd/nand/spi/core.c
40755 +++ b/drivers/mtd/nand/spi/core.c
40756 @@ -1263,12 +1263,14 @@ static const struct spi_device_id spinand_ids[] = {
40757         { .name = "spi-nand" },
40758         { /* sentinel */ },
40759  };
40760 +MODULE_DEVICE_TABLE(spi, spinand_ids);
40762  #ifdef CONFIG_OF
40763  static const struct of_device_id spinand_of_ids[] = {
40764         { .compatible = "spi-nand" },
40765         { /* sentinel */ },
40766  };
40767 +MODULE_DEVICE_TABLE(of, spinand_of_ids);
40768  #endif
40770  static struct spi_mem_driver spinand_drv = {
40771 diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
40772 index 808cb33d71f8..d9083308f6ba 100644
40773 --- a/drivers/mtd/parsers/qcomsmempart.c
40774 +++ b/drivers/mtd/parsers/qcomsmempart.c
40775 @@ -65,6 +65,13 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
40776         int ret, i, numparts;
40777         char *name, *c;
40779 +       if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
40780 +                       && mtd->type == MTD_NORFLASH) {
40781 +               pr_err("%s: SMEM partition parser is incompatible with 4K sectors\n",
40782 +                               mtd->name);
40783 +               return -EINVAL;
40784 +       }
40786         pr_debug("Parsing partition table info from SMEM\n");
40787         ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
40788         if (IS_ERR(ptable)) {
40789 @@ -104,7 +111,7 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
40790          * complete partition table
40791          */
40792         ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
40793 -       if (IS_ERR_OR_NULL(ptable)) {
40794 +       if (IS_ERR(ptable)) {
40795                 pr_err("Error reading partition table\n");
40796                 return PTR_ERR(ptable);
40797         }
40798 diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
40799 index 0522304f52fa..72bc1342c3ff 100644
40800 --- a/drivers/mtd/spi-nor/core.c
40801 +++ b/drivers/mtd/spi-nor/core.c
40802 @@ -3301,6 +3301,37 @@ static void spi_nor_resume(struct mtd_info *mtd)
40803                 dev_err(dev, "resume() failed\n");
40806 +static int spi_nor_get_device(struct mtd_info *mtd)
40808 +       struct mtd_info *master = mtd_get_master(mtd);
40809 +       struct spi_nor *nor = mtd_to_spi_nor(master);
40810 +       struct device *dev;
40812 +       if (nor->spimem)
40813 +               dev = nor->spimem->spi->controller->dev.parent;
40814 +       else
40815 +               dev = nor->dev;
40817 +       if (!try_module_get(dev->driver->owner))
40818 +               return -ENODEV;
40820 +       return 0;
40823 +static void spi_nor_put_device(struct mtd_info *mtd)
40825 +       struct mtd_info *master = mtd_get_master(mtd);
40826 +       struct spi_nor *nor = mtd_to_spi_nor(master);
40827 +       struct device *dev;
40829 +       if (nor->spimem)
40830 +               dev = nor->spimem->spi->controller->dev.parent;
40831 +       else
40832 +               dev = nor->dev;
40834 +       module_put(dev->driver->owner);
40837  void spi_nor_restore(struct spi_nor *nor)
40839         /* restore the addressing mode */
40840 @@ -3495,6 +3526,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
40841         mtd->_read = spi_nor_read;
40842         mtd->_suspend = spi_nor_suspend;
40843         mtd->_resume = spi_nor_resume;
40844 +       mtd->_get_device = spi_nor_get_device;
40845 +       mtd->_put_device = spi_nor_put_device;
40847         if (nor->params->locking_ops) {
40848                 mtd->_lock = spi_nor_lock;
40849 diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
40850 index 9203abaac229..662b212787d4 100644
40851 --- a/drivers/mtd/spi-nor/macronix.c
40852 +++ b/drivers/mtd/spi-nor/macronix.c
40853 @@ -73,9 +73,6 @@ static const struct flash_info macronix_parts[] = {
40854                               SECT_4K | SPI_NOR_DUAL_READ |
40855                               SPI_NOR_QUAD_READ) },
40856         { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
40857 -       { "mx25l51245g", INFO(0xc2201a, 0, 64 * 1024, 1024,
40858 -                             SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
40859 -                             SPI_NOR_4B_OPCODES) },
40860         { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
40861                               SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
40862                               SPI_NOR_4B_OPCODES) },
40863 diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
40864 index 3d63b15bbaa1..164071e9d457 100644
40865 --- a/drivers/net/caif/caif_hsi.c
40866 +++ b/drivers/net/caif/caif_hsi.c
40867 @@ -924,7 +924,7 @@ static void cfhsi_wake_down(struct work_struct *work)
40868                         break;
40870                 set_current_state(TASK_INTERRUPTIBLE);
40871 -               schedule_timeout(1);
40872 +               schedule_min_hrtimeout();
40873                 retry--;
40874         }
40876 diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
40877 index 6a64fe410987..c3508109263e 100644
40878 --- a/drivers/net/can/dev/skb.c
40879 +++ b/drivers/net/can/dev/skb.c
40880 @@ -151,7 +151,11 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
40882         struct can_priv *priv = netdev_priv(dev);
40884 -       BUG_ON(idx >= priv->echo_skb_max);
40885 +       if (idx >= priv->echo_skb_max) {
40886 +               netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
40887 +                          __func__, idx, priv->echo_skb_max);
40888 +               return;
40889 +       }
40891         if (priv->echo_skb[idx]) {
40892                 dev_kfree_skb_any(priv->echo_skb[idx]);
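Replacing the BUG_ON() turns an out-of-range echo index from a machine-halting assertion into a logged, recoverable condition: validate, report, and return. As a plain function:

#include <stdio.h>

#define ECHO_SKB_MAX 4

static void *echo_skb[ECHO_SKB_MAX];

static void free_echo_skb(unsigned int idx)
{
	if (idx >= ECHO_SKB_MAX) {
		fprintf(stderr,
			"BUG! echo_skb index out of bounds (%u/max %u)\n",
			idx, ECHO_SKB_MAX);
		return;              /* recover instead of crashing */
	}
	echo_skb[idx] = NULL;        /* the real code also frees the skb */
}

int main(void)
{
	free_echo_skb(2);
	free_echo_skb(7);            /* logged and ignored, no crash */
	return 0;
}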
40893 diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
40894 index 0c8d36bc668c..f71127229caf 100644
40895 --- a/drivers/net/can/m_can/m_can.c
40896 +++ b/drivers/net/can/m_can/m_can.c
40897 @@ -1455,6 +1455,8 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
40898         int i;
40899         int putidx;
40901 +       cdev->tx_skb = NULL;
40903         /* Generate ID field for TX buffer Element */
40904         /* Common to all supported M_CAN versions */
40905         if (cf->can_id & CAN_EFF_FLAG) {
40906 @@ -1571,7 +1573,6 @@ static void m_can_tx_work_queue(struct work_struct *ws)
40907                                                    tx_work);
40909         m_can_tx_handler(cdev);
40910 -       cdev->tx_skb = NULL;
40913  static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
40914 diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
40915 index a57da43680d8..bd7d0251be10 100644
40916 --- a/drivers/net/can/spi/mcp251x.c
40917 +++ b/drivers/net/can/spi/mcp251x.c
40918 @@ -956,8 +956,6 @@ static int mcp251x_stop(struct net_device *net)
40920         priv->force_quit = 1;
40921         free_irq(spi->irq, priv);
40922 -       destroy_workqueue(priv->wq);
40923 -       priv->wq = NULL;
40925         mutex_lock(&priv->mcp_lock);
40927 @@ -1224,24 +1222,15 @@ static int mcp251x_open(struct net_device *net)
40928                 goto out_close;
40929         }
40931 -       priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
40932 -                                  0);
40933 -       if (!priv->wq) {
40934 -               ret = -ENOMEM;
40935 -               goto out_clean;
40936 -       }
40937 -       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
40938 -       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
40940         ret = mcp251x_hw_wake(spi);
40941         if (ret)
40942 -               goto out_free_wq;
40943 +               goto out_free_irq;
40944         ret = mcp251x_setup(net, spi);
40945         if (ret)
40946 -               goto out_free_wq;
40947 +               goto out_free_irq;
40948         ret = mcp251x_set_normal_mode(spi);
40949         if (ret)
40950 -               goto out_free_wq;
40951 +               goto out_free_irq;
40953         can_led_event(net, CAN_LED_EVENT_OPEN);
40955 @@ -1250,9 +1239,7 @@ static int mcp251x_open(struct net_device *net)
40957         return 0;
40959 -out_free_wq:
40960 -       destroy_workqueue(priv->wq);
40961 -out_clean:
40962 +out_free_irq:
40963         free_irq(spi->irq, priv);
40964         mcp251x_hw_sleep(spi);
40965  out_close:
40966 @@ -1373,6 +1360,15 @@ static int mcp251x_can_probe(struct spi_device *spi)
40967         if (ret)
40968                 goto out_clk;
40970 +       priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
40971 +                                  0);
40972 +       if (!priv->wq) {
40973 +               ret = -ENOMEM;
40974 +               goto out_clk;
40975 +       }
40976 +       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
40977 +       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
40979         priv->spi = spi;
40980         mutex_init(&priv->mcp_lock);
40982 @@ -1417,6 +1413,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
40983         return 0;
40985  error_probe:
40986 +       destroy_workqueue(priv->wq);
40987 +       priv->wq = NULL;
40988         mcp251x_power_enable(priv->power, 0);
40990  out_clk:
40991 @@ -1438,6 +1436,9 @@ static int mcp251x_can_remove(struct spi_device *spi)
40993         mcp251x_power_enable(priv->power, 0);
40995 +       destroy_workqueue(priv->wq);
40996 +       priv->wq = NULL;
40998         clk_disable_unprepare(priv->clk);
41000         free_candev(net);
41001 diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
41002 index 799e9d5d3481..4a742aa5c417 100644
41003 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
41004 +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
41005 @@ -2856,8 +2856,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
41007         clk = devm_clk_get(&spi->dev, NULL);
41008         if (IS_ERR(clk))
41009 -               dev_err_probe(&spi->dev, PTR_ERR(clk),
41010 -                             "Failed to get Oscillator (clock)!\n");
41011 +               return dev_err_probe(&spi->dev, PTR_ERR(clk),
41012 +                                    "Failed to get Oscillator (clock)!\n");
41013         freq = clk_get_rate(clk);
41015         /* Sanity check */
41016 @@ -2957,10 +2957,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
41018         err = mcp251xfd_register(priv);
41019         if (err)
41020 -               goto out_free_candev;
41021 +               goto out_can_rx_offload_del;
41023         return 0;
41025 + out_can_rx_offload_del:
41026 +       can_rx_offload_del(&priv->offload);
41027   out_free_candev:
41028         spi->max_speed_hz = priv->spi_max_speed_hz_orig;
41030 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
41031 index e393e8457d77..4274f78682d9 100644
41032 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c
41033 +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
41034 @@ -288,7 +288,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
41035         } else {
41036                 /* the PCAN-USB needs time to init */
41037                 set_current_state(TASK_INTERRUPTIBLE);
41038 -               schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
41039 +               schedule_msec_hrtimeout(PCAN_USB_STARTUP_TIMEOUT);
41040         }
41042         return err;
41043 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
41044 index ba5d546d06aa..9c86cacc4a72 100644
41045 --- a/drivers/net/dsa/bcm_sf2.c
41046 +++ b/drivers/net/dsa/bcm_sf2.c
41047 @@ -32,6 +32,36 @@
41048  #include "b53/b53_priv.h"
41049  #include "b53/b53_regs.h"
41051 +static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
41053 +       switch (priv->type) {
41054 +       case BCM4908_DEVICE_ID:
41055 +               switch (port) {
41056 +               case 7:
41057 +                       return REG_RGMII_11_CNTRL;
41058 +               default:
41059 +                       break;
41060 +               }
41061 +               break;
41062 +       default:
41063 +               switch (port) {
41064 +               case 0:
41065 +                       return REG_RGMII_0_CNTRL;
41066 +               case 1:
41067 +                       return REG_RGMII_1_CNTRL;
41068 +               case 2:
41069 +                       return REG_RGMII_2_CNTRL;
41070 +               default:
41071 +                       break;
41072 +               }
41073 +       }
41075 +       WARN_ONCE(1, "Unsupported port %d\n", port);
41077 +       /* RO fallback reg */
41078 +       return REG_SWITCH_STATUS;
41081  /* Return the number of active ports, not counting the IMP (CPU) port */
41082  static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
41084 @@ -647,6 +677,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
41086         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
41087         u32 id_mode_dis = 0, port_mode;
41088 +       u32 reg_rgmii_ctrl;
41089         u32 reg;
41091         if (port == core_readl(priv, CORE_IMP0_PRT_ID))
41092 @@ -670,10 +701,12 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
41093                 return;
41094         }
41096 +       reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
41098         /* Clear id_mode_dis bit, and the existing port mode, let
41099          * RGMII_MODE_EN bet set by mac_link_{up,down}
41100          */
41101 -       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
41102 +       reg = reg_readl(priv, reg_rgmii_ctrl);
41103         reg &= ~ID_MODE_DIS;
41104         reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
41106 @@ -681,13 +714,14 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
41107         if (id_mode_dis)
41108                 reg |= ID_MODE_DIS;
41110 -       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
41111 +       reg_writel(priv, reg, reg_rgmii_ctrl);
41114  static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
41115                                     phy_interface_t interface, bool link)
41117         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
41118 +       u32 reg_rgmii_ctrl;
41119         u32 reg;
41121         if (!phy_interface_mode_is_rgmii(interface) &&
41122 @@ -695,13 +729,15 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
41123             interface != PHY_INTERFACE_MODE_REVMII)
41124                 return;
41126 +       reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
41128         /* If the link is down, just disable the interface to conserve power */
41129 -       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
41130 +       reg = reg_readl(priv, reg_rgmii_ctrl);
41131         if (link)
41132                 reg |= RGMII_MODE_EN;
41133         else
41134                 reg &= ~RGMII_MODE_EN;
41135 -       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
41136 +       reg_writel(priv, reg, reg_rgmii_ctrl);
41139  static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
41140 @@ -735,11 +771,15 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
41142         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
41143         struct ethtool_eee *p = &priv->dev->ports[port].eee;
41144 -       u32 reg, offset;
41146         bcm_sf2_sw_mac_link_set(ds, port, interface, true);
41148         if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
41149 +               u32 reg_rgmii_ctrl;
41150 +               u32 reg, offset;
41152 +               reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
41154                 if (priv->type == BCM4908_DEVICE_ID ||
41155                     priv->type == BCM7445_DEVICE_ID)
41156                         offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
41157 @@ -750,7 +790,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
41158                     interface == PHY_INTERFACE_MODE_RGMII_TXID ||
41159                     interface == PHY_INTERFACE_MODE_MII ||
41160                     interface == PHY_INTERFACE_MODE_REVMII) {
41161 -                       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
41162 +                       reg = reg_readl(priv, reg_rgmii_ctrl);
41163                         reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
41165                         if (tx_pause)
41166 @@ -758,7 +798,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
41167                         if (rx_pause)
41168                                 reg |= RX_PAUSE_EN;
41170 -                       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
41171 +                       reg_writel(priv, reg, reg_rgmii_ctrl);
41172                 }
41174                 reg = SW_OVERRIDE | LINK_STS;
41175 @@ -1144,9 +1184,7 @@ static const u16 bcm_sf2_4908_reg_offsets[] = {
41176         [REG_PHY_REVISION]      = 0x14,
41177         [REG_SPHY_CNTRL]        = 0x24,
41178         [REG_CROSSBAR]          = 0xc8,
41179 -       [REG_RGMII_0_CNTRL]     = 0xe0,
41180 -       [REG_RGMII_1_CNTRL]     = 0xec,
41181 -       [REG_RGMII_2_CNTRL]     = 0xf8,
41182 +       [REG_RGMII_11_CNTRL]    = 0x014c,
41183         [REG_LED_0_CNTRL]       = 0x40,
41184         [REG_LED_1_CNTRL]       = 0x4c,
41185         [REG_LED_2_CNTRL]       = 0x58,
41186 diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
41187 index 1d2d55c9f8aa..9e141d1a0b07 100644
41188 --- a/drivers/net/dsa/bcm_sf2_regs.h
41189 +++ b/drivers/net/dsa/bcm_sf2_regs.h
41190 @@ -21,6 +21,7 @@ enum bcm_sf2_reg_offs {
41191         REG_RGMII_0_CNTRL,
41192         REG_RGMII_1_CNTRL,
41193         REG_RGMII_2_CNTRL,
41194 +       REG_RGMII_11_CNTRL,
41195         REG_LED_0_CNTRL,
41196         REG_LED_1_CNTRL,
41197         REG_LED_2_CNTRL,
41198 @@ -48,8 +49,6 @@ enum bcm_sf2_reg_offs {
41199  #define  PHY_PHYAD_SHIFT               8
41200  #define  PHY_PHYAD_MASK                        0x1F
41202 -#define REG_RGMII_CNTRL_P(x)           (REG_RGMII_0_CNTRL + (x))
41204  /* Relative to REG_RGMII_CNTRL */
41205  #define  RGMII_MODE_EN                 (1 << 0)
41206  #define  ID_MODE_DIS                   (1 << 1)
41207 diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
41208 index 21953d6d484c..ada7a38d4d31 100644
41209 --- a/drivers/net/dsa/mv88e6xxx/devlink.c
41210 +++ b/drivers/net/dsa/mv88e6xxx/devlink.c
41211 @@ -678,7 +678,7 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
41212                                 sizeof(struct mv88e6xxx_devlink_atu_entry);
41213                         break;
41214                 case MV88E6XXX_REGION_VTU:
41215 -                       size = mv88e6xxx_max_vid(chip) *
41216 +                       size = (mv88e6xxx_max_vid(chip) + 1) *
41217                                 sizeof(struct mv88e6xxx_devlink_vtu_entry);
41218                         break;
41219                 }
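
The devlink change above is a classic inclusive-range fix: VIDs run from 0
through mv88e6xxx_max_vid() inclusive, so the VTU snapshot needs
max_vid + 1 entries, not max_vid. A standalone sketch of why the +1
matters (names and sizes are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { unsigned int vid; };

    int main(void)
    {
            unsigned int max_vid = 4095;        /* e.g. a 12-bit VID space */
            size_t n = (size_t)max_vid + 1;     /* 4096 slots for 0..4095 */
            struct entry *tab = calloc(n, sizeof(*tab));

            if (!tab)
                    return 1;
            tab[max_vid].vid = max_vid;         /* in bounds only with +1 */
            printf("allocated %zu entries\n", n);
            free(tab);
            return 0;
    }
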
41220 diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
41221 index 3195936dc5be..2ce04fef698d 100644
41222 --- a/drivers/net/dsa/mv88e6xxx/serdes.c
41223 +++ b/drivers/net/dsa/mv88e6xxx/serdes.c
41224 @@ -443,15 +443,15 @@ int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
41225  int mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
41227         /* There are no configurable serdes lanes on this switch chip but we
41228 -        * need to return non-zero so that callers of
41229 +        * need to return a non-negative lane number so that callers of
41230          * mv88e6xxx_serdes_get_lane() know this is a serdes port.
41231          */
41232         switch (chip->ports[port].cmode) {
41233         case MV88E6185_PORT_STS_CMODE_SERDES:
41234         case MV88E6185_PORT_STS_CMODE_1000BASE_X:
41235 -               return 0xff;
41236 -       default:
41237                 return 0;
41238 +       default:
41239 +               return -ENODEV;
41240         }
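
The serdes fix above changes the reporting convention: lane 0 is a
legitimate lane number, so "no serdes on this port" must be signalled out
of band with a negative errno (-ENODEV) rather than with an in-band magic
value like 0xff. The convention in miniature (stand-in logic, not the
driver's cmode check):

    #include <errno.h>
    #include <stdio.h>

    static int get_lane(int port_has_serdes)
    {
            if (port_has_serdes)
                    return 0;       /* lane 0 is a valid lane */
            return -ENODEV;         /* distinct from every real lane */
    }

    int main(void)
    {
            int lane = get_lane(0);

            if (lane < 0)
                    printf("no serdes lane (%d)\n", lane);
            else
                    printf("serdes lane %d\n", lane);
            return 0;
    }
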
41243 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
41244 index b53a0d87371a..cf4249d59383 100644
41245 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
41246 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
41247 @@ -122,7 +122,10 @@ enum board_idx {
41248         NETXTREME_E_VF,
41249         NETXTREME_C_VF,
41250         NETXTREME_S_VF,
41251 +       NETXTREME_C_VF_HV,
41252 +       NETXTREME_E_VF_HV,
41253         NETXTREME_E_P5_VF,
41254 +       NETXTREME_E_P5_VF_HV,
41255  };
41257  /* indexed by enum above */
41258 @@ -170,7 +173,10 @@ static const struct {
41259         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
41260         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
41261         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
41262 +       [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
41263 +       [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
41264         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
41265 +       [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
41266  };
41268  static const struct pci_device_id bnxt_pci_tbl[] = {
41269 @@ -222,15 +228,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
41270         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
41271  #ifdef CONFIG_BNXT_SRIOV
41272         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
41273 +       { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
41274 +       { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
41275         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
41276 +       { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
41277         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
41278 +       { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
41279 +       { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
41280 +       { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
41281 +       { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
41282         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
41283         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
41284         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
41285         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
41286         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
41287 +       { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
41288         { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
41289         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
41290 +       { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
41291 +       { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
41292         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
41293  #endif
41294         { 0 }
41295 @@ -265,7 +281,9 @@ static struct workqueue_struct *bnxt_pf_wq;
41296  static bool bnxt_vf_pciid(enum board_idx idx)
41298         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
41299 -               idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
41300 +               idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
41301 +               idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
41302 +               idx == NETXTREME_E_P5_VF_HV);
41304  #define DB_CP_REARM_FLAGS      (DB_KEY_CP | DB_IDX_VALID)
41305 @@ -1732,14 +1749,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
41307         cons = rxcmp->rx_cmp_opaque;
41308         if (unlikely(cons != rxr->rx_next_cons)) {
41309 -               int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
41310 +               int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
41312                 /* 0xffff is forced error, don't print it */
41313                 if (rxr->rx_next_cons != 0xffff)
41314                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
41315                                     cons, rxr->rx_next_cons);
41316                 bnxt_sched_reset(bp, rxr);
41317 -               return rc1;
41318 +               if (rc1)
41319 +                       return rc1;
41320 +               goto next_rx_no_prod_no_len;
41321         }
41322         rx_buf = &rxr->rx_buf_ring[cons];
41323         data = rx_buf->data;
41324 @@ -9736,7 +9755,9 @@ static ssize_t bnxt_show_temp(struct device *dev,
41325         if (!rc)
41326                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
41327         mutex_unlock(&bp->hwrm_cmd_lock);
41328 -       return rc ?: len;
41329 +       if (rc)
41330 +               return rc;
41331 +       return len;
41333  static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
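
The bnxt_show_temp() change above sidesteps a C promotion trap in the GNU
shorthand `rc ?: len`: assuming len is unsigned (which is what the fix
implies), the usual arithmetic conversions give the whole conditional
expression unsigned type, so a negative errno in rc turns into a huge
positive value before it reaches the ssize_t return. A standalone
demonstration:

    #include <stdio.h>

    int main(void)
    {
            int rc = -5;                    /* a negative errno-style code */
            unsigned int len = 6;

            /* same typing as the GNU shorthand `rc ?: len` */
            long long buggy = rc ? rc : len;
            long long fixed;

            if (rc)
                    fixed = rc;             /* rc keeps its int type here */
            else
                    fixed = len;

            printf("buggy=%lld fixed=%lld\n", buggy, fixed);
            return 0;
    }
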
41335 diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
41336 index e6d4ad99cc38..3f1c189646f4 100644
41337 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
41338 +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
41339 @@ -521,7 +521,7 @@
41340  #define    CN23XX_BAR1_INDEX_OFFSET                3
41342  #define    CN23XX_PEM_BAR1_INDEX_REG(port, idx)                \
41343 -               (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
41344 +               (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \
41345                  ((idx) << CN23XX_BAR1_INDEX_OFFSET))
41347  /*############################ DPI #########################*/
41348 diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
41349 index f782e6af45e9..50bbe79fb93d 100644
41350 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
41351 +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
41352 @@ -776,7 +776,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
41353         mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
41354         mbx.rq.qs_num = qs->vnic_id;
41355         mbx.rq.rq_num = qidx;
41356 -       mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
41357 +       mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
41358                           (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
41359                           (rq->cont_qs_rbdr_idx << 8) |
41360                           (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
41361 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
41362 index 83b46440408b..bde8494215c4 100644
41363 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
41364 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
41365 @@ -174,31 +174,31 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
41366                                       WORD_MASK, f->fs.nat_lip[15] |
41367                                       f->fs.nat_lip[14] << 8 |
41368                                       f->fs.nat_lip[13] << 16 |
41369 -                                     f->fs.nat_lip[12] << 24, 1);
41370 +                                     (u64)f->fs.nat_lip[12] << 24, 1);
41372                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
41373                                       WORD_MASK, f->fs.nat_lip[11] |
41374                                       f->fs.nat_lip[10] << 8 |
41375                                       f->fs.nat_lip[9] << 16 |
41376 -                                     f->fs.nat_lip[8] << 24, 1);
41377 +                                     (u64)f->fs.nat_lip[8] << 24, 1);
41379                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
41380                                       WORD_MASK, f->fs.nat_lip[7] |
41381                                       f->fs.nat_lip[6] << 8 |
41382                                       f->fs.nat_lip[5] << 16 |
41383 -                                     f->fs.nat_lip[4] << 24, 1);
41384 +                                     (u64)f->fs.nat_lip[4] << 24, 1);
41386                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
41387                                       WORD_MASK, f->fs.nat_lip[3] |
41388                                       f->fs.nat_lip[2] << 8 |
41389                                       f->fs.nat_lip[1] << 16 |
41390 -                                     f->fs.nat_lip[0] << 24, 1);
41391 +                                     (u64)f->fs.nat_lip[0] << 24, 1);
41392                 } else {
41393                         set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
41394                                       WORD_MASK, f->fs.nat_lip[3] |
41395                                       f->fs.nat_lip[2] << 8 |
41396                                       f->fs.nat_lip[1] << 16 |
41397 -                                     f->fs.nat_lip[0] << 24, 1);
41398 +                                     (u64)f->fs.nat_lip[0] << 24, 1);
41399                 }
41400         }
41402 @@ -208,25 +208,25 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
41403                                       WORD_MASK, f->fs.nat_fip[15] |
41404                                       f->fs.nat_fip[14] << 8 |
41405                                       f->fs.nat_fip[13] << 16 |
41406 -                                     f->fs.nat_fip[12] << 24, 1);
41407 +                                     (u64)f->fs.nat_fip[12] << 24, 1);
41409                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
41410                                       WORD_MASK, f->fs.nat_fip[11] |
41411                                       f->fs.nat_fip[10] << 8 |
41412                                       f->fs.nat_fip[9] << 16 |
41413 -                                     f->fs.nat_fip[8] << 24, 1);
41414 +                                     (u64)f->fs.nat_fip[8] << 24, 1);
41416                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
41417                                       WORD_MASK, f->fs.nat_fip[7] |
41418                                       f->fs.nat_fip[6] << 8 |
41419                                       f->fs.nat_fip[5] << 16 |
41420 -                                     f->fs.nat_fip[4] << 24, 1);
41421 +                                     (u64)f->fs.nat_fip[4] << 24, 1);
41423                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
41424                                       WORD_MASK, f->fs.nat_fip[3] |
41425                                       f->fs.nat_fip[2] << 8 |
41426                                       f->fs.nat_fip[1] << 16 |
41427 -                                     f->fs.nat_fip[0] << 24, 1);
41428 +                                     (u64)f->fs.nat_fip[0] << 24, 1);
41430                 } else {
41431                         set_tcb_field(adap, f, tid,
41432 @@ -234,13 +234,13 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
41433                                       WORD_MASK, f->fs.nat_fip[3] |
41434                                       f->fs.nat_fip[2] << 8 |
41435                                       f->fs.nat_fip[1] << 16 |
41436 -                                     f->fs.nat_fip[0] << 24, 1);
41437 +                                     (u64)f->fs.nat_fip[0] << 24, 1);
41438                 }
41439         }
41441         set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
41442                       (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
41443 -                     (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
41444 +                     (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
41445                       1);
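
All of the (u64) casts in the liquidio, thunderx and cxgb4 hunks above plug
the same integer-promotion trap: a u8 operand of << is promoted to signed
int first, so a shift that reaches bit 31 yields a negative int, and
widening that result to 64 bits sign-extends it, filling the upper 32 bits
with ones. A short demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t b = 0x80;

            /* b << 24 is computed as int (formally undefined once it hits
             * the sign bit; in practice a negative value), then
             * sign-extends when stored into a 64-bit variable.
             */
            uint64_t bad  = b << 24;
            uint64_t good = (uint64_t)b << 24;  /* widen first, then shift */

            printf("bad:  %#018llx\n", (unsigned long long)bad);
            printf("good: %#018llx\n", (unsigned long long)good);
            return 0;
    }
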
41448 diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
41449 index 256fae15e032..1e5f2edb70cf 100644
41450 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
41451 +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
41452 @@ -2563,12 +2563,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
41453         spin_lock_bh(&eosw_txq->lock);
41454         if (tc != FW_SCHED_CLS_NONE) {
41455                 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
41456 -                       goto out_unlock;
41457 +                       goto out_free_skb;
41459                 next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
41460         } else {
41461                 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
41462 -                       goto out_unlock;
41463 +                       goto out_free_skb;
41465                 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
41466         }
41467 @@ -2604,17 +2604,19 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
41468                 eosw_txq_flush_pending_skbs(eosw_txq);
41470         ret = eosw_txq_enqueue(eosw_txq, skb);
41471 -       if (ret) {
41472 -               dev_consume_skb_any(skb);
41473 -               goto out_unlock;
41474 -       }
41475 +       if (ret)
41476 +               goto out_free_skb;
41478         eosw_txq->state = next_state;
41479         eosw_txq->flowc_idx = eosw_txq->pidx;
41480         eosw_txq_advance(eosw_txq, 1);
41481         ethofld_xmit(dev, eosw_txq);
41483 -out_unlock:
41484 +       spin_unlock_bh(&eosw_txq->lock);
41485 +       return 0;
41487 +out_free_skb:
41488 +       dev_consume_skb_any(skb);
41489         spin_unlock_bh(&eosw_txq->lock);
41490         return ret;
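
The cxgb4_ethofld_send_flowc() rework above is the single-exit cleanup
pattern: every early failure now funnels through one label that consumes
the skb and releases the queue lock exactly once, where the old code
unlocked but leaked the buffer. The shape of the pattern, sketched in
user space (stand-in names, a pthread mutex instead of a spinlock):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    static int send_request(int state_ok, int enqueue_ok)
    {
            char *buf = malloc(64);          /* stands in for the skb */
            int ret = -1;

            if (!buf)
                    return -1;

            pthread_mutex_lock(&queue_lock);
            if (!state_ok)
                    goto out_free;           /* wrong state */
            if (!enqueue_ok)
                    goto out_free;           /* enqueue failed */
            pthread_mutex_unlock(&queue_lock);
            free(buf);                       /* in the driver, the queue owns it now */
            return 0;

    out_free:
            free(buf);                       /* consumed on every error path */
            pthread_mutex_unlock(&queue_lock);
            return ret;
    }

    int main(void)
    {
            printf("%d %d %d\n", send_request(1, 1), send_request(0, 1),
                   send_request(1, 0));
            return 0;
    }
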
41492 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
41493 index f04ec53544ae..b1443ff439de 100644
41494 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
41495 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
41496 @@ -768,7 +768,7 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
41497         return err;
41500 -static inline void enic_queue_wq_skb(struct enic *enic,
41501 +static inline int enic_queue_wq_skb(struct enic *enic,
41502         struct vnic_wq *wq, struct sk_buff *skb)
41504         unsigned int mss = skb_shinfo(skb)->gso_size;
41505 @@ -814,6 +814,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
41506                 wq->to_use = buf->next;
41507                 dev_kfree_skb(skb);
41508         }
41509 +       return err;
41512  /* netif_tx_lock held, process context with BHs disabled, or BH */
41513 @@ -857,7 +858,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
41514                 return NETDEV_TX_BUSY;
41515         }
41517 -       enic_queue_wq_skb(enic, wq, skb);
41518 +       if (enic_queue_wq_skb(enic, wq, skb))
41519 +               goto error;
41521         if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
41522                 netif_tx_stop_queue(txq);
41523 @@ -865,6 +867,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
41524         if (!netdev_xmit_more() || netif_xmit_stopped(txq))
41525                 vnic_wq_doorbell(wq);
41527 +error:
41528         spin_unlock(&enic->wq_lock[txq_map]);
41530         return NETDEV_TX_OK;
41531 diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
41532 index 67c436400352..de7b31842233 100644
41533 --- a/drivers/net/ethernet/freescale/Makefile
41534 +++ b/drivers/net/ethernet/freescale/Makefile
41535 @@ -24,6 +24,4 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
41537  obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
41539 -obj-$(CONFIG_FSL_ENETC) += enetc/
41540 -obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
41541 -obj-$(CONFIG_FSL_ENETC_VF) += enetc/
41542 +obj-y += enetc/
41543 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
41544 index 3db882322b2b..70aea9c274fe 100644
41545 --- a/drivers/net/ethernet/freescale/fec_main.c
41546 +++ b/drivers/net/ethernet/freescale/fec_main.c
41547 @@ -2048,6 +2048,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
41548         fep->link = 0;
41549         fep->full_duplex = 0;
41551 +       phy_dev->mac_managed_pm = 1;
41553         phy_attached_info(phy_dev);
41555         return 0;
41556 @@ -3864,6 +3866,7 @@ static int __maybe_unused fec_resume(struct device *dev)
41557                 netif_device_attach(ndev);
41558                 netif_tx_unlock_bh(ndev);
41559                 napi_enable(&fep->napi);
41560 +               phy_init_hw(ndev->phydev);
41561                 phy_start(ndev->phydev);
41562         }
41563         rtnl_unlock();
41564 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
41565 index bf4302a5cf95..0f70158c2551 100644
41566 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
41567 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
41568 @@ -576,8 +576,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
41569         if (h->ae_algo->ops->set_timer_task)
41570                 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
41572 -       netif_tx_stop_all_queues(netdev);
41573         netif_carrier_off(netdev);
41574 +       netif_tx_disable(netdev);
41576         hns3_nic_net_down(netdev);
41578 @@ -823,7 +823,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
41579   * and it is udp packet, which has a dest port as the IANA assigned.
41580   * the hardware is expected to do the checksum offload, but the
41581   * hardware will not do the checksum offload when udp dest port is
41582 - * 4789 or 6081.
41583 + * 4789, 4790 or 6081.
41584   */
41585  static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
41587 @@ -841,7 +841,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
41589         if (!(!skb->encapsulation &&
41590               (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
41591 -             l4.udp->dest == htons(GENEVE_UDP_PORT))))
41592 +             l4.udp->dest == htons(GENEVE_UDP_PORT) ||
41593 +             l4.udp->dest == htons(4790))))
41594                 return false;
41596         skb_checksum_help(skb);
41597 @@ -1277,23 +1278,21 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
41600  static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
41601 -                                  u8 max_non_tso_bd_num)
41602 +                                  u8 max_non_tso_bd_num, unsigned int bd_num,
41603 +                                  unsigned int recursion_level)
41605 +#define HNS3_MAX_RECURSION_LEVEL       24
41607         struct sk_buff *frag_skb;
41608 -       unsigned int bd_num = 0;
41610         /* If the total len is within the max bd limit */
41611 -       if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
41612 +       if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
41613 +                  !skb_has_frag_list(skb) &&
41614                    skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
41615                 return skb_shinfo(skb)->nr_frags + 1U;
41617 -       /* The below case will always be linearized, return
41618 -        * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized.
41619 -        */
41620 -       if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
41621 -                    (!skb_is_gso(skb) && skb->len >
41622 -                     HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
41623 -               return HNS3_MAX_TSO_BD_NUM + 1U;
41624 +       if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
41625 +               return UINT_MAX;
41627         bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
41629 @@ -1301,7 +1300,8 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
41630                 return bd_num;
41632         skb_walk_frags(skb, frag_skb) {
41633 -               bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
41634 +               bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
41635 +                                       bd_num, recursion_level + 1);
41636                 if (bd_num > HNS3_MAX_TSO_BD_NUM)
41637                         return bd_num;
41638         }
41639 @@ -1361,6 +1361,43 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
41640                 size[i] = skb_frag_size(&shinfo->frags[i]);
41643 +static int hns3_skb_linearize(struct hns3_enet_ring *ring,
41644 +                             struct sk_buff *skb,
41645 +                             u8 max_non_tso_bd_num,
41646 +                             unsigned int bd_num)
41648 +       /* 'bd_num == UINT_MAX' means the skb's fraglist has a
41649 +        * recursion level of over HNS3_MAX_RECURSION_LEVEL.
41650 +        */
41651 +       if (bd_num == UINT_MAX) {
41652 +               u64_stats_update_begin(&ring->syncp);
41653 +               ring->stats.over_max_recursion++;
41654 +               u64_stats_update_end(&ring->syncp);
41655 +               return -ENOMEM;
41656 +       }
41658 +       /* The skb->len has exceeded the hw limitation, linearization
41659 +        * will not help.
41660 +        */
41661 +       if (skb->len > HNS3_MAX_TSO_SIZE ||
41662 +           (!skb_is_gso(skb) && skb->len >
41663 +            HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
41664 +               u64_stats_update_begin(&ring->syncp);
41665 +               ring->stats.hw_limitation++;
41666 +               u64_stats_update_end(&ring->syncp);
41667 +               return -ENOMEM;
41668 +       }
41670 +       if (__skb_linearize(skb)) {
41671 +               u64_stats_update_begin(&ring->syncp);
41672 +               ring->stats.sw_err_cnt++;
41673 +               u64_stats_update_end(&ring->syncp);
41674 +               return -ENOMEM;
41675 +       }
41677 +       return 0;
41680  static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
41681                                   struct net_device *netdev,
41682                                   struct sk_buff *skb)
41683 @@ -1370,7 +1407,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
41684         unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
41685         unsigned int bd_num;
41687 -       bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
41688 +       bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
41689         if (unlikely(bd_num > max_non_tso_bd_num)) {
41690                 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
41691                     !hns3_skb_need_linearized(skb, bd_size, bd_num,
41692 @@ -1379,16 +1416,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
41693                         goto out;
41694                 }
41696 -               if (__skb_linearize(skb))
41697 +               if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
41698 +                                      bd_num))
41699                         return -ENOMEM;
41701                 bd_num = hns3_tx_bd_count(skb->len);
41702 -               if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
41703 -                   (!skb_is_gso(skb) &&
41704 -                    bd_num > max_non_tso_bd_num)) {
41705 -                       trace_hns3_over_max_bd(skb);
41706 -                       return -ENOMEM;
41707 -               }
41709                 u64_stats_update_begin(&ring->syncp);
41710                 ring->stats.tx_copy++;
41711 @@ -1412,6 +1444,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
41712                 return bd_num;
41713         }
41715 +       u64_stats_update_begin(&ring->syncp);
41716 +       ring->stats.tx_busy++;
41717 +       u64_stats_update_end(&ring->syncp);
41719         return -EBUSY;
41722 @@ -1459,6 +1495,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
41723                                  struct sk_buff *skb, enum hns_desc_type type)
41725         unsigned int size = skb_headlen(skb);
41726 +       struct sk_buff *frag_skb;
41727         int i, ret, bd_num = 0;
41729         if (size) {
41730 @@ -1483,6 +1520,15 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
41731                 bd_num += ret;
41732         }
41734 +       skb_walk_frags(skb, frag_skb) {
41735 +               ret = hns3_fill_skb_to_desc(ring, frag_skb,
41736 +                                           DESC_TYPE_FRAGLIST_SKB);
41737 +               if (unlikely(ret < 0))
41738 +                       return ret;
41740 +               bd_num += ret;
41741 +       }
41743         return bd_num;
41746 @@ -1513,8 +1559,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
41747         struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
41748         struct netdev_queue *dev_queue;
41749         int pre_ntu, next_to_use_head;
41750 -       struct sk_buff *frag_skb;
41751 -       int bd_num = 0;
41752         bool doorbell;
41753         int ret;
41755 @@ -1530,15 +1574,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
41756         ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
41757         if (unlikely(ret <= 0)) {
41758                 if (ret == -EBUSY) {
41759 -                       u64_stats_update_begin(&ring->syncp);
41760 -                       ring->stats.tx_busy++;
41761 -                       u64_stats_update_end(&ring->syncp);
41762                         hns3_tx_doorbell(ring, 0, true);
41763                         return NETDEV_TX_BUSY;
41764 -               } else if (ret == -ENOMEM) {
41765 -                       u64_stats_update_begin(&ring->syncp);
41766 -                       ring->stats.sw_err_cnt++;
41767 -                       u64_stats_update_end(&ring->syncp);
41768                 }
41770                 hns3_rl_err(netdev, "xmit error: %d!\n", ret);
41771 @@ -1551,21 +1588,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
41772         if (unlikely(ret < 0))
41773                 goto fill_err;
41775 +       /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
41776 +        * zero, which is unlikely, and 'ret > 0' means how many tx desc
41777 +        * need to be notified to the hw.
41778 +        */
41779         ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
41780 -       if (unlikely(ret < 0))
41781 +       if (unlikely(ret <= 0))
41782                 goto fill_err;
41784 -       bd_num += ret;
41786 -       skb_walk_frags(skb, frag_skb) {
41787 -               ret = hns3_fill_skb_to_desc(ring, frag_skb,
41788 -                                           DESC_TYPE_FRAGLIST_SKB);
41789 -               if (unlikely(ret < 0))
41790 -                       goto fill_err;
41792 -               bd_num += ret;
41793 -       }
41795         pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
41796                                         (ring->desc_num - 1);
41797         ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
41798 @@ -1576,7 +1606,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
41799         dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
41800         doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
41801                                           netdev_xmit_more());
41802 -       hns3_tx_doorbell(ring, bd_num, doorbell);
41803 +       hns3_tx_doorbell(ring, ret, doorbell);
41805         return NETDEV_TX_OK;
41807 @@ -1748,11 +1778,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
41808                         tx_drop += ring->stats.tx_l4_proto_err;
41809                         tx_drop += ring->stats.tx_l2l3l4_err;
41810                         tx_drop += ring->stats.tx_tso_err;
41811 +                       tx_drop += ring->stats.over_max_recursion;
41812 +                       tx_drop += ring->stats.hw_limitation;
41813                         tx_errors += ring->stats.sw_err_cnt;
41814                         tx_errors += ring->stats.tx_vlan_err;
41815                         tx_errors += ring->stats.tx_l4_proto_err;
41816                         tx_errors += ring->stats.tx_l2l3l4_err;
41817                         tx_errors += ring->stats.tx_tso_err;
41818 +                       tx_errors += ring->stats.over_max_recursion;
41819 +                       tx_errors += ring->stats.hw_limitation;
41820                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
41822                 /* fetch the rx stats */
41823 @@ -3704,7 +3738,6 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
41825  static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
41827 -       struct hnae3_ring_chain_node vector_ring_chain;
41828         struct hnae3_handle *h = priv->ae_handle;
41829         struct hns3_enet_tqp_vector *tqp_vector;
41830         int ret;
41831 @@ -3736,6 +3769,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
41832         }
41834         for (i = 0; i < priv->vector_num; i++) {
41835 +               struct hnae3_ring_chain_node vector_ring_chain;
41837                 tqp_vector = &priv->tqp_vector[i];
41839                 tqp_vector->rx_group.total_bytes = 0;
41840 @@ -4554,6 +4589,11 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
41841         struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
41842         int ret = 0;
41844 +       if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
41845 +               netdev_err(kinfo->netdev, "device is not initialized yet\n");
41846 +               return -EFAULT;
41847 +       }
41849         clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
41851         if (netif_running(kinfo->netdev)) {
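
The hns3_tx_bd_num() rework above turns the fraglist walk into explicitly
bounded recursion: depth is capped at HNS3_MAX_RECURSION_LEVEL (24) and
UINT_MAX is returned as an "over the limit" sentinel, so a pathologically
nested fraglist can neither exhaust the kernel stack nor be silently
mis-counted. The pattern in generic form (a hypothetical structure, not
the real skb layout):

    #include <limits.h>
    #include <stdio.h>

    #define MAX_RECURSION_LEVEL 24

    struct pkt {
            unsigned int nr_bds;    /* descriptors for this buffer itself */
            struct pkt *frag_list;  /* head of nested fragment chain */
            struct pkt *next;       /* sibling in the parent's frag_list */
    };

    static unsigned int count_bds(const struct pkt *p, unsigned int level)
    {
            unsigned int bds = p->nr_bds;
            const struct pkt *f;

            if (level >= MAX_RECURSION_LEVEL)
                    return UINT_MAX;        /* sentinel: nested too deeply */

            for (f = p->frag_list; f; f = f->next) {
                    unsigned int n = count_bds(f, level + 1);

                    if (n == UINT_MAX)
                            return UINT_MAX;
                    bds += n;
            }
            return bds;
    }

    int main(void)
    {
            struct pkt leaf = { .nr_bds = 2 };
            struct pkt root = { .nr_bds = 1, .frag_list = &leaf };

            printf("total bds: %u\n", count_bds(&root, 0));
            return 0;
    }
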
41852 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
41853 index d069b04ee587..e44224e23315 100644
41854 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
41855 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
41856 @@ -376,6 +376,8 @@ struct ring_stats {
41857                         u64 tx_l4_proto_err;
41858                         u64 tx_l2l3l4_err;
41859                         u64 tx_tso_err;
41860 +                       u64 over_max_recursion;
41861 +                       u64 hw_limitation;
41862                 };
41863                 struct {
41864                         u64 rx_pkts;
41865 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
41866 index adcec4ea7cb9..d20f2e246017 100644
41867 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
41868 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
41869 @@ -44,6 +44,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
41870         HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
41871         HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
41872         HNS3_TQP_STAT("tso_err", tx_tso_err),
41873 +       HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
41874 +       HNS3_TQP_STAT("hw_limitation", hw_limitation),
41875  };
41877  #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
41878 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
41879 index 0ca7f1b984bf..78d3eb142df8 100644
41880 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
41881 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
41882 @@ -753,8 +753,9 @@ static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
41884         /* configure IGU,EGU error interrupts */
41885         hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
41886 +       desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE);
41887         if (en)
41888 -               desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
41889 +               desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
41891         desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
41893 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
41894 index 608fe26fc3fe..d647f3c84134 100644
41895 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
41896 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
41897 @@ -32,7 +32,8 @@
41898  #define HCLGE_TQP_ECC_ERR_INT_EN_MASK  0x0FFF
41899  #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK    0x0F000000
41900  #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000
41901 -#define HCLGE_IGU_ERR_INT_EN   0x0000066F
41902 +#define HCLGE_IGU_ERR_INT_EN   0x0000000F
41903 +#define HCLGE_IGU_ERR_INT_TYPE 0x00000660
41904  #define HCLGE_IGU_ERR_INT_EN_MASK      0x000F
41905  #define HCLGE_IGU_TNL_ERR_INT_EN    0x0002AABF
41906  #define HCLGE_IGU_TNL_ERR_INT_EN_MASK  0x003F
41907 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
41908 index b0dbe6dcaa7b..7a560d0e19b9 100644
41909 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
41910 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
41911 @@ -11379,7 +11379,6 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
41912  #define REG_LEN_PER_LINE       (REG_NUM_PER_LINE * sizeof(u32))
41913  #define REG_SEPARATOR_LINE     1
41914  #define REG_NUM_REMAIN_MASK    3
41915 -#define BD_LIST_MAX_NUM                30
41917  int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
41919 @@ -11473,15 +11472,19 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
41921         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
41922         int data_len_per_desc, bd_num, i;
41923 -       int bd_num_list[BD_LIST_MAX_NUM];
41924 +       int *bd_num_list;
41925         u32 data_len;
41926         int ret;
41928 +       bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
41929 +       if (!bd_num_list)
41930 +               return -ENOMEM;
41932         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
41933         if (ret) {
41934                 dev_err(&hdev->pdev->dev,
41935                         "Get dfx reg bd num fail, status is %d.\n", ret);
41936 -               return ret;
41937 +               goto out;
41938         }
41940         data_len_per_desc = sizeof_field(struct hclge_desc, data);
41941 @@ -11492,6 +11495,8 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
41942                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
41943         }
41945 +out:
41946 +       kfree(bd_num_list);
41947         return ret;
41950 @@ -11499,16 +11504,20 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
41952         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
41953         int bd_num, bd_num_max, buf_len, i;
41954 -       int bd_num_list[BD_LIST_MAX_NUM];
41955         struct hclge_desc *desc_src;
41956 +       int *bd_num_list;
41957         u32 *reg = data;
41958         int ret;
41960 +       bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
41961 +       if (!bd_num_list)
41962 +               return -ENOMEM;
41964         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
41965         if (ret) {
41966                 dev_err(&hdev->pdev->dev,
41967                         "Get dfx reg bd num fail, status is %d.\n", ret);
41968 -               return ret;
41969 +               goto out;
41970         }
41972         bd_num_max = bd_num_list[0];
41973 @@ -11517,8 +11526,10 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
41975         buf_len = sizeof(*desc_src) * bd_num_max;
41976         desc_src = kzalloc(buf_len, GFP_KERNEL);
41977 -       if (!desc_src)
41978 -               return -ENOMEM;
41979 +       if (!desc_src) {
41980 +               ret = -ENOMEM;
41981 +               goto out;
41982 +       }
41984         for (i = 0; i < dfx_reg_type_num; i++) {
41985                 bd_num = bd_num_list[i];
41986 @@ -11534,6 +11545,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
41987         }
41989         kfree(desc_src);
41990 +out:
41991 +       kfree(bd_num_list);
41992         return ret;
41995 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
41996 index 51a36e74f088..c3bb16b1f060 100644
41997 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
41998 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
41999 @@ -535,7 +535,7 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
42000         unsigned long advertising;
42001         unsigned long supported;
42002         unsigned long send_data;
42003 -       u8 msg_data[10];
42004 +       u8 msg_data[10] = {};
42005         u8 dest_vfid;
42007         advertising = hdev->hw.mac.advertising[0];
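
Zero-initializing msg_data above means any byte the code does not
explicitly fill is sent to the VF as 0 rather than as leftover kernel
stack contents, which would be an information leak. The effect in
miniature (`= {}` is the GNU/C23 empty initializer; `= {0}` is the
portable spelling):

    #include <stdio.h>

    int main(void)
    {
            unsigned char msg[10] = {0};    /* every byte starts at zero */
            unsigned int i;

            msg[0] = 0x2a;                  /* only part of it is filled */
            for (i = 0; i < sizeof(msg); i++)
                    printf("%02x ", msg[i]);
            printf("\n");
            return 0;
    }
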
42008 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
42009 index e89820702540..c194bba187d6 100644
42010 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
42011 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
42012 @@ -255,6 +255,8 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
42013         if (!phydev)
42014                 return;
42016 +       phy_loopback(phydev, false);
42018         phy_start(phydev);
42021 diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
42022 index 15f93b355099..5069f690cf0b 100644
42023 --- a/drivers/net/ethernet/intel/i40e/i40e.h
42024 +++ b/drivers/net/ethernet/intel/i40e/i40e.h
42025 @@ -1142,7 +1142,6 @@ static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
42026         return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
42029 -void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable);
42030  #ifdef CONFIG_I40E_DCB
42031  void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
42032                            struct i40e_dcbx_config *old_cfg,
42033 diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
42034 index ce626eace692..140b677f114d 100644
42035 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
42036 +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
42037 @@ -1566,8 +1566,10 @@ enum i40e_aq_phy_type {
42038         I40E_PHY_TYPE_25GBASE_LR                = 0x22,
42039         I40E_PHY_TYPE_25GBASE_AOC               = 0x23,
42040         I40E_PHY_TYPE_25GBASE_ACC               = 0x24,
42041 -       I40E_PHY_TYPE_2_5GBASE_T                = 0x30,
42042 -       I40E_PHY_TYPE_5GBASE_T                  = 0x31,
42043 +       I40E_PHY_TYPE_2_5GBASE_T                = 0x26,
42044 +       I40E_PHY_TYPE_5GBASE_T                  = 0x27,
42045 +       I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS    = 0x30,
42046 +       I40E_PHY_TYPE_5GBASE_T_LINK_STATUS      = 0x31,
42047         I40E_PHY_TYPE_MAX,
42048         I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP   = 0xFD,
42049         I40E_PHY_TYPE_EMPTY                     = 0xFE,
42050 diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
42051 index a2dba32383f6..32f3facbed1a 100644
42052 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c
42053 +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
42054 @@ -375,6 +375,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
42055                                 clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
42056                                           &cdev->state);
42057                                 i40e_client_del_instance(pf);
42058 +                               return;
42059                         }
42060                 }
42061         }
42062 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
42063 index ec19e18305ec..ce35e064cf60 100644
42064 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c
42065 +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
42066 @@ -1154,8 +1154,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
42067                 break;
42068         case I40E_PHY_TYPE_100BASE_TX:
42069         case I40E_PHY_TYPE_1000BASE_T:
42070 -       case I40E_PHY_TYPE_2_5GBASE_T:
42071 -       case I40E_PHY_TYPE_5GBASE_T:
42072 +       case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
42073 +       case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
42074         case I40E_PHY_TYPE_10GBASE_T:
42075                 media = I40E_MEDIA_TYPE_BASET;
42076                 break;
42077 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
42078 index 0e92668012e3..93dd58fda272 100644
42079 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
42080 +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
42081 @@ -841,8 +841,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
42082                                                              10000baseT_Full);
42083                 break;
42084         case I40E_PHY_TYPE_10GBASE_T:
42085 -       case I40E_PHY_TYPE_5GBASE_T:
42086 -       case I40E_PHY_TYPE_2_5GBASE_T:
42087 +       case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
42088 +       case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
42089         case I40E_PHY_TYPE_1000BASE_T:
42090         case I40E_PHY_TYPE_100BASE_TX:
42091                 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
42092 @@ -1409,7 +1409,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
42094                 memset(&config, 0, sizeof(config));
42095                 config.phy_type = abilities.phy_type;
42096 -               config.abilities = abilities.abilities;
42097 +               config.abilities = abilities.abilities |
42098 +                                  I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
42099                 config.phy_type_ext = abilities.phy_type_ext;
42100                 config.link_speed = abilities.link_speed;
42101                 config.eee_capability = abilities.eee_capability;
42102 @@ -5287,7 +5288,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
42103                         i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL);
42104                         i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
42105                 } else {
42106 -                       i40e_set_lldp_forwarding(pf, false);
42107                         status = i40e_aq_start_lldp(&pf->hw, false, NULL);
42108                         if (status) {
42109                                 adq_err = pf->hw.aq.asq_last_status;
42110 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
42111 index 527023ee4c07..ac4b44fc19f1 100644
42112 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
42113 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
42114 @@ -6878,40 +6878,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
42116  #endif /* CONFIG_I40E_DCB */
42118 -/**
42119 - * i40e_set_lldp_forwarding - set forwarding of lldp frames
42120 - * @pf: PF being configured
42121 - * @enable: if forwarding to OS shall be enabled
42122 - *
42123 - * Toggle forwarding of lldp frames behavior,
42124 - * When passing DCB control from firmware to software
42125 - * lldp frames must be forwarded to the software based
42126 - * lldp agent.
42127 - */
42128 -void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable)
42130 -       if (pf->lan_vsi == I40E_NO_VSI)
42131 -               return;
42133 -       if (!pf->vsi[pf->lan_vsi])
42134 -               return;
42136 -       /* No need to check the outcome, commands may fail
42137 -        * if desired value is already set
42138 -        */
42139 -       i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
42140 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX |
42141 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
42142 -                                             pf->vsi[pf->lan_vsi]->seid, 0,
42143 -                                             enable, NULL, NULL);
42145 -       i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
42146 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX |
42147 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
42148 -                                             pf->vsi[pf->lan_vsi]->seid, 0,
42149 -                                             enable, NULL, NULL);
42152  /**
42153   * i40e_print_link_message - print link up or down
42154   * @vsi: the VSI for which link needs a message
42155 @@ -10735,10 +10701,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
42156          */
42157         i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
42158                                                        pf->main_vsi_seid);
42159 -#ifdef CONFIG_I40E_DCB
42160 -       if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
42161 -               i40e_set_lldp_forwarding(pf, true);
42162 -#endif /* CONFIG_I40E_DCB */
42164         /* restart the VSIs that were rebuilt and running before the reset */
42165         i40e_pf_unquiesce_all_vsi(pf);
42166 @@ -15753,10 +15715,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
42167          */
42168         i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
42169                                                        pf->main_vsi_seid);
42170 -#ifdef CONFIG_I40E_DCB
42171 -       if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
42172 -               i40e_set_lldp_forwarding(pf, true);
42173 -#endif /* CONFIG_I40E_DCB */
42175         if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
42176                 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
42177 diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
42178 index 06b4271219b1..70b515049540 100644
42179 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
42180 +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
42181 @@ -1961,10 +1961,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
42182                                  union i40e_rx_desc *rx_desc)
42185 -       /* XDP packets use error pointer so abort at this point */
42186 -       if (IS_ERR(skb))
42187 -               return true;
42189         /* ERR_MASK will only have valid bits if EOP set, and
42190          * what we are doing here is actually checking
42191          * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
42192 @@ -2534,7 +2530,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
42193                 }
42195                 /* exit if we failed to retrieve a buffer */
42196 -               if (!skb) {
42197 +               if (!xdp_res && !skb) {
42198                         rx_ring->rx_stats.alloc_buff_failed++;
42199                         rx_buffer->pagecnt_bias++;
42200                         break;
42201 @@ -2547,7 +2543,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
42202                 if (i40e_is_non_eop(rx_ring, rx_desc))
42203                         continue;
42205 -               if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
42206 +               if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
42207                         skb = NULL;
42208                         continue;
42209                 }
42210 diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
42211 index 5c10faaca790..c81109a63e90 100644
42212 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h
42213 +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
42214 @@ -239,11 +239,8 @@ struct i40e_phy_info {
42215  #define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
42216                                              I40E_PHY_TYPE_OFFSET)
42217  /* Offset for 2.5G/5G PHY Types value to bit number conversion */
42218 -#define I40E_PHY_TYPE_OFFSET2 (-10)
42219 -#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
42220 -                                            I40E_PHY_TYPE_OFFSET2)
42221 -#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
42222 -                                            I40E_PHY_TYPE_OFFSET2)
42223 +#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T)
42224 +#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T)
42225  #define I40E_HW_CAP_MAX_GPIO                   30
42226  /* Capabilities of a PF or a VF or the whole device */
42227  struct i40e_hw_capabilities {
42228 diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
42229 index dc5b3c06d1e0..ebd08543791b 100644
42230 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c
42231 +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
42232 @@ -3899,8 +3899,6 @@ static void iavf_remove(struct pci_dev *pdev)
42234         iounmap(hw->hw_addr);
42235         pci_release_regions(pdev);
42236 -       iavf_free_all_tx_resources(adapter);
42237 -       iavf_free_all_rx_resources(adapter);
42238         iavf_free_queues(adapter);
42239         kfree(adapter->vf_res);
42240         spin_lock_bh(&adapter->mac_vlan_list_lock);
42241 diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
42242 index d13c7fc8fb0a..195d122c9cb2 100644
42243 --- a/drivers/net/ethernet/intel/ice/ice_lib.c
42244 +++ b/drivers/net/ethernet/intel/ice/ice_lib.c
42245 @@ -2818,38 +2818,46 @@ int ice_vsi_release(struct ice_vsi *vsi)
42248  /**
42249 - * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
42250 + * ice_vsi_rebuild_update_coalesce_intrl - set interrupt rate limit for a q_vector
42251   * @q_vector: pointer to q_vector which is being updated
42252 - * @coalesce: pointer to array of struct with stored coalesce
42253 + * @stored_intrl_setting: original INTRL setting
42254   *
42255   * Set coalesce param in q_vector and update these parameters in HW.
42256   */
42257  static void
42258 -ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
42259 -                               struct ice_coalesce_stored *coalesce)
42260 +ice_vsi_rebuild_update_coalesce_intrl(struct ice_q_vector *q_vector,
42261 +                                     u16 stored_intrl_setting)
42263 -       struct ice_ring_container *rx_rc = &q_vector->rx;
42264 -       struct ice_ring_container *tx_rc = &q_vector->tx;
42265         struct ice_hw *hw = &q_vector->vsi->back->hw;
42267 -       tx_rc->itr_setting = coalesce->itr_tx;
42268 -       rx_rc->itr_setting = coalesce->itr_rx;
42270 -       /* dynamic ITR values will be updated during Tx/Rx */
42271 -       if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
42272 -               wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
42273 -                    ITR_REG_ALIGN(tx_rc->itr_setting) >>
42274 -                    ICE_ITR_GRAN_S);
42275 -       if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
42276 -               wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
42277 -                    ITR_REG_ALIGN(rx_rc->itr_setting) >>
42278 -                    ICE_ITR_GRAN_S);
42280 -       q_vector->intrl = coalesce->intrl;
42281 +       q_vector->intrl = stored_intrl_setting;
42282         wr32(hw, GLINT_RATE(q_vector->reg_idx),
42283              ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
42286 +/**
42287 + * ice_vsi_rebuild_update_coalesce_itr - set coalesce for a q_vector
42288 + * @q_vector: pointer to q_vector which is being updated
42289 + * @rc: pointer to ring container
42290 + * @stored_itr_setting: original ITR setting
42291 + *
42292 + * Set coalesce param in q_vector and update these parameters in HW.
42293 + */
42294 +static void
42295 +ice_vsi_rebuild_update_coalesce_itr(struct ice_q_vector *q_vector,
42296 +                                   struct ice_ring_container *rc,
42297 +                                   u16 stored_itr_setting)
42299 +       struct ice_hw *hw = &q_vector->vsi->back->hw;
42301 +       rc->itr_setting = stored_itr_setting;
42303 +       /* dynamic ITR values will be updated during Tx/Rx */
42304 +       if (!ITR_IS_DYNAMIC(rc->itr_setting))
42305 +               wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
42306 +                    ITR_REG_ALIGN(rc->itr_setting) >> ICE_ITR_GRAN_S);
42309  /**
42310   * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
42311   * @vsi: VSI connected with q_vectors
42312 @@ -2869,6 +2877,11 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
42313                 coalesce[i].itr_tx = q_vector->tx.itr_setting;
42314                 coalesce[i].itr_rx = q_vector->rx.itr_setting;
42315                 coalesce[i].intrl = q_vector->intrl;
42317 +               if (i < vsi->num_txq)
42318 +                       coalesce[i].tx_valid = true;
42319 +               if (i < vsi->num_rxq)
42320 +                       coalesce[i].rx_valid = true;
42321         }
42323         return vsi->num_q_vectors;
42324 @@ -2893,17 +2906,59 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
42325         if ((size && !coalesce) || !vsi)
42326                 return;
42328 -       for (i = 0; i < size && i < vsi->num_q_vectors; i++)
42329 -               ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
42330 -                                               &coalesce[i]);
42332 -       /* number of q_vectors increased, so assume coalesce settings were
42333 -        * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
42334 -        * the previous settings from q_vector 0 for all of the new q_vectors
42335 +       /* There are a couple of cases that have to be handled here:
42336 +        *   1. The case where the number of queue vectors stays the same, but
42337 +        *      the number of Tx or Rx rings changes (the first for loop)
42338 +        *   2. The case where the number of queue vectors increased (the
42339 +        *      second for loop)
42340          */
42341 -       for (; i < vsi->num_q_vectors; i++)
42342 -               ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
42343 -                                               &coalesce[0]);
42344 +       for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
42345 +               /* There are 2 cases to handle here and they are the same for
42346 +                * both Tx and Rx:
42347 +                *   if the entry was valid previously (coalesce[i].[tr]x_valid
42348 +                *   and the loop variable is less than the number of rings
42349 +                *   allocated, then write the previous values
42350 +                *
42351 +                *   if the entry was not valid previously, but the number of
42352 +                *   rings is less than are allocated (this means the number of
42353 +                *   rings increased from previously), then write out the
42354 +                *   values in the first element
42355 +                */
42356 +               if (i < vsi->alloc_rxq && coalesce[i].rx_valid)
42357 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42358 +                                                           &vsi->q_vectors[i]->rx,
42359 +                                                           coalesce[i].itr_rx);
42360 +               else if (i < vsi->alloc_rxq)
42361 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42362 +                                                           &vsi->q_vectors[i]->rx,
42363 +                                                           coalesce[0].itr_rx);
42365 +               if (i < vsi->alloc_txq && coalesce[i].tx_valid)
42366 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42367 +                                                           &vsi->q_vectors[i]->tx,
42368 +                                                           coalesce[i].itr_tx);
42369 +               else if (i < vsi->alloc_txq)
42370 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42371 +                                                           &vsi->q_vectors[i]->tx,
42372 +                                                           coalesce[0].itr_tx);
42374 +               ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
42375 +                                                     coalesce[i].intrl);
42376 +       }
42378 +       /* the number of queue vectors increased, so write whatever is in
42379 +        * the first element
42380 +        */
42381 +       for (; i < vsi->num_q_vectors; i++) {
42382 +               ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42383 +                                                   &vsi->q_vectors[i]->tx,
42384 +                                                   coalesce[0].itr_tx);
42385 +               ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
42386 +                                                   &vsi->q_vectors[i]->rx,
42387 +                                                   coalesce[0].itr_rx);
42388 +               ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
42389 +                                                     coalesce[0].intrl);
42390 +       }
42393  /**
42394 @@ -2932,9 +2987,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
42396         coalesce = kcalloc(vsi->num_q_vectors,
42397                            sizeof(struct ice_coalesce_stored), GFP_KERNEL);
42398 -       if (coalesce)
42399 -               prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
42400 -                                                                 coalesce);
42401 +       if (!coalesce)
42402 +               return -ENOMEM;
42404 +       prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
42406         ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
42407         ice_vsi_free_q_vectors(vsi);
42409 diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
42410 index 5dab77504fa5..672a7ff0ee36 100644
42411 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h
42412 +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
42413 @@ -351,6 +351,8 @@ struct ice_coalesce_stored {
42414         u16 itr_tx;
42415         u16 itr_rx;
42416         u8 intrl;
42417 +       u8 tx_valid;
42418 +       u8 rx_valid;
42419  };
42421  /* iterator for handling rings in ring container */
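
The ice hunks above preserve per-queue interrupt coalescing across a VSI rebuild: each stored entry carries a tx_valid/rx_valid flag, and a ring whose entry was not valid before the rebuild inherits the values of entry 0. A minimal userspace sketch of that fallback rule, with hypothetical types and names rather than the driver's own:

    #include <stdbool.h>
    #include <stdio.h>

    struct coal { unsigned short itr; bool valid; };

    /* Pick the ITR to restore for ring i: its own saved value if that
     * entry was valid before the rebuild, otherwise entry 0's value. */
    static unsigned short restore_itr(const struct coal *saved, int i, int alloc)
    {
            if (i >= alloc)         /* ring no longer exists */
                    return 0;
            return saved[i].valid ? saved[i].itr : saved[0].itr;
    }

    int main(void)
    {
            struct coal saved[3] = { { 100, true }, { 200, true }, { 0, false } };
            int i;

            for (i = 0; i < 3; i++) /* ring 2 is new: it inherits entry 0 */
                    printf("ring %d -> itr %u\n", i, restore_itr(saved, i, 3));
            return 0;
    }
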
42422 diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
42423 index 25dd903a3e92..d849b0f65de2 100644
42424 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
42425 +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
42426 @@ -431,7 +431,8 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
42427                         netif_carrier_on(port->dev);
42428                         if (!delayed_work_pending(caching_dw))
42429                                 queue_delayed_work(prestera_wq, caching_dw, 0);
42430 -               } else {
42431 +               } else if (netif_running(port->dev) &&
42432 +                          netif_carrier_ok(port->dev)) {
42433                         netif_carrier_off(port->dev);
42434                         if (delayed_work_pending(caching_dw))
42435                                 cancel_delayed_work(caching_dw);
42436 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
42437 index 01d3ee4b5829..bcd5e7ae8482 100644
42438 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
42439 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
42440 @@ -1319,7 +1319,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
42441                 skb->protocol = eth_type_trans(skb, netdev);
42443                 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
42444 -                   RX_DMA_VID(trxd.rxd3))
42445 +                   (trxd.rxd2 & RX_DMA_VTAG))
42446                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
42447                                                RX_DMA_VID(trxd.rxd3));
42448                 skb_record_rx_queue(skb, 0);
42449 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
42450 index fd3cec8f06ba..c47272100615 100644
42451 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
42452 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
42453 @@ -296,6 +296,7 @@
42454  #define RX_DMA_LSO             BIT(30)
42455  #define RX_DMA_PLEN0(_x)       (((_x) & 0x3fff) << 16)
42456  #define RX_DMA_GET_PLEN0(_x)   (((_x) >> 16) & 0x3fff)
42457 +#define RX_DMA_VTAG            BIT(15)
42459  /* QDMA descriptor rxd3 */
42460  #define RX_DMA_VID(_x)         ((_x) & 0xfff)
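
The mtk_eth_soc pair of hunks stops treating a nonzero VID field as proof that a VLAN tag is present and instead tests the hardware's dedicated tag-present bit (RX_DMA_VTAG in rxd2) before extracting the VID from rxd3. The pattern in isolation, with the descriptor reduced to two words:

    #include <stdint.h>
    #include <stdio.h>

    #define RX_DMA_VTAG   (1u << 15)        /* tag-present flag in rxd2 */
    #define RX_DMA_VID(x) ((x) & 0xfff)     /* VID field in rxd3 */

    static void handle_rx(uint32_t rxd2, uint32_t rxd3)
    {
            /* VID 0 is legal and a stale VID field can be nonzero, so only
             * the explicit flag says whether a tag was actually stripped. */
            if (rxd2 & RX_DMA_VTAG)
                    printf("tagged, vid=%u\n", RX_DMA_VID(rxd3));
            else
                    printf("untagged\n");
    }

    int main(void)
    {
            handle_rx(RX_DMA_VTAG, 42);     /* tagged, vid=42 */
            handle_rx(0, 42);               /* stale field: untagged */
            return 0;
    }
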
42461 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
42462 index bdbffe484fce..d2efe2455955 100644
42463 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
42464 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
42465 @@ -576,7 +576,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
42467         pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
42468         wqe = MLX5E_TX_FETCH_WQE(sq, pi);
42469 -       prefetchw(wqe->data);
42470 +       net_prefetchw(wqe->data);
42472         *session = (struct mlx5e_tx_mpwqe) {
42473                 .wqe = wqe,
42474 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
42475 index 22bee4990232..bb61f52d782d 100644
42476 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
42477 +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
42478 @@ -850,7 +850,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
42479                 return;
42480         }
42482 -       if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
42483 +       if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
42484             MLX5_ACCEL_ESP_ACTION_DECRYPT)
42485                 ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
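
The fpga/ipsec one-liner swaps a bitwise AND for an equality test: attrs.action holds an enumerated value, not a bitmask, so "action & MLX5_ACCEL_ESP_ACTION_DECRYPT" matches any value sharing a bit with the constant, and can never match at all if the constant happens to be 0. A small demonstration of the difference, with illustrative constants only:

    #include <stdio.h>

    enum esp_action { ESP_ENCRYPT = 1, ESP_DECRYPT = 2, ESP_BOTH = 3 };

    int main(void)
    {
            enum esp_action a = ESP_BOTH;

            /* Bit test: also true for ESP_BOTH, which shares bit 1. */
            printf("& test:  %d\n", (a & ESP_DECRYPT) != 0);
            /* Equality: true only for the exact DECRYPT value. */
            printf("== test: %d\n", a == ESP_DECRYPT);
            return 0;
    }
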
42487 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
42488 index 9143ec326ebf..f146c618a78e 100644
42489 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
42490 +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
42491 @@ -1532,6 +1532,7 @@ static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *val
42493         DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
42494         DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
42495 +       misc_mask->source_eswitch_owner_vhca_id = 0;
42498  static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
42499 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
42500 index 7846a21555ef..1f6bc0c7e91d 100644
42501 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
42502 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
42503 @@ -535,6 +535,16 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
42504         u16 erif_index = 0;
42505         int err;
42507 +       /* Add the eRIF */
42508 +       if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
42509 +               erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
42510 +               err = mr->mr_ops->route_erif_add(mlxsw_sp,
42511 +                                                rve->mr_route->route_priv,
42512 +                                                erif_index);
42513 +               if (err)
42514 +                       return err;
42515 +       }
42517         /* Update the route action, as the new eVIF can be a tunnel or a pimreg
42518          * device which will require updating the action.
42519          */
42520 @@ -544,17 +554,7 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
42521                                                       rve->mr_route->route_priv,
42522                                                       route_action);
42523                 if (err)
42524 -                       return err;
42525 -       }
42527 -       /* Add the eRIF */
42528 -       if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
42529 -               erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
42530 -               err = mr->mr_ops->route_erif_add(mlxsw_sp,
42531 -                                                rve->mr_route->route_priv,
42532 -                                                erif_index);
42533 -               if (err)
42534 -                       goto err_route_erif_add;
42535 +                       goto err_route_action_update;
42536         }
42538         /* Update the minimum MTU */
42539 @@ -572,14 +572,14 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
42540         return 0;
42542  err_route_min_mtu_update:
42543 -       if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
42544 -               mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
42545 -                                          erif_index);
42546 -err_route_erif_add:
42547         if (route_action != rve->mr_route->route_action)
42548                 mr->mr_ops->route_action_update(mlxsw_sp,
42549                                                 rve->mr_route->route_priv,
42550                                                 rve->mr_route->route_action);
42551 +err_route_action_update:
42552 +       if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
42553 +               mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
42554 +                                          erif_index);
42555         return err;
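
The mlxsw reorder moves the eRIF add ahead of the route-action update so that the error labels unwind in exact reverse order of setup; the old code had the unwind steps crossed. The goto-cleanup shape the patch restores, with hypothetical step names:

    #include <stdio.h>

    static int step_a(void)  { return 0; }     /* e.g. add the eRIF */
    static int step_b(void)  { return -1; }    /* e.g. update action: fails */
    static void undo_a(void) { puts("undo a"); }

    static int setup(void)
    {
            int err;

            err = step_a();
            if (err)
                    return err;

            err = step_b();
            if (err)
                    goto err_b;     /* unwind only what succeeded */
            return 0;

    err_b:
            undo_a();               /* strictly the reverse of setup order */
            return err;
    }

    int main(void) { return setup() ? 1 : 0; }
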
42558 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
42559 index 713ee3041d49..bea978df7713 100644
42560 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
42561 +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
42562 @@ -364,6 +364,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
42564         attrs.split = eth_port.is_split;
42565         attrs.splittable = !attrs.split;
42566 +       attrs.lanes = eth_port.port_lanes;
42567         attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
42568         attrs.phys.port_number = eth_port.label_port;
42569         attrs.phys.split_subport_number = eth_port.label_subport;
42570 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
42571 index d8a3ecaed3fc..d8f0863b3934 100644
42572 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
42573 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
42574 @@ -1048,7 +1048,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
42575         for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
42576                 skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
42577                 if (!skb)
42578 -                       break;
42579 +                       goto error;
42580                 qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
42581                 skb_put(skb, QLCNIC_ILB_PKT_SIZE);
42582                 adapter->ahw->diag_cnt = 0;
42583 @@ -1072,6 +1072,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
42584                         cnt++;
42585         }
42586         if (cnt != i) {
42587 +error:
42588                 dev_err(&adapter->pdev->dev,
42589                         "LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
42590                 if (mode != QLCNIC_ILB_MODE)
42591 diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
42592 index 117188e3c7de..87b8c032195d 100644
42593 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
42594 +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
42595 @@ -1437,6 +1437,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
42597         struct emac_tpd tpd;
42598         u32 prod_idx;
42599 +       int len;
42601         memset(&tpd, 0, sizeof(tpd));
42603 @@ -1456,9 +1457,10 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
42604         if (skb_network_offset(skb) != ETH_HLEN)
42605                 TPD_TYP_SET(&tpd, 1);
42607 +       len = skb->len;
42608         emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
42610 -       netdev_sent_queue(adpt->netdev, skb->len);
42611 +       netdev_sent_queue(adpt->netdev, len);
42613 +       /* Make sure there are enough free descriptors to hold one
42614          * maximum-sized SKB.  We need one desc for each fragment,
42615 diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
42616 index eb0c03bdb12d..cad57d58d764 100644
42617 --- a/drivers/net/ethernet/renesas/ravb_main.c
42618 +++ b/drivers/net/ethernet/renesas/ravb_main.c
42619 @@ -911,31 +911,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
42620         int q = napi - priv->napi;
42621         int mask = BIT(q);
42622         int quota = budget;
42623 -       u32 ris0, tis;
42625 -       for (;;) {
42626 -               tis = ravb_read(ndev, TIS);
42627 -               ris0 = ravb_read(ndev, RIS0);
42628 -               if (!((ris0 & mask) || (tis & mask)))
42629 -                       break;
42630 +       /* Processing RX Descriptor Ring */
42631 +       /* Clear RX interrupt */
42632 +       ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
42633 +       if (ravb_rx(ndev, &quota, q))
42634 +               goto out;
42636 -               /* Processing RX Descriptor Ring */
42637 -               if (ris0 & mask) {
42638 -                       /* Clear RX interrupt */
42639 -                       ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
42640 -                       if (ravb_rx(ndev, &quota, q))
42641 -                               goto out;
42642 -               }
42643 -               /* Processing TX Descriptor Ring */
42644 -               if (tis & mask) {
42645 -                       spin_lock_irqsave(&priv->lock, flags);
42646 -                       /* Clear TX interrupt */
42647 -                       ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
42648 -                       ravb_tx_free(ndev, q, true);
42649 -                       netif_wake_subqueue(ndev, q);
42650 -                       spin_unlock_irqrestore(&priv->lock, flags);
42651 -               }
42652 -       }
42653 +       /* Processing TX Descriptor Ring */
42654 +       spin_lock_irqsave(&priv->lock, flags);
42655 +       /* Clear TX interrupt */
42656 +       ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
42657 +       ravb_tx_free(ndev, q, true);
42658 +       netif_wake_subqueue(ndev, q);
42659 +       spin_unlock_irqrestore(&priv->lock, flags);
42661         napi_complete(napi);
42663 diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
42664 index da6886dcac37..4fa72b573c17 100644
42665 --- a/drivers/net/ethernet/sfc/ef10.c
42666 +++ b/drivers/net/ethernet/sfc/ef10.c
42667 @@ -2928,8 +2928,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
42669         /* Get the transmit queue */
42670         tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
42671 -       tx_queue = efx_channel_get_tx_queue(channel,
42672 -                                           tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42673 +       tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42675         if (!tx_queue->timestamping) {
42676                 /* Transmit completion */
42677 diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
42678 index 1bfeee283ea9..a3ca406a3561 100644
42679 --- a/drivers/net/ethernet/sfc/efx_channels.c
42680 +++ b/drivers/net/ethernet/sfc/efx_channels.c
42681 @@ -914,6 +914,8 @@ int efx_set_channels(struct efx_nic *efx)
42682                         }
42683                 }
42684         }
42685 +       if (xdp_queue_number)
42686 +               efx->xdp_tx_queue_count = xdp_queue_number;
42688         rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
42689         if (rc)
42690 diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
42691 index d75cf5ff5686..49df02ecee91 100644
42692 --- a/drivers/net/ethernet/sfc/farch.c
42693 +++ b/drivers/net/ethernet/sfc/farch.c
42694 @@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
42695                 /* Transmit completion */
42696                 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
42697                 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
42698 -               tx_queue = efx_channel_get_tx_queue(
42699 -                       channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42700 +               tx_queue = channel->tx_queue +
42701 +                               (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42702                 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
42703         } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
42704                 /* Rewrite the FIFO write pointer */
42705                 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
42706 -               tx_queue = efx_channel_get_tx_queue(
42707 -                       channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42708 +               tx_queue = channel->tx_queue +
42709 +                               (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
42711                 netif_tx_lock(efx->net_dev);
42712                 efx_farch_notify_tx_desc(tx_queue);
42713 @@ -1081,16 +1081,16 @@ static void
42714  efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
42716         struct efx_tx_queue *tx_queue;
42717 +       struct efx_channel *channel;
42718         int qid;
42720         qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
42721         if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
42722 -               tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
42723 -                                           qid % EFX_MAX_TXQ_PER_CHANNEL);
42724 -               if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
42725 +               channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
42726 +               tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
42727 +               if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
42728                         efx_farch_magic_event(tx_queue->channel,
42729                                               EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
42730 -               }
42731         }
42734 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
42735 index bf3250e0e59c..749585fe6fc9 100644
42736 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
42737 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
42738 @@ -352,6 +352,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
42739         plat_dat->bsp_priv = gmac;
42740         plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
42741         plat_dat->multicast_filter_bins = 0;
42742 +       plat_dat->tx_fifo_size = 8192;
42743 +       plat_dat->rx_fifo_size = 8192;
42745         err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
42746         if (err)
42747 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
42748 index 0e1ca2cba3c7..e18dee7fe687 100644
42749 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
42750 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
42751 @@ -30,7 +30,7 @@ struct sunxi_priv_data {
42752  static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
42754         struct sunxi_priv_data *gmac = priv;
42755 -       int ret;
42756 +       int ret = 0;
42758         if (gmac->regulator) {
42759                 ret = regulator_enable(gmac->regulator);
42760 @@ -51,11 +51,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
42761         } else {
42762                 clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
42763                 ret = clk_prepare(gmac->tx_clk);
42764 -               if (ret)
42765 -                       return ret;
42766 +               if (ret && gmac->regulator)
42767 +                       regulator_disable(gmac->regulator);
42768         }
42770 -       return 0;
42771 +       return ret;
42774  static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
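
The dwmac-sunxi change makes the clk_prepare() failure path release the regulator enabled earlier in the same function instead of returning with it held. Reduced to the bare acquire/undo pattern, with stubbed resource calls:

    #include <stdio.h>

    static int  regulator_on(void)  { puts("regulator on");  return 0; }
    static void regulator_off(void) { puts("regulator off"); }
    static int  clk_prepare(void)   { return -1; }  /* simulate failure */

    static int gmac_init(int have_regulator)
    {
            int ret = 0;

            if (have_regulator) {
                    ret = regulator_on();
                    if (ret)
                            return ret;
            }

            ret = clk_prepare();
            if (ret && have_regulator)
                    regulator_off();        /* undo the earlier enable */

            return ret;
    }

    int main(void) { return gmac_init(1) ? 1 : 0; }
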
42775 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
42776 index 29f765a246a0..aaf37598cbd3 100644
42777 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
42778 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
42779 @@ -638,6 +638,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
42780         value &= ~GMAC_PACKET_FILTER_PCF;
42781         value &= ~GMAC_PACKET_FILTER_PM;
42782         value &= ~GMAC_PACKET_FILTER_PR;
42783 +       value &= ~GMAC_PACKET_FILTER_RA;
42784         if (dev->flags & IFF_PROMISC) {
42785                 /* VLAN Tag Filter Fail Packets Queuing */
42786                 if (hw->vlan_fail_q_en) {
42787 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
42788 index 62aa0e95beb7..a7249e4071f1 100644
42789 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
42790 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
42791 @@ -222,7 +222,7 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
42792                                        u32 channel, int fifosz, u8 qmode)
42794         unsigned int rqs = fifosz / 256 - 1;
42795 -       u32 mtl_rx_op, mtl_rx_int;
42796 +       u32 mtl_rx_op;
42798         mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
42800 @@ -283,11 +283,6 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
42801         }
42803         writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
42805 -       /* Enable MTL RX overflow */
42806 -       mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
42807 -       writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
42808 -              ioaddr + MTL_CHAN_INT_CTRL(channel));
42811  static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
42812 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
42813 index 4749bd0af160..369d7cde3993 100644
42814 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
42815 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
42816 @@ -2757,8 +2757,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
42818         /* Enable TSO */
42819         if (priv->tso) {
42820 -               for (chan = 0; chan < tx_cnt; chan++)
42821 +               for (chan = 0; chan < tx_cnt; chan++) {
42822 +                       struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
42824 +                       /* TSO and TBS cannot co-exist */
42825 +                       if (tx_q->tbs & STMMAC_TBS_AVAIL)
42826 +                               continue;
42828                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
42829 +               }
42830         }
42832         /* Enable Split Header */
42833 @@ -2850,9 +2857,8 @@ static int stmmac_open(struct net_device *dev)
42834                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
42835                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
42837 +               /* Set up the per-TXQ tbs flag before TX descriptor alloc */
42838                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
42839 -               if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
42840 -                       tx_q->tbs &= ~STMMAC_TBS_AVAIL;
42841         }
42843         ret = alloc_dma_desc_resources(priv);
42844 @@ -4162,7 +4168,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
42845         /* To handle GMAC own interrupts */
42846         if ((priv->plat->has_gmac) || xmac) {
42847                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
42848 -               int mtl_status;
42850                 if (unlikely(status)) {
42851                         /* For LPI we need to save the tx status */
42852 @@ -4173,17 +4178,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
42853                 }
42855                 for (queue = 0; queue < queues_count; queue++) {
42856 -                       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
42858 -                       mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
42859 -                                                               queue);
42860 -                       if (mtl_status != -EINVAL)
42861 -                               status |= mtl_status;
42863 -                       if (status & CORE_IRQ_MTL_RX_OVERFLOW)
42864 -                               stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
42865 -                                                      rx_q->rx_tail_addr,
42866 -                                                      queue);
42867 +                       status = stmmac_host_mtl_irq_status(priv, priv->hw,
42868 +                                                           queue);
42869                 }
42871                 /* PCS link status */
42872 diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
42873 index 707ccdd03b19..74e748662ec0 100644
42874 --- a/drivers/net/ethernet/sun/niu.c
42875 +++ b/drivers/net/ethernet/sun/niu.c
42876 @@ -8144,10 +8144,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
42877                                      "VPD_SCAN: Reading in property [%s] len[%d]\n",
42878                                      namebuf, prop_len);
42879                         for (i = 0; i < prop_len; i++) {
42880 -                               err = niu_pci_eeprom_read(np, off + i);
42881 -                               if (err >= 0)
42882 -                                       *prop_buf = err;
42883 -                               ++prop_buf;
42884 +                               err = niu_pci_eeprom_read(np, off + i);
42885 +                               if (err < 0)
42886 +                                       return err;
42887 +                               *prop_buf++ = err;
42888                         }
42889                 }
42891 @@ -8158,14 +8158,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
42894  /* ESPC_PIO_EN_ENABLE must be set */
42895 -static void niu_pci_vpd_fetch(struct niu *np, u32 start)
42896 +static int niu_pci_vpd_fetch(struct niu *np, u32 start)
42898         u32 offset;
42899         int err;
42901         err = niu_pci_eeprom_read16_swp(np, start + 1);
42902         if (err < 0)
42903 -               return;
42904 +               return err;
42906         offset = err + 3;
42908 @@ -8174,12 +8174,14 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
42909                 u32 end;
42911                 err = niu_pci_eeprom_read(np, here);
42912 +               if (err < 0)
42913 +                       return err;
42914                 if (err != 0x90)
42915 -                       return;
42916 +                       return -EINVAL;
42918                 err = niu_pci_eeprom_read16_swp(np, here + 1);
42919                 if (err < 0)
42920 -                       return;
42921 +                       return err;
42923                 here = start + offset + 3;
42924                 end = start + offset + err;
42925 @@ -8187,9 +8189,12 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
42926                 offset += err;
42928                 err = niu_pci_vpd_scan_props(np, here, end);
42929 -               if (err < 0 || err == 1)
42930 -                       return;
42931 +               if (err < 0)
42932 +                       return err;
42933 +               if (err == 1)
42934 +                       return -EINVAL;
42935         }
42936 +       return 0;
42939  /* ESPC_PIO_EN_ENABLE must be set */
42940 @@ -9280,8 +9285,11 @@ static int niu_get_invariants(struct niu *np)
42941                 offset = niu_pci_vpd_offset(np);
42942                 netif_printk(np, probe, KERN_DEBUG, np->dev,
42943                              "%s() VPD offset [%08x]\n", __func__, offset);
42944 -               if (offset)
42945 -                       niu_pci_vpd_fetch(np, offset);
42946 +               if (offset) {
42947 +                       err = niu_pci_vpd_fetch(np, offset);
42948 +                       if (err < 0)
42949 +                               return err;
42950 +               }
42951                 nw64(ESPC_PIO_EN, 0);
42953                 if (np->flags & NIU_FLAGS_VPD_VALID) {
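
The niu hunks convert niu_pci_vpd_fetch() from silently bailing out (a void function full of bare returns) into one that returns a negative errno at every failure point, with the caller now checking the result. The shape of that conversion, simplified and with a stub in place of the EEPROM accessor:

    #include <errno.h>
    #include <stdio.h>

    static int eeprom_read(int off)
    {
            return off < 100 ? 0x90 : -EIO; /* pretend far reads fail */
    }

    /* Formerly "void fetch(...)": errors were swallowed, not reported. */
    static int vpd_fetch(int start)
    {
            int err = eeprom_read(start);

            if (err < 0)
                    return err;             /* propagate, don't hide */
            if (err != 0x90)
                    return -EINVAL;         /* malformed VPD block */
            return 0;
    }

    int main(void)
    {
            int err = vpd_fetch(200);

            if (err < 0)
                    fprintf(stderr, "VPD fetch failed: %d\n", err);
            return err ? 1 : 0;
    }
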
42954 diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
42955 index c7031e1960d4..03055c96f076 100644
42956 --- a/drivers/net/ethernet/ti/davinci_emac.c
42957 +++ b/drivers/net/ethernet/ti/davinci_emac.c
42958 @@ -169,11 +169,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
42959  /* EMAC mac_status register */
42960  #define EMAC_MACSTATUS_TXERRCODE_MASK  (0xF00000)
42961  #define EMAC_MACSTATUS_TXERRCODE_SHIFT (20)
42962 -#define EMAC_MACSTATUS_TXERRCH_MASK    (0x7)
42963 +#define EMAC_MACSTATUS_TXERRCH_MASK    (0x70000)
42964  #define EMAC_MACSTATUS_TXERRCH_SHIFT   (16)
42965  #define EMAC_MACSTATUS_RXERRCODE_MASK  (0xF000)
42966  #define EMAC_MACSTATUS_RXERRCODE_SHIFT (12)
42967 -#define EMAC_MACSTATUS_RXERRCH_MASK    (0x7)
42968 +#define EMAC_MACSTATUS_RXERRCH_MASK    (0x700)
42969  #define EMAC_MACSTATUS_RXERRCH_SHIFT   (8)
42971  /* EMAC RX register masks */
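
The davinci_emac fix widens the two channel masks so they cover the field at its shifted position: the driver masks first and shifts second, so a mask of 0x7 against a field living at bits 18:16 always produced zero. A worked extraction with a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    #define TXERRCH_MASK  0x70000u  /* bits 18:16, at field position */
    #define TXERRCH_SHIFT 16

    static unsigned int get_txerr_channel(uint32_t macstatus)
    {
            /* Mask first, then shift: the mask must sit where the field is. */
            return (macstatus & TXERRCH_MASK) >> TXERRCH_SHIFT;
    }

    int main(void)
    {
            uint32_t status = 0x00050000;   /* channel 5 in bits 18:16 */

            /* With the old 0x7 mask this printed 0 for any status. */
            printf("tx error channel: %u\n", get_txerr_channel(status));
            return 0;
    }
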
42972 diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
42973 index c6eb7f2368aa..911b5ef9e680 100644
42974 --- a/drivers/net/ethernet/xilinx/Kconfig
42975 +++ b/drivers/net/ethernet/xilinx/Kconfig
42976 @@ -18,12 +18,14 @@ if NET_VENDOR_XILINX
42978  config XILINX_EMACLITE
42979         tristate "Xilinx 10/100 Ethernet Lite support"
42980 +       depends on HAS_IOMEM
42981         select PHYLIB
42982         help
42983           This driver supports the 10/100 Ethernet Lite from Xilinx.
42985  config XILINX_AXI_EMAC
42986         tristate "Xilinx 10/100/1000 AXI Ethernet support"
42987 +       depends on HAS_IOMEM
42988         select PHYLINK
42989         help
42990           This driver supports the 10/100/1000 Ethernet from Xilinx for the
42991 @@ -31,6 +33,7 @@ config XILINX_AXI_EMAC
42993  config XILINX_LL_TEMAC
42994         tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
42995 +       depends on HAS_IOMEM
42996         select PHYLIB
42997         help
42998           This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
42999 diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
43000 index 0152f1e70783..9defaa21a1a9 100644
43001 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
43002 +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
43003 @@ -1085,7 +1085,7 @@ static int init_queues(struct port *port)
43004         int i;
43006         if (!ports_open) {
43007 -               dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
43008 +               dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
43009                                            POOL_ALLOC_SIZE, 32, 0);
43010                 if (!dma_pool)
43011                         return -ENOMEM;
43012 @@ -1435,6 +1435,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
43013         ndev->netdev_ops = &ixp4xx_netdev_ops;
43014         ndev->ethtool_ops = &ixp4xx_ethtool_ops;
43015         ndev->tx_queue_len = 100;
43016 +       /* Inherit the DMA masks from the platform device */
43017 +       ndev->dev.dma_mask = dev->dma_mask;
43018 +       ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
43020         netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
43022 diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
43023 index f722079dfb6a..f99c1048c97e 100644
43024 --- a/drivers/net/fddi/Kconfig
43025 +++ b/drivers/net/fddi/Kconfig
43026 @@ -40,17 +40,20 @@ config DEFXX
43028  config DEFXX_MMIO
43029         bool
43030 -       prompt "Use MMIO instead of PIO" if PCI || EISA
43031 +       prompt "Use MMIO instead of IOP" if PCI || EISA
43032         depends on DEFXX
43033 -       default n if PCI || EISA
43034 +       default n if EISA
43035         default y
43036         help
43037           This instructs the driver to use EISA or PCI memory-mapped I/O
43038 -         (MMIO) as appropriate instead of programmed I/O ports (PIO).
43039 +         (MMIO) as appropriate instead of programmed I/O ports (IOP).
43040           Enabling this gives an improvement in processing time in parts
43041 -         of the driver, but it may cause problems with EISA (DEFEA)
43042 -         adapters.  TURBOchannel does not have the concept of I/O ports,
43043 -         so MMIO is always used for these (DEFTA) adapters.
43044 +         of the driver, but it requires a memory window to be configured
43045 +         for EISA (DEFEA) adapters that may not always be available.
43046 +         Conversely, some PCIe host bridges do not support IOP, so MMIO
43047 +         may be required to access PCI (DEFPA) adapters on downstream PCI
43048 +         buses on some systems.  TURBOchannel does not have the concept
43049 +         of I/O ports, so MMIO is always used for these (DEFTA) adapters.
43051           If unsure, say N.
43053 diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
43054 index 077c68498f04..c7ce6d5491af 100644
43055 --- a/drivers/net/fddi/defxx.c
43056 +++ b/drivers/net/fddi/defxx.c
43057 @@ -495,6 +495,25 @@ static const struct net_device_ops dfx_netdev_ops = {
43058         .ndo_set_mac_address    = dfx_ctl_set_mac_address,
43059  };
43061 +static void dfx_register_res_alloc_err(const char *print_name, bool mmio,
43062 +                                      bool eisa)
43064 +       pr_err("%s: Cannot use %s, no address set, aborting\n",
43065 +              print_name, mmio ? "MMIO" : "I/O");
43066 +       pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n",
43067 +              print_name, mmio ? 'n' : 'y');
43068 +       if (eisa && mmio)
43069 +               pr_err("%s: Or run ECU and set adapter's MMIO location\n",
43070 +                      print_name);
43073 +static void dfx_register_res_err(const char *print_name, bool mmio,
43074 +                                unsigned long start, unsigned long len)
43076 +       pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
43077 +              print_name, mmio ? "MMIO" : "I/O", len, start);
43080  /*
43081   * ================
43082   * = dfx_register =
43083 @@ -568,15 +587,12 @@ static int dfx_register(struct device *bdev)
43084         dev_set_drvdata(bdev, dev);
43086         dfx_get_bars(bdev, bar_start, bar_len);
43087 -       if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
43088 -               pr_err("%s: Cannot use MMIO, no address set, aborting\n",
43089 -                      print_name);
43090 -               pr_err("%s: Run ECU and set adapter's MMIO location\n",
43091 -                      print_name);
43092 -               pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
43093 -                      "\n", print_name);
43094 +       if (bar_len[0] == 0 ||
43095 +           (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
43096 +               dfx_register_res_alloc_err(print_name, dfx_use_mmio,
43097 +                                          dfx_bus_eisa);
43098                 err = -ENXIO;
43099 -               goto err_out;
43100 +               goto err_out_disable;
43101         }
43103         if (dfx_use_mmio)
43104 @@ -585,18 +601,16 @@ static int dfx_register(struct device *bdev)
43105         else
43106                 region = request_region(bar_start[0], bar_len[0], print_name);
43107         if (!region) {
43108 -               pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
43109 -                      "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name,
43110 -                      (long)bar_len[0], (long)bar_start[0]);
43111 +               dfx_register_res_err(print_name, dfx_use_mmio,
43112 +                                    bar_start[0], bar_len[0]);
43113                 err = -EBUSY;
43114                 goto err_out_disable;
43115         }
43116         if (bar_start[1] != 0) {
43117                 region = request_region(bar_start[1], bar_len[1], print_name);
43118                 if (!region) {
43119 -                       pr_err("%s: Cannot reserve I/O resource "
43120 -                              "0x%lx @ 0x%lx, aborting\n", print_name,
43121 -                              (long)bar_len[1], (long)bar_start[1]);
43122 +                       dfx_register_res_err(print_name, 0,
43123 +                                            bar_start[1], bar_len[1]);
43124                         err = -EBUSY;
43125                         goto err_out_csr_region;
43126                 }
43127 @@ -604,9 +618,8 @@ static int dfx_register(struct device *bdev)
43128         if (bar_start[2] != 0) {
43129                 region = request_region(bar_start[2], bar_len[2], print_name);
43130                 if (!region) {
43131 -                       pr_err("%s: Cannot reserve I/O resource "
43132 -                              "0x%lx @ 0x%lx, aborting\n", print_name,
43133 -                              (long)bar_len[2], (long)bar_start[2]);
43134 +                       dfx_register_res_err(print_name, 0,
43135 +                                            bar_start[2], bar_len[2]);
43136                         err = -EBUSY;
43137                         goto err_out_bh_region;
43138                 }
43139 diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
43140 index 42f31c681846..61cd3dd4deab 100644
43141 --- a/drivers/net/geneve.c
43142 +++ b/drivers/net/geneve.c
43143 @@ -891,7 +891,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
43144         __be16 sport;
43145         int err;
43147 -       if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
43148 +       if (!pskb_inet_may_pull(skb))
43149                 return -EINVAL;
43151         sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
43152 @@ -988,7 +988,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
43153         __be16 sport;
43154         int err;
43156 -       if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
43157 +       if (!pskb_inet_may_pull(skb))
43158                 return -EINVAL;
43160         sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
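
The geneve fix replaces a fixed sizeof(struct iphdr) pull with pskb_inet_may_pull(), which validates the header length appropriate to the packet's actual address family before the headers are parsed. The underlying rule in a self-contained form, with the parsing deliberately simplified:

    #include <stddef.h>
    #include <stdio.h>

    #define IPV4_HLEN 20
    #define IPV6_HLEN 40

    /* True if at least the family-appropriate header is present. */
    static int inet_may_pull(const unsigned char *pkt, size_t len)
    {
            if (len < 1)
                    return 0;
            switch (pkt[0] >> 4) {          /* IP version nibble */
            case 4: return len >= IPV4_HLEN;
            case 6: return len >= IPV6_HLEN;
            default: return 0;
            }
    }

    int main(void)
    {
            unsigned char v6pkt[24] = { 0x60 };     /* IPv6, truncated */

            /* A fixed 20-byte check would pass here and then overread. */
            printf("safe to parse: %d\n", inet_may_pull(v6pkt, sizeof(v6pkt)));
            return 0;
    }
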
43161 diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
43162 index 390d3403386a..144892060718 100644
43163 --- a/drivers/net/ipa/gsi.c
43164 +++ b/drivers/net/ipa/gsi.c
43165 @@ -211,8 +211,8 @@ static void gsi_irq_setup(struct gsi *gsi)
43166         iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
43168         /* The inter-EE registers are in the non-adjusted address range */
43169 -       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
43170 -       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
43171 +       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET);
43172 +       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET);
43174         iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
43176 diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
43177 index 1622d8cf8dea..48ef04afab79 100644
43178 --- a/drivers/net/ipa/gsi_reg.h
43179 +++ b/drivers/net/ipa/gsi_reg.h
43180 @@ -53,15 +53,15 @@
43181  #define GSI_EE_REG_ADJUST                      0x0000d000      /* IPA v4.5+ */
43183  /* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
43184 -#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
43185 -                       GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
43186 -#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
43187 -                       (0x0000c018 + 0x1000 * (ee))
43189 -#define GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET \
43190 -                       GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
43191 -#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
43192 -                       (0x0000c01c + 0x1000 * (ee))
43193 +#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
43194 +                       GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
43195 +#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
43196 +                       (0x0000c020 + 0x1000 * (ee))
43198 +#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
43199 +                       GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
43200 +#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
43201 +                       (0x0000c024 + 0x1000 * (ee))
43203  /* All other register offsets are relative to gsi->virt */
43204  #define GSI_CH_C_CNTXT_0_OFFSET(ch) \
43205 diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
43206 index 6eac50d4b42f..d453ec016168 100644
43207 --- a/drivers/net/phy/intel-xway.c
43208 +++ b/drivers/net/phy/intel-xway.c
43209 @@ -11,6 +11,18 @@
43211  #define XWAY_MDIO_IMASK                        0x19    /* interrupt mask */
43212  #define XWAY_MDIO_ISTAT                        0x1A    /* interrupt status */
43213 +#define XWAY_MDIO_LED                  0x1B    /* led control */
43215 +/* bit 15:12 are reserved */
43216 +/* bits 15:12 are reserved */
43217 +#define XWAY_MDIO_LED_LED2_EN          BIT(10) /* Enable the integrated function of LED2 */
43218 +#define XWAY_MDIO_LED_LED1_EN          BIT(9)  /* Enable the integrated function of LED1 */
43219 +#define XWAY_MDIO_LED_LED0_EN          BIT(8)  /* Enable the integrated function of LED0 */
43220 +/* bits 7:4 are reserved */
43221 +#define XWAY_MDIO_LED_LED3_DA          BIT(3)  /* Direct Access to LED3 */
43222 +#define XWAY_MDIO_LED_LED2_DA          BIT(2)  /* Direct Access to LED2 */
43223 +#define XWAY_MDIO_LED_LED1_DA          BIT(1)  /* Direct Access to LED1 */
43224 +#define XWAY_MDIO_LED_LED0_DA          BIT(0)  /* Direct Access to LED0 */
43226  #define XWAY_MDIO_INIT_WOL             BIT(15) /* Wake-On-LAN */
43227  #define XWAY_MDIO_INIT_MSRE            BIT(14)
43228 @@ -159,6 +171,15 @@ static int xway_gphy_config_init(struct phy_device *phydev)
43229         /* Clear all pending interrupts */
43230         phy_read(phydev, XWAY_MDIO_ISTAT);
43232 +       /* Ensure that the integrated LED function is enabled for all LEDs */
43233 +       err = phy_write(phydev, XWAY_MDIO_LED,
43234 +                       XWAY_MDIO_LED_LED0_EN |
43235 +                       XWAY_MDIO_LED_LED1_EN |
43236 +                       XWAY_MDIO_LED_LED2_EN |
43237 +                       XWAY_MDIO_LED_LED3_EN);
43238 +       if (err)
43239 +               return err;
43241         phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH,
43242                       XWAY_MMD_LEDCH_NACS_NONE |
43243                       XWAY_MMD_LEDCH_SBF_F02HZ |
43244 diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
43245 index 8018ddf7f316..f86c9ddc609e 100644
43246 --- a/drivers/net/phy/marvell.c
43247 +++ b/drivers/net/phy/marvell.c
43248 @@ -967,22 +967,28 @@ static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
43250  static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
43252 -       int val;
43253 +       int val, err;
43255         if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
43256                 return -E2BIG;
43258 -       if (!cnt)
43259 -               return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
43260 -                                     MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
43261 +       if (!cnt) {
43262 +               err = phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
43263 +                                    MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
43264 +       } else {
43265 +               val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
43266 +               val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
43268 -       val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
43269 -       val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
43270 +               err = phy_modify(phydev, MII_M1111_PHY_EXT_CR,
43271 +                                MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
43272 +                                MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
43273 +                                val);
43274 +       }
43276 -       return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
43277 -                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
43278 -                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
43279 -                         val);
43280 +       if (err < 0)
43281 +               return err;
43283 +       return genphy_soft_reset(phydev);
43286  static int m88e1111_get_tunable(struct phy_device *phydev,
43287 @@ -1025,22 +1031,28 @@ static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
43289  static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
43291 -       int val;
43292 +       int val, err;
43294         if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
43295                 return -E2BIG;
43297 -       if (!cnt)
43298 -               return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
43299 -                                     MII_M1011_PHY_SCR_DOWNSHIFT_EN);
43300 +       if (!cnt) {
43301 +               err = phy_clear_bits(phydev, MII_M1011_PHY_SCR,
43302 +                                    MII_M1011_PHY_SCR_DOWNSHIFT_EN);
43303 +       } else {
43304 +               val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
43305 +               val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
43307 -       val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
43308 -       val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
43309 +               err = phy_modify(phydev, MII_M1011_PHY_SCR,
43310 +                                MII_M1011_PHY_SCR_DOWNSHIFT_EN |
43311 +                                MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
43312 +                                val);
43313 +       }
43315 -       return phy_modify(phydev, MII_M1011_PHY_SCR,
43316 -                         MII_M1011_PHY_SCR_DOWNSHIFT_EN |
43317 -                         MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
43318 -                         val);
43319 +       if (err < 0)
43320 +               return err;
43322 +       return genphy_soft_reset(phydev);
43325  static int m88e1011_get_tunable(struct phy_device *phydev,
43326 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
43327 index cc38e326405a..af2e1759b523 100644
43328 --- a/drivers/net/phy/phy_device.c
43329 +++ b/drivers/net/phy/phy_device.c
43330 @@ -273,6 +273,9 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
43332         struct phy_device *phydev = to_phy_device(dev);
43334 +       if (phydev->mac_managed_pm)
43335 +               return 0;
43337         /* We must stop the state machine manually, otherwise it stops out of
43338          * control, possibly with the phydev->lock held. Upon resume, netdev
43339          * may call phy routines that try to grab the same lock, and that may
43340 @@ -294,6 +297,9 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
43341         struct phy_device *phydev = to_phy_device(dev);
43342         int ret;
43344 +       if (phydev->mac_managed_pm)
43345 +               return 0;
43347         if (!phydev->suspended_by_mdio_bus)
43348                 goto no_resume;
43350 diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
43351 index ddb78fb4d6dc..d8cac02a79b9 100644
43352 --- a/drivers/net/phy/smsc.c
43353 +++ b/drivers/net/phy/smsc.c
43354 @@ -185,10 +185,13 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
43355         return genphy_config_aneg(phydev);
43358 -static int lan87xx_config_aneg_ext(struct phy_device *phydev)
43359 +static int lan95xx_config_aneg_ext(struct phy_device *phydev)
43361         int rc;
43363 +       if (phydev->phy_id != 0x0007c0f0) /* not (LAN9500A or LAN9505A) */
43364 +               return lan87xx_config_aneg(phydev);
43366         /* Extend Manual AutoMDIX timer */
43367         rc = phy_read(phydev, PHY_EDPD_CONFIG);
43368         if (rc < 0)
43369 @@ -441,7 +444,7 @@ static struct phy_driver smsc_phy_driver[] = {
43370         .read_status    = lan87xx_read_status,
43371         .config_init    = smsc_phy_config_init,
43372         .soft_reset     = smsc_phy_reset,
43373 -       .config_aneg    = lan87xx_config_aneg_ext,
43374 +       .config_aneg    = lan95xx_config_aneg_ext,
43376         /* IRQ related */
43377         .config_intr    = smsc_phy_config_intr,
43378 diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
43379 index d650b39b6e5d..c1316718304d 100644
43380 --- a/drivers/net/usb/ax88179_178a.c
43381 +++ b/drivers/net/usb/ax88179_178a.c
43382 @@ -296,12 +296,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
43383         int ret;
43385         if (2 == size) {
43386 -               u16 buf;
43387 +               u16 buf = 0;
43388                 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
43389                 le16_to_cpus(&buf);
43390                 *((u16 *)data) = buf;
43391         } else if (4 == size) {
43392 -               u32 buf;
43393 +               u32 buf = 0;
43394                 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
43395                 le32_to_cpus(&buf);
43396                 *((u32 *)data) = buf;
43397 @@ -1296,6 +1296,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
43399         u8 mac[ETH_ALEN];
43401 +       memset(mac, 0, sizeof(mac));
43403         /* Maybe the boot loader passed the MAC address via device tree */
43404         if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) {
43405                 netif_dbg(dev, ifup, dev->net,
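
Both ax88179 hunks apply one rule: any buffer that a fallible read is supposed to fill gets zeroed first, so a failed or short transfer cannot hand uninitialized stack bytes to the caller. In miniature, with a stub standing in for the USB control transfer:

    #include <stdint.h>
    #include <stdio.h>

    static int usb_read(uint16_t *out)
    {
            (void)out;
            return -1;      /* simulate a failed transfer */
    }

    int main(void)
    {
            uint16_t buf = 0;       /* deterministic even on failure */

            if (usb_read(&buf) < 0)
                    fprintf(stderr, "read failed, buf=0x%04x\n", buf);
            return 0;
    }
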
43406 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
43407 index 9bc58e64b5b7..3ef4b2841402 100644
43408 --- a/drivers/net/usb/hso.c
43409 +++ b/drivers/net/usb/hso.c
43410 @@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
43411                         cancel_work_sync(&serial_table[i]->async_put_intf);
43412                         cancel_work_sync(&serial_table[i]->async_get_intf);
43413                         hso_serial_tty_unregister(serial);
43414 -                       kref_put(&serial_table[i]->ref, hso_serial_ref_free);
43415 +                       kref_put(&serial->parent->ref, hso_serial_ref_free);
43416                 }
43417         }
43419 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
43420 index e81c5699c952..d2b360cfb402 100644
43421 --- a/drivers/net/usb/lan78xx.c
43422 +++ b/drivers/net/usb/lan78xx.c
43423 @@ -2655,7 +2655,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
43424         while (!skb_queue_empty(&dev->rxq) &&
43425                !skb_queue_empty(&dev->txq) &&
43426                !skb_queue_empty(&dev->done)) {
43427 -               schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
43428 +               schedule_msec_hrtimeout(UNLINK_TIMEOUT_MS);
43429                 set_current_state(TASK_UNINTERRUPTIBLE);
43430                 netif_dbg(dev, ifdown, dev->net,
43431                           "waited for %d urb completions\n", temp);
43432 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
43433 index f4f37ecfed58..36647378e016 100644
43434 --- a/drivers/net/usb/usbnet.c
43435 +++ b/drivers/net/usb/usbnet.c
43436 @@ -764,7 +764,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q)
43437         spin_lock_irqsave(&q->lock, flags);
43438         while (!skb_queue_empty(q)) {
43439                 spin_unlock_irqrestore(&q->lock, flags);
43440 -               schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
43441 +               schedule_msec_hrtimeout(UNLINK_TIMEOUT_MS);
43442                 set_current_state(TASK_UNINTERRUPTIBLE);
43443                 spin_lock_irqsave(&q->lock, flags);
43444         }
43445 diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
43446 index 4d9dc7d15908..0720f5f92caa 100644
43447 --- a/drivers/net/wan/hdlc_fr.c
43448 +++ b/drivers/net/wan/hdlc_fr.c
43449 @@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
43451                 if (pad > 0) { /* Pad the frame with zeros */
43452                         if (__skb_pad(skb, pad, false))
43453 -                               goto out;
43454 +                               goto drop;
43455                         skb_put(skb, pad);
43456                 }
43457         }
43458 @@ -448,9 +448,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
43459         return NETDEV_TX_OK;
43461  drop:
43462 -       kfree_skb(skb);
43463 -out:
43464         dev->stats.tx_dropped++;
43465 +       kfree_skb(skb);
43466         return NETDEV_TX_OK;
43469 diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
43470 index c3372498f4f1..8fda0446ff71 100644
43471 --- a/drivers/net/wan/lapbether.c
43472 +++ b/drivers/net/wan/lapbether.c
43473 @@ -51,6 +51,8 @@ struct lapbethdev {
43474         struct list_head        node;
43475         struct net_device       *ethdev;        /* link to ethernet device */
43476         struct net_device       *axdev;         /* lapbeth device (lapb#) */
43477 +       bool                    up;
43478 +       spinlock_t              up_lock;        /* Protects "up" */
43479  };
43481  static LIST_HEAD(lapbeth_devices);
43482 @@ -101,8 +103,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
43483         rcu_read_lock();
43484         lapbeth = lapbeth_get_x25_dev(dev);
43485         if (!lapbeth)
43486 -               goto drop_unlock;
43487 -       if (!netif_running(lapbeth->axdev))
43488 +               goto drop_unlock_rcu;
43489 +       spin_lock_bh(&lapbeth->up_lock);
43490 +       if (!lapbeth->up)
43491                 goto drop_unlock;
43493         len = skb->data[0] + skb->data[1] * 256;
43494 @@ -117,11 +120,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
43495                 goto drop_unlock;
43496         }
43497  out:
43498 +       spin_unlock_bh(&lapbeth->up_lock);
43499         rcu_read_unlock();
43500         return 0;
43501  drop_unlock:
43502         kfree_skb(skb);
43503         goto out;
43504 +drop_unlock_rcu:
43505 +       rcu_read_unlock();
43506  drop:
43507         kfree_skb(skb);
43508         return 0;
43509 @@ -151,13 +157,11 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
43510  static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
43511                                       struct net_device *dev)
43513 +       struct lapbethdev *lapbeth = netdev_priv(dev);
43514         int err;
43516 -       /*
43517 -        * Just to be *really* sure not to send anything if the interface
43518 -        * is down, the ethernet device may have gone.
43519 -        */
43520 -       if (!netif_running(dev))
43521 +       spin_lock_bh(&lapbeth->up_lock);
43522 +       if (!lapbeth->up)
43523                 goto drop;
43525         /* There should be a pseudo header of 1 byte added by upper layers.
43526 @@ -194,6 +198,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
43527                 goto drop;
43528         }
43529  out:
43530 +       spin_unlock_bh(&lapbeth->up_lock);
43531         return NETDEV_TX_OK;
43532  drop:
43533         kfree_skb(skb);
43534 @@ -285,6 +290,7 @@ static const struct lapb_register_struct lapbeth_callbacks = {
43535   */
43536  static int lapbeth_open(struct net_device *dev)
43538 +       struct lapbethdev *lapbeth = netdev_priv(dev);
43539         int err;
43541         if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
43542 @@ -292,13 +298,22 @@ static int lapbeth_open(struct net_device *dev)
43543                 return -ENODEV;
43544         }
43546 +       spin_lock_bh(&lapbeth->up_lock);
43547 +       lapbeth->up = true;
43548 +       spin_unlock_bh(&lapbeth->up_lock);
43550         return 0;
43553  static int lapbeth_close(struct net_device *dev)
43555 +       struct lapbethdev *lapbeth = netdev_priv(dev);
43556         int err;
43558 +       spin_lock_bh(&lapbeth->up_lock);
43559 +       lapbeth->up = false;
43560 +       spin_unlock_bh(&lapbeth->up_lock);
43562         if ((err = lapb_unregister(dev)) != LAPB_OK)
43563                 pr_err("lapb_unregister error: %d\n", err);
43565 @@ -356,6 +371,9 @@ static int lapbeth_new_device(struct net_device *dev)
43566         dev_hold(dev);
43567         lapbeth->ethdev = dev;
43569 +       lapbeth->up = false;
43570 +       spin_lock_init(&lapbeth->up_lock);
43572         rc = -EIO;
43573         if (register_netdevice(ndev))
43574                 goto fail;
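
The lapbether rework above replaces netif_running() checks with a driver-private "up" flag under a spinlock. netif_running() changes state before ndo_stop runs, so lapbeth_xmit() or lapbeth_rcv() could still be inside the LAPB calls while lapbeth_close() was tearing things down; with the flag, close() marks the device down under the lock, and any path that takes the lock afterwards sees it and drops the frame. The pattern, compressed (all names hypothetical):

struct foo_priv {
        bool            up;
        spinlock_t      up_lock;        /* protects "up" */
};

static int foo_open(struct net_device *dev)
{
        struct foo_priv *p = netdev_priv(dev);

        /* register with the protocol layer first, then advertise "up" */
        spin_lock_bh(&p->up_lock);
        p->up = true;
        spin_unlock_bh(&p->up_lock);
        return 0;
}

static int foo_close(struct net_device *dev)
{
        struct foo_priv *p = netdev_priv(dev);

        spin_lock_bh(&p->up_lock);
        p->up = false;
        spin_unlock_bh(&p->up_lock);
        /* no xmit/rcv path can pass the check now; safe to unregister */
        return 0;
}

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct foo_priv *p = netdev_priv(dev);

        spin_lock_bh(&p->up_lock);
        if (!p->up) {
                kfree_skb(skb);
                goto out;
        }
        /* ... hand the skb to the protocol layer, still under the lock ... */
out:
        spin_unlock_bh(&p->up_lock);
        return NETDEV_TX_OK;
}
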
43575 diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
43576 index 0a37be6a7d33..fab398046a3f 100644
43577 --- a/drivers/net/wireless/ath/ath10k/htc.c
43578 +++ b/drivers/net/wireless/ath/ath10k/htc.c
43579 @@ -669,7 +669,7 @@ static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
43581         ath10k_dbg(ar, ATH10K_DBG_HTC,
43582                    "bundle tx status %d eid %d req count %d count %d len %d\n",
43583 -                  ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
43584 +                  ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
43585         return ret;
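
The ath10k_htc_send_bundle change above fixes a use-after-free in a debug print: the transport call consumes bundle_skb, so reading bundle_skb->len afterwards touches freed memory, and the message now uses the skb_len value captured before the send. Caching what you want to log before handing the buffer off is the general cure (hand_off() is a hypothetical consumer):

static int send_and_log(struct sk_buff *skb)
{
        unsigned int len = skb->len;    /* read while we still own skb */
        int ret;

        ret = hand_off(skb);            /* may free or queue skb */
        pr_debug("tx status %d len %u\n", ret, len);    /* not skb->len */
        return ret;
}
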
43588 diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
43589 index d97b33f789e4..7efbe03fbca8 100644
43590 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
43591 +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
43592 @@ -592,6 +592,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
43593                                         GFP_ATOMIC
43594                                         );
43595                 break;
43596 +       default:
43597 +               kfree(tb);
43598 +               return;
43599         }
43601  exit:
43602 diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
43603 index cccfd3bd4d27..ca5cda890d58 100644
43604 --- a/drivers/net/wireless/ath/ath11k/wmi.c
43605 +++ b/drivers/net/wireless/ath/ath11k/wmi.c
43606 @@ -5417,31 +5417,6 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
43607         return 0;
43610 -static int
43611 -ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf,
43612 -                        u32 len, const struct wmi_pdev_temperature_event *ev)
43614 -       const void **tb;
43615 -       int ret;
43617 -       tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
43618 -       if (IS_ERR(tb)) {
43619 -               ret = PTR_ERR(tb);
43620 -               ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
43621 -               return ret;
43622 -       }
43624 -       ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
43625 -       if (!ev) {
43626 -               ath11k_warn(ab, "failed to fetch pdev temp ev");
43627 -               kfree(tb);
43628 -               return -EPROTO;
43629 -       }
43631 -       kfree(tb);
43632 -       return 0;
43635  size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
43637         struct ath11k_fw_stats_vdev *i;
43638 @@ -6849,23 +6824,37 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
43639                                   struct sk_buff *skb)
43641         struct ath11k *ar;
43642 -       struct wmi_pdev_temperature_event ev = {0};
43643 +       const void **tb;
43644 +       const struct wmi_pdev_temperature_event *ev;
43645 +       int ret;
43647 +       tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
43648 +       if (IS_ERR(tb)) {
43649 +               ret = PTR_ERR(tb);
43650 +               ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
43651 +               return;
43652 +       }
43654 -       if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
43655 -               ath11k_warn(ab, "failed to extract pdev temperature event");
43656 +       ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
43657 +       if (!ev) {
43658 +               ath11k_warn(ab, "failed to fetch pdev temp ev");
43659 +               kfree(tb);
43660                 return;
43661         }
43663         ath11k_dbg(ab, ATH11K_DBG_WMI,
43664 -                  "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
43665 +                  "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);
43667 -       ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id);
43668 +       ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
43669         if (!ar) {
43670 -               ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
43671 +               ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
43672 +               kfree(tb);
43673                 return;
43674         }
43676 -       ath11k_thermal_event_temperature(ar, ev.temp);
43677 +       ath11k_thermal_event_temperature(ar, ev->temp);
43679 +       kfree(tb);
43682  static void ath11k_fils_discovery_event(struct ath11k_base *ab,
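
The deleted ath11k_pull_pdev_temp_ev() had two related defects: it assigned the parsed TLV pointer to its own by-value "ev" parameter, so the caller's zero-initialized event struct was never filled, and the pointer it did obtain referenced the tb[] allocation that the helper freed before returning. The rewrite above parses in the caller and keeps tb alive across every ev access, freeing it on each exit path. The lifetime rule in miniature (parse_tlv, TAG_TEMP, struct temp_event and consume() are hypothetical):

void handle_event(const void *data, size_t len)
{
        const void **tb = parse_tlv(data, len); /* kmalloc'ed table */
        const struct temp_event *ev;

        if (IS_ERR(tb))
                return;

        ev = tb[TAG_TEMP];              /* ev points INTO tb's allocation */
        if (ev)
                consume(ev->temp);      /* use ev only while tb is live */

        kfree(tb);                      /* ev dangles from here on */
}
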
43683 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
43684 index db0c6fa9c9dc..ff61ae34ecdf 100644
43685 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
43686 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
43687 @@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
43688         if (unlikely(r)) {
43689                 ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
43690                         reg_offset, r);
43691 -               return -EIO;
43692 +               return -1;
43693         }
43695         return be32_to_cpu(val);
43696 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
43697 index 5abc2a5526ec..2ca3b86714a9 100644
43698 --- a/drivers/net/wireless/ath/ath9k/hw.c
43699 +++ b/drivers/net/wireless/ath/ath9k/hw.c
43700 @@ -286,7 +286,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
43702         srev = REG_READ(ah, AR_SREV);
43704 -       if (srev == -EIO) {
43705 +       if (srev == -1) {
43706                 ath_err(ath9k_hw_common(ah),
43707                         "Failed to read SREV register");
43708                 return false;
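
These two ath9k hunks agree on one in-band failure sentinel for register reads: ath9k_regread() returns u32, so an error has to be encoded in the value itself, and 0xffffffff (-1) is also what a read from dead or unplugged hardware yields, whereas (u32)-EIO is 0xfffffffb, a value the SREV check has no particular reason to expect. After the change, driver-detected failures and genuine bus failures look the same to the caller. The two's-complement view:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
        printf("(u32)-EIO = %#010" PRIx32 "\n", (uint32_t)-5);  /* 0xfffffffb */
        printf("(u32)-1   = %#010" PRIx32 "\n", (uint32_t)-1);  /* 0xffffffff */
        /* all-ones is also what a dead bus returns, so -1 doubles as
         * the hardware's own failure pattern */
        return 0;
}
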
43709 diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
43710 index 60db38c38960..fd37d4d2983b 100644
43711 --- a/drivers/net/wireless/cisco/airo.c
43712 +++ b/drivers/net/wireless/cisco/airo.c
43713 @@ -3817,6 +3817,68 @@ static inline void set_auth_type(struct airo_info *local, int auth_type)
43714                 local->last_auth = auth_type;
43717 +static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int lock)
43719 +       int i, status;
43720 +       /* large variables, so don't inline this function,
43721 +        * maybe change to kmalloc
43722 +        */
43723 +       tdsRssiRid rssi_rid;
43724 +       CapabilityRid cap_rid;
43726 +       kfree(ai->SSID);
43727 +       ai->SSID = NULL;
43728 +       // general configuration (read/modify/write)
43729 +       status = readConfigRid(ai, lock);
43730 +       if (status != SUCCESS) return ERROR;
43732 +       status = readCapabilityRid(ai, &cap_rid, lock);
43733 +       if (status != SUCCESS) return ERROR;
43735 +       status = PC4500_readrid(ai, RID_RSSI, &rssi_rid, sizeof(rssi_rid), lock);
43736 +       if (status == SUCCESS) {
43737 +               if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
43738 +                       memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
43739 +       }
43740 +       else {
43741 +               kfree(ai->rssi);
43742 +               ai->rssi = NULL;
43743 +               if (cap_rid.softCap & cpu_to_le16(8))
43744 +                       ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
43745 +               else
43746 +                       airo_print_warn(ai->dev->name, "unknown received signal "
43747 +                                       "level scale");
43748 +       }
43749 +       ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
43750 +       set_auth_type(ai, AUTH_OPEN);
43751 +       ai->config.modulation = MOD_CCK;
43753 +       if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
43754 +           (cap_rid.extSoftCap & cpu_to_le16(1)) &&
43755 +           micsetup(ai) == SUCCESS) {
43756 +               ai->config.opmode |= MODE_MIC;
43757 +               set_bit(FLAG_MIC_CAPABLE, &ai->flags);
43758 +       }
43760 +       /* Save off the MAC */
43761 +       for (i = 0; i < ETH_ALEN; i++) {
43762 +               mac[i] = ai->config.macAddr[i];
43763 +       }
43765 +       /* Check to see if there are any insmod configured
43766 +          rates to add */
43767 +       if (rates[0]) {
43768 +               memset(ai->config.rates, 0, sizeof(ai->config.rates));
43769 +               for (i = 0; i < 8 && rates[i]; i++) {
43770 +                       ai->config.rates[i] = rates[i];
43771 +               }
43772 +       }
43773 +       set_bit (FLAG_COMMIT, &ai->flags);
43775 +       return SUCCESS;
43779  static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
43781         Cmd cmd;
43782 @@ -3863,58 +3925,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
43783         if (lock)
43784                 up(&ai->sem);
43785         if (ai->config.len == 0) {
43786 -               int i;
43787 -               tdsRssiRid rssi_rid;
43788 -               CapabilityRid cap_rid;
43790 -               kfree(ai->SSID);
43791 -               ai->SSID = NULL;
43792 -               // general configuration (read/modify/write)
43793 -               status = readConfigRid(ai, lock);
43794 -               if (status != SUCCESS) return ERROR;
43796 -               status = readCapabilityRid(ai, &cap_rid, lock);
43797 -               if (status != SUCCESS) return ERROR;
43799 -               status = PC4500_readrid(ai, RID_RSSI,&rssi_rid, sizeof(rssi_rid), lock);
43800 -               if (status == SUCCESS) {
43801 -                       if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
43802 -                               memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
43803 -               }
43804 -               else {
43805 -                       kfree(ai->rssi);
43806 -                       ai->rssi = NULL;
43807 -                       if (cap_rid.softCap & cpu_to_le16(8))
43808 -                               ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
43809 -                       else
43810 -                               airo_print_warn(ai->dev->name, "unknown received signal "
43811 -                                               "level scale");
43812 -               }
43813 -               ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
43814 -               set_auth_type(ai, AUTH_OPEN);
43815 -               ai->config.modulation = MOD_CCK;
43817 -               if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
43818 -                   (cap_rid.extSoftCap & cpu_to_le16(1)) &&
43819 -                   micsetup(ai) == SUCCESS) {
43820 -                       ai->config.opmode |= MODE_MIC;
43821 -                       set_bit(FLAG_MIC_CAPABLE, &ai->flags);
43822 -               }
43824 -               /* Save off the MAC */
43825 -               for (i = 0; i < ETH_ALEN; i++) {
43826 -                       mac[i] = ai->config.macAddr[i];
43827 -               }
43829 -               /* Check to see if there are any insmod configured
43830 -                  rates to add */
43831 -               if (rates[0]) {
43832 -                       memset(ai->config.rates, 0, sizeof(ai->config.rates));
43833 -                       for (i = 0; i < 8 && rates[i]; i++) {
43834 -                               ai->config.rates[i] = rates[i];
43835 -                       }
43836 -               }
43837 -               set_bit (FLAG_COMMIT, &ai->flags);
43838 +               status = airo_readconfig(ai, mac, lock);
43839 +               if (status != SUCCESS)
43840 +                       return ERROR;
43841         }
43843         /* Setup the SSIDs if present */
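
Hoisting the config.len == 0 branch into airo_readconfig() above is a stack fix, not a behavioural change: tdsRssiRid and CapabilityRid together occupy several hundred bytes, and keeping them in setup_card()'s frame is exactly what noinline_for_stack exists to avoid; the large locals now live only for the duration of the helper call and cannot be inlined back. The shape of the refactor (struct ctx and the helpers are hypothetical):

static int noinline_for_stack read_big_config(struct ctx *c)
{
        u8 scratch[512];        /* large buffer confined to this frame */

        /* ... fill scratch from hardware, keep only what c needs ... */
        return 0;
}

static int setup(struct ctx *c)
{
        /* small frame; the 512 bytes are gone once the call returns */
        return read_big_config(c);
}
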
43844 diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
43845 index 23fbddd0c1f8..534ab3b894e2 100644
43846 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
43847 +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
43848 @@ -815,7 +815,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
43849          * doesn't seem to have as many firmware restart cycles...
43850          *
43851          * As a test, we're sticking in a 1/100s delay here */
43852 -       schedule_timeout_uninterruptible(msecs_to_jiffies(10));
43853 +       schedule_msec_hrtimeout_uninterruptible((10));
43855         return 0;
43857 @@ -1266,7 +1266,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
43858         IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
43859         i = 5000;
43860         do {
43861 -               schedule_timeout_uninterruptible(msecs_to_jiffies(40));
43862 +               schedule_msec_hrtimeout_uninterruptible((40));
43863                 /* Todo... wait for sync command ... */
43865                 read_register(priv->net_dev, IPW_REG_INTA, &inta);
43866 diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
43867 index a0cf78c418ac..903de34028ef 100644
43868 --- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
43869 +++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
43870 @@ -633,8 +633,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
43871         }
43873         if (ext->alg != IW_ENCODE_ALG_NONE) {
43874 -               memcpy(sec.keys[idx], ext->key, ext->key_len);
43875 -               sec.key_sizes[idx] = ext->key_len;
43876 +               int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN);
43878 +               memcpy(sec.keys[idx], ext->key, key_len);
43879 +               sec.key_sizes[idx] = key_len;
43880                 sec.flags |= (1 << idx);
43881                 if (ext->alg == IW_ENCODE_ALG_WEP) {
43882                         sec.encode_alg[idx] = SEC_ALG_WEP;
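
The libipw change above is a buffer overflow fix: ext->key_len arrives from userspace through the wireless extensions ioctl, while sec.keys[idx] is a fixed SCM_KEY_LEN-byte slot, so an oversized length let memcpy() write past the array. clamp_val() bounds the copy to the destination first. The general rule for user-supplied lengths, as a sketch (KEY_MAX stands in for SCM_KEY_LEN):

#define KEY_MAX 32

struct sec_sketch {
        u8 key[KEY_MAX];
        u8 key_len;
};

static void set_key(struct sec_sketch *s, const u8 *key, size_t user_len)
{
        size_t len = clamp_val(user_len, 0, KEY_MAX);   /* bound first */

        memcpy(s->key, key, len);       /* now provably within s->key */
        s->key_len = len;
}
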
43883 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
43884 index 579bc81cc0ae..4cd8c39cc3e9 100644
43885 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
43886 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
43887 @@ -1,6 +1,6 @@
43888  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
43889  /*
43890 - * Copyright (C) 2018-2020 Intel Corporation
43891 + * Copyright (C) 2018-2021 Intel Corporation
43892   */
43893  #include <linux/firmware.h>
43894  #include "iwl-drv.h"
43895 @@ -426,7 +426,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
43896         const struct firmware *fw;
43897         int res;
43899 -       if (!iwlwifi_mod_params.enable_ini)
43900 +       if (!iwlwifi_mod_params.enable_ini ||
43901 +           trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
43902                 return;
43904         res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
43905 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
43906 index 60e0db4a5e20..9236f9106826 100644
43907 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
43908 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
43909 @@ -2,7 +2,7 @@
43910  /*
43911   * Copyright (C) 2015 Intel Mobile Communications GmbH
43912   * Copyright (C) 2016-2017 Intel Deutschland GmbH
43913 - * Copyright (C) 2019-2020 Intel Corporation
43914 + * Copyright (C) 2019-2021 Intel Corporation
43915   */
43916  #include <linux/kernel.h>
43917  #include <linux/bsearch.h>
43918 @@ -21,7 +21,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
43919                                   const struct iwl_cfg_trans_params *cfg_trans)
43921         struct iwl_trans *trans;
43922 -       int txcmd_size, txcmd_align;
43923  #ifdef CONFIG_LOCKDEP
43924         static struct lock_class_key __key;
43925  #endif
43926 @@ -31,10 +30,40 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
43927                 return NULL;
43929         trans->trans_cfg = cfg_trans;
43930 -       if (!cfg_trans->gen2) {
43932 +#ifdef CONFIG_LOCKDEP
43933 +       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
43934 +                        &__key, 0);
43935 +#endif
43937 +       trans->dev = dev;
43938 +       trans->ops = ops;
43939 +       trans->num_rx_queues = 1;
43941 +       WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
43943 +       if (trans->trans_cfg->use_tfh) {
43944 +               trans->txqs.tfd.addr_size = 64;
43945 +               trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
43946 +               trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
43947 +       } else {
43948 +               trans->txqs.tfd.addr_size = 36;
43949 +               trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
43950 +               trans->txqs.tfd.size = sizeof(struct iwl_tfd);
43951 +       }
43952 +       trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
43954 +       return trans;
43957 +int iwl_trans_init(struct iwl_trans *trans)
43959 +       int txcmd_size, txcmd_align;
43961 +       if (!trans->trans_cfg->gen2) {
43962                 txcmd_size = sizeof(struct iwl_tx_cmd);
43963                 txcmd_align = sizeof(void *);
43964 -       } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
43965 +       } else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
43966                 txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
43967                 txcmd_align = 64;
43968         } else {
43969 @@ -46,17 +75,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
43970         txcmd_size += 36; /* biggest possible 802.11 header */
43972         /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
43973 -       if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
43974 -               return ERR_PTR(-EINVAL);
43976 -#ifdef CONFIG_LOCKDEP
43977 -       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
43978 -                        &__key, 0);
43979 -#endif
43981 -       trans->dev = dev;
43982 -       trans->ops = ops;
43983 -       trans->num_rx_queues = 1;
43984 +       if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
43985 +               return -EINVAL;
43987         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
43988                 trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
43989 @@ -68,23 +88,16 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
43990          * allocate here.
43991          */
43992         if (trans->trans_cfg->gen2) {
43993 -               trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", dev,
43994 +               trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev,
43995                                                        trans->txqs.bc_tbl_size,
43996                                                        256, 0);
43997                 if (!trans->txqs.bc_pool)
43998 -                       return NULL;
43999 +                       return -ENOMEM;
44000         }
44002 -       if (trans->trans_cfg->use_tfh) {
44003 -               trans->txqs.tfd.addr_size = 64;
44004 -               trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
44005 -               trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
44006 -       } else {
44007 -               trans->txqs.tfd.addr_size = 36;
44008 -               trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
44009 -               trans->txqs.tfd.size = sizeof(struct iwl_tfd);
44010 -       }
44011 -       trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
44012 +       /* Some things must not change even if the config does */
44013 +       WARN_ON(trans->txqs.tfd.addr_size !=
44014 +               (trans->trans_cfg->use_tfh ? 64 : 36));
44016         snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
44017                  "iwl_cmd_pool:%s", dev_name(trans->dev));
44018 @@ -93,35 +106,35 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
44019                                   txcmd_size, txcmd_align,
44020                                   SLAB_HWCACHE_ALIGN, NULL);
44021         if (!trans->dev_cmd_pool)
44022 -               return NULL;
44024 -       WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
44025 +               return -ENOMEM;
44027         trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
44028         if (!trans->txqs.tso_hdr_page) {
44029                 kmem_cache_destroy(trans->dev_cmd_pool);
44030 -               return NULL;
44031 +               return -ENOMEM;
44032         }
44034         /* Initialize the wait queue for commands */
44035         init_waitqueue_head(&trans->wait_command_queue);
44037 -       return trans;
44038 +       return 0;
44041  void iwl_trans_free(struct iwl_trans *trans)
44043         int i;
44045 -       for_each_possible_cpu(i) {
44046 -               struct iwl_tso_hdr_page *p =
44047 -                       per_cpu_ptr(trans->txqs.tso_hdr_page, i);
44048 +       if (trans->txqs.tso_hdr_page) {
44049 +               for_each_possible_cpu(i) {
44050 +                       struct iwl_tso_hdr_page *p =
44051 +                               per_cpu_ptr(trans->txqs.tso_hdr_page, i);
44053 -               if (p->page)
44054 -                       __free_page(p->page);
44055 -       }
44056 +                       if (p && p->page)
44057 +                               __free_page(p->page);
44058 +               }
44060 -       free_percpu(trans->txqs.tso_hdr_page);
44061 +               free_percpu(trans->txqs.tso_hdr_page);
44062 +       }
44064         kmem_cache_destroy(trans->dev_cmd_pool);
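
Splitting iwl_trans_alloc() is preparation for the config-resolution changes in pcie/drv.c below: with some PCI table entries now carrying only trans parameters, the final config may not be known until partway through probe, so allocation keeps just the cfg-independent setup (lockdep map, ops, TFD geometry) and the new iwl_trans_init() does everything that depends on the resolved config (command slab sizing, DMA pools), returning an errno instead of NULL. iwl_trans_free() gains NULL checks so it can unwind a trans whose init never ran. A two-phase constructor in outline (struct obj and struct cfg are hypothetical):

struct obj *obj_alloc(struct device *dev)
{
        struct obj *o = devm_kzalloc(dev, sizeof(*o), GFP_KERNEL);

        if (!o)
                return NULL;
        o->dev = dev;   /* invariants only; nothing cfg-dependent yet */
        return o;
}

int obj_init(struct obj *o, const struct cfg *cfg)
{
        /* phase two: sized by information learned after obj_alloc() */
        o->pool = dmam_pool_create("obj:pool", o->dev, cfg->buf_size, 256, 0);
        if (!o->pool)
                return -ENOMEM; /* errno so probe can say why it failed */
        return 0;
}
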
44066 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
44067 index 4a5822c1be13..3e0df6fbb642 100644
44068 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
44069 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
44070 @@ -1438,6 +1438,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
44071                           struct device *dev,
44072                           const struct iwl_trans_ops *ops,
44073                           const struct iwl_cfg_trans_params *cfg_trans);
44074 +int iwl_trans_init(struct iwl_trans *trans);
44075  void iwl_trans_free(struct iwl_trans *trans);
44077  /*****************************************************
44078 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
44079 index 8772b65c9dab..2d58cb969918 100644
44080 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
44081 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
44082 @@ -1,7 +1,7 @@
44083  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
44084  /*
44085   * Copyright (C) 2017 Intel Deutschland GmbH
44086 - * Copyright (C) 2018-2020 Intel Corporation
44087 + * Copyright (C) 2018-2021 Intel Corporation
44088   */
44089  #include "rs.h"
44090  #include "fw-api.h"
44091 @@ -72,19 +72,15 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
44092         bool vht_ena = vht_cap->vht_supported;
44093         u16 flags = 0;
44095 +       /* get STBC flags */
44096         if (mvm->cfg->ht_params->stbc &&
44097             (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
44098 -               if (he_cap->has_he) {
44099 -                       if (he_cap->he_cap_elem.phy_cap_info[2] &
44100 -                           IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
44101 -                               flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
44103 -                       if (he_cap->he_cap_elem.phy_cap_info[7] &
44104 -                           IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
44105 -                               flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
44106 -               } else if ((ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) ||
44107 -                          (vht_ena &&
44108 -                           (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))
44109 +               if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
44110 +                                     IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
44111 +                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
44112 +               else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
44113 +                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
44114 +               else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
44115                         flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
44116         }
44118 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
44119 index 558a0b2ef0fc..66faf7914bd8 100644
44120 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
44121 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
44122 @@ -17,10 +17,20 @@
44123  #include "iwl-prph.h"
44124  #include "internal.h"
44126 +#define TRANS_CFG_MARKER BIT(0)
44127 +#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg),  \
44128 +                                                        struct _struct)
44129 +extern int _invalid_type;
44130 +#define _TRANS_CFG_MARKER(cfg)                                         \
44131 +       (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params),        \
44132 +                              TRANS_CFG_MARKER,                        \
44133 +        __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type)))
44134 +#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg))
44136  #define IWL_PCI_DEVICE(dev, subdev, cfg) \
44137         .vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
44138         .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
44139 -       .driver_data = (kernel_ulong_t)&(cfg)
44140 +       .driver_data = _ASSIGN_CFG(cfg)
44142  /* Hardware specific file defines the PCI IDs table for that hardware module */
44143  static const struct pci_device_id iwl_hw_card_ids[] = {
44144 @@ -1075,19 +1085,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
44146  static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
44148 -       const struct iwl_cfg_trans_params *trans =
44149 -               (struct iwl_cfg_trans_params *)(ent->driver_data);
44150 +       const struct iwl_cfg_trans_params *trans;
44151         const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
44152         struct iwl_trans *iwl_trans;
44153         struct iwl_trans_pcie *trans_pcie;
44154         int i, ret;
44155 +       const struct iwl_cfg *cfg;
44157 +       trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
44159         /*
44160          * This is needed for backwards compatibility with the old
44161          * tables, so we don't need to change all the config structs
44162          * at the same time.  The cfg is used to compare with the old
44163          * full cfg structs.
44164          */
44165 -       const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
44166 +       cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
44168         /* make sure trans is the first element in iwl_cfg */
44169         BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
44170 @@ -1202,11 +1215,19 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
44172  #endif
44173         /*
44174 -        * If we didn't set the cfg yet, assume the trans is actually
44175 -        * a full cfg from the old tables.
44176 +        * If we didn't set the cfg yet, the PCI ID table entry should have
44177 +        * been a full config - if yes, use it, otherwise fail.
44178          */
44179 -       if (!iwl_trans->cfg)
44180 +       if (!iwl_trans->cfg) {
44181 +               if (ent->driver_data & TRANS_CFG_MARKER) {
44182 +                       pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
44183 +                              pdev->device, pdev->subsystem_device,
44184 +                              iwl_trans->hw_rev, iwl_trans->hw_rf_id);
44185 +                       ret = -EINVAL;
44186 +                       goto out_free_trans;
44187 +               }
44188                 iwl_trans->cfg = cfg;
44189 +       }
44191         /* if we don't have a name yet, copy name from the old cfg */
44192         if (!iwl_trans->name)
44193 @@ -1222,6 +1243,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
44194                 trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
44195         }
44197 +       ret = iwl_trans_init(iwl_trans);
44198 +       if (ret)
44199 +               goto out_free_trans;
44201         pci_set_drvdata(pdev, iwl_trans);
44202         iwl_trans->drv = iwl_drv_start(iwl_trans);
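
TRANS_CFG_MARKER above is pointer tagging with a compile-time type check: driver_data historically held a full struct iwl_cfg pointer, newer entries hold only a struct iwl_cfg_trans_params pointer, and since both structures are word-aligned, bit 0 is free to record which kind the entry is. Decoding with "& ~TRANS_CFG_MARKER" is valid for both cases because trans is the first member of struct iwl_cfg (the BUILD_BUG_ON enforces offset zero). The __builtin_types_compatible_p / __builtin_choose_expr pair makes the tag a constant chosen by type, and any other type references the never-defined _invalid_type and fails to link. A standalone demo of the same trick (all names hypothetical):

#include <stdio.h>

struct full_cfg  { int a; };
struct trans_cfg { int b; };

#define MARKER 1UL
#define IS_A(x, T) __builtin_types_compatible_p(typeof(x), T)
extern int _bad_type;   /* never defined: wrong types fail to link */
#define TAG(x)                                                          \
        (__builtin_choose_expr(IS_A(x, struct trans_cfg), MARKER,       \
         __builtin_choose_expr(IS_A(x, struct full_cfg), 0UL, _bad_type)) \
         + (unsigned long)&(x))

static struct full_cfg  old_entry = { 1 };
static struct trans_cfg new_entry = { 2 };

int main(void)
{
        unsigned long table[] = { TAG(old_entry), TAG(new_entry) };

        for (int i = 0; i < 2; i++) {
                void *p = (void *)(table[i] & ~MARKER); /* strip the tag */

                printf("entry %d: %s at %p\n", i,
                       table[i] & MARKER ? "trans_cfg" : "full_cfg", p);
        }
        return 0;
}
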
44204 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
44205 index 94ffc1ae484d..af9412bd697e 100644
44206 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
44207 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
44208 @@ -1,7 +1,7 @@
44209  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
44210  /*
44211   * Copyright (C) 2017 Intel Deutschland GmbH
44212 - * Copyright (C) 2018-2020 Intel Corporation
44213 + * Copyright (C) 2018-2021 Intel Corporation
44214   */
44215  #include "iwl-trans.h"
44216  #include "iwl-prph.h"
44217 @@ -143,7 +143,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
44218         if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
44219                 IWL_DEBUG_INFO(trans,
44220                                "DEVICE_ENABLED bit was set and is now cleared\n");
44221 -               iwl_txq_gen2_tx_stop(trans);
44222 +               iwl_txq_gen2_tx_free(trans);
44223                 iwl_pcie_rx_stop(trans);
44224         }
44226 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
44227 index 4456abb9a074..34bde8c87324 100644
44228 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
44229 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
44230 @@ -40,6 +40,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
44231         const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
44232         u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
44233         struct iwl_tfh_tfd *tfd;
44234 +       unsigned long flags;
44236         copy_size = sizeof(struct iwl_cmd_header_wide);
44237         cmd_size = sizeof(struct iwl_cmd_header_wide);
44238 @@ -108,14 +109,14 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
44239                 goto free_dup_buf;
44240         }
44242 -       spin_lock_bh(&txq->lock);
44243 +       spin_lock_irqsave(&txq->lock, flags);
44245         idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
44246         tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
44247         memset(tfd, 0, sizeof(*tfd));
44249         if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
44250 -               spin_unlock_bh(&txq->lock);
44251 +               spin_unlock_irqrestore(&txq->lock, flags);
44253                 IWL_ERR(trans, "No space in command queue\n");
44254                 iwl_op_mode_cmd_queue_full(trans->op_mode);
44255 @@ -250,7 +251,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
44256         spin_unlock(&trans_pcie->reg_lock);
44258  out:
44259 -       spin_unlock_bh(&txq->lock);
44260 +       spin_unlock_irqrestore(&txq->lock, flags);
44261  free_dup_buf:
44262         if (idx < 0)
44263                 kfree(dup_buf);
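
The _bh to _irqsave conversion in iwl_pcie_gen2_enqueue_hcmd() above is about calling context: the usual motive for this change is a caller that already runs with hard interrupts disabled, where spin_unlock_bh() must not be used (it may run pending softirqs and re-enable what the caller disabled), whereas the irqsave/irqrestore pair records and restores whatever interrupt state the caller had. The safe-anywhere form (struct q_sketch is hypothetical):

struct q_sketch {
        spinlock_t lock;
};

static void touch_queue(struct q_sketch *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);      /* legal in any context */
        /* ... manipulate the queue ... */
        spin_unlock_irqrestore(&q->lock, flags); /* caller's IRQ state back */
}
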
44264 diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
44265 index 833f43d1ca7a..810dcb3df242 100644
44266 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
44267 +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
44268 @@ -13,30 +13,6 @@
44269  #include "iwl-scd.h"
44270  #include <linux/dmapool.h>
44273 - * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
44274 - */
44275 -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
44277 -       int txq_id;
44279 -       /*
44280 -        * This function can be called before the op_mode disabled the
44281 -        * queues. This happens when we have an rfkill interrupt.
44282 -        * Since we stop Tx altogether - mark the queues as stopped.
44283 -        */
44284 -       memset(trans->txqs.queue_stopped, 0,
44285 -              sizeof(trans->txqs.queue_stopped));
44286 -       memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
44288 -       /* Unmap DMA from host system and free skb's */
44289 -       for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
44290 -               if (!trans->txqs.txq[txq_id])
44291 -                       continue;
44292 -               iwl_txq_gen2_unmap(trans, txq_id);
44293 -       }
44296  /*
44297   * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
44298   */
44299 @@ -1189,6 +1165,12 @@ static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
44300                 goto error_free_resp;
44301         }
44303 +       if (WARN_ONCE(trans->txqs.txq[qid],
44304 +                     "queue %d already allocated\n", qid)) {
44305 +               ret = -EIO;
44306 +               goto error_free_resp;
44307 +       }
44309         txq->id = qid;
44310         trans->txqs.txq[qid] = txq;
44311         wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
44312 diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
44313 index af1dbdf5617a..20efc62acf13 100644
44314 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
44315 +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
44316 @@ -1,6 +1,6 @@
44317  /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
44318  /*
44319 - * Copyright (C) 2020 Intel Corporation
44320 + * Copyright (C) 2020-2021 Intel Corporation
44321   */
44322  #ifndef __iwl_trans_queue_tx_h__
44323  #define __iwl_trans_queue_tx_h__
44324 @@ -123,7 +123,6 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
44325  void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
44326  void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
44327  void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
44328 -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
44329  void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
44330  int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
44331                  bool cmd_queue);
44332 diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
44333 index c9f8c056aa51..84b32a5f01ee 100644
44334 --- a/drivers/net/wireless/marvell/mwl8k.c
44335 +++ b/drivers/net/wireless/marvell/mwl8k.c
44336 @@ -1473,6 +1473,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
44337         if (txq->skb == NULL) {
44338                 dma_free_coherent(&priv->pdev->dev, size, txq->txd,
44339                                   txq->txd_dma);
44340 +               txq->txd = NULL;
44341                 return -ENOMEM;
44342         }
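
The mwl8k one-liner above is a double-free guard: when the skb array allocation fails, mwl8k_txq_init() frees the coherent descriptor block but used to leave txq->txd pointing at it, and the later unwind path would free the same memory again. NULLing the pointer right after the free makes teardown idempotent (struct ring_sketch is hypothetical):

struct ring_sketch {
        void *desc;
        dma_addr_t desc_dma;
};

static void release_ring(struct device *dev, struct ring_sketch *r, size_t sz)
{
        if (r->desc) {
                dma_free_coherent(dev, sz, r->desc, r->desc_dma);
                r->desc = NULL; /* a second call becomes a no-op */
        }
}
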
44344 diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
44345 index 2f27c43ad76d..7196fa9047e6 100644
44346 --- a/drivers/net/wireless/mediatek/mt76/dma.c
44347 +++ b/drivers/net/wireless/mediatek/mt76/dma.c
44348 @@ -309,7 +309,7 @@ static int
44349  mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
44350                           struct sk_buff *skb, u32 tx_info)
44352 -       struct mt76_queue_buf buf;
44353 +       struct mt76_queue_buf buf = {};
44354         dma_addr_t addr;
44356         if (q->queued + 1 >= q->ndesc - 1)
44357 diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
44358 index 8bf45497cfca..36a430f09f64 100644
44359 --- a/drivers/net/wireless/mediatek/mt76/mt76.h
44360 +++ b/drivers/net/wireless/mediatek/mt76/mt76.h
44361 @@ -222,6 +222,7 @@ struct mt76_wcid {
44363         u16 idx;
44364         u8 hw_key_idx;
44365 +       u8 hw_key_idx2;
44367         u8 sta:1;
44368         u8 ext_phy:1;
44369 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
44370 index 2eab23898c77..6dbaaf95ee38 100644
44371 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
44372 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
44373 @@ -86,6 +86,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
44374         switch (val) {
44375         case 0x7615:
44376         case 0x7622:
44377 +       case 0x7663:
44378                 return 0;
44379         default:
44380                 return -EINVAL;
44381 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
44382 index 59fdd0fc2ad4..8dccb589b756 100644
44383 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
44384 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
44385 @@ -690,7 +690,7 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
44387         int i;
44389 -       for (i = 1; i < txp->nbuf; i++)
44390 +       for (i = 0; i < txp->nbuf; i++)
44391                 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
44392                                  le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
44394 @@ -966,6 +966,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
44395         struct mt7615_dev *dev = phy->dev;
44396         struct mt7615_rate_desc rd;
44397         u32 w5, w27, addr;
44398 +       u16 idx = sta->vif->mt76.omac_idx;
44400         if (!mt76_is_mmio(&dev->mt76)) {
44401                 mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
44402 @@ -1017,7 +1018,10 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
44404         mt76_wr(dev, addr + 27 * 4, w27);
44406 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
44407 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
44408 +       addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
44410 +       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
44411         sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
44412         sta->rate_set_tsf |= rd.rateset;
44414 @@ -1033,7 +1037,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
44415  static int
44416  mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44417                            struct ieee80211_key_conf *key,
44418 -                          enum mt7615_cipher_type cipher,
44419 +                          enum mt7615_cipher_type cipher, u16 cipher_mask,
44420                            enum set_key_cmd cmd)
44422         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
44423 @@ -1050,22 +1054,22 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44424                         memcpy(data + 16, key->key + 24, 8);
44425                         memcpy(data + 24, key->key + 16, 8);
44426                 } else {
44427 -                       if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
44428 -                               memmove(data + 16, data, 16);
44429 -                       if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
44430 +                       if (cipher_mask == BIT(cipher))
44431                                 memcpy(data, key->key, key->keylen);
44432 -                       else if (cipher == MT_CIPHER_BIP_CMAC_128)
44433 +                       else if (cipher != MT_CIPHER_BIP_CMAC_128)
44434 +                               memcpy(data, key->key, 16);
44435 +                       if (cipher == MT_CIPHER_BIP_CMAC_128)
44436                                 memcpy(data + 16, key->key, 16);
44437                 }
44438         } else {
44439 -               if (wcid->cipher & ~BIT(cipher)) {
44440 -                       if (cipher != MT_CIPHER_BIP_CMAC_128)
44441 -                               memmove(data, data + 16, 16);
44442 +               if (cipher == MT_CIPHER_BIP_CMAC_128)
44443                         memset(data + 16, 0, 16);
44444 -               } else {
44445 +               else if (cipher_mask)
44446 +                       memset(data, 0, 16);
44447 +               if (!cipher_mask)
44448                         memset(data, 0, sizeof(data));
44449 -               }
44450         }
44452         mt76_wr_copy(dev, addr, data, sizeof(data));
44454         return 0;
44455 @@ -1073,7 +1077,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44457  static int
44458  mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44459 -                         enum mt7615_cipher_type cipher,
44460 +                         enum mt7615_cipher_type cipher, u16 cipher_mask,
44461                           int keyidx, enum set_key_cmd cmd)
44463         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
44464 @@ -1083,20 +1087,23 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44466         w0 = mt76_rr(dev, addr);
44467         w1 = mt76_rr(dev, addr + 4);
44468 -       if (cmd == SET_KEY) {
44469 -               w0 |= MT_WTBL_W0_RX_KEY_VALID |
44470 -                     FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
44471 -                                cipher == MT_CIPHER_BIP_CMAC_128);
44472 -               if (cipher != MT_CIPHER_BIP_CMAC_128 ||
44473 -                   !wcid->cipher)
44474 -                       w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
44475 -       }  else {
44476 -               if (!(wcid->cipher & ~BIT(cipher)))
44477 -                       w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
44478 -                               MT_WTBL_W0_KEY_IDX);
44479 -               if (cipher == MT_CIPHER_BIP_CMAC_128)
44480 -                       w0 &= ~MT_WTBL_W0_RX_IK_VALID;
44482 +       if (cipher_mask)
44483 +               w0 |= MT_WTBL_W0_RX_KEY_VALID;
44484 +       else
44485 +               w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX);
44486 +       if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128))
44487 +               w0 |= MT_WTBL_W0_RX_IK_VALID;
44488 +       else
44489 +               w0 &= ~MT_WTBL_W0_RX_IK_VALID;
44491 +       if (cmd == SET_KEY &&
44492 +           (cipher != MT_CIPHER_BIP_CMAC_128 ||
44493 +            cipher_mask == BIT(cipher))) {
44494 +               w0 &= ~MT_WTBL_W0_KEY_IDX;
44495 +               w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
44496         }
44498         mt76_wr(dev, MT_WTBL_RICR0, w0);
44499         mt76_wr(dev, MT_WTBL_RICR1, w1);
44501 @@ -1109,24 +1116,25 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44503  static void
44504  mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
44505 -                             enum mt7615_cipher_type cipher,
44506 +                             enum mt7615_cipher_type cipher, u16 cipher_mask,
44507                               enum set_key_cmd cmd)
44509         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
44511 -       if (cmd == SET_KEY) {
44512 -               if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
44513 -                       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
44514 -                                FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
44515 -       } else {
44516 -               if (cipher != MT_CIPHER_BIP_CMAC_128 &&
44517 -                   wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
44518 -                       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
44519 -                                FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
44520 -                                           MT_CIPHER_BIP_CMAC_128));
44521 -               else if (!(wcid->cipher & ~BIT(cipher)))
44522 -                       mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
44523 +       if (!cipher_mask) {
44524 +               mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
44525 +               return;
44526         }
44528 +       if (cmd != SET_KEY)
44529 +               return;
44531 +       if (cipher == MT_CIPHER_BIP_CMAC_128 &&
44532 +           cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
44533 +               return;
44535 +       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
44536 +                FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
44539  int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
44540 @@ -1135,25 +1143,30 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
44541                               enum set_key_cmd cmd)
44543         enum mt7615_cipher_type cipher;
44544 +       u16 cipher_mask = wcid->cipher;
44545         int err;
44547         cipher = mt7615_mac_get_cipher(key->cipher);
44548         if (cipher == MT_CIPHER_NONE)
44549                 return -EOPNOTSUPP;
44551 -       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
44552 -       err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
44553 +       if (cmd == SET_KEY)
44554 +               cipher_mask |= BIT(cipher);
44555 +       else
44556 +               cipher_mask &= ~BIT(cipher);
44558 +       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
44559 +       err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
44560 +                                        cmd);
44561         if (err < 0)
44562                 return err;
44564 -       err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd);
44565 +       err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
44566 +                                       key->keyidx, cmd);
44567         if (err < 0)
44568                 return err;
44570 -       if (cmd == SET_KEY)
44571 -               wcid->cipher |= BIT(cipher);
44572 -       else
44573 -               wcid->cipher &= ~BIT(cipher);
44574 +       wcid->cipher = cipher_mask;
44576         return 0;
44578 @@ -1821,10 +1834,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
44579         int i, aggr;
44580         u32 val, val2;
44582 -       memset(mib, 0, sizeof(*mib));
44584 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
44585 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
44586 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
44587 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
44589         val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
44590                              MT_MIB_AMPDU_MPDU_COUNT);
44591 @@ -1837,24 +1848,16 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
44592         aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
44593         for (i = 0; i < 4; i++) {
44594                 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
44596 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
44597 -               if (val2 > mib->ack_fail_cnt)
44598 -                       mib->ack_fail_cnt = val2;
44600 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
44601 -               if (val2 > mib->ba_miss_cnt)
44602 -                       mib->ba_miss_cnt = val2;
44603 +               mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
44604 +               mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
44605 +                                              val);
44607                 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
44608 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
44609 -               if (val2 > mib->rts_retries_cnt) {
44610 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
44611 -                       mib->rts_retries_cnt = val2;
44612 -               }
44613 +               mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
44614 +               mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
44615 +                                                 val);
44617                 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
44619                 dev->mt76.aggr_stats[aggr++] += val & 0xffff;
44620                 dev->mt76.aggr_stats[aggr++] += val >> 16;
44621         }
44622 @@ -1976,15 +1979,17 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
44623         mt76_clear(dev, MT_WPDMA_GLO_CFG,
44624                    MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
44625                    MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
44627         usleep_range(1000, 2000);
44629 -       mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
44630         for (i = 0; i < __MT_TXQ_MAX; i++)
44631                 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
44633 -       mt76_for_each_q_rx(&dev->mt76, i) {
44634 +       for (i = 0; i < __MT_MCUQ_MAX; i++)
44635 +               mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
44637 +       mt76_for_each_q_rx(&dev->mt76, i)
44638                 mt76_queue_rx_reset(dev, i);
44639 -       }
44641         mt76_set(dev, MT_WPDMA_GLO_CFG,
44642                  MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
44643 @@ -2000,8 +2005,12 @@ void mt7615_tx_token_put(struct mt7615_dev *dev)
44644         spin_lock_bh(&dev->token_lock);
44645         idr_for_each_entry(&dev->token, txwi, id) {
44646                 mt7615_txp_skb_unmap(&dev->mt76, txwi);
44647 -               if (txwi->skb)
44648 -                       dev_kfree_skb_any(txwi->skb);
44649 +               if (txwi->skb) {
44650 +                       struct ieee80211_hw *hw;
44652 +                       hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
44653 +                       ieee80211_free_txskb(hw, txwi->skb);
44654 +               }
44655                 mt76_put_txwi(&dev->mt76, txwi);
44656         }
44657         spin_unlock_bh(&dev->token_lock);
44658 @@ -2304,8 +2313,10 @@ void mt7615_coredump_work(struct work_struct *work)
44659                         break;
44661                 skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
44662 -               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
44663 -                       break;
44664 +               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
44665 +                       dev_kfree_skb(skb);
44666 +                       continue;
44667 +               }
44669                 memcpy(data, skb->data, skb->len);
44670                 data += skb->len;
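
The mib_stats hunks above change the accounting model: the MT_MIB_* registers are clear-on-read hardware counters, so the old memset-then-sample loop (and taking the max of the ack-fail/ba-miss samples) discarded events between polls. The driver now adds each read into running software counters, mt7615_get_stats() reports and zeroes them under the mutex (see the main.c hunk below), and mt7615.h widens the fields to u32 so the sums do not wrap at 65535. Accumulating a clear-on-read counter, in outline (read_hw_counter() is hypothetical):

struct mib_sketch {
        u32 fcs_err_cnt;
};

static void poll_mib(struct mib_sketch *m)
{
        u32 val = read_hw_counter();    /* hardware resets to 0 on read */

        m->fcs_err_cnt += val;          /* accumulate, never overwrite */
}

static u32 report_and_reset(struct mib_sketch *m)
{
        u32 out = m->fcs_err_cnt;

        m->fcs_err_cnt = 0;     /* consumer has them; start a fresh window */
        return out;
}
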
44671 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
44672 index 25faf486d279..d334491667a4 100644
44673 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
44674 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
44675 @@ -217,8 +217,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
44676         ret = mt7615_mcu_add_dev_info(phy, vif, true);
44677         if (ret)
44678                 goto out;
44680 -       mt7615_mac_set_beacon_filter(phy, vif, true);
44681  out:
44682         mt7615_mutex_release(dev);
44684 @@ -244,7 +242,6 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
44686         mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
44688 -       mt7615_mac_set_beacon_filter(phy, vif, false);
44689         mt7615_mcu_add_dev_info(phy, vif, false);
44691         rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
44692 @@ -337,7 +334,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
44693         struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
44694                                   &mvif->sta;
44695         struct mt76_wcid *wcid = &msta->wcid;
44696 -       int idx = key->keyidx, err;
44697 +       int idx = key->keyidx, err = 0;
44698 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
44700         /* The hardware does not support per-STA RX GTK, fallback
44701          * to software mode for these.
44702 @@ -352,6 +350,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
44703         /* fall back to sw encryption for unsupported ciphers */
44704         switch (key->cipher) {
44705         case WLAN_CIPHER_SUITE_AES_CMAC:
44706 +               wcid_keyidx = &wcid->hw_key_idx2;
44707                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
44708                 break;
44709         case WLAN_CIPHER_SUITE_TKIP:
44710 @@ -369,12 +368,13 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
44712         mt7615_mutex_acquire(dev);
44714 -       if (cmd == SET_KEY) {
44715 -               key->hw_key_idx = wcid->idx;
44716 -               wcid->hw_key_idx = idx;
44717 -       } else if (idx == wcid->hw_key_idx) {
44718 -               wcid->hw_key_idx = -1;
44719 -       }
44720 +       if (cmd == SET_KEY)
44721 +               *wcid_keyidx = idx;
44722 +       else if (idx == *wcid_keyidx)
44723 +               *wcid_keyidx = -1;
44724 +       else
44725 +               goto out;
44727         mt76_wcid_key_setup(&dev->mt76, wcid,
44728                             cmd == SET_KEY ? key : NULL);
44730 @@ -383,6 +383,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
44731         else
44732                 err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
44734 +out:
44735         mt7615_mutex_release(dev);
44737         return err;
44738 @@ -544,6 +545,9 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
44739         if (changed & BSS_CHANGED_ARP_FILTER)
44740                 mt7615_mcu_update_arp_filter(hw, vif, info);
44742 +       if (changed & BSS_CHANGED_ASSOC)
44743 +               mt7615_mac_set_beacon_filter(phy, vif, info->assoc);
44745         mt7615_mutex_release(dev);
44748 @@ -803,26 +807,38 @@ mt7615_get_stats(struct ieee80211_hw *hw,
44749         struct mt7615_phy *phy = mt7615_hw_phy(hw);
44750         struct mib_stats *mib = &phy->mib;
44752 +       mt7615_mutex_acquire(phy->dev);
44754         stats->dot11RTSSuccessCount = mib->rts_cnt;
44755         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
44756         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
44757         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
44759 +       memset(mib, 0, sizeof(*mib));
44761 +       mt7615_mutex_release(phy->dev);
44763         return 0;
44766  static u64
44767  mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
44769 +       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
44770         struct mt7615_dev *dev = mt7615_hw_dev(hw);
44771         union {
44772                 u64 t64;
44773                 u32 t32[2];
44774         } tsf;
44775 +       u16 idx = mvif->mt76.omac_idx;
44776 +       u32 reg;
44778 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
44779 +       reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
44781         mt7615_mutex_acquire(dev);
44783 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
44784 +       mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */
44785         tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
44786         tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
44788 @@ -835,18 +851,24 @@ static void
44789  mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
44790                u64 timestamp)
44792 +       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
44793         struct mt7615_dev *dev = mt7615_hw_dev(hw);
44794         union {
44795                 u64 t64;
44796                 u32 t32[2];
44797         } tsf = { .t64 = timestamp, };
44798 +       u16 idx = mvif->mt76.omac_idx;
44799 +       u32 reg;
44801 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
44802 +       reg = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
44804         mt7615_mutex_acquire(dev);
44806         mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
44807         mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
44808         /* TSF software overwrite */
44809 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE);
44810 +       mt76_set(dev, reg, MT_LPON_TCR_WRITE);
44812         mt7615_mutex_release(dev);
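
For reference, the two TSF hunks above pick the LPON timer register from the interface's OMAC index and move the 64-bit timestamp through a u64/u32[2] union. A standalone sketch of the union split, assuming a little-endian host (which is what the low/high register pairing above relies on):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                union { uint64_t t64; uint32_t t32[2]; } tsf = {
                        .t64 = 0x0123456789abcdefULL,
                };

                /* On a little-endian machine t32[0] is the low word
                 * (UTTR0) and t32[1] the high word (UTTR1). */
                printf("low=0x%08x high=0x%08x\n", tsf.t32[0], tsf.t32[1]);
                return 0;
        }
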
44814 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
44815 index 631596fc2f36..198e9025b681 100644
44816 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
44817 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
44818 @@ -291,12 +291,20 @@ static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
44819         u32 addr;
44820         int err;
44822 -       addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
44823 +       if (is_mt7663(mdev)) {
44824 +               /* Clear firmware own via N9 eint */
44825 +               mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
44826 +               mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
44828 +               addr = MT_CONN_HIF_ON_LPCTL;
44829 +       } else {
44830 +               addr = MT_CFG_LPCR_HOST;
44831 +       }
44833         mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
44835         mt7622_trigger_hif_int(dev, true);
44837 -       addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
44838         err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
44840         mt7622_trigger_hif_int(dev, false);
44841 @@ -1040,6 +1048,9 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev,
44843         wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
44844                                                   WTBL_SET, sta_wtbl, &skb);
44845 +       if (IS_ERR(wtbl_hdr))
44846 +               return PTR_ERR(wtbl_hdr);
44848         mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, tx,
44849                                     sta_wtbl, wtbl_hdr);
44851 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
44852 index 491841bc6291..4bc0c379c579 100644
44853 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
44854 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
44855 @@ -133,11 +133,11 @@ struct mt7615_vif {
44856  };
44858  struct mib_stats {
44859 -       u16 ack_fail_cnt;
44860 -       u16 fcs_err_cnt;
44861 -       u16 rts_cnt;
44862 -       u16 rts_retries_cnt;
44863 -       u16 ba_miss_cnt;
44864 +       u32 ack_fail_cnt;
44865 +       u32 fcs_err_cnt;
44866 +       u32 rts_cnt;
44867 +       u32 rts_retries_cnt;
44868 +       u32 ba_miss_cnt;
44869         unsigned long aggr_per;
44870  };
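
The counter widening from u16 to u32 goes hand in hand with the new accumulate-until-read scheme in mac.c: summed counters would wrap a 16-bit field almost immediately. A small demonstration of the overflow the wider type avoids:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint16_t c16 = 0;
                uint32_t c32 = 0;

                /* 100 polling intervals, ~1000 events each. */
                for (int i = 0; i < 100; i++) {
                        c16 += 1000;
                        c32 += 1000;
                }
                printf("u16=%u u32=%u\n", c16, c32);  /* u16 wrapped: 34464 */
                return 0;
        }
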
44872 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
44873 index 72395925ddee..15b417d6d889 100644
44874 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
44875 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
44876 @@ -163,10 +163,9 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
44877         mt76_unregister_device(&dev->mt76);
44878         if (mcu_running)
44879                 mt7615_mcu_exit(dev);
44880 -       mt7615_dma_cleanup(dev);
44882         mt7615_tx_token_put(dev);
44884 +       mt7615_dma_cleanup(dev);
44885         tasklet_disable(&dev->irq_tasklet);
44887         mt76_free_device(&dev->mt76);
44888 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
44889 index 6e5db015b32c..6e4710d3ddd3 100644
44890 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
44891 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
44892 @@ -447,9 +447,10 @@ enum mt7615_reg_base {
44894  #define MT_LPON(_n)                    ((dev)->reg_map[MT_LPON_BASE] + (_n))
44896 -#define MT_LPON_T0CR                   MT_LPON(0x010)
44897 -#define MT_LPON_T0CR_MODE              GENMASK(1, 0)
44898 -#define MT_LPON_T0CR_WRITE             BIT(0)
44899 +#define MT_LPON_TCR0(_n)               MT_LPON(0x010 + ((_n) * 4))
44900 +#define MT_LPON_TCR2(_n)               MT_LPON(0x0f8 + ((_n) - 2) * 4)
44901 +#define MT_LPON_TCR_MODE               GENMASK(1, 0)
44902 +#define MT_LPON_TCR_WRITE              BIT(0)
44904  #define MT_LPON_UTTR0                  MT_LPON(0x018)
44905  #define MT_LPON_UTTR1                  MT_LPON(0x01c)
44906 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
44907 index 9fb506f2ace6..4393dd21ebbb 100644
44908 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
44909 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
44910 @@ -218,12 +218,15 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
44911         int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
44912         bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
44913         struct mt76_sdio *sdio = &dev->sdio;
44914 +       u8 pad;
44916         qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
44917         while (q->first != q->head) {
44918                 struct mt76_queue_entry *e = &q->entry[q->first];
44919                 struct sk_buff *iter;
44921 +               smp_rmb();
44923                 if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
44924                         __skb_put_zero(e->skb, 4);
44925                         err = __mt7663s_xmit_queue(dev, e->skb->data,
44926 @@ -234,7 +237,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
44927                         goto next;
44928                 }
44930 -               if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
44931 +               pad = roundup(e->skb->len, 4) - e->skb->len;
44932 +               if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
44933                         break;
44935                 if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
44936 @@ -252,6 +256,11 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
44937                         len += iter->len;
44938                         nframes++;
44939                 }
44941 +               if (unlikely(pad)) {
44942 +                       memset(sdio->xmit_buf[qid] + len, 0, pad);
44943 +                       len += pad;
44944 +               }
44945  next:
44946                 q->first = (q->first + 1) % q->ndesc;
44947                 e->done = true;
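
The padding logic above keeps every frame in the shared xmit buffer 4-byte aligned: it reserves roundup(len, 4) - len extra bytes in the space check and zero-fills them after the copy. The same arithmetic in standalone form (roundup4 mirrors the kernel's roundup() for a power-of-two boundary):

        #include <stdio.h>

        static unsigned int roundup4(unsigned int x)
        {
                return (x + 3u) & ~3u;   /* next multiple of 4, or x itself */
        }

        int main(void)
        {
                unsigned int lens[] = { 60, 61, 62, 63, 64 };

                for (int i = 0; i < 5; i++) {
                        unsigned int pad = roundup4(lens[i]) - lens[i];
                        printf("len=%u pad=%u\n", lens[i], pad); /* pad is 0..3 */
                }
                return 0;
        }
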
44948 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
44949 index 203256862dfd..f8d3673c2cae 100644
44950 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
44951 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
44952 @@ -67,6 +67,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
44953         struct mt7615_rate_desc *rate = &wrd->rate;
44954         struct mt7615_sta *sta = wrd->sta;
44955         u32 w5, w27, addr, val;
44956 +       u16 idx;
44958         lockdep_assert_held(&dev->mt76.mutex);
44960 @@ -118,7 +119,11 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
44962         sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1;
44964 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
44965 +       idx = sta->vif->mt76.omac_idx;
44966 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
44967 +       addr = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);
44969 +       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
44970         val = mt76_rr(dev, MT_LPON_UTTR0);
44971         sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
44973 diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
44974 index 6cbccfb05f8b..cefd33b74a87 100644
44975 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
44976 +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
44977 @@ -833,6 +833,9 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
44978         wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid,
44979                                                   WTBL_RESET_AND_SET,
44980                                                   sta_wtbl, &skb);
44981 +       if (IS_ERR(wtbl_hdr))
44982 +               return PTR_ERR(wtbl_hdr);
44984         if (enable) {
44985                 mt76_connac_mcu_wtbl_generic_tlv(dev, skb, vif, sta, sta_wtbl,
44986                                                  wtbl_hdr);
44987 @@ -946,6 +949,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
44989         switch (vif->type) {
44990         case NL80211_IFTYPE_MESH_POINT:
44991 +       case NL80211_IFTYPE_MONITOR:
44992         case NL80211_IFTYPE_AP:
44993                 basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
44994                 break;
44995 @@ -1195,6 +1199,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
44996                         .center_chan = ieee80211_frequency_to_channel(freq1),
44997                         .center_chan2 = ieee80211_frequency_to_channel(freq2),
44998                         .tx_streams = hweight8(phy->antenna_mask),
44999 +                       .ht_op_info = 4, /* allow HT 40 MHz operation */
45000                         .rx_streams = phy->chainmask,
45001                         .short_st = true,
45002                 },
45003 @@ -1287,6 +1292,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
45004         case NL80211_CHAN_WIDTH_20:
45005         default:
45006                 rlm_req.rlm.bw = CMD_CBW_20MHZ;
45007 +               rlm_req.rlm.ht_op_info = 0;
45008                 break;
45009         }
45011 @@ -1306,7 +1312,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
45013         struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
45014         struct cfg80211_scan_request *sreq = &scan_req->req;
45015 -       int n_ssids = 0, err, i, duration = MT76_CONNAC_SCAN_CHANNEL_TIME;
45016 +       int n_ssids = 0, err, i, duration;
45017         int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
45018         struct ieee80211_channel **scan_list = sreq->channels;
45019         struct mt76_dev *mdev = phy->dev;
45020 @@ -1343,6 +1349,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
45021         req->ssid_type_ext = n_ssids ? BIT(0) : 0;
45022         req->ssids_num = n_ssids;
45024 +       duration = is_mt7921(phy->dev) ? 0 : MT76_CONNAC_SCAN_CHANNEL_TIME;
45025         /* increase channel time for passive scan */
45026         if (!sreq->n_ssids)
45027                 duration *= 2;
45028 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
45029 index ab671e21f882..02db5d66735d 100644
45030 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
45031 +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
45032 @@ -447,6 +447,10 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
45033             !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
45034                 return -EOPNOTSUPP;
45036 +       /* MT76x0 GTK offloading does not work with more than one VIF */
45037 +       if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
45038 +               return -EOPNOTSUPP;
45040         msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
45041         wcid = msta ? &msta->wcid : &mvif->group_wcid;
45043 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
45044 index 77dcd71e49a5..2f706620686e 100644
45045 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
45046 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
45047 @@ -124,7 +124,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
45048                 range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
45050         for (i = 0; i < ARRAY_SIZE(bound); i++)
45051 -               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
45052 +               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
45054         seq_printf(file, "\nPhy %d\n", ext_phy);
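
The i % 4 fix works because each MT_MIB_ARNG register packs four range fields, so the register index is i / 4 and the field selector within it i % 4; the old code passed i straight through and shifted past the register for i >= 4. A hypothetical standalone model of that indexing, assuming the 8-bit field width implied by MT_MIB_ARNCR_RANGE:

        #include <stdint.h>
        #include <stdio.h>

        static uint8_t range_field(uint32_t reg, int n)
        {
                return (reg >> (n * 8)) & 0xff;   /* n-th byte of the register */
        }

        int main(void)
        {
                uint32_t range[2] = { 0x44332211, 0x88776655 };

                for (int i = 0; i < 8; i++)
                        printf("bound[%d]=0x%02x\n", i,
                               range_field(range[i / 4], i % 4));
                return 0;
        }

This walks 0x11 through 0x88 in order; with the old selector, i >= 4 would read zeros shifted off the top of the register.
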
45056 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
45057 index 660398ac53c2..738ecf8f4fa2 100644
45058 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
45059 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
45060 @@ -124,7 +124,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
45061                                    struct ieee80211_channel *chan,
45062                                    u8 chain_idx)
45064 -       int index;
45065 +       int index, target_power;
45066         bool tssi_on;
45068         if (chain_idx > 3)
45069 @@ -133,15 +133,22 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
45070         tssi_on = mt7915_tssi_enabled(dev, chan->band);
45072         if (chan->band == NL80211_BAND_2GHZ) {
45073 -               index = MT_EE_TX0_POWER_2G + chain_idx * 3 + !tssi_on;
45074 +               index = MT_EE_TX0_POWER_2G + chain_idx * 3;
45075 +               target_power = mt7915_eeprom_read(dev, index);
45077 +               if (!tssi_on)
45078 +                       target_power += mt7915_eeprom_read(dev, index + 1);
45079         } else {
45080 -               int group = tssi_on ?
45081 -                           mt7915_get_channel_group(chan->hw_value) : 8;
45082 +               int group = mt7915_get_channel_group(chan->hw_value);
45084 +               index = MT_EE_TX0_POWER_5G + chain_idx * 12;
45085 +               target_power = mt7915_eeprom_read(dev, index + group);
45087 -               index = MT_EE_TX0_POWER_5G + chain_idx * 12 + group;
45088 +               if (!tssi_on)
45089 +                       target_power += mt7915_eeprom_read(dev, index + 8);
45090         }
45092 -       return mt7915_eeprom_read(dev, index);
45093 +       return target_power;
45096  static const u8 sku_cck_delta_map[] = {
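
The rewritten lookup reads the chain's base power first and only then adds the non-TSSI word as a delta, instead of folding the offset into a single EEPROM index. A hedged sketch of the 2 GHz branch with a hypothetical eeprom_read() and a simplified stride (the patch itself uses chain_idx * 3):

        #include <stdio.h>

        /* Hypothetical flat EEPROM image: base power and delta pairs. */
        static const int eeprom[] = { 20, 2, 22, 1, 24, 3, 26, 0 };

        static int eeprom_read(int index)
        {
                return eeprom[index];
        }

        static int target_power_2g(int chain_idx, int tssi_on)
        {
                int index = chain_idx * 2;            /* stride illustrative */
                int power = eeprom_read(index);       /* base target power */

                if (!tssi_on)
                        power += eeprom_read(index + 1);  /* additive delta */
                return power;
        }

        int main(void)
        {
                printf("chain0: tssi=%d, no-tssi=%d\n",
                       target_power_2g(0, 1), target_power_2g(0, 0));
                return 0;
        }
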
45097 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
45098 index ad4e5b95158b..c7d4268d860a 100644
45099 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
45100 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
45101 @@ -4,6 +4,7 @@
45102  #include <linux/etherdevice.h>
45103  #include "mt7915.h"
45104  #include "mac.h"
45105 +#include "mcu.h"
45106  #include "eeprom.h"
45108  #define CCK_RATE(_idx, _rate) {                                                \
45109 @@ -283,9 +284,50 @@ static void mt7915_init_work(struct work_struct *work)
45110         mt7915_register_ext_phy(dev);
45113 +static void mt7915_wfsys_reset(struct mt7915_dev *dev)
45115 +       u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
45116 +       u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
45118 +#define MT_MCU_DUMMY_RANDOM    GENMASK(15, 0)
45119 +#define MT_MCU_DUMMY_DEFAULT   GENMASK(31, 16)
45121 +       mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
45123 +       /* change to software control */
45124 +       val |= MT_TOP_PWR_SW_RST;
45125 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
45127 +       /* reset wfsys */
45128 +       val &= ~MT_TOP_PWR_SW_RST;
45129 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
45131 +       /* release wfsys, then mcu re-executes romcode */
45132 +       val |= MT_TOP_PWR_SW_RST;
45133 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
45135 +       /* switch to hw control */
45136 +       val &= ~MT_TOP_PWR_SW_RST;
45137 +       val |= MT_TOP_PWR_HW_CTRL;
45138 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
45140 +       /* check whether mcu resets to default */
45141 +       if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
45142 +                           MT_MCU_DUMMY_DEFAULT, 1000)) {
45143 +               dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
45144 +               return;
45145 +       }
45147 +       /* wfsys reset won't clear host registers */
45148 +       mt76_clear(dev, reg, MT_TOP_MISC_FW_STATE);
45150 +       msleep(100);
45153  static int mt7915_init_hardware(struct mt7915_dev *dev)
45155         int ret, idx;
45156 +       u32 val;
45158         mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
45160 @@ -295,6 +337,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
45162         dev->dbdc_support = !!(mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5));
45164 +       val = mt76_rr(dev, mt7915_reg_map_l1(dev, MT_TOP_MISC));
45166 +       /* If MCU was already running, it is likely in a bad state */
45167 +       if (FIELD_GET(MT_TOP_MISC_FW_STATE, val) > FW_STATE_FW_DOWNLOAD)
45168 +               mt7915_wfsys_reset(dev);
45170         ret = mt7915_dma_init(dev);
45171         if (ret)
45172                 return ret;
45173 @@ -308,8 +356,14 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
45174         mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
45176         ret = mt7915_mcu_init(dev);
45177 -       if (ret)
45178 -               return ret;
45179 +       if (ret) {
45180 +               /* Reset and try again */
45181 +               mt7915_wfsys_reset(dev);
45183 +               ret = mt7915_mcu_init(dev);
45184 +               if (ret)
45185 +                       return ret;
45186 +       }
45188         ret = mt7915_eeprom_init(dev);
45189         if (ret < 0)
45190 @@ -675,9 +729,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
45191         mt7915_unregister_ext_phy(dev);
45192         mt76_unregister_device(&dev->mt76);
45193         mt7915_mcu_exit(dev);
45194 -       mt7915_dma_cleanup(dev);
45196         mt7915_tx_token_put(dev);
45197 +       mt7915_dma_cleanup(dev);
45199         mt76_free_device(&dev->mt76);
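
Two patterns above are worth isolating: mt7915_wfsys_reset() walks MT_TOP_PWR_CTRL through take-software-control, assert, release, and hand-back-to-hardware steps, and mt7915_mcu_init() now gets exactly one retry after such a reset. The retry-once shape in standalone form, with stub init/reset callbacks (illustration only):

        #include <stdio.h>

        static int try_init(int *attempts)
        {
                return ++(*attempts) < 2 ? -5 : 0;   /* fails first time */
        }

        static void wfsys_reset(void)
        {
                puts("resetting wifi subsystem");
        }

        int main(void)
        {
                int attempts = 0;
                int ret = try_init(&attempts);

                if (ret) {
                        wfsys_reset();               /* recover a wedged MCU */
                        ret = try_init(&attempts);   /* and retry exactly once */
                }
                printf("ret=%d attempts=%d\n", ret, attempts);
                return ret;
        }
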
45201 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
45202 index e5a258958ac9..819670767521 100644
45203 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
45204 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
45205 @@ -1091,7 +1091,7 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev,
45206         int i;
45208         txp = mt7915_txwi_to_txp(dev, t);
45209 -       for (i = 1; i < txp->nbuf; i++)
45210 +       for (i = 0; i < txp->nbuf; i++)
45211                 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
45212                                  le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
45214 @@ -1470,9 +1470,8 @@ mt7915_update_beacons(struct mt7915_dev *dev)
45217  static void
45218 -mt7915_dma_reset(struct mt7915_phy *phy)
45219 +mt7915_dma_reset(struct mt7915_dev *dev)
45221 -       struct mt7915_dev *dev = phy->dev;
45222         struct mt76_phy *mphy_ext = dev->mt76.phy2;
45223         u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
45224         int i;
45225 @@ -1489,18 +1488,20 @@ mt7915_dma_reset(struct mt7915_phy *phy)
45226                            (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
45227                             MT_WFDMA1_GLO_CFG_RX_DMA_EN));
45228         }
45230         usleep_range(1000, 2000);
45232 -       mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
45233         for (i = 0; i < __MT_TXQ_MAX; i++) {
45234 -               mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
45235 +               mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
45236                 if (mphy_ext)
45237                         mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
45238         }
45240 -       mt76_for_each_q_rx(&dev->mt76, i) {
45241 +       for (i = 0; i < __MT_MCUQ_MAX; i++)
45242 +               mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
45244 +       mt76_for_each_q_rx(&dev->mt76, i)
45245                 mt76_queue_rx_reset(dev, i);
45246 -       }
45248         /* re-init prefetch settings after reset */
45249         mt7915_dma_prefetch(dev);
45250 @@ -1584,7 +1585,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
45251         idr_init(&dev->token);
45253         if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
45254 -               mt7915_dma_reset(&dev->phy);
45255 +               mt7915_dma_reset(dev);
45257                 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
45258                 mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
45259 @@ -1633,39 +1634,30 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
45260         bool ext_phy = phy != &dev->phy;
45261         int i, aggr0, aggr1;
45263 -       memset(mib, 0, sizeof(*mib));
45265 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
45266 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
45267 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
45268 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
45270         aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
45271         for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
45272 -               u32 val, val2;
45273 +               u32 val;
45275                 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
45277 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
45278 -               if (val2 > mib->ack_fail_cnt)
45279 -                       mib->ack_fail_cnt = val2;
45281 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
45282 -               if (val2 > mib->ba_miss_cnt)
45283 -                       mib->ba_miss_cnt = val2;
45284 +               mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
45285 +               mib->ack_fail_cnt +=
45286 +                       FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
45288                 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
45289 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
45290 -               if (val2 > mib->rts_retries_cnt) {
45291 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
45292 -                       mib->rts_retries_cnt = val2;
45293 -               }
45294 +               mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
45295 +               mib->rts_retries_cnt +=
45296 +                       FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
45298                 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
45299 -               val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
45301                 dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
45302                 dev->mt76.aggr_stats[aggr0++] += val >> 16;
45303 -               dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
45304 -               dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
45306 +               val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
45307 +               dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
45308 +               dev->mt76.aggr_stats[aggr1++] += val >> 16;
45309         }
45312 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
45313 index d4969b2e1ffb..bf032d943f74 100644
45314 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
45315 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
45316 @@ -317,7 +317,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
45317         struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
45318                                   &mvif->sta;
45319         struct mt76_wcid *wcid = &msta->wcid;
45320 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
45321         int idx = key->keyidx;
45322 +       int err = 0;
45324         /* The hardware does not support per-STA RX GTK, fall back
45325          * to software mode for these.
45326 @@ -332,6 +334,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
45327         /* fall back to sw encryption for unsupported ciphers */
45328         switch (key->cipher) {
45329         case WLAN_CIPHER_SUITE_AES_CMAC:
45330 +               wcid_keyidx = &wcid->hw_key_idx2;
45331                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
45332                 break;
45333         case WLAN_CIPHER_SUITE_TKIP:
45334 @@ -347,16 +350,24 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
45335                 return -EOPNOTSUPP;
45336         }
45338 -       if (cmd == SET_KEY) {
45339 -               key->hw_key_idx = wcid->idx;
45340 -               wcid->hw_key_idx = idx;
45341 -       } else if (idx == wcid->hw_key_idx) {
45342 -               wcid->hw_key_idx = -1;
45343 -       }
45344 +       mutex_lock(&dev->mt76.mutex);
45346 +       if (cmd == SET_KEY)
45347 +               *wcid_keyidx = idx;
45348 +       else if (idx == *wcid_keyidx)
45349 +               *wcid_keyidx = -1;
45350 +       else
45351 +               goto out;
45353         mt76_wcid_key_setup(&dev->mt76, wcid,
45354                             cmd == SET_KEY ? key : NULL);
45356 -       return mt7915_mcu_add_key(dev, vif, msta, key, cmd);
45357 +       err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
45359 +out:
45360 +       mutex_unlock(&dev->mt76.mutex);
45362 +       return err;
45365  static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
45366 @@ -717,13 +728,19 @@ mt7915_get_stats(struct ieee80211_hw *hw,
45367                  struct ieee80211_low_level_stats *stats)
45369         struct mt7915_phy *phy = mt7915_hw_phy(hw);
45370 +       struct mt7915_dev *dev = mt7915_hw_dev(hw);
45371         struct mib_stats *mib = &phy->mib;
45373 +       mutex_lock(&dev->mt76.mutex);
45374         stats->dot11RTSSuccessCount = mib->rts_cnt;
45375         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
45376         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
45377         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
45379 +       memset(mib, 0, sizeof(*mib));
45381 +       mutex_unlock(&dev->mt76.mutex);
45383         return 0;
45386 @@ -833,9 +850,12 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
45387         struct mt7915_phy *phy = mt7915_hw_phy(hw);
45388         struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
45389         struct mt7915_sta_stats *stats = &msta->stats;
45390 +       struct rate_info rxrate = {};
45392 -       if (mt7915_mcu_get_rx_rate(phy, vif, sta, &sinfo->rxrate) == 0)
45393 +       if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
45394 +               sinfo->rxrate = rxrate;
45395                 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
45396 +       }
45398         if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
45399                 return;
45400 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
45401 index 195929242b72..f069a5a03e14 100644
45402 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
45403 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
45404 @@ -351,54 +351,62 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
45405         dev->hw_pattern++;
45408 -static void
45409 +static int
45410  mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
45411                          struct rate_info *rate, u16 r)
45413         struct ieee80211_supported_band *sband;
45414         u16 ru_idx = le16_to_cpu(ra->ru_idx);
45415 -       u16 flags = 0;
45416 +       bool cck = false;
45418         rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r);
45419         rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1;
45421         switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) {
45422         case MT_PHY_TYPE_CCK:
45423 +               cck = true;
45424 +               fallthrough;
45425         case MT_PHY_TYPE_OFDM:
45426                 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
45427                         sband = &mphy->sband_5g.sband;
45428                 else
45429                         sband = &mphy->sband_2g.sband;
45431 +               rate->mcs = mt76_get_rate(mphy->dev, sband, rate->mcs, cck);
45432                 rate->legacy = sband->bitrates[rate->mcs].bitrate;
45433                 break;
45434         case MT_PHY_TYPE_HT:
45435         case MT_PHY_TYPE_HT_GF:
45436                 rate->mcs += (rate->nss - 1) * 8;
45437 -               flags |= RATE_INFO_FLAGS_MCS;
45438 +               if (rate->mcs > 31)
45439 +                       return -EINVAL;
45441 +               rate->flags = RATE_INFO_FLAGS_MCS;
45442                 if (ra->gi)
45443 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
45444 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
45445                 break;
45446         case MT_PHY_TYPE_VHT:
45447 -               flags |= RATE_INFO_FLAGS_VHT_MCS;
45448 +               if (rate->mcs > 9)
45449 +                       return -EINVAL;
45451 +               rate->flags = RATE_INFO_FLAGS_VHT_MCS;
45452                 if (ra->gi)
45453 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
45454 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
45455                 break;
45456         case MT_PHY_TYPE_HE_SU:
45457         case MT_PHY_TYPE_HE_EXT_SU:
45458         case MT_PHY_TYPE_HE_TB:
45459         case MT_PHY_TYPE_HE_MU:
45460 +               if (ra->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11)
45461 +                       return -EINVAL;
45463                 rate->he_gi = ra->gi;
45464                 rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
45466 -               flags |= RATE_INFO_FLAGS_HE_MCS;
45467 +               rate->flags = RATE_INFO_FLAGS_HE_MCS;
45468                 break;
45469         default:
45470 -               break;
45471 +               return -EINVAL;
45472         }
45473 -       rate->flags = flags;
45475         if (ru_idx) {
45476                 switch (ru_idx) {
45477 @@ -435,6 +443,8 @@ mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
45478                         break;
45479                 }
45480         }
45482 +       return 0;
45485  static void
45486 @@ -465,12 +475,12 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
45487                 mphy = dev->mt76.phy2;
45489         /* current rate */
45490 -       mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr);
45491 -       stats->tx_rate = rate;
45492 +       if (!mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr))
45493 +               stats->tx_rate = rate;
45495         /* probing rate */
45496 -       mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe);
45497 -       stats->prob_rate = prob_rate;
45498 +       if (!mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe))
45499 +               stats->prob_rate = prob_rate;
45501         if (attempts) {
45502                 u16 success = le16_to_cpu(ra->success);
45503 @@ -1188,6 +1198,9 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
45505         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
45506                                              &skb);
45507 +       if (IS_ERR(wtbl_hdr))
45508 +               return PTR_ERR(wtbl_hdr);
45510         mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
45512         ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
45513 @@ -1704,6 +1717,9 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
45514                 return -ENOMEM;
45516         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
45517 +       if (IS_ERR(wtbl_hdr))
45518 +               return PTR_ERR(wtbl_hdr);
45520         mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
45522         return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE),
45523 @@ -1728,6 +1744,9 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
45525         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
45526                                              &skb);
45527 +       if (IS_ERR(wtbl_hdr))
45528 +               return PTR_ERR(wtbl_hdr);
45530         mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
45532         return mt76_mcu_skb_send_msg(&dev->mt76, skb,
45533 @@ -2253,6 +2272,9 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
45535         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
45536                                              sta_wtbl, &skb);
45537 +       if (IS_ERR(wtbl_hdr))
45538 +               return PTR_ERR(wtbl_hdr);
45540         if (enable) {
45541                 mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
45542                 mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
45543 @@ -2742,21 +2764,8 @@ static int mt7915_load_ram(struct mt7915_dev *dev)
45545  static int mt7915_load_firmware(struct mt7915_dev *dev)
45547 +       u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
45548         int ret;
45549 -       u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
45551 -       val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD);
45553 -       if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, val, 1000)) {
45554 -               /* restart firmware once */
45555 -               __mt76_mcu_restart(&dev->mt76);
45556 -               if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
45557 -                                   val, 1000)) {
45558 -                       dev_err(dev->mt76.dev,
45559 -                               "Firmware is not ready for download\n");
45560 -                       return -EIO;
45561 -               }
45562 -       }
45564         ret = mt7915_load_patch(dev);
45565         if (ret)
45566 @@ -3501,9 +3510,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
45567         struct ieee80211_supported_band *sband;
45568         struct mt7915_mcu_phy_rx_info *res;
45569         struct sk_buff *skb;
45570 -       u16 flags = 0;
45571         int ret;
45572 -       int i;
45573 +       bool cck = false;
45575         ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO),
45576                                         &req, sizeof(req), true, &skb);
45577 @@ -3517,48 +3525,53 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
45579         switch (res->mode) {
45580         case MT_PHY_TYPE_CCK:
45581 +               cck = true;
45582 +               fallthrough;
45583         case MT_PHY_TYPE_OFDM:
45584                 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
45585                         sband = &mphy->sband_5g.sband;
45586                 else
45587                         sband = &mphy->sband_2g.sband;
45589 -               for (i = 0; i < sband->n_bitrates; i++) {
45590 -                       if (rate->mcs != (sband->bitrates[i].hw_value & 0xf))
45591 -                               continue;
45593 -                       rate->legacy = sband->bitrates[i].bitrate;
45594 -                       break;
45595 -               }
45596 +               rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck);
45597 +               rate->legacy = sband->bitrates[rate->mcs].bitrate;
45598                 break;
45599         case MT_PHY_TYPE_HT:
45600         case MT_PHY_TYPE_HT_GF:
45601 -               if (rate->mcs > 31)
45602 -                       return -EINVAL;
45604 -               flags |= RATE_INFO_FLAGS_MCS;
45605 +               if (rate->mcs > 31) {
45606 +                       ret = -EINVAL;
45607 +                       goto out;
45608 +               }
45610 +               rate->flags = RATE_INFO_FLAGS_MCS;
45611                 if (res->gi)
45612 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
45613 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
45614                 break;
45615         case MT_PHY_TYPE_VHT:
45616 -               flags |= RATE_INFO_FLAGS_VHT_MCS;
45617 +               if (rate->mcs > 9) {
45618 +                       ret = -EINVAL;
45619 +                       goto out;
45620 +               }
45622 +               rate->flags = RATE_INFO_FLAGS_VHT_MCS;
45623                 if (res->gi)
45624 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
45625 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
45626                 break;
45627         case MT_PHY_TYPE_HE_SU:
45628         case MT_PHY_TYPE_HE_EXT_SU:
45629         case MT_PHY_TYPE_HE_TB:
45630         case MT_PHY_TYPE_HE_MU:
45631 +               if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) {
45632 +                       ret = -EINVAL;
45633 +                       goto out;
45634 +               }
45635                 rate->he_gi = res->gi;
45637 -               flags |= RATE_INFO_FLAGS_HE_MCS;
45638 +               rate->flags = RATE_INFO_FLAGS_HE_MCS;
45639                 break;
45640         default:
45641 -               break;
45642 +               ret = -EINVAL;
45643 +               goto out;
45644         }
45645 -       rate->flags = flags;
45647         switch (res->bw) {
45648         case IEEE80211_STA_RX_BW_160:
45649 @@ -3575,7 +3588,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
45650                 break;
45651         }
45653 +out:
45654         dev_kfree_skb(skb);
45656 -       return 0;
45657 +       return ret;
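
With the parser returning -EINVAL on out-of-range MCS or GI values, callers commit the parsed rate only on success, so a bogus MCU report can no longer clobber the last good rate. The caller-side pattern in standalone form (hypothetical parse function and limits):

        #include <stdio.h>

        struct rate_info { int mcs; };

        static int parse_rate(struct rate_info *r, int mcs)
        {
                if (mcs > 11)
                        return -22;   /* -EINVAL: reject, leave *r untouched */
                r->mcs = mcs;
                return 0;
        }

        int main(void)
        {
                struct rate_info current = { 7 };
                struct rate_info tmp = current;

                if (!parse_rate(&tmp, 99))       /* bogus report */
                        current = tmp;           /* only commit on success */
                printf("mcs=%d\n", current.mcs); /* still 7 */
                return 0;
        }
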
45659 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
45660 index 5c7eefdf2013..1160d1bf8a7c 100644
45661 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
45662 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
45663 @@ -108,11 +108,11 @@ struct mt7915_vif {
45664  };
45666  struct mib_stats {
45667 -       u16 ack_fail_cnt;
45668 -       u16 fcs_err_cnt;
45669 -       u16 rts_cnt;
45670 -       u16 rts_retries_cnt;
45671 -       u16 ba_miss_cnt;
45672 +       u32 ack_fail_cnt;
45673 +       u32 fcs_err_cnt;
45674 +       u32 rts_cnt;
45675 +       u32 rts_retries_cnt;
45676 +       u32 ba_miss_cnt;
45677  };
45679  struct mt7915_hif {
45680 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
45681 index ed0c9a24bb53..dfb8880657bf 100644
45682 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
45683 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
45684 @@ -4,6 +4,11 @@
45685  #ifndef __MT7915_REGS_H
45686  #define __MT7915_REGS_H
45688 +/* MCU WFDMA0 */
45689 +#define MT_MCU_WFDMA0_BASE             0x2000
45690 +#define MT_MCU_WFDMA0(ofs)             (MT_MCU_WFDMA0_BASE + (ofs))
45691 +#define MT_MCU_WFDMA0_DUMMY_CR         MT_MCU_WFDMA0(0x120)
45693  /* MCU WFDMA1 */
45694  #define MT_MCU_WFDMA1_BASE             0x3000
45695  #define MT_MCU_WFDMA1(ofs)             (MT_MCU_WFDMA1_BASE + (ofs))
45696 @@ -396,6 +401,14 @@
45697  #define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1      BIT(1)
45698  #define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO       BIT(2)
45700 +#define MT_TOP_RGU_BASE                                0xf0000
45701 +#define MT_TOP_PWR_CTRL                                (MT_TOP_RGU_BASE + (0x0))
45702 +#define MT_TOP_PWR_KEY                         (0x5746 << 16)
45703 +#define MT_TOP_PWR_SW_RST                      BIT(0)
45704 +#define MT_TOP_PWR_SW_PWR_ON                   GENMASK(3, 2)
45705 +#define MT_TOP_PWR_HW_CTRL                     BIT(4)
45706 +#define MT_TOP_PWR_PWR_ON                      BIT(7)
45708  #define MT_INFRA_CFG_BASE              0xf1000
45709  #define MT_INFRA(ofs)                  (MT_INFRA_CFG_BASE + (ofs))
45711 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
45712 index 0dc8e25e18e4..87a7ea12f3b3 100644
45713 --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
45714 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
45715 @@ -9,10 +9,13 @@ mt7921_fw_debug_set(void *data, u64 val)
45717         struct mt7921_dev *dev = data;
45719 -       dev->fw_debug = (u8)val;
45720 +       mt7921_mutex_acquire(dev);
45722 +       dev->fw_debug = (u8)val;
45723         mt7921_mcu_fw_log_2_host(dev, dev->fw_debug);
45725 +       mt7921_mutex_release(dev);
45727         return 0;
45730 @@ -44,14 +47,13 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy,
45731                 range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i));
45733         for (i = 0; i < ARRAY_SIZE(bound); i++)
45734 -               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
45735 +               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
45737         seq_printf(file, "\nPhy0\n");
45739         seq_printf(file, "Length: %8d | ", bound[0]);
45740         for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
45741 -               seq_printf(file, "%3d -%3d | ",
45742 -                          bound[i] + 1, bound[i + 1]);
45743 +               seq_printf(file, "%3d  %3d | ", bound[i] + 1, bound[i + 1]);
45745         seq_puts(file, "\nCount:  ");
45746         for (i = 0; i < ARRAY_SIZE(bound); i++)
45747 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
45748 index 3f9097481a5e..a6d2a25b3495 100644
45749 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
45750 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
45751 @@ -400,7 +400,9 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
45753         /* RXD Group 3 - P-RXV */
45754         if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
45755 -               u32 v0, v1, v2;
45756 +               u8 stbc, gi;
45757 +               u32 v0, v1;
45758 +               bool cck;
45760                 rxv = rxd;
45761                 rxd += 2;
45762 @@ -409,7 +411,6 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
45764                 v0 = le32_to_cpu(rxv[0]);
45765                 v1 = le32_to_cpu(rxv[1]);
45766 -               v2 = le32_to_cpu(rxv[2]);
45768                 if (v0 & MT_PRXV_HT_AD_CODE)
45769                         status->enc_flags |= RX_ENC_FLAG_LDPC;
45770 @@ -429,87 +430,87 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
45771                                              status->chain_signal[i]);
45772                 }
45774 -               /* RXD Group 5 - C-RXV */
45775 -               if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
45776 -                       u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
45777 -                       u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
45778 -                       bool cck = false;
45779 +               stbc = FIELD_GET(MT_PRXV_STBC, v0);
45780 +               gi = FIELD_GET(MT_PRXV_SGI, v0);
45781 +               cck = false;
45783 -                       rxd += 18;
45784 -                       if ((u8 *)rxd - skb->data >= skb->len)
45785 -                               return -EINVAL;
45786 +               idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
45787 +               mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
45789 -                       idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
45790 -                       mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
45792 -                       switch (mode) {
45793 -                       case MT_PHY_TYPE_CCK:
45794 -                               cck = true;
45795 -                               fallthrough;
45796 -                       case MT_PHY_TYPE_OFDM:
45797 -                               i = mt76_get_rate(&dev->mt76, sband, i, cck);
45798 -                               break;
45799 -                       case MT_PHY_TYPE_HT_GF:
45800 -                       case MT_PHY_TYPE_HT:
45801 -                               status->encoding = RX_ENC_HT;
45802 -                               if (i > 31)
45803 -                                       return -EINVAL;
45804 -                               break;
45805 -                       case MT_PHY_TYPE_VHT:
45806 -                               status->nss =
45807 -                                       FIELD_GET(MT_PRXV_NSTS, v0) + 1;
45808 -                               status->encoding = RX_ENC_VHT;
45809 -                               if (i > 9)
45810 -                                       return -EINVAL;
45811 -                               break;
45812 -                       case MT_PHY_TYPE_HE_MU:
45813 -                               status->flag |= RX_FLAG_RADIOTAP_HE_MU;
45814 -                               fallthrough;
45815 -                       case MT_PHY_TYPE_HE_SU:
45816 -                       case MT_PHY_TYPE_HE_EXT_SU:
45817 -                       case MT_PHY_TYPE_HE_TB:
45818 -                               status->nss =
45819 -                                       FIELD_GET(MT_PRXV_NSTS, v0) + 1;
45820 -                               status->encoding = RX_ENC_HE;
45821 -                               status->flag |= RX_FLAG_RADIOTAP_HE;
45822 -                               i &= GENMASK(3, 0);
45824 -                               if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
45825 -                                       status->he_gi = gi;
45827 -                               status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
45828 -                               break;
45829 -                       default:
45830 +               switch (mode) {
45831 +               case MT_PHY_TYPE_CCK:
45832 +                       cck = true;
45833 +                       fallthrough;
45834 +               case MT_PHY_TYPE_OFDM:
45835 +                       i = mt76_get_rate(&dev->mt76, sband, i, cck);
45836 +                       break;
45837 +               case MT_PHY_TYPE_HT_GF:
45838 +               case MT_PHY_TYPE_HT:
45839 +                       status->encoding = RX_ENC_HT;
45840 +                       if (i > 31)
45841                                 return -EINVAL;
45842 -                       }
45843 -                       status->rate_idx = i;
45845 -                       switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
45846 -                       case IEEE80211_STA_RX_BW_20:
45847 -                               break;
45848 -                       case IEEE80211_STA_RX_BW_40:
45849 -                               if (mode & MT_PHY_TYPE_HE_EXT_SU &&
45850 -                                   (idx & MT_PRXV_TX_ER_SU_106T)) {
45851 -                                       status->bw = RATE_INFO_BW_HE_RU;
45852 -                                       status->he_ru =
45853 -                                               NL80211_RATE_INFO_HE_RU_ALLOC_106;
45854 -                               } else {
45855 -                                       status->bw = RATE_INFO_BW_40;
45856 -                               }
45857 -                               break;
45858 -                       case IEEE80211_STA_RX_BW_80:
45859 -                               status->bw = RATE_INFO_BW_80;
45860 -                               break;
45861 -                       case IEEE80211_STA_RX_BW_160:
45862 -                               status->bw = RATE_INFO_BW_160;
45863 -                               break;
45864 -                       default:
45865 +                       break;
45866 +               case MT_PHY_TYPE_VHT:
45867 +                       status->nss =
45868 +                               FIELD_GET(MT_PRXV_NSTS, v0) + 1;
45869 +                       status->encoding = RX_ENC_VHT;
45870 +                       if (i > 9)
45871                                 return -EINVAL;
45872 +                       break;
45873 +               case MT_PHY_TYPE_HE_MU:
45874 +                       status->flag |= RX_FLAG_RADIOTAP_HE_MU;
45875 +                       fallthrough;
45876 +               case MT_PHY_TYPE_HE_SU:
45877 +               case MT_PHY_TYPE_HE_EXT_SU:
45878 +               case MT_PHY_TYPE_HE_TB:
45879 +                       status->nss =
45880 +                               FIELD_GET(MT_PRXV_NSTS, v0) + 1;
45881 +                       status->encoding = RX_ENC_HE;
45882 +                       status->flag |= RX_FLAG_RADIOTAP_HE;
45883 +                       i &= GENMASK(3, 0);
45885 +                       if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
45886 +                               status->he_gi = gi;
45888 +                       status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
45889 +                       break;
45890 +               default:
45891 +                       return -EINVAL;
45892 +               }
45894 +               status->rate_idx = i;
45896 +               switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
45897 +               case IEEE80211_STA_RX_BW_20:
45898 +                       break;
45899 +               case IEEE80211_STA_RX_BW_40:
45900 +                       if (mode & MT_PHY_TYPE_HE_EXT_SU &&
45901 +                           (idx & MT_PRXV_TX_ER_SU_106T)) {
45902 +                               status->bw = RATE_INFO_BW_HE_RU;
45903 +                               status->he_ru =
45904 +                                       NL80211_RATE_INFO_HE_RU_ALLOC_106;
45905 +                       } else {
45906 +                               status->bw = RATE_INFO_BW_40;
45907                         }
45908 +                       break;
45909 +               case IEEE80211_STA_RX_BW_80:
45910 +                       status->bw = RATE_INFO_BW_80;
45911 +                       break;
45912 +               case IEEE80211_STA_RX_BW_160:
45913 +                       status->bw = RATE_INFO_BW_160;
45914 +                       break;
45915 +               default:
45916 +                       return -EINVAL;
45917 +               }
45919 -                       status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
45920 -                       if (mode < MT_PHY_TYPE_HE_SU && gi)
45921 -                               status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
45922 +               status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
45923 +               if (mode < MT_PHY_TYPE_HE_SU && gi)
45924 +                       status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
45926 +               if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
45927 +                       rxd += 18;
45928 +                       if ((u8 *)rxd - skb->data >= skb->len)
45929 +                               return -EINVAL;
45930                 }
45931         }
45933 @@ -1317,31 +1318,20 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
45934         struct mib_stats *mib = &phy->mib;
45935         int i, aggr0 = 0, aggr1;
45937 -       memset(mib, 0, sizeof(*mib));
45939 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(0),
45940 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
45941 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
45942 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
45943 +       mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
45944 +                                           MT_MIB_ACK_FAIL_COUNT_MASK);
45945 +       mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
45946 +                                          MT_MIB_BA_FAIL_COUNT_MASK);
45947 +       mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
45948 +                                      MT_MIB_RTS_COUNT_MASK);
45949 +       mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
45950 +                                              MT_MIB_RTS_FAIL_COUNT_MASK);
45952         for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
45953                 u32 val, val2;
45955 -               val = mt76_rr(dev, MT_MIB_MB_SDR1(0, i));
45957 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
45958 -               if (val2 > mib->ack_fail_cnt)
45959 -                       mib->ack_fail_cnt = val2;
45961 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
45962 -               if (val2 > mib->ba_miss_cnt)
45963 -                       mib->ba_miss_cnt = val2;
45965 -               val = mt76_rr(dev, MT_MIB_MB_SDR0(0, i));
45966 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
45967 -               if (val2 > mib->rts_retries_cnt) {
45968 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
45969 -                       mib->rts_retries_cnt = val2;
45970 -               }
45972                 val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
45973                 val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
45975 @@ -1503,8 +1493,10 @@ void mt7921_coredump_work(struct work_struct *work)
45976                         break;
45978                 skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
45979 -               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
45980 -                       break;
45981 +               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
45982 +                       dev_kfree_skb(skb);
45983 +                       continue;
45984 +               }
45986                 memcpy(data, skb->data, skb->len);
45987                 data += skb->len;
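
The coredump change above also fixes a leak: an oversized fragment is now freed and skipped so the queue keeps draining, where the old break left the skb held and abandoned the rest of the dump. The shape of that fix in standalone form, with hypothetical fragment sizes and buffer limit:

        #include <stdio.h>
        #include <stdlib.h>

        int main(void)
        {
                int sizes[] = { 100, 900, 50 };   /* fragment lengths */
                int used = 0, limit = 200;

                for (int i = 0; i < 3; i++) {
                        char *frag = malloc(sizes[i]);  /* stand-in for an skb */

                        if (used + sizes[i] > limit) {
                                free(frag);   /* drop the oversized fragment */
                                continue;     /* but keep draining the queue */
                        }
                        used += sizes[i];
                        free(frag);
                }
                printf("copied=%d bytes\n", used);  /* 150: 100 + 50 */
                return 0;
        }
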
45988 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
45989 index a0c1fa0f20e4..109c8849d106 100644
45990 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
45991 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
45992 @@ -97,18 +97,24 @@ enum rx_pkt_type {
45993  #define MT_RXD3_NORMAL_PF_MODE         BIT(29)
45994  #define MT_RXD3_NORMAL_PF_STS          GENMASK(31, 30)
45996 -/* P-RXV */
45997 +/* P-RXV DW0 */
45998  #define MT_PRXV_TX_RATE                        GENMASK(6, 0)
45999  #define MT_PRXV_TX_DCM                 BIT(4)
46000  #define MT_PRXV_TX_ER_SU_106T          BIT(5)
46001  #define MT_PRXV_NSTS                   GENMASK(9, 7)
46002  #define MT_PRXV_HT_AD_CODE             BIT(11)
46003 +#define MT_PRXV_FRAME_MODE             GENMASK(14, 12)
46004 +#define MT_PRXV_SGI                    GENMASK(16, 15)
46005 +#define MT_PRXV_STBC                   GENMASK(23, 22)
46006 +#define MT_PRXV_TX_MODE                        GENMASK(27, 24)
46007  #define MT_PRXV_HE_RU_ALLOC_L          GENMASK(31, 28)
46008 -#define MT_PRXV_HE_RU_ALLOC_H          GENMASK(3, 0)
46010 +/* P-RXV DW1 */
46011  #define MT_PRXV_RCPI3                  GENMASK(31, 24)
46012  #define MT_PRXV_RCPI2                  GENMASK(23, 16)
46013  #define MT_PRXV_RCPI1                  GENMASK(15, 8)
46014  #define MT_PRXV_RCPI0                  GENMASK(7, 0)
46015 +#define MT_PRXV_HE_RU_ALLOC_H          GENMASK(3, 0)
46017  /* C-RXV */
46018  #define MT_CRXV_HT_STBC                        GENMASK(1, 0)
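
All of the P-RXV field definitions above follow the kernel's GENMASK()/FIELD_GET() convention: a contiguous bit mask, and an extraction that divides by the mask's lowest set bit. Hypothetical userspace equivalents, for reference only:

        #include <stdint.h>
        #include <stdio.h>

        /* Contiguous bits h..l, like the kernel's GENMASK(). */
        #define GENMASK_U32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))
        /* Extract a field: mask, then shift down via the lowest set bit. */
        #define FIELD_GET_U32(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

        int main(void)
        {
                uint32_t v0 = 0x0a018000;              /* made-up P-RXV word */
                uint32_t sgi_mask = GENMASK_U32(16, 15);  /* MT_PRXV_SGI */

                printf("SGI field = %u\n", FIELD_GET_U32(sgi_mask, v0));
                return 0;
        }

This prints 3: bits 16..15 of the sample word are both set.
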
46019 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
46020 index 729f6c42cdde..ada943c7a950 100644
46021 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
46022 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
46023 @@ -348,6 +348,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
46024         if (vif == phy->monitor_vif)
46025                 phy->monitor_vif = NULL;
46027 +       mt7921_mutex_acquire(dev);
46028         mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
46030         if (dev->pm.enable) {
46031 @@ -360,7 +361,6 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
46033         rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
46035 -       mt7921_mutex_acquire(dev);
46036         dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
46037         phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
46038         mt7921_mutex_release(dev);
46039 @@ -413,7 +413,8 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
46040         struct mt7921_sta *msta = sta ? (struct mt7921_sta *)sta->drv_priv :
46041                                   &mvif->sta;
46042         struct mt76_wcid *wcid = &msta->wcid;
46043 -       int idx = key->keyidx;
46044 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
46045 +       int idx = key->keyidx, err = 0;
46047         /* The hardware does not support per-STA RX GTK, fall back
46048          * to software mode for these.
46049 @@ -429,6 +430,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
46050         switch (key->cipher) {
46051         case WLAN_CIPHER_SUITE_AES_CMAC:
46052                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
46053 +               wcid_keyidx = &wcid->hw_key_idx2;
46054                 break;
46055         case WLAN_CIPHER_SUITE_TKIP:
46056         case WLAN_CIPHER_SUITE_CCMP:
46057 @@ -443,16 +445,23 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
46058                 return -EOPNOTSUPP;
46059         }
46061 -       if (cmd == SET_KEY) {
46062 -               key->hw_key_idx = wcid->idx;
46063 -               wcid->hw_key_idx = idx;
46064 -       } else if (idx == wcid->hw_key_idx) {
46065 -               wcid->hw_key_idx = -1;
46066 -       }
46067 +       mt7921_mutex_acquire(dev);
46069 +       if (cmd == SET_KEY)
46070 +               *wcid_keyidx = idx;
46071 +       else if (idx == *wcid_keyidx)
46072 +               *wcid_keyidx = -1;
46073 +       else
46074 +               goto out;
46076         mt76_wcid_key_setup(&dev->mt76, wcid,
46077                             cmd == SET_KEY ? key : NULL);
46079 -       return mt7921_mcu_add_key(dev, vif, msta, key, cmd);
46080 +       err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
46081 +out:
46082 +       mt7921_mutex_release(dev);
46084 +       return err;
46087  static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
46088 @@ -587,6 +596,9 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
46089         if (changed & BSS_CHANGED_PS)
46090                 mt7921_mcu_uni_bss_ps(dev, vif);
46092 +       if (changed & BSS_CHANGED_ARP_FILTER)
46093 +               mt7921_mcu_update_arp_filter(hw, vif, info);
46095         mt7921_mutex_release(dev);
46098 @@ -814,11 +826,17 @@ mt7921_get_stats(struct ieee80211_hw *hw,
46099         struct mt7921_phy *phy = mt7921_hw_phy(hw);
46100         struct mib_stats *mib = &phy->mib;
46102 +       mt7921_mutex_acquire(phy->dev);
46104         stats->dot11RTSSuccessCount = mib->rts_cnt;
46105         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
46106         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
46107         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
46109 +       memset(mib, 0, sizeof(*mib));
46111 +       mt7921_mutex_release(phy->dev);
46113         return 0;
46116 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
46117 index b5cc72e7e81c..62afbad77596 100644
46118 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
46119 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
46120 @@ -1304,3 +1304,47 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
46121                 mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
46122         }
46125 +int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
46126 +                                struct ieee80211_vif *vif,
46127 +                                struct ieee80211_bss_conf *info)
46129 +       struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
46130 +       struct mt7921_dev *dev = mt7921_hw_dev(hw);
46131 +       struct sk_buff *skb;
46132 +       int i, len = min_t(int, info->arp_addr_cnt,
46133 +                          IEEE80211_BSS_ARP_ADDR_LIST_LEN);
46134 +       struct {
46135 +               struct {
46136 +                       u8 bss_idx;
46137 +                       u8 pad[3];
46138 +               } __packed hdr;
46139 +               struct mt76_connac_arpns_tlv arp;
46140 +       } req_hdr = {
46141 +               .hdr = {
46142 +                       .bss_idx = mvif->mt76.idx,
46143 +               },
46144 +               .arp = {
46145 +                       .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
46146 +                       .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
46147 +                       .ips_num = len,
46148 +                       .mode = 2,  /* update */
46149 +                       .option = 1,
46150 +               },
46151 +       };
46153 +       skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
46154 +                                sizeof(req_hdr) + len * sizeof(__be32));
46155 +       if (!skb)
46156 +               return -ENOMEM;
46158 +       skb_put_data(skb, &req_hdr, sizeof(req_hdr));
46159 +       for (i = 0; i < len; i++) {
46160 +               u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
46162 +               memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
46163 +       }
46165 +       return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD,
46166 +                                    true);
46168 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
46169 index 46e6aeec35ae..25a1a6acb6ba 100644
46170 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
46171 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
46172 @@ -102,11 +102,11 @@ struct mt7921_vif {
46173  };
46175  struct mib_stats {
46176 -       u16 ack_fail_cnt;
46177 -       u16 fcs_err_cnt;
46178 -       u16 rts_cnt;
46179 -       u16 rts_retries_cnt;
46180 -       u16 ba_miss_cnt;
46181 +       u32 ack_fail_cnt;
46182 +       u32 fcs_err_cnt;
46183 +       u32 rts_cnt;
46184 +       u32 rts_retries_cnt;
46185 +       u32 ba_miss_cnt;
46186  };
46188  struct mt7921_phy {
46189 @@ -339,4 +339,7 @@ int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
46190                                  bool enable);
46191  void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
46192  void mt7921_coredump_work(struct work_struct *work);
46193 +int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
46194 +                                struct ieee80211_vif *vif,
46195 +                                struct ieee80211_bss_conf *info);
46196  #endif
46197 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
46198 index 5570b4a50531..80f6f29892a4 100644
46199 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
46200 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
46201 @@ -137,7 +137,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
46203         mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
46205 -       mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
46206 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
46208         ret = devm_request_irq(mdev->dev, pdev->irq, mt7921_irq_handler,
46209                                IRQF_SHARED, KBUILD_MODNAME, dev);
46210 @@ -146,10 +146,12 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
46212         ret = mt7921_register_device(dev);
46213         if (ret)
46214 -               goto err_free_dev;
46215 +               goto err_free_irq;
46217         return 0;
46219 +err_free_irq:
46220 +       devm_free_irq(&pdev->dev, pdev->irq, dev);
46221  err_free_dev:
46222         mt76_free_device(&dev->mt76);
46223  err_free_pci_vec:
46224 @@ -193,7 +195,6 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
46225         mt76_for_each_q_rx(mdev, i) {
46226                 napi_disable(&mdev->napi[i]);
46227         }
46228 -       tasklet_kill(&dev->irq_tasklet);
46230         pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
46232 @@ -208,13 +209,16 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
46234         /* disable interrupt */
46235         mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
46236 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
46237 +       synchronize_irq(pdev->irq);
46238 +       tasklet_kill(&dev->irq_tasklet);
46240 -       pci_save_state(pdev);
46241 -       err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
46242 +       err = mt7921_mcu_fw_pmctrl(dev);
46243         if (err)
46244                 goto restore;
46246 -       err = mt7921_mcu_drv_pmctrl(dev);
46247 +       pci_save_state(pdev);
46248 +       err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
46249         if (err)
46250                 goto restore;
46252 @@ -237,18 +241,18 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
46253         struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
46254         int i, err;
46256 -       err = mt7921_mcu_fw_pmctrl(dev);
46257 -       if (err < 0)
46258 -               return err;
46260         err = pci_set_power_state(pdev, PCI_D0);
46261         if (err)
46262                 return err;
46264         pci_restore_state(pdev);
46266 +       err = mt7921_mcu_drv_pmctrl(dev);
46267 +       if (err < 0)
46268 +               return err;
46270         /* enable interrupt */
46271 -       mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
46272 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
46273         mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
46274                           MT_INT_MCU_CMD);
46276 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
46277 index 6dad7f6ab09d..73878d3e2495 100644
46278 --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
46279 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
46280 @@ -96,8 +96,8 @@
46281  #define MT_WF_MIB_BASE(_band)          ((_band) ? 0xa4800 : 0x24800)
46282  #define MT_WF_MIB(_band, ofs)          (MT_WF_MIB_BASE(_band) + (ofs))
46284 -#define MT_MIB_SDR3(_band)             MT_WF_MIB(_band, 0x014)
46285 -#define MT_MIB_SDR3_FCS_ERR_MASK       GENMASK(15, 0)
46286 +#define MT_MIB_SDR3(_band)             MT_WF_MIB(_band, 0x698)
46287 +#define MT_MIB_SDR3_FCS_ERR_MASK       GENMASK(31, 16)
46289  #define MT_MIB_SDR9(_band)             MT_WF_MIB(_band, 0x02c)
46290  #define MT_MIB_SDR9_BUSY_MASK          GENMASK(23, 0)
46291 @@ -121,16 +121,21 @@
46292  #define MT_MIB_RTS_RETRIES_COUNT_MASK  GENMASK(31, 16)
46293  #define MT_MIB_RTS_COUNT_MASK          GENMASK(15, 0)
46295 -#define MT_MIB_MB_SDR1(_band, n)       MT_WF_MIB(_band, 0x104 + ((n) << 4))
46296 -#define MT_MIB_BA_MISS_COUNT_MASK      GENMASK(15, 0)
46297 -#define MT_MIB_ACK_FAIL_COUNT_MASK     GENMASK(31, 16)
46298 +#define MT_MIB_MB_BSDR0(_band)         MT_WF_MIB(_band, 0x688)
46299 +#define MT_MIB_RTS_COUNT_MASK          GENMASK(15, 0)
46300 +#define MT_MIB_MB_BSDR1(_band)         MT_WF_MIB(_band, 0x690)
46301 +#define MT_MIB_RTS_FAIL_COUNT_MASK     GENMASK(15, 0)
46302 +#define MT_MIB_MB_BSDR2(_band)         MT_WF_MIB(_band, 0x518)
46303 +#define MT_MIB_BA_FAIL_COUNT_MASK      GENMASK(15, 0)
46304 +#define MT_MIB_MB_BSDR3(_band)         MT_WF_MIB(_band, 0x520)
46305 +#define MT_MIB_ACK_FAIL_COUNT_MASK     GENMASK(15, 0)
46307  #define MT_MIB_MB_SDR2(_band, n)       MT_WF_MIB(_band, 0x108 + ((n) << 4))
46308  #define MT_MIB_FRAME_RETRIES_COUNT_MASK        GENMASK(15, 0)
46310 -#define MT_TX_AGG_CNT(_band, n)                MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
46311 -#define MT_TX_AGG_CNT2(_band, n)       MT_WF_MIB(_band, 0x164 + ((n) << 2))
46312 -#define MT_MIB_ARNG(_band, n)          MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
46313 +#define MT_TX_AGG_CNT(_band, n)                MT_WF_MIB(_band, 0x7dc + ((n) << 2))
46314 +#define MT_TX_AGG_CNT2(_band, n)       MT_WF_MIB(_band, 0x7ec + ((n) << 2))
46315 +#define MT_MIB_ARNG(_band, n)          MT_WF_MIB(_band, 0x0b0 + ((n) << 2))
46316  #define MT_MIB_ARNCR_RANGE(val, n)     (((val) >> ((n) << 3)) & GENMASK(7, 0))
46318  #define MT_WTBLON_TOP_BASE             0x34000
46319 @@ -357,11 +362,11 @@
46320  #define MT_INFRA_CFG_BASE              0xfe000
46321  #define MT_INFRA(ofs)                  (MT_INFRA_CFG_BASE + (ofs))
46323 -#define MT_HIF_REMAP_L1                        MT_INFRA(0x260)
46324 +#define MT_HIF_REMAP_L1                        MT_INFRA(0x24c)
46325  #define MT_HIF_REMAP_L1_MASK           GENMASK(15, 0)
46326  #define MT_HIF_REMAP_L1_OFFSET         GENMASK(15, 0)
46327  #define MT_HIF_REMAP_L1_BASE           GENMASK(31, 16)
46328 -#define MT_HIF_REMAP_BASE_L1           0xe0000
46329 +#define MT_HIF_REMAP_BASE_L1           0x40000
46331  #define MT_SWDEF_BASE                  0x41f200
46332  #define MT_SWDEF(ofs)                  (MT_SWDEF_BASE + (ofs))
46333 @@ -384,7 +389,7 @@
46334  #define MT_HW_CHIPID                   0x70010200
46335  #define MT_HW_REV                      0x70010204
46337 -#define MT_PCIE_MAC_BASE               0x74030000
46338 +#define MT_PCIE_MAC_BASE               0x10000
46339  #define MT_PCIE_MAC(ofs)               (MT_PCIE_MAC_BASE + (ofs))
46340  #define MT_PCIE_MAC_INT_ENABLE         MT_PCIE_MAC(0x188)
46342 diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
46343 index 0b6facb17ff7..a18d2896ee1f 100644
46344 --- a/drivers/net/wireless/mediatek/mt76/sdio.c
46345 +++ b/drivers/net/wireless/mediatek/mt76/sdio.c
46346 @@ -256,6 +256,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
46348         q->entry[q->head].skb = tx_info.skb;
46349         q->entry[q->head].buf_sz = len;
46351 +       smp_wmb();
46353         q->head = (q->head + 1) % q->ndesc;
46354         q->queued++;
46356 diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
46357 index b8fe8adc43a3..451ed60c6296 100644
46358 --- a/drivers/net/wireless/mediatek/mt76/tx.c
46359 +++ b/drivers/net/wireless/mediatek/mt76/tx.c
46360 @@ -461,11 +461,11 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
46361         int ret = 0;
46363         while (1) {
46364 +               int n_frames = 0;
46366                 if (test_bit(MT76_STATE_PM, &phy->state) ||
46367 -                   test_bit(MT76_RESET, &phy->state)) {
46368 -                       ret = -EBUSY;
46369 -                       break;
46370 -               }
46371 +                   test_bit(MT76_RESET, &phy->state))
46372 +                       return -EBUSY;
46374                 if (dev->queue_ops->tx_cleanup &&
46375                     q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
46376 @@ -497,11 +497,16 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
46377                 }
46379                 if (!mt76_txq_stopped(q))
46380 -                       ret += mt76_txq_send_burst(phy, q, mtxq);
46381 +                       n_frames = mt76_txq_send_burst(phy, q, mtxq);
46383                 spin_unlock_bh(&q->lock);
46385                 ieee80211_return_txq(phy->hw, txq, false);
46387 +               if (unlikely(n_frames < 0))
46388 +                       return n_frames;
46390 +               ret += n_frames;
46391         }
46393         return ret;
46394 diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
46395 index c868582c5d22..aa3b64902cf9 100644
46396 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
46397 +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
46398 @@ -99,7 +99,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
46400         u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
46402 -       return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
46403 +       return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
46406  static void
46407 diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
46408 index 1b205e7d97a8..37f40039e4ca 100644
46409 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c
46410 +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
46411 @@ -575,7 +575,6 @@ static int wilc_mac_open(struct net_device *ndev)
46413         struct wilc_vif *vif = netdev_priv(ndev);
46414         struct wilc *wl = vif->wilc;
46415 -       unsigned char mac_add[ETH_ALEN] = {0};
46416         int ret = 0;
46417         struct mgmt_frame_regs mgmt_regs = {};
46419 @@ -598,9 +597,12 @@ static int wilc_mac_open(struct net_device *ndev)
46421         wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
46422                                 vif->idx);
46423 -       wilc_get_mac_address(vif, mac_add);
46424 -       netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
46425 -       ether_addr_copy(ndev->dev_addr, mac_add);
46427 +       if (is_valid_ether_addr(ndev->dev_addr))
46428 +               wilc_set_mac_address(vif, ndev->dev_addr);
46429 +       else
46430 +               wilc_get_mac_address(vif, ndev->dev_addr);
46431 +       netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr);
46433         if (!is_valid_ether_addr(ndev->dev_addr)) {
46434                 netdev_err(ndev, "Wrong MAC address\n");
46435 @@ -639,7 +641,14 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
46436         int srcu_idx;
46438         if (!is_valid_ether_addr(addr->sa_data))
46439 -               return -EINVAL;
46440 +               return -EADDRNOTAVAIL;
46442 +       if (!vif->mac_opened) {
46443 +               eth_commit_mac_addr_change(dev, p);
46444 +               return 0;
46445 +       }
46447 +       /* Verify MAC Address is not already in use: */
46449         srcu_idx = srcu_read_lock(&wilc->srcu);
46450         list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
46451 @@ -647,7 +656,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
46452                 if (ether_addr_equal(addr->sa_data, mac_addr)) {
46453                         if (vif != tmp_vif) {
46454                                 srcu_read_unlock(&wilc->srcu, srcu_idx);
46455 -                               return -EINVAL;
46456 +                               return -EADDRNOTAVAIL;
46457                         }
46458                         srcu_read_unlock(&wilc->srcu, srcu_idx);
46459                         return 0;
46460 @@ -659,9 +668,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
46461         if (result)
46462                 return result;
46464 -       ether_addr_copy(vif->bssid, addr->sa_data);
46465 -       ether_addr_copy(vif->ndev->dev_addr, addr->sa_data);
46467 +       eth_commit_mac_addr_change(dev, p);
46468         return result;
46471 diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
46472 index 351ff909ab1c..e14b9fc2c67a 100644
46473 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c
46474 +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
46475 @@ -947,7 +947,7 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
46476                         for (i = 0; (i < 3) && (nint > 0); i++, nint--)
46477                                 reg |= BIT(i);
46479 -                       ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
46480 +                       ret = wilc_sdio_write_reg(wilc, WILC_INTR2_ENABLE, reg);
46481                         if (ret) {
46482                                 dev_err(&func->dev,
46483                                         "Failed write reg (%08x)...\n",
46484 diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
46485 index c775c177933b..8dc80574d08d 100644
46486 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c
46487 +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
46488 @@ -570,8 +570,10 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
46489                 return 0;
46491         if (ev->ssid_len) {
46492 -               memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len);
46493 -               auth.ssid.ssid_len = ev->ssid_len;
46494 +               int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN);
46496 +               memcpy(auth.ssid.ssid, ev->ssid, len);
46497 +               auth.ssid.ssid_len = len;
46498         }
46500         auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite);
46501 diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
46502 index 6e8bd99e8911..1866f6c2acab 100644
46503 --- a/drivers/net/wireless/realtek/rtlwifi/base.c
46504 +++ b/drivers/net/wireless/realtek/rtlwifi/base.c
46505 @@ -440,9 +440,14 @@ static void rtl_watchdog_wq_callback(struct work_struct *work);
46506  static void rtl_fwevt_wq_callback(struct work_struct *work);
46507  static void rtl_c2hcmd_wq_callback(struct work_struct *work);
46509 -static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
46510 +static int _rtl_init_deferred_work(struct ieee80211_hw *hw)
46512         struct rtl_priv *rtlpriv = rtl_priv(hw);
46513 +       struct workqueue_struct *wq;
46515 +       wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
46516 +       if (!wq)
46517 +               return -ENOMEM;
46519         /* <1> timer */
46520         timer_setup(&rtlpriv->works.watchdog_timer,
46521 @@ -451,11 +456,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
46522                     rtl_easy_concurrent_retrytimer_callback, 0);
46523         /* <2> work queue */
46524         rtlpriv->works.hw = hw;
46525 -       rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
46526 -       if (unlikely(!rtlpriv->works.rtl_wq)) {
46527 -               pr_err("Failed to allocate work queue\n");
46528 -               return;
46529 -       }
46530 +       rtlpriv->works.rtl_wq = wq;
46532         INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
46533                           rtl_watchdog_wq_callback);
46534 @@ -466,6 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
46535                           rtl_swlps_rfon_wq_callback);
46536         INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, rtl_fwevt_wq_callback);
46537         INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, rtl_c2hcmd_wq_callback);
46538 +       return 0;
46541  void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
46542 @@ -565,9 +567,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
46543         rtlmac->link_state = MAC80211_NOLINK;
46545         /* <6> init deferred work */
46546 -       _rtl_init_deferred_work(hw);
46548 -       return 0;
46549 +       return _rtl_init_deferred_work(hw);
46551  EXPORT_SYMBOL_GPL(rtl_init_core);
46553 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
46554 index 27c8a5d96520..fcaaf664cbec 100644
46555 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
46556 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
46557 @@ -249,7 +249,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
46558         0x824, 0x00030FE0,
46559         0x828, 0x00000000,
46560         0x82C, 0x002081DD,
46561 -       0x830, 0x2AAA8E24,
46562 +       0x830, 0x2AAAEEC8,
46563         0x834, 0x0037A706,
46564         0x838, 0x06489B44,
46565         0x83C, 0x0000095B,
46566 @@ -324,10 +324,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
46567         0x9D8, 0x00000000,
46568         0x9DC, 0x00000000,
46569         0x9E0, 0x00005D00,
46570 -       0x9E4, 0x00000002,
46571 +       0x9E4, 0x00000003,
46572         0x9E8, 0x00000001,
46573         0xA00, 0x00D047C8,
46574 -       0xA04, 0x01FF000C,
46575 +       0xA04, 0x01FF800C,
46576         0xA08, 0x8C8A8300,
46577         0xA0C, 0x2E68000F,
46578         0xA10, 0x9500BB78,
46579 @@ -1320,7 +1320,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46580                 0x083, 0x00021800,
46581                 0x084, 0x00028000,
46582                 0x085, 0x00048000,
46583 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46584 +               0x086, 0x0009483A,
46585 +       0xA0000000,     0x00000000,
46586                 0x086, 0x00094838,
46587 +       0xB0000000,     0x00000000,
46588                 0x087, 0x00044980,
46589                 0x088, 0x00048000,
46590                 0x089, 0x0000D480,
46591 @@ -1409,36 +1413,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46592                 0x03C, 0x000CA000,
46593                 0x0EF, 0x00000000,
46594                 0x0EF, 0x00001100,
46595 -       0xFF0F0104, 0xABCD,
46596 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46597                 0x034, 0x0004ADF3,
46598                 0x034, 0x00049DF0,
46599 -       0xFF0F0204, 0xCDEF,
46600 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46601                 0x034, 0x0004ADF3,
46602                 0x034, 0x00049DF0,
46603 -       0xFF0F0404, 0xCDEF,
46604 -               0x034, 0x0004ADF3,
46605 -               0x034, 0x00049DF0,
46606 -       0xFF0F0200, 0xCDEF,
46607 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
46608                 0x034, 0x0004ADF5,
46609                 0x034, 0x00049DF2,
46610 -       0xFF0F02C0, 0xCDEF,
46611 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46612 +               0x034, 0x0004A0F3,
46613 +               0x034, 0x000490B1,
46614 +               0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46615                 0x034, 0x0004A0F3,
46616                 0x034, 0x000490B1,
46617 -       0xCDCDCDCD, 0xCDCD,
46618 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
46619 +               0x034, 0x0004ADF5,
46620 +               0x034, 0x00049DF2,
46621 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46622 +               0x034, 0x0004ADF3,
46623 +               0x034, 0x00049DF0,
46624 +       0xA0000000,     0x00000000,
46625                 0x034, 0x0004ADF7,
46626                 0x034, 0x00049DF3,
46627 -       0xFF0F0104, 0xDEAD,
46628 -       0xFF0F0104, 0xABCD,
46629 -               0x034, 0x00048DED,
46630 -               0x034, 0x00047DEA,
46631 -               0x034, 0x00046DE7,
46632 -               0x034, 0x00045CE9,
46633 -               0x034, 0x00044CE6,
46634 -               0x034, 0x000438C6,
46635 -               0x034, 0x00042886,
46636 -               0x034, 0x00041486,
46637 -               0x034, 0x00040447,
46638 -       0xFF0F0204, 0xCDEF,
46639 +       0xB0000000,     0x00000000,
46640 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46641                 0x034, 0x00048DED,
46642                 0x034, 0x00047DEA,
46643                 0x034, 0x00046DE7,
46644 @@ -1448,7 +1448,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46645                 0x034, 0x00042886,
46646                 0x034, 0x00041486,
46647                 0x034, 0x00040447,
46648 -       0xFF0F0404, 0xCDEF,
46649 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46650                 0x034, 0x00048DED,
46651                 0x034, 0x00047DEA,
46652                 0x034, 0x00046DE7,
46653 @@ -1458,7 +1458,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46654                 0x034, 0x00042886,
46655                 0x034, 0x00041486,
46656                 0x034, 0x00040447,
46657 -       0xFF0F02C0, 0xCDEF,
46658 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46659 +               0x034, 0x000480AE,
46660 +               0x034, 0x000470AB,
46661 +               0x034, 0x0004608B,
46662 +               0x034, 0x00045069,
46663 +               0x034, 0x00044048,
46664 +               0x034, 0x00043045,
46665 +               0x034, 0x00042026,
46666 +               0x034, 0x00041023,
46667 +               0x034, 0x00040002,
46668 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46669                 0x034, 0x000480AE,
46670                 0x034, 0x000470AB,
46671                 0x034, 0x0004608B,
46672 @@ -1468,7 +1478,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46673                 0x034, 0x00042026,
46674                 0x034, 0x00041023,
46675                 0x034, 0x00040002,
46676 -       0xCDCDCDCD, 0xCDCD,
46677 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46678 +               0x034, 0x00048DED,
46679 +               0x034, 0x00047DEA,
46680 +               0x034, 0x00046DE7,
46681 +               0x034, 0x00045CE9,
46682 +               0x034, 0x00044CE6,
46683 +               0x034, 0x000438C6,
46684 +               0x034, 0x00042886,
46685 +               0x034, 0x00041486,
46686 +               0x034, 0x00040447,
46687 +       0xA0000000,     0x00000000,
46688                 0x034, 0x00048DEF,
46689                 0x034, 0x00047DEC,
46690                 0x034, 0x00046DE9,
46691 @@ -1478,38 +1498,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46692                 0x034, 0x0004248A,
46693                 0x034, 0x0004108D,
46694                 0x034, 0x0004008A,
46695 -       0xFF0F0104, 0xDEAD,
46696 -       0xFF0F0200, 0xABCD,
46697 +       0xB0000000,     0x00000000,
46698 +       0x80000210,     0x00000000,     0x40000000,     0x00000000,
46699                 0x034, 0x0002ADF4,
46700 -       0xFF0F02C0, 0xCDEF,
46701 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46702 +               0x034, 0x0002A0F3,
46703 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46704                 0x034, 0x0002A0F3,
46705 -       0xCDCDCDCD, 0xCDCD,
46706 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
46707 +               0x034, 0x0002ADF4,
46708 +       0xA0000000,     0x00000000,
46709                 0x034, 0x0002ADF7,
46710 -       0xFF0F0200, 0xDEAD,
46711 -       0xFF0F0104, 0xABCD,
46712 -               0x034, 0x00029DF4,
46713 -       0xFF0F0204, 0xCDEF,
46714 +       0xB0000000,     0x00000000,
46715 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46716                 0x034, 0x00029DF4,
46717 -       0xFF0F0404, 0xCDEF,
46718 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46719                 0x034, 0x00029DF4,
46720 -       0xFF0F0200, 0xCDEF,
46721 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
46722                 0x034, 0x00029DF1,
46723 -       0xFF0F02C0, 0xCDEF,
46724 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46725 +               0x034, 0x000290F0,
46726 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46727                 0x034, 0x000290F0,
46728 -       0xCDCDCDCD, 0xCDCD,
46729 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
46730 +               0x034, 0x00029DF1,
46731 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46732 +               0x034, 0x00029DF4,
46733 +       0xA0000000,     0x00000000,
46734                 0x034, 0x00029DF2,
46735 -       0xFF0F0104, 0xDEAD,
46736 -       0xFF0F0104, 0xABCD,
46737 -               0x034, 0x00028DF1,
46738 -               0x034, 0x00027DEE,
46739 -               0x034, 0x00026DEB,
46740 -               0x034, 0x00025CEC,
46741 -               0x034, 0x00024CE9,
46742 -               0x034, 0x000238CA,
46743 -               0x034, 0x00022889,
46744 -               0x034, 0x00021489,
46745 -               0x034, 0x0002044A,
46746 -       0xFF0F0204, 0xCDEF,
46747 +       0xB0000000,     0x00000000,
46748 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46749                 0x034, 0x00028DF1,
46750                 0x034, 0x00027DEE,
46751                 0x034, 0x00026DEB,
46752 @@ -1519,7 +1537,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46753                 0x034, 0x00022889,
46754                 0x034, 0x00021489,
46755                 0x034, 0x0002044A,
46756 -       0xFF0F0404, 0xCDEF,
46757 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46758                 0x034, 0x00028DF1,
46759                 0x034, 0x00027DEE,
46760                 0x034, 0x00026DEB,
46761 @@ -1529,7 +1547,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46762                 0x034, 0x00022889,
46763                 0x034, 0x00021489,
46764                 0x034, 0x0002044A,
46765 -       0xFF0F02C0, 0xCDEF,
46766 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46767                 0x034, 0x000280AF,
46768                 0x034, 0x000270AC,
46769                 0x034, 0x0002608B,
46770 @@ -1539,7 +1557,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46771                 0x034, 0x00022026,
46772                 0x034, 0x00021023,
46773                 0x034, 0x00020002,
46774 -       0xCDCDCDCD, 0xCDCD,
46775 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46776 +               0x034, 0x000280AF,
46777 +               0x034, 0x000270AC,
46778 +               0x034, 0x0002608B,
46779 +               0x034, 0x00025069,
46780 +               0x034, 0x00024048,
46781 +               0x034, 0x00023045,
46782 +               0x034, 0x00022026,
46783 +               0x034, 0x00021023,
46784 +               0x034, 0x00020002,
46785 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46786 +               0x034, 0x00028DF1,
46787 +               0x034, 0x00027DEE,
46788 +               0x034, 0x00026DEB,
46789 +               0x034, 0x00025CEC,
46790 +               0x034, 0x00024CE9,
46791 +               0x034, 0x000238CA,
46792 +               0x034, 0x00022889,
46793 +               0x034, 0x00021489,
46794 +               0x034, 0x0002044A,
46795 +       0xA0000000,     0x00000000,
46796                 0x034, 0x00028DEE,
46797                 0x034, 0x00027DEB,
46798                 0x034, 0x00026CCD,
46799 @@ -1549,27 +1587,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46800                 0x034, 0x00022849,
46801                 0x034, 0x00021449,
46802                 0x034, 0x0002004D,
46803 -       0xFF0F0104, 0xDEAD,
46804 -       0xFF0F02C0, 0xABCD,
46805 +       0xB0000000,     0x00000000,
46806 +       0x8000020c,     0x00000000,     0x40000000,     0x00000000,
46807 +               0x034, 0x0000A0D7,
46808 +               0x034, 0x000090D3,
46809 +               0x034, 0x000080B1,
46810 +               0x034, 0x000070AE,
46811 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46812                 0x034, 0x0000A0D7,
46813                 0x034, 0x000090D3,
46814                 0x034, 0x000080B1,
46815                 0x034, 0x000070AE,
46816 -       0xCDCDCDCD, 0xCDCD,
46817 +       0xA0000000,     0x00000000,
46818                 0x034, 0x0000ADF7,
46819                 0x034, 0x00009DF4,
46820                 0x034, 0x00008DF1,
46821                 0x034, 0x00007DEE,
46822 -       0xFF0F02C0, 0xDEAD,
46823 -       0xFF0F0104, 0xABCD,
46824 -               0x034, 0x00006DEB,
46825 -               0x034, 0x00005CEC,
46826 -               0x034, 0x00004CE9,
46827 -               0x034, 0x000038CA,
46828 -               0x034, 0x00002889,
46829 -               0x034, 0x00001489,
46830 -               0x034, 0x0000044A,
46831 -       0xFF0F0204, 0xCDEF,
46832 +       0xB0000000,     0x00000000,
46833 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46834                 0x034, 0x00006DEB,
46835                 0x034, 0x00005CEC,
46836                 0x034, 0x00004CE9,
46837 @@ -1577,7 +1612,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46838                 0x034, 0x00002889,
46839                 0x034, 0x00001489,
46840                 0x034, 0x0000044A,
46841 -       0xFF0F0404, 0xCDEF,
46842 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46843                 0x034, 0x00006DEB,
46844                 0x034, 0x00005CEC,
46845                 0x034, 0x00004CE9,
46846 @@ -1585,7 +1620,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46847                 0x034, 0x00002889,
46848                 0x034, 0x00001489,
46849                 0x034, 0x0000044A,
46850 -       0xFF0F02C0, 0xCDEF,
46851 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
46852                 0x034, 0x0000608D,
46853                 0x034, 0x0000506B,
46854                 0x034, 0x0000404A,
46855 @@ -1593,7 +1628,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46856                 0x034, 0x00002044,
46857                 0x034, 0x00001025,
46858                 0x034, 0x00000004,
46859 -       0xCDCDCDCD, 0xCDCD,
46860 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46861 +               0x034, 0x0000608D,
46862 +               0x034, 0x0000506B,
46863 +               0x034, 0x0000404A,
46864 +               0x034, 0x00003047,
46865 +               0x034, 0x00002044,
46866 +               0x034, 0x00001025,
46867 +               0x034, 0x00000004,
46868 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46869 +               0x034, 0x00006DEB,
46870 +               0x034, 0x00005CEC,
46871 +               0x034, 0x00004CE9,
46872 +               0x034, 0x000038CA,
46873 +               0x034, 0x00002889,
46874 +               0x034, 0x00001489,
46875 +               0x034, 0x0000044A,
46876 +       0xA0000000,     0x00000000,
46877                 0x034, 0x00006DCD,
46878                 0x034, 0x00005CCD,
46879                 0x034, 0x00004CCA,
46880 @@ -1601,11 +1652,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46881                 0x034, 0x00002888,
46882                 0x034, 0x00001488,
46883                 0x034, 0x00000486,
46884 -       0xFF0F0104, 0xDEAD,
46885 +       0xB0000000,     0x00000000,
46886                 0x0EF, 0x00000000,
46887                 0x018, 0x0001712A,
46888                 0x0EF, 0x00000040,
46889 -       0xFF0F0104, 0xABCD,
46890 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46891                 0x035, 0x00000187,
46892                 0x035, 0x00008187,
46893                 0x035, 0x00010187,
46894 @@ -1615,7 +1666,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46895                 0x035, 0x00040188,
46896                 0x035, 0x00048188,
46897                 0x035, 0x00050188,
46898 -       0xFF0F0204, 0xCDEF,
46899 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46900                 0x035, 0x00000187,
46901                 0x035, 0x00008187,
46902                 0x035, 0x00010187,
46903 @@ -1625,7 +1676,37 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46904                 0x035, 0x00040188,
46905                 0x035, 0x00048188,
46906                 0x035, 0x00050188,
46907 -       0xFF0F0404, 0xCDEF,
46908 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
46909 +               0x035, 0x00000128,
46910 +               0x035, 0x00008128,
46911 +               0x035, 0x00010128,
46912 +               0x035, 0x000201C8,
46913 +               0x035, 0x000281C8,
46914 +               0x035, 0x000301C8,
46915 +               0x035, 0x000401C8,
46916 +               0x035, 0x000481C8,
46917 +               0x035, 0x000501C8,
46918 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46919 +               0x035, 0x00000145,
46920 +               0x035, 0x00008145,
46921 +               0x035, 0x00010145,
46922 +               0x035, 0x00020196,
46923 +               0x035, 0x00028196,
46924 +               0x035, 0x00030196,
46925 +               0x035, 0x000401C7,
46926 +               0x035, 0x000481C7,
46927 +               0x035, 0x000501C7,
46928 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
46929 +               0x035, 0x00000128,
46930 +               0x035, 0x00008128,
46931 +               0x035, 0x00010128,
46932 +               0x035, 0x000201C8,
46933 +               0x035, 0x000281C8,
46934 +               0x035, 0x000301C8,
46935 +               0x035, 0x000401C8,
46936 +               0x035, 0x000481C8,
46937 +               0x035, 0x000501C8,
46938 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
46939                 0x035, 0x00000187,
46940                 0x035, 0x00008187,
46941                 0x035, 0x00010187,
46942 @@ -1635,7 +1716,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46943                 0x035, 0x00040188,
46944                 0x035, 0x00048188,
46945                 0x035, 0x00050188,
46946 -       0xCDCDCDCD, 0xCDCD,
46947 +       0xA0000000,     0x00000000,
46948                 0x035, 0x00000145,
46949                 0x035, 0x00008145,
46950                 0x035, 0x00010145,
46951 @@ -1645,11 +1726,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46952                 0x035, 0x000401C7,
46953                 0x035, 0x000481C7,
46954                 0x035, 0x000501C7,
46955 -       0xFF0F0104, 0xDEAD,
46956 +       0xB0000000,     0x00000000,
46957                 0x0EF, 0x00000000,
46958                 0x018, 0x0001712A,
46959                 0x0EF, 0x00000010,
46960 -       0xFF0F0104, 0xABCD,
46961 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
46962                 0x036, 0x00085733,
46963                 0x036, 0x0008D733,
46964                 0x036, 0x00095733,
46965 @@ -1662,7 +1743,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46966                 0x036, 0x000CE4B4,
46967                 0x036, 0x000D64B4,
46968                 0x036, 0x000DE4B4,
46969 -       0xFF0F0204, 0xCDEF,
46970 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
46971                 0x036, 0x00085733,
46972                 0x036, 0x0008D733,
46973                 0x036, 0x00095733,
46974 @@ -1675,7 +1756,46 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
46975                 0x036, 0x000CE4B4,
46976                 0x036, 0x000D64B4,
46977                 0x036, 0x000DE4B4,
46978 -       0xFF0F0404, 0xCDEF,
46979 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
46980 +               0x036, 0x000063B5,
46981 +               0x036, 0x0000E3B5,
46982 +               0x036, 0x000163B5,
46983 +               0x036, 0x0001E3B5,
46984 +               0x036, 0x000263B5,
46985 +               0x036, 0x0002E3B5,
46986 +               0x036, 0x000363B5,
46987 +               0x036, 0x0003E3B5,
46988 +               0x036, 0x000463B5,
46989 +               0x036, 0x0004E3B5,
46990 +               0x036, 0x000563B5,
46991 +               0x036, 0x0005E3B5,
46992 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
46993 +               0x036, 0x000056B3,
46994 +               0x036, 0x0000D6B3,
46995 +               0x036, 0x000156B3,
46996 +               0x036, 0x0001D6B3,
46997 +               0x036, 0x00026634,
46998 +               0x036, 0x0002E634,
46999 +               0x036, 0x00036634,
47000 +               0x036, 0x0003E634,
47001 +               0x036, 0x000467B4,
47002 +               0x036, 0x0004E7B4,
47003 +               0x036, 0x000567B4,
47004 +               0x036, 0x0005E7B4,
47005 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47006 +               0x036, 0x000063B5,
47007 +               0x036, 0x0000E3B5,
47008 +               0x036, 0x000163B5,
47009 +               0x036, 0x0001E3B5,
47010 +               0x036, 0x000263B5,
47011 +               0x036, 0x0002E3B5,
47012 +               0x036, 0x000363B5,
47013 +               0x036, 0x0003E3B5,
47014 +               0x036, 0x000463B5,
47015 +               0x036, 0x0004E3B5,
47016 +               0x036, 0x000563B5,
47017 +               0x036, 0x0005E3B5,
47018 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47019                 0x036, 0x00085733,
47020                 0x036, 0x0008D733,
47021                 0x036, 0x00095733,
47022 @@ -1688,7 +1808,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
47023                 0x036, 0x000CE4B4,
47024                 0x036, 0x000D64B4,
47025                 0x036, 0x000DE4B4,
47026 -       0xCDCDCDCD, 0xCDCD,
47027 +       0xA0000000,     0x00000000,
47028                 0x036, 0x000056B3,
47029                 0x036, 0x0000D6B3,
47030                 0x036, 0x000156B3,
47031 @@ -1701,103 +1821,162 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
47032                 0x036, 0x0004E7B4,
47033                 0x036, 0x000567B4,
47034                 0x036, 0x0005E7B4,
47035 -       0xFF0F0104, 0xDEAD,
47036 +       0xB0000000,     0x00000000,
47037                 0x0EF, 0x00000000,
47038                 0x0EF, 0x00000008,
47039 -       0xFF0F0104, 0xABCD,
47040 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47041                 0x03C, 0x000001C8,
47042                 0x03C, 0x00000492,
47043 -       0xFF0F0204, 0xCDEF,
47044 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47045                 0x03C, 0x000001C8,
47046                 0x03C, 0x00000492,
47047 -       0xFF0F0404, 0xCDEF,
47048 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47049 +               0x03C, 0x000001B6,
47050 +               0x03C, 0x00000492,
47051 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47052 +               0x03C, 0x0000022A,
47053 +               0x03C, 0x00000594,
47054 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47055 +               0x03C, 0x000001B6,
47056 +               0x03C, 0x00000492,
47057 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47058                 0x03C, 0x000001C8,
47059                 0x03C, 0x00000492,
47060 -       0xCDCDCDCD, 0xCDCD,
47061 +       0xA0000000,     0x00000000,
47062                 0x03C, 0x0000022A,
47063                 0x03C, 0x00000594,
47064 -       0xFF0F0104, 0xDEAD,
47065 -       0xFF0F0104, 0xABCD,
47066 +       0xB0000000,     0x00000000,
47067 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47068                 0x03C, 0x00000800,
47069 -       0xFF0F0204, 0xCDEF,
47070 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47071                 0x03C, 0x00000800,
47072 -       0xFF0F0404, 0xCDEF,
47073 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47074                 0x03C, 0x00000800,
47075 -       0xFF0F02C0, 0xCDEF,
47076 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
47077                 0x03C, 0x00000820,
47078 -       0xCDCDCDCD, 0xCDCD,
47079 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47080 +               0x03C, 0x00000820,
47081 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47082 +               0x03C, 0x00000800,
47083 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47084 +               0x03C, 0x00000800,
47085 +       0xA0000000,     0x00000000,
47086                 0x03C, 0x00000900,
47087 -       0xFF0F0104, 0xDEAD,
47088 +       0xB0000000,     0x00000000,
47089                 0x0EF, 0x00000000,
47090                 0x018, 0x0001712A,
47091                 0x0EF, 0x00000002,
47092 -       0xFF0F0104, 0xABCD,
47093 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47094                 0x008, 0x0004E400,
47095 -       0xFF0F0204, 0xCDEF,
47096 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47097                 0x008, 0x0004E400,
47098 -       0xFF0F0404, 0xCDEF,
47099 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47100 +               0x008, 0x00002000,
47101 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
47102 +               0x008, 0x00002000,
47103 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47104 +               0x008, 0x00002000,
47105 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47106 +               0x008, 0x00002000,
47107 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47108                 0x008, 0x0004E400,
47109 -       0xCDCDCDCD, 0xCDCD,
47110 +       0xA0000000,     0x00000000,
47111                 0x008, 0x00002000,
47112 -       0xFF0F0104, 0xDEAD,
47113 +       0xB0000000,     0x00000000,
47114                 0x0EF, 0x00000000,
47115                 0x0DF, 0x000000C0,
47116 -               0x01F, 0x00040064,
47117 -       0xFF0F0104, 0xABCD,
47118 +               0x01F, 0x00000064,
47119 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47120                 0x058, 0x000A7284,
47121                 0x059, 0x000600EC,
47122 -       0xFF0F0204, 0xCDEF,
47123 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47124                 0x058, 0x000A7284,
47125                 0x059, 0x000600EC,
47126 -       0xFF0F0404, 0xCDEF,
47127 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
47128 +               0x058, 0x00081184,
47129 +               0x059, 0x0006016C,
47130 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47131 +               0x058, 0x00081184,
47132 +               0x059, 0x0006016C,
47133 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47134 +               0x058, 0x00081184,
47135 +               0x059, 0x0006016C,
47136 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47137                 0x058, 0x000A7284,
47138                 0x059, 0x000600EC,
47139 -       0xCDCDCDCD, 0xCDCD,
47140 +       0xA0000000,     0x00000000,
47141                 0x058, 0x00081184,
47142                 0x059, 0x0006016C,
47143 -       0xFF0F0104, 0xDEAD,
47144 -       0xFF0F0104, 0xABCD,
47145 +       0xB0000000,     0x00000000,
47146 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47147                 0x061, 0x000E8D73,
47148                 0x062, 0x00093FC5,
47149 -       0xFF0F0204, 0xCDEF,
47150 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47151                 0x061, 0x000E8D73,
47152                 0x062, 0x00093FC5,
47153 -       0xFF0F0404, 0xCDEF,
47154 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47155 +               0x061, 0x000EFD83,
47156 +               0x062, 0x00093FCC,
47157 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47158 +               0x061, 0x000EAD53,
47159 +               0x062, 0x00093BC4,
47160 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47161 +               0x061, 0x000EFD83,
47162 +               0x062, 0x00093FCC,
47163 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47164                 0x061, 0x000E8D73,
47165                 0x062, 0x00093FC5,
47166 -       0xCDCDCDCD, 0xCDCD,
47167 +       0xA0000000,     0x00000000,
47168                 0x061, 0x000EAD53,
47169                 0x062, 0x00093BC4,
47170 -       0xFF0F0104, 0xDEAD,
47171 -       0xFF0F0104, 0xABCD,
47172 +       0xB0000000,     0x00000000,
47173 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47174                 0x063, 0x000110E9,
47175 -       0xFF0F0204, 0xCDEF,
47176 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47177                 0x063, 0x000110E9,
47178 -       0xFF0F0404, 0xCDEF,
47179 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47180 +               0x063, 0x000110EB,
47181 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
47182                 0x063, 0x000110E9,
47183 -       0xFF0F0200, 0xCDEF,
47184 -               0x063, 0x000710E9,
47185 -       0xFF0F02C0, 0xCDEF,
47186 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47187                 0x063, 0x000110E9,
47188 -       0xCDCDCDCD, 0xCDCD,
47189 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47190 +               0x063, 0x000110EB,
47191 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47192 +               0x063, 0x000110E9,
47193 +       0xA0000000,     0x00000000,
47194                 0x063, 0x000714E9,
47195 -       0xFF0F0104, 0xDEAD,
47196 -       0xFF0F0104, 0xABCD,
47197 +       0xB0000000,     0x00000000,
47198 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47199 +               0x064, 0x0001C27C,
47200 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47201 +               0x064, 0x0001C27C,
47202 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47203                 0x064, 0x0001C27C,
47204 -       0xFF0F0204, 0xCDEF,
47205 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47206 +               0x064, 0x0001C67C,
47207 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
47208                 0x064, 0x0001C27C,
47209 -       0xFF0F0404, 0xCDEF,
47210 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
47211                 0x064, 0x0001C27C,
47212 -       0xCDCDCDCD, 0xCDCD,
47213 +       0xA0000000,     0x00000000,
47214                 0x064, 0x0001C67C,
47215 -       0xFF0F0104, 0xDEAD,
47216 -       0xFF0F0200, 0xABCD,
47217 +       0xB0000000,     0x00000000,
47218 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
47219 +               0x065, 0x00091016,
47220 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
47221 +               0x065, 0x00091016,
47222 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
47223                 0x065, 0x00093016,
47224 -       0xFF0F02C0, 0xCDEF,
47225 +               0x9000020c,     0x00000000,     0x40000000,     0x00000000,
47226                 0x065, 0x00093015,
47227 -       0xCDCDCDCD, 0xCDCD,
47228 +               0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47229 +               0x065, 0x00093015,
47230 +               0x90000200,     0x00000000,     0x40000000,     0x00000000,
47231 +               0x065, 0x00093016,
47232 +               0xA0000000,     0x00000000,
47233                 0x065, 0x00091016,
47234 -       0xFF0F0200, 0xDEAD,
47235 +               0xB0000000,     0x00000000,
47236                 0x018, 0x00000006,
47237                 0x0EF, 0x00002000,
47238                 0x03B, 0x0003824B,
47239 @@ -1895,9 +2074,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
47240                 0x0B4, 0x0001214C,
47241                 0x0B7, 0x0003000C,
47242                 0x01C, 0x000539D2,
47243 +               0x0C4, 0x000AFE00,
47244                 0x018, 0x0001F12A,
47245 -               0x0FE, 0x00000000,
47246 -               0x0FE, 0x00000000,
47247 +               0xFFE, 0x00000000,
47248 +               0xFFE, 0x00000000,
47249                 0x018, 0x0001712A,
47251  };
47252 @@ -2017,6 +2197,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
47253  u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
47255  u32 RTL8821AE_MAC_REG_ARRAY[] = {
47256 +               0x421, 0x0000000F,
47257                 0x428, 0x0000000A,
47258                 0x429, 0x00000010,
47259                 0x430, 0x00000000,
47260 @@ -2485,7 +2666,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
47261                 0x81C, 0xA6360001,
47262                 0x81C, 0xA5380001,
47263                 0x81C, 0xA43A0001,
47264 -               0x81C, 0xA33C0001,
47265 +               0x81C, 0x683C0001,
47266                 0x81C, 0x673E0001,
47267                 0x81C, 0x66400001,
47268                 0x81C, 0x65420001,
47269 @@ -2519,7 +2700,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
47270                 0x81C, 0x017A0001,
47271                 0x81C, 0x017C0001,
47272                 0x81C, 0x017E0001,
47273 -       0xFF0F02C0, 0xABCD,
47274 +       0x8000020c,     0x00000000,     0x40000000,     0x00000000,
47275 +               0x81C, 0xFB000101,
47276 +               0x81C, 0xFA020101,
47277 +               0x81C, 0xF9040101,
47278 +               0x81C, 0xF8060101,
47279 +               0x81C, 0xF7080101,
47280 +               0x81C, 0xF60A0101,
47281 +               0x81C, 0xF50C0101,
47282 +               0x81C, 0xF40E0101,
47283 +               0x81C, 0xF3100101,
47284 +               0x81C, 0xF2120101,
47285 +               0x81C, 0xF1140101,
47286 +               0x81C, 0xF0160101,
47287 +               0x81C, 0xEF180101,
47288 +               0x81C, 0xEE1A0101,
47289 +               0x81C, 0xED1C0101,
47290 +               0x81C, 0xEC1E0101,
47291 +               0x81C, 0xEB200101,
47292 +               0x81C, 0xEA220101,
47293 +               0x81C, 0xE9240101,
47294 +               0x81C, 0xE8260101,
47295 +               0x81C, 0xE7280101,
47296 +               0x81C, 0xE62A0101,
47297 +               0x81C, 0xE52C0101,
47298 +               0x81C, 0xE42E0101,
47299 +               0x81C, 0xE3300101,
47300 +               0x81C, 0xA5320101,
47301 +               0x81C, 0xA4340101,
47302 +               0x81C, 0xA3360101,
47303 +               0x81C, 0x87380101,
47304 +               0x81C, 0x863A0101,
47305 +               0x81C, 0x853C0101,
47306 +               0x81C, 0x843E0101,
47307 +               0x81C, 0x69400101,
47308 +               0x81C, 0x68420101,
47309 +               0x81C, 0x67440101,
47310 +               0x81C, 0x66460101,
47311 +               0x81C, 0x49480101,
47312 +               0x81C, 0x484A0101,
47313 +               0x81C, 0x474C0101,
47314 +               0x81C, 0x2A4E0101,
47315 +               0x81C, 0x29500101,
47316 +               0x81C, 0x28520101,
47317 +               0x81C, 0x27540101,
47318 +               0x81C, 0x26560101,
47319 +               0x81C, 0x25580101,
47320 +               0x81C, 0x245A0101,
47321 +               0x81C, 0x235C0101,
47322 +               0x81C, 0x055E0101,
47323 +               0x81C, 0x04600101,
47324 +               0x81C, 0x03620101,
47325 +               0x81C, 0x02640101,
47326 +               0x81C, 0x01660101,
47327 +               0x81C, 0x01680101,
47328 +               0x81C, 0x016A0101,
47329 +               0x81C, 0x016C0101,
47330 +               0x81C, 0x016E0101,
47331 +               0x81C, 0x01700101,
47332 +               0x81C, 0x01720101,
47333 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
47334                 0x81C, 0xFB000101,
47335                 0x81C, 0xFA020101,
47336                 0x81C, 0xF9040101,
47337 @@ -2578,7 +2818,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
47338                 0x81C, 0x016E0101,
47339                 0x81C, 0x01700101,
47340                 0x81C, 0x01720101,
47341 -       0xCDCDCDCD, 0xCDCD,
47342 +       0xA0000000,     0x00000000,
47343                 0x81C, 0xFF000101,
47344                 0x81C, 0xFF020101,
47345                 0x81C, 0xFE040101,
47346 @@ -2637,7 +2877,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
47347                 0x81C, 0x046E0101,
47348                 0x81C, 0x03700101,
47349                 0x81C, 0x02720101,
47350 -       0xFF0F02C0, 0xDEAD,
47351 +       0xB0000000,     0x00000000,
47352                 0x81C, 0x01740101,
47353                 0x81C, 0x01760101,
47354                 0x81C, 0x01780101,
47355 diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
47356 index 948cb79050ea..e7d51ac9b689 100644
47357 --- a/drivers/net/wireless/realtek/rtw88/debug.c
47358 +++ b/drivers/net/wireless/realtek/rtw88/debug.c
47359 @@ -270,7 +270,7 @@ static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
47361         if (num != 2) {
47362                 rtw_warn(rtwdev, "invalid arguments\n");
47363 -               return num;
47364 +               return -EINVAL;
47365         }
47367         debugfs_priv->rsvd_page.page_offset = offset;
47368 diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
47369 index 35afea91fd29..92b9cf1f9525 100644
47370 --- a/drivers/net/wireless/realtek/rtw88/main.h
47371 +++ b/drivers/net/wireless/realtek/rtw88/main.h
47372 @@ -1166,6 +1166,7 @@ struct rtw_chip_info {
47373         bool en_dis_dpd;
47374         u16 dpd_ratemask;
47375         u8 iqk_threshold;
47376 +       u8 lck_threshold;
47377         const struct rtw_pwr_track_tbl *pwr_track_tbl;
47379         u8 bfer_su_max_num;
47380 @@ -1534,6 +1535,7 @@ struct rtw_dm_info {
47381         u32 rrsr_mask_min;
47382         u8 thermal_avg[RTW_RF_PATH_MAX];
47383         u8 thermal_meter_k;
47384 +       u8 thermal_meter_lck;
47385         s8 delta_power_index[RTW_RF_PATH_MAX];
47386         s8 delta_power_index_last[RTW_RF_PATH_MAX];
47387         u8 default_ofdm_index;
47388 diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
47389 index 786a48649946..6b5c885798a4 100644
47390 --- a/drivers/net/wireless/realtek/rtw88/pci.c
47391 +++ b/drivers/net/wireless/realtek/rtw88/pci.c
47392 @@ -581,23 +581,30 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
47394         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
47396 +       rtw_pci_napi_start(rtwdev);
47398         spin_lock_bh(&rtwpci->irq_lock);
47399 +       rtwpci->running = true;
47400         rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
47401         spin_unlock_bh(&rtwpci->irq_lock);
47403 -       rtw_pci_napi_start(rtwdev);
47405         return 0;
47408  static void rtw_pci_stop(struct rtw_dev *rtwdev)
47410         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
47411 +       struct pci_dev *pdev = rtwpci->pdev;
47413 +       spin_lock_bh(&rtwpci->irq_lock);
47414 +       rtwpci->running = false;
47415 +       rtw_pci_disable_interrupt(rtwdev, rtwpci);
47416 +       spin_unlock_bh(&rtwpci->irq_lock);
47418 +       synchronize_irq(pdev->irq);
47419         rtw_pci_napi_stop(rtwdev);
47421         spin_lock_bh(&rtwpci->irq_lock);
47422 -       rtw_pci_disable_interrupt(rtwdev, rtwpci);
47423         rtw_pci_dma_release(rtwdev, rtwpci);
47424         spin_unlock_bh(&rtwpci->irq_lock);
47426 @@ -1138,7 +1145,8 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
47427                 rtw_fw_c2h_cmd_isr(rtwdev);
47429         /* all of the jobs for this interrupt have been done */
47430 -       rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
47431 +       if (rtwpci->running)
47432 +               rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
47433         spin_unlock_bh(&rtwpci->irq_lock);
47435         return IRQ_HANDLED;
47436 @@ -1558,7 +1566,8 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
47437         if (work_done < budget) {
47438                 napi_complete_done(napi, work_done);
47439                 spin_lock_bh(&rtwpci->irq_lock);
47440 -               rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
47441 +               if (rtwpci->running)
47442 +                       rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
47443                 spin_unlock_bh(&rtwpci->irq_lock);
47444                 /* When ISR happens during polling and before napi_complete
47445                  * while no further data is received. Data on the dma_ring will
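The rtw88 PCI hunks above close a shutdown race: rtw_pci_stop() now clears a new `running` flag and disables interrupts under irq_lock, waits out any in-flight handler with synchronize_irq(), and every site that re-arms interrupts checks `running` first. A minimal sketch of the pattern, using hypothetical helper names rather than the driver's own:

	/* stop side: forbid re-arming, then drain the handler */
	spin_lock_bh(&dev->irq_lock);
	dev->running = false;
	my_disable_interrupts(dev);		/* hypothetical */
	spin_unlock_bh(&dev->irq_lock);
	synchronize_irq(dev->irq);

	/* ISR thread / NAPI side: only re-arm while still running */
	spin_lock_bh(&dev->irq_lock);
	if (dev->running)
		my_enable_interrupts(dev);	/* hypothetical */
	spin_unlock_bh(&dev->irq_lock);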
47446 diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
47447 index e76fc549a788..0ffae887527a 100644
47448 --- a/drivers/net/wireless/realtek/rtw88/pci.h
47449 +++ b/drivers/net/wireless/realtek/rtw88/pci.h
47450 @@ -211,6 +211,7 @@ struct rtw_pci {
47451         spinlock_t irq_lock;
47452         u32 irq_mask[4];
47453         bool irq_enabled;
47454 +       bool running;
47456         /* napi structure */
47457         struct net_device netdev;
47458 diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
47459 index e114ddecac09..21e77fcfa4d5 100644
47460 --- a/drivers/net/wireless/realtek/rtw88/phy.c
47461 +++ b/drivers/net/wireless/realtek/rtw88/phy.c
47462 @@ -1584,7 +1584,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
47464  EXPORT_SYMBOL(rtw_phy_load_tables);
47466 -static u8 rtw_get_channel_group(u8 channel)
47467 +static u8 rtw_get_channel_group(u8 channel, u8 rate)
47469         switch (channel) {
47470         default:
47471 @@ -1628,6 +1628,7 @@ static u8 rtw_get_channel_group(u8 channel)
47472         case 106:
47473                 return 4;
47474         case 14:
47475 +               return rate <= DESC_RATE11M ? 5 : 4;
47476         case 108:
47477         case 110:
47478         case 112:
47479 @@ -1879,7 +1880,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
47480         s8 *remnant = &pwr_param->pwr_remnant;
47482         pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
47483 -       group = rtw_get_channel_group(ch);
47484 +       group = rtw_get_channel_group(ch, rate);
47486         /* base power index for 2.4G/5G */
47487         if (IS_CH_2G_BAND(ch)) {
47488 @@ -2219,6 +2220,20 @@ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
47490  EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx);
47492 +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev)
47494 +       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
47495 +       u8 delta_lck;
47497 +       delta_lck = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_lck);
47498 +       if (delta_lck >= rtwdev->chip->lck_threshold) {
47499 +               dm_info->thermal_meter_lck = dm_info->thermal_avg[0];
47500 +               return true;
47501 +       }
47502 +       return false;
47504 +EXPORT_SYMBOL(rtw_phy_pwrtrack_need_lck);
47506  bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
47508         struct rtw_dm_info *dm_info = &rtwdev->dm_info;
47509 diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
47510 index a4fcfb878550..a0742a69446d 100644
47511 --- a/drivers/net/wireless/realtek/rtw88/phy.h
47512 +++ b/drivers/net/wireless/realtek/rtw88/phy.h
47513 @@ -55,6 +55,7 @@ u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path);
47514  s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
47515                                struct rtw_swing_table *swing_table,
47516                                u8 tbl_path, u8 therm_path, u8 delta);
47517 +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev);
47518  bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
47519  void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
47520                                 struct rtw_swing_table *swing_table);
47521 diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
47522 index ea518aa78552..819af34dac34 100644
47523 --- a/drivers/net/wireless/realtek/rtw88/reg.h
47524 +++ b/drivers/net/wireless/realtek/rtw88/reg.h
47525 @@ -652,8 +652,13 @@
47526  #define RF_TXATANK     0x64
47527  #define RF_TRXIQ       0x66
47528  #define RF_RXIQGEN     0x8d
47529 +#define RF_SYN_PFD     0xb0
47530  #define RF_XTALX2      0xb8
47531 +#define RF_SYN_CTRL    0xbb
47532  #define RF_MALSEL      0xbe
47533 +#define RF_SYN_AAC     0xc9
47534 +#define RF_AAC_CTRL    0xca
47535 +#define RF_FAST_LCK    0xcc
47536  #define RF_RCKD                0xde
47537  #define RF_TXADBG      0xde
47538  #define RF_LUTDBG      0xdf
47539 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
47540 index dd560c28abb2..448922cb2e63 100644
47541 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
47542 +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
47543 @@ -1126,6 +1126,7 @@ static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
47545         dm_info->pwr_trk_triggered = false;
47546         dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
47547 +       dm_info->thermal_meter_lck = rtwdev->efuse.thermal_meter_k;
47550  static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
47551 @@ -2108,6 +2109,26 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
47552         rtw_write32_set(rtwdev, REG_RX_BREAK, BIT_COM_RX_GCK_EN);
47555 +static void rtw8822c_do_lck(struct rtw_dev *rtwdev)
47557 +       u32 val;
47559 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_CTRL, RFREG_MASK, 0x80010);
47560 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0FA);
47561 +       fsleep(1);
47562 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_AAC_CTRL, RFREG_MASK, 0x80000);
47563 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_AAC, RFREG_MASK, 0x80001);
47564 +       read_poll_timeout(rtw_read_rf, val, val != 0x1, 1000, 100000,
47565 +                         true, rtwdev, RF_PATH_A, RF_AAC_CTRL, 0x1000);
47566 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0F8);
47567 +       rtw_write_rf(rtwdev, RF_PATH_B, RF_SYN_CTRL, RFREG_MASK, 0x80010);
47569 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
47570 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x4f000);
47571 +       fsleep(1);
47572 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
47575  static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
47577         struct rtw_iqk_para para = {0};
47578 @@ -3538,11 +3559,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
47580         rtw_phy_config_swing_table(rtwdev, &swing_table);
47582 +       if (rtw_phy_pwrtrack_need_lck(rtwdev))
47583 +               rtw8822c_do_lck(rtwdev);
47585         for (i = 0; i < rtwdev->hal.rf_path_num; i++)
47586                 rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
47588 -       if (rtw_phy_pwrtrack_need_iqk(rtwdev))
47589 -               rtw8822c_do_iqk(rtwdev);
47592  static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
47593 @@ -4351,6 +4373,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
47594         .dpd_ratemask = DIS_DPD_RATEALL,
47595         .pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl,
47596         .iqk_threshold = 8,
47597 +       .lck_threshold = 8,
47598         .bfer_su_max_num = 2,
47599         .bfer_mu_max_num = 1,
47600         .rx_ldpc = true,
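The new rtw8822c_do_lck() above waits for the AAC calibration to settle with read_poll_timeout() from <linux/iopoll.h>, which re-reads a register until a condition holds or a timeout expires. A hedged usage sketch; the macro is the real API, every other name here is illustrative:

	u32 val;
	int err;

	/* call my_reg_read(dev, REG_STATUS) every 1000 us for up to
	 * 100000 us; "true" sleeps once before the first read, as in
	 * the do_lck call above
	 */
	err = read_poll_timeout(my_reg_read, val, val != 0x1,
				1000, 100000, true, dev, REG_STATUS);
	if (err)
		pr_warn("calibration poll timed out\n");	/* -ETIMEDOUT */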
47601 diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
47602 index fe0287b22a25..e0c502bc4270 100644
47603 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
47604 +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
47605 @@ -1513,7 +1513,7 @@ static int rsi_restore(struct device *dev)
47607  static const struct dev_pm_ops rsi_pm_ops = {
47608         .suspend = rsi_suspend,
47609 -       .resume = rsi_resume,
47610 +       .resume_noirq = rsi_resume,
47611         .freeze = rsi_freeze,
47612         .thaw = rsi_thaw,
47613         .restore = rsi_restore,
47614 diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
47615 index e14d88e558f0..85abd0a2d1c9 100644
47616 --- a/drivers/net/wireless/ti/wlcore/boot.c
47617 +++ b/drivers/net/wireless/ti/wlcore/boot.c
47618 @@ -72,6 +72,7 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
47619         unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
47620                 wl->min_mr_fw_ver : wl->min_sr_fw_ver;
47621         char min_fw_str[32] = "";
47622 +       int off = 0;
47623         int i;
47625         /* the chip must be exactly equal */
47626 @@ -105,13 +106,15 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
47627         return 0;
47629  fail:
47630 -       for (i = 0; i < NUM_FW_VER; i++)
47631 +       for (i = 0; i < NUM_FW_VER && off < sizeof(min_fw_str); i++)
47632                 if (min_ver[i] == WLCORE_FW_VER_IGNORE)
47633 -                       snprintf(min_fw_str, sizeof(min_fw_str),
47634 -                                 "%s*.", min_fw_str);
47635 +                       off += snprintf(min_fw_str + off,
47636 +                                       sizeof(min_fw_str) - off,
47637 +                                       "*.");
47638                 else
47639 -                       snprintf(min_fw_str, sizeof(min_fw_str),
47640 -                                 "%s%u.", min_fw_str, min_ver[i]);
47641 +                       off += snprintf(min_fw_str + off,
47642 +                                       sizeof(min_fw_str) - off,
47643 +                                       "%u.", min_ver[i]);
47645         wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
47646                      "Please use at least FW %s\n"
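The wlcore fix above replaces calls of the form snprintf(buf, size, "%s*.", buf), which pass the destination buffer as a source argument; overlapping copies are undefined behavior and recent GCC flags them (-Wrestrict). The replacement accumulates an offset instead. A minimal sketch of the idiom, not the driver's exact code:

	char buf[32] = "";
	int off = 0, i;

	for (i = 0; i < n && off < sizeof(buf); i++)
		off += snprintf(buf + off, sizeof(buf) - off, "%u.", ver[i]);

One caveat: snprintf() returns the length that would have been written, so off can overshoot sizeof(buf) on truncation; the loop guard stops further writes, and code that needs off to stay bounded uses scnprintf() instead.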
47647 diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
47648 index b143293e694f..a9e13e6d65c5 100644
47649 --- a/drivers/net/wireless/ti/wlcore/debugfs.h
47650 +++ b/drivers/net/wireless/ti/wlcore/debugfs.h
47651 @@ -78,13 +78,14 @@ static ssize_t sub## _ ##name## _read(struct file *file,            \
47652         struct wl1271 *wl = file->private_data;                         \
47653         struct struct_type *stats = wl->stats.fw_stats;                 \
47654         char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = "";                      \
47655 +       int pos = 0;                                                    \
47656         int i;                                                          \
47657                                                                         \
47658         wl1271_debugfs_update_stats(wl);                                \
47659                                                                         \
47660 -       for (i = 0; i < len; i++)                                       \
47661 -               snprintf(buf, sizeof(buf), "%s[%d] = %d\n",             \
47662 -                        buf, i, stats->sub.name[i]);                   \
47663 +       for (i = 0; i < len && pos < sizeof(buf); i++)                  \
47664 +               pos += snprintf(buf + pos, sizeof(buf) - pos,           \
47665 +                        "[%d] = %d\n", i, stats->sub.name[i]);         \
47666                                                                         \
47667         return wl1271_format_buffer(userbuf, count, ppos, "%s", buf);   \
47668  }                                                                      \
47669 diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
47670 index e98e04ee9a2c..59b7b93c5963 100644
47671 --- a/drivers/net/wireless/wl3501.h
47672 +++ b/drivers/net/wireless/wl3501.h
47673 @@ -379,16 +379,7 @@ struct wl3501_get_confirm {
47674         u8      mib_value[100];
47675  };
47677 -struct wl3501_join_req {
47678 -       u16                         next_blk;
47679 -       u8                          sig_id;
47680 -       u8                          reserved;
47681 -       struct iw_mgmt_data_rset    operational_rset;
47682 -       u16                         reserved2;
47683 -       u16                         timeout;
47684 -       u16                         probe_delay;
47685 -       u8                          timestamp[8];
47686 -       u8                          local_time[8];
47687 +struct wl3501_req {
47688         u16                         beacon_period;
47689         u16                         dtim_period;
47690         u16                         cap_info;
47691 @@ -401,6 +392,19 @@ struct wl3501_join_req {
47692         struct iw_mgmt_data_rset    bss_basic_rset;
47693  };
47695 +struct wl3501_join_req {
47696 +       u16                         next_blk;
47697 +       u8                          sig_id;
47698 +       u8                          reserved;
47699 +       struct iw_mgmt_data_rset    operational_rset;
47700 +       u16                         reserved2;
47701 +       u16                         timeout;
47702 +       u16                         probe_delay;
47703 +       u8                          timestamp[8];
47704 +       u8                          local_time[8];
47705 +       struct wl3501_req           req;
47708  struct wl3501_join_confirm {
47709         u16     next_blk;
47710         u8      sig_id;
47711 @@ -443,16 +447,7 @@ struct wl3501_scan_confirm {
47712         u16                         status;
47713         char                        timestamp[8];
47714         char                        localtime[8];
47715 -       u16                         beacon_period;
47716 -       u16                         dtim_period;
47717 -       u16                         cap_info;
47718 -       u8                          bss_type;
47719 -       u8                          bssid[ETH_ALEN];
47720 -       struct iw_mgmt_essid_pset   ssid;
47721 -       struct iw_mgmt_ds_pset      ds_pset;
47722 -       struct iw_mgmt_cf_pset      cf_pset;
47723 -       struct iw_mgmt_ibss_pset    ibss_pset;
47724 -       struct iw_mgmt_data_rset    bss_basic_rset;
47725 +       struct wl3501_req           req;
47726         u8                          rssi;
47727  };
47729 @@ -471,8 +466,10 @@ struct wl3501_md_req {
47730         u16     size;
47731         u8      pri;
47732         u8      service_class;
47733 -       u8      daddr[ETH_ALEN];
47734 -       u8      saddr[ETH_ALEN];
47735 +       struct {
47736 +               u8      daddr[ETH_ALEN];
47737 +               u8      saddr[ETH_ALEN];
47738 +       } addr;
47739  };
47741  struct wl3501_md_ind {
47742 @@ -484,8 +481,10 @@ struct wl3501_md_ind {
47743         u8      reception;
47744         u8      pri;
47745         u8      service_class;
47746 -       u8      daddr[ETH_ALEN];
47747 -       u8      saddr[ETH_ALEN];
47748 +       struct {
47749 +               u8      daddr[ETH_ALEN];
47750 +               u8      saddr[ETH_ALEN];
47751 +       } addr;
47752  };
47754  struct wl3501_md_confirm {
47755 diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
47756 index 8ca5789c7b37..672f5d5f3f2c 100644
47757 --- a/drivers/net/wireless/wl3501_cs.c
47758 +++ b/drivers/net/wireless/wl3501_cs.c
47759 @@ -469,6 +469,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
47760         struct wl3501_md_req sig = {
47761                 .sig_id = WL3501_SIG_MD_REQ,
47762         };
47763 +       size_t sig_addr_len = sizeof(sig.addr);
47764         u8 *pdata = (char *)data;
47765         int rc = -EIO;
47767 @@ -484,9 +485,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
47768                         goto out;
47769                 }
47770                 rc = 0;
47771 -               memcpy(&sig.daddr[0], pdata, 12);
47772 -               pktlen = len - 12;
47773 -               pdata += 12;
47774 +               memcpy(&sig.addr, pdata, sig_addr_len);
47775 +               pktlen = len - sig_addr_len;
47776 +               pdata += sig_addr_len;
47777                 sig.data = bf;
47778                 if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
47779                         u8 addr4[ETH_ALEN] = {
47780 @@ -589,7 +590,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
47781         struct wl3501_join_req sig = {
47782                 .sig_id           = WL3501_SIG_JOIN_REQ,
47783                 .timeout          = 10,
47784 -               .ds_pset = {
47785 +               .req.ds_pset = {
47786                         .el = {
47787                                 .id  = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
47788                                 .len = 1,
47789 @@ -598,7 +599,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
47790                 },
47791         };
47793 -       memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
47794 +       memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));
47795         return wl3501_esbq_exec(this, &sig, sizeof(sig));
47798 @@ -666,35 +667,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
47799         if (sig.status == WL3501_STATUS_SUCCESS) {
47800                 pr_debug("success");
47801                 if ((this->net_type == IW_MODE_INFRA &&
47802 -                    (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
47803 +                    (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
47804                     (this->net_type == IW_MODE_ADHOC &&
47805 -                    (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
47806 +                    (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
47807                     this->net_type == IW_MODE_AUTO) {
47808                         if (!this->essid.el.len)
47809                                 matchflag = 1;
47810                         else if (this->essid.el.len == 3 &&
47811                                  !memcmp(this->essid.essid, "ANY", 3))
47812                                 matchflag = 1;
47813 -                       else if (this->essid.el.len != sig.ssid.el.len)
47814 +                       else if (this->essid.el.len != sig.req.ssid.el.len)
47815                                 matchflag = 0;
47816 -                       else if (memcmp(this->essid.essid, sig.ssid.essid,
47817 +                       else if (memcmp(this->essid.essid, sig.req.ssid.essid,
47818                                         this->essid.el.len))
47819                                 matchflag = 0;
47820                         else
47821                                 matchflag = 1;
47822                         if (matchflag) {
47823                                 for (i = 0; i < this->bss_cnt; i++) {
47824 -                                       if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
47825 +                                       if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid,
47826 +                                                                      sig.req.bssid)) {
47827                                                 matchflag = 0;
47828                                                 break;
47829                                         }
47830                                 }
47831                         }
47832                         if (matchflag && (i < 20)) {
47833 -                               memcpy(&this->bss_set[i].beacon_period,
47834 -                                      &sig.beacon_period, 73);
47835 +                               memcpy(&this->bss_set[i].req,
47836 +                                      &sig.req, sizeof(sig.req));
47837                                 this->bss_cnt++;
47838                                 this->rssi = sig.rssi;
47839 +                               this->bss_set[i].rssi = sig.rssi;
47840                         }
47841                 }
47842         } else if (sig.status == WL3501_STATUS_TIMEOUT) {
47843 @@ -886,19 +889,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
47844                         if (this->join_sta_bss < this->bss_cnt) {
47845                                 const int i = this->join_sta_bss;
47846                                 memcpy(this->bssid,
47847 -                                      this->bss_set[i].bssid, ETH_ALEN);
47848 -                               this->chan = this->bss_set[i].ds_pset.chan;
47849 +                                      this->bss_set[i].req.bssid, ETH_ALEN);
47850 +                               this->chan = this->bss_set[i].req.ds_pset.chan;
47851                                 iw_copy_mgmt_info_element(&this->keep_essid.el,
47852 -                                                    &this->bss_set[i].ssid.el);
47853 +                                                    &this->bss_set[i].req.ssid.el);
47854                                 wl3501_mgmt_auth(this);
47855                         }
47856                 } else {
47857                         const int i = this->join_sta_bss;
47859 -                       memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
47860 -                       this->chan = this->bss_set[i].ds_pset.chan;
47861 +                       memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN);
47862 +                       this->chan = this->bss_set[i].req.ds_pset.chan;
47863                         iw_copy_mgmt_info_element(&this->keep_essid.el,
47864 -                                                 &this->bss_set[i].ssid.el);
47865 +                                                 &this->bss_set[i].req.ssid.el);
47866                         wl3501_online(dev);
47867                 }
47868         } else {
47869 @@ -980,7 +983,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
47870         } else {
47871                 skb->dev = dev;
47872                 skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
47873 -               skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
47874 +               skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr,
47875 +                                       sizeof(sig.addr));
47876                 wl3501_receive(this, skb->data, pkt_len);
47877                 skb_put(skb, pkt_len);
47878                 skb->protocol   = eth_type_trans(skb, dev);
47879 @@ -1571,30 +1575,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
47880         for (i = 0; i < this->bss_cnt; ++i) {
47881                 iwe.cmd                 = SIOCGIWAP;
47882                 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
47883 -               memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN);
47884 +               memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN);
47885                 current_ev = iwe_stream_add_event(info, current_ev,
47886                                                   extra + IW_SCAN_MAX_DATA,
47887                                                   &iwe, IW_EV_ADDR_LEN);
47888                 iwe.cmd           = SIOCGIWESSID;
47889                 iwe.u.data.flags  = 1;
47890 -               iwe.u.data.length = this->bss_set[i].ssid.el.len;
47891 +               iwe.u.data.length = this->bss_set[i].req.ssid.el.len;
47892                 current_ev = iwe_stream_add_point(info, current_ev,
47893                                                   extra + IW_SCAN_MAX_DATA,
47894                                                   &iwe,
47895 -                                                 this->bss_set[i].ssid.essid);
47896 +                                                 this->bss_set[i].req.ssid.essid);
47897                 iwe.cmd    = SIOCGIWMODE;
47898 -               iwe.u.mode = this->bss_set[i].bss_type;
47899 +               iwe.u.mode = this->bss_set[i].req.bss_type;
47900                 current_ev = iwe_stream_add_event(info, current_ev,
47901                                                   extra + IW_SCAN_MAX_DATA,
47902                                                   &iwe, IW_EV_UINT_LEN);
47903                 iwe.cmd = SIOCGIWFREQ;
47904 -               iwe.u.freq.m = this->bss_set[i].ds_pset.chan;
47905 +               iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan;
47906                 iwe.u.freq.e = 0;
47907                 current_ev = iwe_stream_add_event(info, current_ev,
47908                                                   extra + IW_SCAN_MAX_DATA,
47909                                                   &iwe, IW_EV_FREQ_LEN);
47910                 iwe.cmd = SIOCGIWENCODE;
47911 -               if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
47912 +               if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
47913                         iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
47914                 else
47915                         iwe.u.data.flags = IW_ENCODE_DISABLED;
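A pattern worth noting in the wl3501 hunks above: the driver used to memcpy() across several adjacent structure members with hard-coded byte counts (12, 72, 73), which breaks silently if fields are reordered and trips compile-time bounds checking (FORTIFY_SOURCE, -Warray-bounds). Grouping the spanned fields into a named struct lets the size come from the type. Schematically:

	/* before: the count is a magic number spanning several members */
	memcpy(&sig.daddr[0], pdata, 12);

	/* after: the compiler knows exactly how big the copy is */
	memcpy(&sig.addr, pdata, sizeof(sig.addr));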
47916 diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
47917 index f1469ac8ff42..3fe5b81eda2d 100644
47918 --- a/drivers/nfc/pn533/pn533.c
47919 +++ b/drivers/nfc/pn533/pn533.c
47920 @@ -706,6 +706,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
47921         if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
47922                 return false;
47924 +       if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE)
47925 +               return false;
47927         return true;
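The added pn533 check rejects a peripheral-supplied NFCID length before it can be used to fill the fixed-size nfcid1 buffers elsewhere in the stack (NFC_NFCID1_MAXSIZE is 10 bytes). A generic sketch of the rule the hunk applies, with an illustrative struct and buffer:

	struct target_info {
		u8 nfcid_len;
		u8 nfcid[10];		/* NFC_NFCID1_MAXSIZE */
	};
	u8 nfcid1_buf[10];

	/* validate device-controlled lengths before any copy uses them */
	if (info->nfcid_len > sizeof(info->nfcid))
		return false;
	memcpy(nfcid1_buf, info->nfcid, info->nfcid_len);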
47930 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
47931 index 0896e21642be..091b2e77d39b 100644
47932 --- a/drivers/nvme/host/core.c
47933 +++ b/drivers/nvme/host/core.c
47934 @@ -2681,7 +2681,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
47936         if (ctrl->ps_max_latency_us != latency) {
47937                 ctrl->ps_max_latency_us = latency;
47938 -               nvme_configure_apst(ctrl);
47939 +               if (ctrl->state == NVME_CTRL_LIVE)
47940 +                       nvme_configure_apst(ctrl);
47941         }
47944 @@ -3189,7 +3190,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
47945                 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
47946         }
47948 -       ret = nvme_mpath_init(ctrl, id);
47949 +       ret = nvme_mpath_init_identify(ctrl, id);
47950         kfree(id);
47952         if (ret < 0)
47953 @@ -4579,6 +4580,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
47954                 min(default_ps_max_latency_us, (unsigned long)S32_MAX));
47956         nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
47957 +       nvme_mpath_init_ctrl(ctrl);
47959         return 0;
47960  out_free_name:
47961 diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
47962 index 6ffa8de2a0d7..5eee603bc249 100644
47963 --- a/drivers/nvme/host/fc.c
47964 +++ b/drivers/nvme/host/fc.c
47965 @@ -2460,6 +2460,18 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
47966  static void
47967  __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
47969 +       int q;
47971 +       /*
47972 +        * if aborting io, the queues are no longer good, mark them
47973 +        * all as not live.
47974 +        */
47975 +       if (ctrl->ctrl.queue_count > 1) {
47976 +               for (q = 1; q < ctrl->ctrl.queue_count; q++)
47977 +                       clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
47978 +       }
47979 +       clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
47981         /*
47982          * If io queues are present, stop them and terminate all outstanding
47983          * ios on them. As FC allocates FC exchange for each io, the
47984 diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
47985 index a1d476e1ac02..56852e6edd81 100644
47986 --- a/drivers/nvme/host/multipath.c
47987 +++ b/drivers/nvme/host/multipath.c
47988 @@ -668,6 +668,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
47989                 if (desc.state) {
47990                         /* found the group desc: update */
47991                         nvme_update_ns_ana_state(&desc, ns);
47992 +               } else {
47993 +                       /* group desc not found: trigger a re-read */
47994 +                       set_bit(NVME_NS_ANA_PENDING, &ns->flags);
47995 +                       queue_work(nvme_wq, &ns->ctrl->ana_work);
47996                 }
47997         } else {
47998                 ns->ana_state = NVME_ANA_OPTIMIZED; 
47999 @@ -705,9 +709,18 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
48000         put_disk(head->disk);
48003 -int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
48004 +void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
48006 -       int error;
48007 +       mutex_init(&ctrl->ana_lock);
48008 +       timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
48009 +       INIT_WORK(&ctrl->ana_work, nvme_ana_work);
48012 +int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
48014 +       size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
48015 +       size_t ana_log_size;
48016 +       int error = 0;
48018         /* check if multipath is enabled and we have the capability */
48019         if (!multipath || !ctrl->subsys ||
48020 @@ -719,37 +732,31 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
48021         ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
48022         ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
48024 -       mutex_init(&ctrl->ana_lock);
48025 -       timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
48026 -       ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
48027 -               ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
48028 -       ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
48030 -       if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
48031 +       ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
48032 +               ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
48033 +               ctrl->max_namespaces * sizeof(__le32);
48034 +       if (ana_log_size > max_transfer_size) {
48035                 dev_err(ctrl->device,
48036 -                       "ANA log page size (%zd) larger than MDTS (%d).\n",
48037 -                       ctrl->ana_log_size,
48038 -                       ctrl->max_hw_sectors << SECTOR_SHIFT);
48039 +                       "ANA log page size (%zd) larger than MDTS (%zd).\n",
48040 +                       ana_log_size, max_transfer_size);
48041                 dev_err(ctrl->device, "disabling ANA support.\n");
48042 -               return 0;
48043 +               goto out_uninit;
48044         }
48046 -       INIT_WORK(&ctrl->ana_work, nvme_ana_work);
48047 -       kfree(ctrl->ana_log_buf);
48048 -       ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
48049 -       if (!ctrl->ana_log_buf) {
48050 -               error = -ENOMEM;
48051 -               goto out;
48052 +       if (ana_log_size > ctrl->ana_log_size) {
48053 +               nvme_mpath_stop(ctrl);
48054 +               kfree(ctrl->ana_log_buf);
48055 +               ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
48056 +               if (!ctrl->ana_log_buf)
48057 +                       return -ENOMEM;
48058         }
48060 +       ctrl->ana_log_size = ana_log_size;
48061         error = nvme_read_ana_log(ctrl);
48062         if (error)
48063 -               goto out_free_ana_log_buf;
48064 +               goto out_uninit;
48065         return 0;
48066 -out_free_ana_log_buf:
48067 -       kfree(ctrl->ana_log_buf);
48068 -       ctrl->ana_log_buf = NULL;
48069 -out:
48071 +out_uninit:
48072 +       nvme_mpath_uninit(ctrl);
48073         return error;
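The multipath rework above splits initialization in two: state that must exist exactly once per controller (ana_lock, the ANATT timer, ana_work) moves to nvme_mpath_init_ctrl(), called at controller allocation, while the identify path only (re)allocates the ANA log buffer, and only when it has to grow. That makes repeated identifies, e.g. across controller resets, safe. The shape of the pattern, as a hedged sketch with generic names:

	/* once, at controller creation */
	mutex_init(&ctrl->lock);
	INIT_WORK(&ctrl->work, work_fn);

	/* on every identify: reallocate only when the log must grow */
	if (new_size > ctrl->log_size) {
		kfree(ctrl->log_buf);
		ctrl->log_buf = kmalloc(new_size, GFP_KERNEL);
		if (!ctrl->log_buf)
			return -ENOMEM;
	}
	ctrl->log_size = new_size;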
48076 diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
48077 index 07b34175c6ce..447b0720aef5 100644
48078 --- a/drivers/nvme/host/nvme.h
48079 +++ b/drivers/nvme/host/nvme.h
48080 @@ -668,7 +668,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
48081  int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
48082  void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
48083  void nvme_mpath_remove_disk(struct nvme_ns_head *head);
48084 -int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
48085 +int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
48086 +void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
48087  void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
48088  void nvme_mpath_stop(struct nvme_ctrl *ctrl);
48089  bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
48090 @@ -742,7 +743,10 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
48091  static inline void nvme_trace_bio_complete(struct request *req)
48094 -static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
48095 +static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
48098 +static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
48099                 struct nvme_id_ctrl *id)
48101         if (ctrl->subsys->cmic & (1 << 3))
48102 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
48103 index 7249ae74f71f..c92a15c3fbc5 100644
48104 --- a/drivers/nvme/host/pci.c
48105 +++ b/drivers/nvme/host/pci.c
48106 @@ -852,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
48107                                 return nvme_setup_prp_simple(dev, req,
48108                                                              &cmnd->rw, &bv);
48110 -                       if (iod->nvmeq->qid &&
48111 +                       if (iod->nvmeq->qid && sgl_threshold &&
48112                             dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
48113                                 return nvme_setup_sgl_simple(dev, req,
48114                                                              &cmnd->rw, &bv);
48115 diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
48116 index a0f00cb8f9f3..8c2ae6284c3b 100644
48117 --- a/drivers/nvme/host/tcp.c
48118 +++ b/drivers/nvme/host/tcp.c
48119 @@ -874,7 +874,7 @@ static void nvme_tcp_state_change(struct sock *sk)
48121         struct nvme_tcp_queue *queue;
48123 -       read_lock(&sk->sk_callback_lock);
48124 +       read_lock_bh(&sk->sk_callback_lock);
48125         queue = sk->sk_user_data;
48126         if (!queue)
48127                 goto done;
48128 @@ -895,7 +895,7 @@ static void nvme_tcp_state_change(struct sock *sk)
48130         queue->state_change(sk);
48131  done:
48132 -       read_unlock(&sk->sk_callback_lock);
48133 +       read_unlock_bh(&sk->sk_callback_lock);
48136  static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
48137 @@ -940,7 +940,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
48138                 if (ret <= 0)
48139                         return ret;
48141 -               nvme_tcp_advance_req(req, ret);
48142                 if (queue->data_digest)
48143                         nvme_tcp_ddgst_update(queue->snd_hash, page,
48144                                         offset, ret);
48145 @@ -957,6 +956,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
48146                         }
48147                         return 1;
48148                 }
48149 +               nvme_tcp_advance_req(req, ret);
48150         }
48151         return -EAGAIN;
48153 @@ -1137,7 +1137,8 @@ static void nvme_tcp_io_work(struct work_struct *w)
48154                                 pending = true;
48155                         else if (unlikely(result < 0))
48156                                 break;
48157 -               }
48158 +               } else
48159 +                       pending = !llist_empty(&queue->req_list);
48161                 result = nvme_tcp_try_recv(queue);
48162                 if (result > 0)
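Two details in the nvme-tcp hunks above: sk_callback_lock is now taken with the _bh variants because the socket callbacks also run from softirq context, so a plain read_lock() taken in process context could deadlock against the same lock on the same CPU; and nvme_tcp_advance_req() moves after the data-digest update so the digest is computed over the bytes at their pre-send offsets. A sketch of the locking rule, with a hypothetical saved callback member:

	read_lock_bh(&sk->sk_callback_lock);	/* also masks local BH */
	queue = sk->sk_user_data;
	if (queue)
		queue->saved_state_change(sk);	/* hypothetical member */
	read_unlock_bh(&sk->sk_callback_lock);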
48163 diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
48164 index fe6b8aa90b53..5a1ab49908c3 100644
48165 --- a/drivers/nvme/target/admin-cmd.c
48166 +++ b/drivers/nvme/target/admin-cmd.c
48167 @@ -307,7 +307,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
48168         case NVME_LOG_ANA:
48169                 return nvmet_execute_get_log_page_ana(req);
48170         }
48171 -       pr_err("unhandled lid %d on qid %d\n",
48172 +       pr_debug("unhandled lid %d on qid %d\n",
48173                req->cmd->get_log_page.lid, req->sq->qid);
48174         req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
48175         nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
48176 @@ -659,7 +659,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
48177                 return nvmet_execute_identify_desclist(req);
48178         }
48180 -       pr_err("unhandled identify cns %d on qid %d\n",
48181 +       pr_debug("unhandled identify cns %d on qid %d\n",
48182                req->cmd->identify.cns, req->sq->qid);
48183         req->error_loc = offsetof(struct nvme_identify, cns);
48184         nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
48185 @@ -919,15 +919,21 @@ void nvmet_execute_async_event(struct nvmet_req *req)
48186  void nvmet_execute_keep_alive(struct nvmet_req *req)
48188         struct nvmet_ctrl *ctrl = req->sq->ctrl;
48189 +       u16 status = 0;
48191         if (!nvmet_check_transfer_len(req, 0))
48192                 return;
48194 +       if (!ctrl->kato) {
48195 +               status = NVME_SC_KA_TIMEOUT_INVALID;
48196 +               goto out;
48197 +       }
48199         pr_debug("ctrl %d update keep-alive timer for %d secs\n",
48200                 ctrl->cntlid, ctrl->kato);
48202         mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
48203 -       nvmet_req_complete(req, 0);
48204 +out:
48205 +       nvmet_req_complete(req, status);
48208  u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
48209 @@ -971,7 +977,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
48210                 return 0;
48211         }
48213 -       pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
48214 +       pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
48215                req->sq->qid);
48216         req->error_loc = offsetof(struct nvme_common_command, opcode);
48217         return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
48218 diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
48219 index a027433b8be8..348057fdc568 100644
48220 --- a/drivers/nvme/target/core.c
48221 +++ b/drivers/nvme/target/core.c
48222 @@ -1371,7 +1371,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
48223                 goto out_free_changed_ns_list;
48225         if (subsys->cntlid_min > subsys->cntlid_max)
48226 -               goto out_free_changed_ns_list;
48227 +               goto out_free_sqs;
48229         ret = ida_simple_get(&cntlid_ida,
48230                              subsys->cntlid_min, subsys->cntlid_max,
48231 diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
48232 index 682854e0e079..4845d12e374a 100644
48233 --- a/drivers/nvme/target/discovery.c
48234 +++ b/drivers/nvme/target/discovery.c
48235 @@ -178,12 +178,14 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
48236         if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
48237                 req->error_loc =
48238                         offsetof(struct nvme_get_log_page_command, lid);
48239 -               status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
48240 +               status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
48241                 goto out;
48242         }
48244         /* Spec requires dword aligned offsets */
48245         if (offset & 0x3) {
48246 +               req->error_loc =
48247 +                       offsetof(struct nvme_get_log_page_command, lpo);
48248                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
48249                 goto out;
48250         }
48251 @@ -250,7 +252,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
48253         if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
48254                 req->error_loc = offsetof(struct nvme_identify, cns);
48255 -               status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
48256 +               status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
48257                 goto out;
48258         }
48260 diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
48261 index 9a8b3726a37c..429263ca9b97 100644
48262 --- a/drivers/nvme/target/io-cmd-bdev.c
48263 +++ b/drivers/nvme/target/io-cmd-bdev.c
48264 @@ -258,7 +258,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
48266         sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
48268 -       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
48269 +       if (nvmet_use_inline_bvec(req)) {
48270                 bio = &req->b.inline_bio;
48271                 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
48272         } else {
48273 diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
48274 index 715d4376c997..7fdbdc496597 100644
48275 --- a/drivers/nvme/target/io-cmd-file.c
48276 +++ b/drivers/nvme/target/io-cmd-file.c
48277 @@ -49,9 +49,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
48279         ns->file = filp_open(ns->device_path, flags, 0);
48280         if (IS_ERR(ns->file)) {
48281 -               pr_err("failed to open file %s: (%ld)\n",
48282 -                               ns->device_path, PTR_ERR(ns->file));
48283 -               return PTR_ERR(ns->file);
48284 +               ret = PTR_ERR(ns->file);
48285 +               pr_err("failed to open file %s: (%d)\n",
48286 +                       ns->device_path, ret);
48287 +               ns->file = NULL;
48288 +               return ret;
48289         }
48291         ret = nvmet_file_ns_revalidate(ns);
48292 diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
48293 index 3e189e753bcf..14913a4588ec 100644
48294 --- a/drivers/nvme/target/loop.c
48295 +++ b/drivers/nvme/target/loop.c
48296 @@ -588,8 +588,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
48298         ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
48299                                 0 /* no quirks, we're perfect! */);
48300 -       if (ret)
48301 +       if (ret) {
48302 +               kfree(ctrl);
48303                 goto out;
48304 +       }
48306         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
48307                 WARN_ON_ONCE(1);
48308 diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
48309 index 4b84edb49f22..5aad34b106dc 100644
48310 --- a/drivers/nvme/target/nvmet.h
48311 +++ b/drivers/nvme/target/nvmet.h
48312 @@ -614,4 +614,10 @@ static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
48313         return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
48316 +static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
48318 +       return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
48319 +              req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
48322  #endif /* _NVMET_H */
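The new helper gates use of the preallocated inline bvec on both limits: a request that is small in bytes but scattered across more than NVMET_MAX_INLINE_BIOVEC segments would previously overrun the inline array. Both callers, the bdev path above and the passthru path below, adopt it the same way; roughly:

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt));
	}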
48323 diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
48324 index 2798944899b7..39b1473f7204 100644
48325 --- a/drivers/nvme/target/passthru.c
48326 +++ b/drivers/nvme/target/passthru.c
48327 @@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
48328         if (req->sg_cnt > BIO_MAX_VECS)
48329                 return -EINVAL;
48331 -       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
48332 +       if (nvmet_use_inline_bvec(req)) {
48333                 bio = &req->p.inline_bio;
48334                 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
48335         } else {
48336 diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
48337 index 6c1f3ab7649c..7d607f435e36 100644
48338 --- a/drivers/nvme/target/rdma.c
48339 +++ b/drivers/nvme/target/rdma.c
48340 @@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
48342         struct nvmet_rdma_rsp *rsp =
48343                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
48344 -       struct nvmet_rdma_queue *queue = cq->cq_context;
48345 +       struct nvmet_rdma_queue *queue = wc->qp->qp_context;
48347         nvmet_rdma_release_rsp(rsp);
48349 @@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
48351         struct nvmet_rdma_rsp *rsp =
48352                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
48353 -       struct nvmet_rdma_queue *queue = cq->cq_context;
48354 +       struct nvmet_rdma_queue *queue = wc->qp->qp_context;
48355         struct rdma_cm_id *cm_id = rsp->queue->cm_id;
48356         u16 status;
48358 diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
48359 index d658c6e8263a..d958b5da9b88 100644
48360 --- a/drivers/nvme/target/tcp.c
48361 +++ b/drivers/nvme/target/tcp.c
48362 @@ -525,11 +525,36 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
48363         struct nvmet_tcp_cmd *cmd =
48364                 container_of(req, struct nvmet_tcp_cmd, req);
48365         struct nvmet_tcp_queue  *queue = cmd->queue;
48366 +       struct nvme_sgl_desc *sgl;
48367 +       u32 len;
48369 +       if (unlikely(cmd == queue->cmd)) {
48370 +               sgl = &cmd->req.cmd->common.dptr.sgl;
48371 +               len = le32_to_cpu(sgl->length);
48373 +               /*
48374 +                * Wait for inline data before processing the response.
48375 +                * Avoid using helpers, this might happen before
48376 +                * nvmet_req_init is completed.
48377 +                */
48378 +               if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
48379 +                   len && len < cmd->req.port->inline_data_size &&
48380 +                   nvme_is_write(cmd->req.cmd))
48381 +                       return;
48382 +       }
48384         llist_add(&cmd->lentry, &queue->resp_list);
48385         queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
48388 +static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
48390 +       if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
48391 +               nvmet_tcp_queue_response(&cmd->req);
48392 +       else
48393 +               cmd->req.execute(&cmd->req);
48396  static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
48398         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
48399 @@ -961,7 +986,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
48400                         le32_to_cpu(req->cmd->common.dptr.sgl.length));
48402                 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
48403 -               return -EAGAIN;
48404 +               return 0;
48405         }
48407         ret = nvmet_tcp_map_data(queue->cmd);
48408 @@ -1104,10 +1129,8 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
48409                 return 0;
48410         }
48412 -       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
48413 -           cmd->rbytes_done == cmd->req.transfer_len) {
48414 -               cmd->req.execute(&cmd->req);
48415 -       }
48416 +       if (cmd->rbytes_done == cmd->req.transfer_len)
48417 +               nvmet_tcp_execute_request(cmd);
48419         nvmet_prepare_receive_pdu(queue);
48420         return 0;
48421 @@ -1144,9 +1167,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
48422                 goto out;
48423         }
48425 -       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
48426 -           cmd->rbytes_done == cmd->req.transfer_len)
48427 -               cmd->req.execute(&cmd->req);
48428 +       if (cmd->rbytes_done == cmd->req.transfer_len)
48429 +               nvmet_tcp_execute_request(cmd);
48431         ret = 0;
48432  out:
48433         nvmet_prepare_receive_pdu(queue);
48434 @@ -1434,7 +1457,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
48436         struct nvmet_tcp_queue *queue;
48438 -       write_lock_bh(&sk->sk_callback_lock);
48439 +       read_lock_bh(&sk->sk_callback_lock);
48440         queue = sk->sk_user_data;
48441         if (!queue)
48442                 goto done;
48443 @@ -1452,7 +1475,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
48444                         queue->idx, sk->sk_state);
48445         }
48446  done:
48447 -       write_unlock_bh(&sk->sk_callback_lock);
48448 +       read_unlock_bh(&sk->sk_callback_lock);
48451  static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
48452 diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
48453 index 75d2594c16e1..267a0d9e99ba 100644
48454 --- a/drivers/nvmem/Kconfig
48455 +++ b/drivers/nvmem/Kconfig
48456 @@ -272,6 +272,7 @@ config SPRD_EFUSE
48458  config NVMEM_RMEM
48459         tristate "Reserved Memory Based Driver Support"
48460 +       depends on HAS_IOMEM
48461         help
48462           This driver maps reserved memory into an nvmem device. It might be
48463           useful to expose information left by firmware in memory.
48464 diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
48465 index 6cace24dfbf7..100d69d8f2e1 100644
48466 --- a/drivers/nvmem/qfprom.c
48467 +++ b/drivers/nvmem/qfprom.c
48468 @@ -127,6 +127,16 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
48470         int ret;
48472 +       /*
48473 +        * This may be a shared rail and may be able to run at a lower rate
48474 +        * when we're not blowing fuses.  At the moment, the regulator framework
48475 +        * applies voltage constraints even on disabled rails, so remove our
48476 +        * constraints and allow the rail to be adjusted by other users.
48477 +        */
48478 +       ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
48479 +       if (ret)
48480 +               dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
48482         ret = regulator_disable(priv->vcc);
48483         if (ret)
48484                 dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
48485 @@ -172,6 +182,17 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
48486                 goto err_clk_prepared;
48487         }
48489 +       /*
48490 +        * Hardware requires 1.8V min for fuse blowing; this may be
48491 +        * a shared rail so don't specify a max--regulator constraints
48492 +        * will handle it.
48493 +        */
48494 +       ret = regulator_set_voltage(priv->vcc, 1800000, INT_MAX);
48495 +       if (ret) {
48496 +               dev_err(priv->dev, "Failed to set 1.8 voltage\n");
48497 +               goto err_clk_rate_set;
48498 +       }
48500         ret = regulator_enable(priv->vcc);
48501         if (ret) {
48502                 dev_err(priv->dev, "Failed to enable regulator\n");
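The qfprom hunks above bracket fuse blowing with a voltage request: at least 1.8 V while blowing (INT_MAX as the max, since the rail may be shared and board constraints cap it), then back to a [0, INT_MAX] request so a shared rail may drop for other consumers. A condensed, hedged sketch of the pairing, assuming a "vcc" supply:

	struct regulator *vcc = devm_regulator_get(dev, "vcc");
	int err;

	/* blow window: hardware needs at least 1.8 V on the rail */
	err = regulator_set_voltage(vcc, 1800000, INT_MAX);
	if (!err)
		err = regulator_enable(vcc);

	/* fuse programming happens here */

	/* release: drop our constraint, then the enable reference */
	regulator_set_voltage(vcc, 0, INT_MAX);
	regulator_disable(vcc);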
48503 diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
48504 index 23effe5e50ec..2d132949572d 100644
48505 --- a/drivers/of/overlay.c
48506 +++ b/drivers/of/overlay.c
48507 @@ -796,6 +796,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
48508                 if (!fragment->target) {
48509                         of_node_put(fragment->overlay);
48510                         ret = -EINVAL;
48511 +                       of_node_put(node);
48512                         goto err_free_fragments;
48513                 }
48515 diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
48516 index 4547ac44c8d4..8fa1a7fdf12c 100644
48517 --- a/drivers/parport/ieee1284.c
48518 +++ b/drivers/parport/ieee1284.c
48519 @@ -202,7 +202,7 @@ int parport_wait_peripheral(struct parport *port,
48520                         /* parport_wait_event didn't time out, but the
48521                          * peripheral wasn't actually ready either.
48522                          * Wait for another 10ms. */
48523 -                       schedule_timeout_interruptible(msecs_to_jiffies(10));
48524 +                       schedule_msec_hrtimeout_interruptible(10);
48525                 }
48526         }
48528 diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
48529 index 2c11bd3fe1fd..8cb6b61c0880 100644
48530 --- a/drivers/parport/ieee1284_ops.c
48531 +++ b/drivers/parport/ieee1284_ops.c
48532 @@ -520,7 +520,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
48533                         /* Yield the port for a while. */
48534                         if (count && dev->port->irq != PARPORT_IRQ_NONE) {
48535                                 parport_release (dev);
48536 -                               schedule_timeout_interruptible(msecs_to_jiffies(40));
48537 +                               schedule_msec_hrtimeout_interruptible(40);
48538                                 parport_claim_or_block (dev);
48539                         }
48540                         else
48541 diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
48542 index 53aa35cb3a49..a59ecbec601f 100644
48543 --- a/drivers/pci/controller/dwc/pci-keystone.c
48544 +++ b/drivers/pci/controller/dwc/pci-keystone.c
48545 @@ -798,7 +798,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
48546         int ret;
48548         pp->bridge->ops = &ks_pcie_ops;
48549 -       pp->bridge->child_ops = &ks_child_pcie_ops;
48550 +       if (!ks_pcie->is_am6)
48551 +               pp->bridge->child_ops = &ks_child_pcie_ops;
48553         ret = ks_pcie_config_legacy_irq(ks_pcie);
48554         if (ret)
48555 diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
48556 index 1c25d8337151..8d028a88b375 100644
48557 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c
48558 +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
48559 @@ -705,6 +705,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
48560                 }
48561         }
48563 +       dw_pcie_iatu_detect(pci);
48565         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
48566         if (!res)
48567                 return -EINVAL;
48568 diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
48569 index 7e55b2b66182..24192b40e3a2 100644
48570 --- a/drivers/pci/controller/dwc/pcie-designware-host.c
48571 +++ b/drivers/pci/controller/dwc/pcie-designware-host.c
48572 @@ -398,6 +398,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
48573                 if (ret)
48574                         goto err_free_msi;
48575         }
48576 +       dw_pcie_iatu_detect(pci);
48578         dw_pcie_setup_rc(pp);
48579         dw_pcie_msi_init(pp);
48580 diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
48581 index 004cb860e266..a945f0c0e73d 100644
48582 --- a/drivers/pci/controller/dwc/pcie-designware.c
48583 +++ b/drivers/pci/controller/dwc/pcie-designware.c
48584 @@ -660,11 +660,9 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
48585         pci->num_ob_windows = ob;
48588 -void dw_pcie_setup(struct dw_pcie *pci)
48589 +void dw_pcie_iatu_detect(struct dw_pcie *pci)
48591 -       u32 val;
48592         struct device *dev = pci->dev;
48593 -       struct device_node *np = dev->of_node;
48594         struct platform_device *pdev = to_platform_device(dev);
48596         if (pci->version >= 0x480A || (!pci->version &&
48597 @@ -693,6 +691,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
48599         dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
48600                  pci->num_ob_windows, pci->num_ib_windows);
48603 +void dw_pcie_setup(struct dw_pcie *pci)
48605 +       u32 val;
48606 +       struct device *dev = pci->dev;
48607 +       struct device_node *np = dev->of_node;
48609         if (pci->link_gen > 0)
48610                 dw_pcie_link_set_max_speed(pci, pci->link_gen);
48611 diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
48612 index 7247c8b01f04..7d6e9b7576be 100644
48613 --- a/drivers/pci/controller/dwc/pcie-designware.h
48614 +++ b/drivers/pci/controller/dwc/pcie-designware.h
48615 @@ -306,6 +306,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
48616  void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
48617                          enum dw_pcie_region_type type);
48618  void dw_pcie_setup(struct dw_pcie *pci);
48619 +void dw_pcie_iatu_detect(struct dw_pcie *pci);
48621  static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
48623 diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
48624 index 6fa216e52d14..0e94190ca4e8 100644
48625 --- a/drivers/pci/controller/dwc/pcie-tegra194.c
48626 +++ b/drivers/pci/controller/dwc/pcie-tegra194.c
48627 @@ -1645,7 +1645,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
48628         if (pcie->ep_state == EP_STATE_ENABLED)
48629                 return;
48631 -       ret = pm_runtime_get_sync(dev);
48632 +       ret = pm_runtime_resume_and_get(dev);
48633         if (ret < 0) {
48634                 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
48635                         ret);
48636 diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
48637 index f964fd26f7e0..ffd84656544f 100644
48638 --- a/drivers/pci/controller/pci-thunder-ecam.c
48639 +++ b/drivers/pci/controller/pci-thunder-ecam.c
48640 @@ -116,7 +116,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
48641          * the config space access window.  Since we are working with
48642          * the high-order 32 bits, shift everything down by 32 bits.
48643          */
48644 -       node_bits = (cfg->res.start >> 32) & (1 << 12);
48645 +       node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
48647         v |= node_bits;
48648         set_val(v, where, size, val);
48649 diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
48650 index 1a3f70ac61fc..0660b9da204f 100644
48651 --- a/drivers/pci/controller/pci-thunder-pem.c
48652 +++ b/drivers/pci/controller/pci-thunder-pem.c
48653 @@ -12,6 +12,7 @@
48654  #include <linux/pci-acpi.h>
48655  #include <linux/pci-ecam.h>
48656  #include <linux/platform_device.h>
48657 +#include <linux/io-64-nonatomic-lo-hi.h>
48658  #include "../pci.h"
48660  #if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
48661 @@ -324,9 +325,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
48662          * structure here for the BAR.
48663          */
48664         bar4_start = res_pem->start + 0xf00000;
48665 -       pem_pci->ea_entry[0] = (u32)bar4_start | 2;
48666 -       pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
48667 -       pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
48668 +       pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
48669 +       pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
48670 +       pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
48672         cfg->priv = pem_pci;
48673         return 0;
48674 @@ -334,9 +335,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
48676  #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
48678 -#define PEM_RES_BASE           0x87e0c0000000UL
48679 -#define PEM_NODE_MASK          GENMASK(45, 44)
48680 -#define PEM_INDX_MASK          GENMASK(26, 24)
48681 +#define PEM_RES_BASE           0x87e0c0000000ULL
48682 +#define PEM_NODE_MASK          GENMASK_ULL(45, 44)
48683 +#define PEM_INDX_MASK          GENMASK_ULL(26, 24)
48684  #define PEM_MIN_DOM_IN_NODE    4
48685  #define PEM_MAX_DOM_IN_NODE    10
48687 diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
48688 index 2afdc865253e..7f503dd4ff81 100644
48689 --- a/drivers/pci/controller/pci-xgene.c
48690 +++ b/drivers/pci/controller/pci-xgene.c
48691 @@ -354,7 +354,8 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
48692         if (IS_ERR(port->csr_base))
48693                 return PTR_ERR(port->csr_base);
48695 -       port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
48696 +       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
48697 +       port->cfg_base = devm_ioremap_resource(dev, res);
48698         if (IS_ERR(port->cfg_base))
48699                 return PTR_ERR(port->cfg_base);
48700         port->cfg_addr = res->start;
48701 diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
48702 index e330e6811f0b..08bc788d9422 100644
48703 --- a/drivers/pci/controller/pcie-brcmstb.c
48704 +++ b/drivers/pci/controller/pcie-brcmstb.c
48705 @@ -1148,6 +1148,7 @@ static int brcm_pcie_suspend(struct device *dev)
48707         brcm_pcie_turn_off(pcie);
48708         ret = brcm_phy_stop(pcie);
48709 +       reset_control_rearm(pcie->rescal);
48710         clk_disable_unprepare(pcie->clk);
48712         return ret;
48713 @@ -1163,9 +1164,13 @@ static int brcm_pcie_resume(struct device *dev)
48714         base = pcie->base;
48715         clk_prepare_enable(pcie->clk);
48717 +       ret = reset_control_reset(pcie->rescal);
48718 +       if (ret)
48719 +               goto err_disable_clk;
48721         ret = brcm_phy_start(pcie);
48722         if (ret)
48723 -               goto err;
48724 +               goto err_reset;
48726         /* Take bridge out of reset so we can access the SERDES reg */
48727         pcie->bridge_sw_init_set(pcie, 0);
48728 @@ -1180,14 +1185,16 @@ static int brcm_pcie_resume(struct device *dev)
48730         ret = brcm_pcie_setup(pcie);
48731         if (ret)
48732 -               goto err;
48733 +               goto err_reset;
48735         if (pcie->msi)
48736                 brcm_msi_set_regs(pcie->msi);
48738         return 0;
48740 -err:
48741 +err_reset:
48742 +       reset_control_rearm(pcie->rescal);
48743 +err_disable_clk:
48744         clk_disable_unprepare(pcie->clk);
48745         return ret;
48747 @@ -1197,7 +1204,7 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
48748         brcm_msi_remove(pcie);
48749         brcm_pcie_turn_off(pcie);
48750         brcm_phy_stop(pcie);
48751 -       reset_control_assert(pcie->rescal);
48752 +       reset_control_rearm(pcie->rescal);
48753         clk_disable_unprepare(pcie->clk);
48756 @@ -1278,13 +1285,13 @@ static int brcm_pcie_probe(struct platform_device *pdev)
48757                 return PTR_ERR(pcie->perst_reset);
48758         }
48760 -       ret = reset_control_deassert(pcie->rescal);
48761 +       ret = reset_control_reset(pcie->rescal);
48762         if (ret)
48763                 dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
48765         ret = brcm_phy_start(pcie);
48766         if (ret) {
48767 -               reset_control_assert(pcie->rescal);
48768 +               reset_control_rearm(pcie->rescal);
48769                 clk_disable_unprepare(pcie->clk);
48770                 return ret;
48771         }
48772 @@ -1296,6 +1303,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
48773         pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
48774         if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
48775                 dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
48776 +               ret = -ENODEV;
48777                 goto fail;
48778         }
48780 diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
48781 index 908475d27e0e..eede4e8f3f75 100644
48782 --- a/drivers/pci/controller/pcie-iproc-msi.c
48783 +++ b/drivers/pci/controller/pcie-iproc-msi.c
48784 @@ -271,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
48785                                     NULL, NULL);
48786         }
48788 -       return hwirq;
48789 +       return 0;
48792  static void iproc_msi_irq_domain_free(struct irq_domain *domain,
48793 diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
48794 index c0ac4e9cbe72..f9760e73d568 100644
48795 --- a/drivers/pci/endpoint/functions/pci-epf-test.c
48796 +++ b/drivers/pci/endpoint/functions/pci-epf-test.c
48797 @@ -833,15 +833,18 @@ static int pci_epf_test_bind(struct pci_epf *epf)
48798                 return -EINVAL;
48800         epc_features = pci_epc_get_features(epc, epf->func_no);
48801 -       if (epc_features) {
48802 -               linkup_notifier = epc_features->linkup_notifier;
48803 -               core_init_notifier = epc_features->core_init_notifier;
48804 -               test_reg_bar = pci_epc_get_first_free_bar(epc_features);
48805 -               if (test_reg_bar < 0)
48806 -                       return -EINVAL;
48807 -               pci_epf_configure_bar(epf, epc_features);
48808 +       if (!epc_features) {
48809 +               dev_err(&epf->dev, "epc_features not implemented\n");
48810 +               return -EOPNOTSUPP;
48811         }
48813 +       linkup_notifier = epc_features->linkup_notifier;
48814 +       core_init_notifier = epc_features->core_init_notifier;
48815 +       test_reg_bar = pci_epc_get_first_free_bar(epc_features);
48816 +       if (test_reg_bar < 0)
48817 +               return -EINVAL;
48818 +       pci_epf_configure_bar(epf, epc_features);
48820         epf_test->test_reg_bar = test_reg_bar;
48821         epf_test->epc_features = epc_features;
48823 @@ -922,6 +925,7 @@ static int __init pci_epf_test_init(void)
48825         ret = pci_epf_register_driver(&test_driver);
48826         if (ret) {
48827 +               destroy_workqueue(kpcitest_workqueue);
48828                 pr_err("Failed to register pci epf test driver --> %d\n", ret);
48829                 return ret;
48830         }
48831 @@ -932,6 +936,8 @@ module_init(pci_epf_test_init);
48833  static void __exit pci_epf_test_exit(void)
48835 +       if (kpcitest_workqueue)
48836 +               destroy_workqueue(kpcitest_workqueue);
48837         pci_epf_unregister_driver(&test_driver);
48839  module_exit(pci_epf_test_exit);
48840 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
48841 index 3365c93abf0e..f031302ad401 100644
48842 --- a/drivers/pci/hotplug/acpiphp_glue.c
48843 +++ b/drivers/pci/hotplug/acpiphp_glue.c
48844 @@ -533,6 +533,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
48845                         slot->flags &= ~SLOT_ENABLED;
48846                         continue;
48847                 }
48848 +               pci_dev_put(dev);
48849         }
48852 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
48853 index 16a17215f633..e4d4e399004b 100644
48854 --- a/drivers/pci/pci.c
48855 +++ b/drivers/pci/pci.c
48856 @@ -1870,20 +1870,10 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
48857         int err;
48858         int i, bars = 0;
48860 -       /*
48861 -        * Power state could be unknown at this point, either due to a fresh
48862 -        * boot or a device removal call.  So get the current power state
48863 -        * so that things like MSI message writing will behave as expected
48864 -        * (e.g. if the device really is in D0 at enable time).
48865 -        */
48866 -       if (dev->pm_cap) {
48867 -               u16 pmcsr;
48868 -               pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
48869 -               dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
48870 -       }
48872 -       if (atomic_inc_return(&dev->enable_cnt) > 1)
48873 +       if (atomic_inc_return(&dev->enable_cnt) > 1) {
48874 +               pci_update_current_state(dev, dev->current_state);
48875                 return 0;               /* already enabled */
48876 +       }
48878         bridge = pci_upstream_bridge(dev);
48879         if (bridge)
48880 diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
48881 index ef7c4661314f..9684b468267f 100644
48882 --- a/drivers/pci/pci.h
48883 +++ b/drivers/pci/pci.h
48884 @@ -624,6 +624,12 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
48885  #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
48886  int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
48887                           struct resource *res);
48888 +#else
48889 +static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
48890 +                                       u16 segment, struct resource *res)
48892 +       return -ENODEV;
48894  #endif
48896  int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
48897 diff --git a/drivers/pci/pcie/rcec.c b/drivers/pci/pcie/rcec.c
48898 index 2c5c552994e4..d0bcd141ac9c 100644
48899 --- a/drivers/pci/pcie/rcec.c
48900 +++ b/drivers/pci/pcie/rcec.c
48901 @@ -32,7 +32,7 @@ static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep)
48903         /* Same bus, so check bitmap */
48904         for_each_set_bit(devn, &bitmap, 32)
48905 -               if (devn == rciep->devfn)
48906 +               if (devn == PCI_SLOT(rciep->devfn))
48907                         return true;
48909         return false;
48910 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
48911 index 953f15abc850..be51670572fa 100644
48912 --- a/drivers/pci/probe.c
48913 +++ b/drivers/pci/probe.c
48914 @@ -2353,6 +2353,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
48915         pci_set_of_node(dev);
48917         if (pci_setup_device(dev)) {
48918 +               pci_release_of_node(dev);
48919                 pci_bus_put(dev->bus);
48920                 kfree(dev);
48921                 return NULL;
48922 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
48923 index 653660e3ba9e..c87fd7a275e4 100644
48924 --- a/drivers/pci/quirks.c
48925 +++ b/drivers/pci/quirks.c
48926 @@ -3558,6 +3558,106 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
48927         dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
48930 +static bool acs_on_downstream;
48931 +static bool acs_on_multifunction;
48933 +#define NUM_ACS_IDS 16
48934 +struct acs_on_id {
48935 +       unsigned short vendor;
48936 +       unsigned short device;
48938 +static struct acs_on_id acs_on_ids[NUM_ACS_IDS];
48939 +static u8 max_acs_id;
48941 +static __init int pcie_acs_override_setup(char *p)
48943 +       if (!p)
48944 +               return -EINVAL;
48946 +       while (*p) {
48947 +               if (!strncmp(p, "downstream", 10))
48948 +                       acs_on_downstream = true;
48949 +               if (!strncmp(p, "multifunction", 13))
48950 +                       acs_on_multifunction = true;
48951 +               if (!strncmp(p, "id:", 3)) {
48952 +                       char opt[5];
48953 +                       int ret;
48954 +                       long val;
48956 +                       if (max_acs_id >= NUM_ACS_IDS - 1) {
48957 +                               pr_warn("Out of PCIe ACS override slots (%d)\n",
48958 +                                               NUM_ACS_IDS);
48959 +                               goto next;
48960 +                       }
48962 +                       p += 3;
48963 +                       snprintf(opt, 5, "%s", p);
48964 +                       ret = kstrtol(opt, 16, &val);
48965 +                       if (ret) {
48966 +                               pr_warn("PCIe ACS ID parse error %d\n", ret);
48967 +                               goto next;
48968 +                       }
48969 +                       acs_on_ids[max_acs_id].vendor = val;
48971 +                       p += strcspn(p, ":");
48972 +                       if (*p != ':') {
48973 +                               pr_warn("PCIe ACS invalid ID\n");
48974 +                               goto next;
48975 +                       }
48977 +                       p++;
48978 +                       snprintf(opt, 5, "%s", p);
48979 +                       ret = kstrtol(opt, 16, &val);
48980 +                       if (ret) {
48981 +                               pr_warn("PCIe ACS ID parse error %d\n", ret);
48982 +                               goto next;
48983 +                       }
48984 +                       acs_on_ids[max_acs_id].device = val;
48985 +                       max_acs_id++;
48986 +               }
48987 +next:
48988 +               p += strcspn(p, ",");
48989 +               if (*p == ',')
48990 +                       p++;
48991 +       }
48993 +       if (acs_on_downstream || acs_on_multifunction || max_acs_id)
48994 +               pr_warn("Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA\n");
48996 +       return 0;
48998 +early_param("pcie_acs_override", pcie_acs_override_setup);
49000 +static int pcie_acs_overrides(struct pci_dev *dev, u16 acs_flags)
49002 +       int i;
49004 +       /* Never override ACS for legacy devices or devices with ACS caps */
49005 +       if (!pci_is_pcie(dev) ||
49006 +               pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS))
49007 +                       return -ENOTTY;
49009 +       for (i = 0; i < max_acs_id; i++)
49010 +               if (acs_on_ids[i].vendor == dev->vendor &&
49011 +                       acs_on_ids[i].device == dev->device)
49012 +                               return 1;
49014 +       switch (pci_pcie_type(dev)) {
49015 +       case PCI_EXP_TYPE_DOWNSTREAM:
49016 +       case PCI_EXP_TYPE_ROOT_PORT:
49017 +               if (acs_on_downstream)
49018 +                       return 1;
49019 +               break;
49020 +       case PCI_EXP_TYPE_ENDPOINT:
49021 +       case PCI_EXP_TYPE_UPSTREAM:
49022 +       case PCI_EXP_TYPE_LEG_END:
49023 +       case PCI_EXP_TYPE_RC_END:
49024 +               if (acs_on_multifunction && dev->multifunction)
49025 +                       return 1;
49026 +       }
49028 +       return -ENOTTY;
49030  /*
49031   * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
49032   * The device will throw a Link Down error on AER-capable systems and
49033 @@ -4773,6 +4873,7 @@ static const struct pci_dev_acs_enabled {
49034         { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
49035         /* Zhaoxin Root/Downstream Ports */
49036         { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
49037 +       { PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides },
49038         { 0 }
49039  };
49041 diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
49042 index 7915d10f9aa1..bd549070c011 100644
49043 --- a/drivers/pci/vpd.c
49044 +++ b/drivers/pci/vpd.c
49045 @@ -570,7 +570,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
49046  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
49047  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
49048                 quirk_blacklist_vpd);
49049 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
49050  /*
49051   * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
49052   * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
49053 diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
49054 index 933bd8410fc2..ef9676418c9f 100644
49055 --- a/drivers/perf/arm_pmu_platform.c
49056 +++ b/drivers/perf/arm_pmu_platform.c
49057 @@ -6,6 +6,7 @@
49058   * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
49059   */
49060  #define pr_fmt(fmt) "hw perfevents: " fmt
49061 +#define dev_fmt pr_fmt
49063  #include <linux/bug.h>
49064  #include <linux/cpumask.h>
49065 @@ -100,10 +101,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
49066         struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
49068         num_irqs = platform_irq_count(pdev);
49069 -       if (num_irqs < 0) {
49070 -               pr_err("unable to count PMU IRQs\n");
49071 -               return num_irqs;
49072 -       }
49073 +       if (num_irqs < 0)
49074 +               return dev_err_probe(&pdev->dev, num_irqs, "unable to count PMU IRQs\n");
49076         /*
49077          * In this case we have no idea which CPUs are covered by the PMU.
49078 @@ -236,7 +235,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
49080         ret = armpmu_register(pmu);
49081         if (ret)
49082 -               goto out_free;
49083 +               goto out_free_irqs;
49085         return 0;
49087 diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
49088 index 26a0badabe38..19f32ae877b9 100644
49089 --- a/drivers/phy/cadence/phy-cadence-sierra.c
49090 +++ b/drivers/phy/cadence/phy-cadence-sierra.c
49091 @@ -319,6 +319,12 @@ static int cdns_sierra_phy_on(struct phy *gphy)
49092         u32 val;
49093         int ret;
49095 +       ret = reset_control_deassert(sp->phy_rst);
49096 +       if (ret) {
49097 +               dev_err(dev, "Failed to take the PHY out of reset\n");
49098 +               return ret;
49099 +       }
49101         /* Take the PHY lane group out of reset */
49102         ret = reset_control_deassert(ins->lnk_rst);
49103         if (ret) {
49104 @@ -616,7 +622,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
49106         pm_runtime_enable(dev);
49107         phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
49108 -       reset_control_deassert(sp->phy_rst);
49109         return PTR_ERR_OR_ZERO(phy_provider);
49111  put_child:
49112 diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
49113 index ea127b177f46..28c28d816484 100644
49114 --- a/drivers/phy/ingenic/phy-ingenic-usb.c
49115 +++ b/drivers/phy/ingenic/phy-ingenic-usb.c
49116 @@ -352,8 +352,8 @@ static int ingenic_usb_phy_probe(struct platform_device *pdev)
49117         }
49119         priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
49120 -       if (IS_ERR(priv))
49121 -               return PTR_ERR(priv);
49122 +       if (IS_ERR(priv->phy))
49123 +               return PTR_ERR(priv->phy);
49125         phy_set_drvdata(priv->phy, priv);
49127 diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
49128 index 6c96f2bf5266..c8ee23fc3a83 100644
49129 --- a/drivers/phy/marvell/Kconfig
49130 +++ b/drivers/phy/marvell/Kconfig
49131 @@ -3,8 +3,8 @@
49132  # Phy drivers for Marvell platforms
49134  config ARMADA375_USBCLUSTER_PHY
49135 -       def_bool y
49136 -       depends on MACH_ARMADA_375 || COMPILE_TEST
49137 +       bool "Armada 375 USB cluster PHY support" if COMPILE_TEST
49138 +       default y if MACH_ARMADA_375
49139         depends on OF && HAS_IOMEM
49140         select GENERIC_PHY
49142 diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
49143 index 9a610b414b1f..753cb5bab930 100644
49144 --- a/drivers/phy/ralink/phy-mt7621-pci.c
49145 +++ b/drivers/phy/ralink/phy-mt7621-pci.c
49146 @@ -62,7 +62,7 @@
49148  #define RG_PE1_FRC_MSTCKDIV                    BIT(5)
49150 -#define XTAL_MASK                              GENMASK(7, 6)
49151 +#define XTAL_MASK                              GENMASK(8, 6)
49153  #define MAX_PHYS       2
49155 @@ -319,9 +319,9 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
49156                 return PTR_ERR(phy->regmap);
49158         phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
49159 -       if (IS_ERR(phy)) {
49160 +       if (IS_ERR(phy->phy)) {
49161                 dev_err(dev, "failed to create phy\n");
49162 -               return PTR_ERR(phy);
49163 +               return PTR_ERR(phy->phy);
49164         }
49166         phy_set_drvdata(phy->phy, phy);
49167 diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
49168 index c9cfafe89cbf..e28e25f98708 100644
49169 --- a/drivers/phy/ti/phy-j721e-wiz.c
49170 +++ b/drivers/phy/ti/phy-j721e-wiz.c
49171 @@ -615,6 +615,12 @@ static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
49172                 of_clk_del_provider(clk_node);
49173                 of_node_put(clk_node);
49174         }
49176 +       for (i = 0; i < wiz->clk_div_sel_num; i++) {
49177 +               clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
49178 +               of_clk_del_provider(clk_node);
49179 +               of_node_put(clk_node);
49180 +       }
49183  static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
49184 @@ -947,27 +953,24 @@ static int wiz_probe(struct platform_device *pdev)
49185                 goto err_get_sync;
49186         }
49188 +       ret = wiz_init(wiz);
49189 +       if (ret) {
49190 +               dev_err(dev, "WIZ initialization failed\n");
49191 +               goto err_wiz_init;
49192 +       }
49194         serdes_pdev = of_platform_device_create(child_node, NULL, dev);
49195         if (!serdes_pdev) {
49196                 dev_WARN(dev, "Unable to create SERDES platform device\n");
49197                 ret = -ENOMEM;
49198 -               goto err_pdev_create;
49199 -       }
49200 -       wiz->serdes_pdev = serdes_pdev;
49202 -       ret = wiz_init(wiz);
49203 -       if (ret) {
49204 -               dev_err(dev, "WIZ initialization failed\n");
49205                 goto err_wiz_init;
49206         }
49207 +       wiz->serdes_pdev = serdes_pdev;
49209         of_node_put(child_node);
49210         return 0;
49212  err_wiz_init:
49213 -       of_platform_device_destroy(&serdes_pdev->dev, NULL);
49215 -err_pdev_create:
49216         wiz_clock_cleanup(wiz, node);
49218  err_get_sync:
49219 diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
49220 index 9887f908f540..812e5409d359 100644
49221 --- a/drivers/phy/ti/phy-twl4030-usb.c
49222 +++ b/drivers/phy/ti/phy-twl4030-usb.c
49223 @@ -779,7 +779,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
49225         usb_remove_phy(&twl->phy);
49226         pm_runtime_get_sync(twl->dev);
49227 -       cancel_delayed_work(&twl->id_workaround_work);
49228 +       cancel_delayed_work_sync(&twl->id_workaround_work);
49229         device_remove_file(twl->dev, &dev_attr_vbus);
49231         /* set transceiver mode to power on defaults */
49232 diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
49233 index e71ebccc479c..03c32b2c5d30 100644
49234 --- a/drivers/pinctrl/pinctrl-at91-pio4.c
49235 +++ b/drivers/pinctrl/pinctrl-at91-pio4.c
49236 @@ -801,6 +801,10 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
49238         conf = atmel_pin_config_read(pctldev, pin_id);
49240 +       /* Keep slew rate enabled by default. */
49241 +       if (atmel_pioctrl->slew_rate_support)
49242 +               conf |= ATMEL_PIO_SR_MASK;
49244         for (i = 0; i < num_configs; i++) {
49245                 unsigned int param = pinconf_to_config_param(configs[i]);
49246                 unsigned int arg = pinconf_to_config_argument(configs[i]);
49247 @@ -808,10 +812,6 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
49248                 dev_dbg(pctldev->dev, "%s: pin=%u, config=0x%lx\n",
49249                         __func__, pin_id, configs[i]);
49251 -               /* Keep slew rate enabled by default. */
49252 -               if (atmel_pioctrl->slew_rate_support)
49253 -                       conf |= ATMEL_PIO_SR_MASK;
49255                 switch (param) {
49256                 case PIN_CONFIG_BIAS_DISABLE:
49257                         conf &= (~ATMEL_PIO_PUEN_MASK);
49258 diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
49259 index f2746125b077..3de0f767b7d1 100644
49260 --- a/drivers/pinctrl/pinctrl-ingenic.c
49261 +++ b/drivers/pinctrl/pinctrl-ingenic.c
49262 @@ -667,7 +667,9 @@ static int jz4770_pwm_pwm7_pins[] = { 0x6b, };
49263  static int jz4770_mac_rmii_pins[] = {
49264         0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
49265  };
49266 -static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
49267 +static int jz4770_mac_mii_pins[] = {
49268 +       0x7b, 0x7a, 0x7d, 0x7c, 0xa7, 0x24, 0xaf,
49271  static const struct group_desc jz4770_groups[] = {
49272         INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data, 0),
49273 @@ -2107,26 +2109,48 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
49274         enum pin_config_param param = pinconf_to_config_param(*config);
49275         unsigned int idx = pin % PINS_PER_GPIO_CHIP;
49276         unsigned int offt = pin / PINS_PER_GPIO_CHIP;
49277 -       bool pull;
49278 +       unsigned int bias;
49279 +       bool pull, pullup, pulldown;
49281 -       if (jzpc->info->version >= ID_JZ4770)
49282 -               pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
49283 -       else
49284 -               pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
49285 +       if (jzpc->info->version >= ID_X1830) {
49286 +               unsigned int half = PINS_PER_GPIO_CHIP / 2;
49287 +               unsigned int idxh = (pin % half) * 2;
49289 +               if (idx < half)
49290 +                       regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
49291 +                                       X1830_GPIO_PEL, &bias);
49292 +               else
49293 +                       regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
49294 +                                       X1830_GPIO_PEH, &bias);
49296 +               bias = (bias >> idxh) & (GPIO_PULL_UP | GPIO_PULL_DOWN);
49298 +               pullup = (bias == GPIO_PULL_UP) && (jzpc->info->pull_ups[offt] & BIT(idx));
49299 +               pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx));
49301 +       } else {
49302 +               if (jzpc->info->version >= ID_JZ4770)
49303 +                       pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
49304 +               else
49305 +                       pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
49307 +               pullup = pull && (jzpc->info->pull_ups[offt] & BIT(idx));
49308 +               pulldown = pull && (jzpc->info->pull_downs[offt] & BIT(idx));
49309 +       }
49311         switch (param) {
49312         case PIN_CONFIG_BIAS_DISABLE:
49313 -               if (pull)
49314 +               if (pullup || pulldown)
49315                         return -EINVAL;
49316                 break;
49318         case PIN_CONFIG_BIAS_PULL_UP:
49319 -               if (!pull || !(jzpc->info->pull_ups[offt] & BIT(idx)))
49320 +               if (!pullup)
49321                         return -EINVAL;
49322                 break;
49324         case PIN_CONFIG_BIAS_PULL_DOWN:
49325 -               if (!pull || !(jzpc->info->pull_downs[offt] & BIT(idx)))
49326 +               if (!pulldown)
49327                         return -EINVAL;
49328                 break;
49330 @@ -2144,7 +2168,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
49331         if (jzpc->info->version >= ID_X1830) {
49332                 unsigned int idx = pin % PINS_PER_GPIO_CHIP;
49333                 unsigned int half = PINS_PER_GPIO_CHIP / 2;
49334 -               unsigned int idxh = pin % half * 2;
49335 +               unsigned int idxh = (pin % half) * 2;
49336                 unsigned int offt = pin / PINS_PER_GPIO_CHIP;
49338                 if (idx < half) {
49339 diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
49340 index 7771316dfffa..10890fde9a75 100644
49341 --- a/drivers/pinctrl/pinctrl-single.c
49342 +++ b/drivers/pinctrl/pinctrl-single.c
49343 @@ -270,20 +270,44 @@ static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
49344         writel(val, reg);
49347 +static unsigned int pcs_pin_reg_offset_get(struct pcs_device *pcs,
49348 +                                          unsigned int pin)
49350 +       unsigned int mux_bytes = pcs->width / BITS_PER_BYTE;
49352 +       if (pcs->bits_per_mux) {
49353 +               unsigned int pin_offset_bytes;
49355 +               pin_offset_bytes = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
49356 +               return (pin_offset_bytes / mux_bytes) * mux_bytes;
49357 +       }
49359 +       return pin * mux_bytes;
49362 +static unsigned int pcs_pin_shift_reg_get(struct pcs_device *pcs,
49363 +                                         unsigned int pin)
49365 +       return (pin % (pcs->width / pcs->bits_per_pin)) * pcs->bits_per_pin;
49368  static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
49369                                         struct seq_file *s,
49370                                         unsigned pin)
49372         struct pcs_device *pcs;
49373 -       unsigned val, mux_bytes;
49374 +       unsigned int val;
49375         unsigned long offset;
49376         size_t pa;
49378         pcs = pinctrl_dev_get_drvdata(pctldev);
49380 -       mux_bytes = pcs->width / BITS_PER_BYTE;
49381 -       offset = pin * mux_bytes;
49382 +       offset = pcs_pin_reg_offset_get(pcs, pin);
49383         val = pcs->read(pcs->base + offset);
49385 +       if (pcs->bits_per_mux)
49386 +               val &= pcs->fmask << pcs_pin_shift_reg_get(pcs, pin);
49388         pa = pcs->res->start + offset;
49390         seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME);
49391 @@ -384,7 +408,6 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
49392         struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
49393         struct pcs_gpiofunc_range *frange = NULL;
49394         struct list_head *pos, *tmp;
49395 -       int mux_bytes = 0;
49396         unsigned data;
49398         /* If function mask is null, return directly. */
49399 @@ -392,29 +415,27 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
49400                 return -ENOTSUPP;
49402         list_for_each_safe(pos, tmp, &pcs->gpiofuncs) {
49403 +               u32 offset;
49405                 frange = list_entry(pos, struct pcs_gpiofunc_range, node);
49406                 if (pin >= frange->offset + frange->npins
49407                         || pin < frange->offset)
49408                         continue;
49409 -               mux_bytes = pcs->width / BITS_PER_BYTE;
49411 -               if (pcs->bits_per_mux) {
49412 -                       int byte_num, offset, pin_shift;
49413 +               offset = pcs_pin_reg_offset_get(pcs, pin);
49415 -                       byte_num = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
49416 -                       offset = (byte_num / mux_bytes) * mux_bytes;
49417 -                       pin_shift = pin % (pcs->width / pcs->bits_per_pin) *
49418 -                                   pcs->bits_per_pin;
49419 +               if (pcs->bits_per_mux) {
49420 +                       int pin_shift = pcs_pin_shift_reg_get(pcs, pin);
49422                         data = pcs->read(pcs->base + offset);
49423                         data &= ~(pcs->fmask << pin_shift);
49424                         data |= frange->gpiofunc << pin_shift;
49425                         pcs->write(data, pcs->base + offset);
49426                 } else {
49427 -                       data = pcs->read(pcs->base + pin * mux_bytes);
49428 +                       data = pcs->read(pcs->base + offset);
49429                         data &= ~pcs->fmask;
49430                         data |= frange->gpiofunc;
49431 -                       pcs->write(data, pcs->base + pin * mux_bytes);
49432 +                       pcs->write(data, pcs->base + offset);
49433                 }
49434                 break;
49435         }
49436 @@ -656,10 +677,8 @@ static const struct pinconf_ops pcs_pinconf_ops = {
49437   * pcs_add_pin() - add a pin to the static per controller pin array
49438   * @pcs: pcs driver instance
49439   * @offset: register offset from base
49440 - * @pin_pos: unused
49441   */
49442 -static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
49443 -               unsigned pin_pos)
49444 +static int pcs_add_pin(struct pcs_device *pcs, unsigned int offset)
49446         struct pcs_soc_data *pcs_soc = &pcs->socdata;
49447         struct pinctrl_pin_desc *pin;
49448 @@ -728,17 +747,9 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
49449         for (i = 0; i < pcs->desc.npins; i++) {
49450                 unsigned offset;
49451                 int res;
49452 -               int byte_num;
49453 -               int pin_pos = 0;
49455 -               if (pcs->bits_per_mux) {
49456 -                       byte_num = (pcs->bits_per_pin * i) / BITS_PER_BYTE;
49457 -                       offset = (byte_num / mux_bytes) * mux_bytes;
49458 -                       pin_pos = i % num_pins_in_register;
49459 -               } else {
49460 -                       offset = i * mux_bytes;
49461 -               }
49462 -               res = pcs_add_pin(pcs, offset, pin_pos);
49463 +               offset = pcs_pin_reg_offset_get(pcs, i);
49464 +               res = pcs_add_pin(pcs, offset);
49465                 if (res < 0) {
49466                         dev_err(pcs->dev, "error adding pins: %i\n", res);
49467                         return res;
49468 diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
49469 index 0cd7f33cdf25..2b99f4130e1e 100644
49470 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c
49471 +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
49472 @@ -55,7 +55,7 @@ static void exynos_irq_mask(struct irq_data *irqd)
49473         struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
49474         struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
49475         unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
49476 -       unsigned long mask;
49477 +       unsigned int mask;
49478         unsigned long flags;
49480         raw_spin_lock_irqsave(&bank->slock, flags);
49481 @@ -83,7 +83,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
49482         struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
49483         struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
49484         unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
49485 -       unsigned long mask;
49486 +       unsigned int mask;
49487         unsigned long flags;
49489         /*
49490 @@ -483,7 +483,7 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
49491         chained_irq_exit(chip, desc);
49494 -static inline void exynos_irq_demux_eint(unsigned long pend,
49495 +static inline void exynos_irq_demux_eint(unsigned int pend,
49496                                                 struct irq_domain *domain)
49498         unsigned int irq;
49499 @@ -500,8 +500,8 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
49501         struct irq_chip *chip = irq_desc_get_chip(desc);
49502         struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
49503 -       unsigned long pend;
49504 -       unsigned long mask;
49505 +       unsigned int pend;
49506 +       unsigned int mask;
49507         int i;
49509         chained_irq_enter(chip, desc);
49510 diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
49511 index 0811562deecc..24be8f550ae0 100644
49512 --- a/drivers/platform/chrome/cros_ec_typec.c
49513 +++ b/drivers/platform/chrome/cros_ec_typec.c
49514 @@ -483,6 +483,11 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
49515                 return -ENOTSUPP;
49516         }
49518 +       if (!pd_ctrl->dp_mode) {
49519 +               dev_err(typec->dev, "No valid DP mode provided.\n");
49520 +               return -EINVAL;
49521 +       }
49523         /* Status VDO. */
49524         dp_data.status = DP_STATUS_ENABLED;
49525         if (port->mux_flags & USB_PD_MUX_HPD_IRQ)
49526 diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
49527 index bbc4e71a16ff..38800e86ed8a 100644
49528 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c
49529 +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
49530 @@ -294,6 +294,9 @@ mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
49531         if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
49532                 return NULL;
49534 +       /* Make sure the read of 'avail->idx' is ordered before reading the ring entry. */
49535 +       virtio_rmb(false);
49537         idx = vring->next_avail % vr->num;
49538         head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
49539         if (WARN_ON(head >= vr->num))
49540 @@ -322,7 +325,7 @@ static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
49541          * done or not. Add a memory barrier here to make sure the update above
49542          * completes before updating the idx.
49543          */
49544 -       mb();
49545 +       virtio_mb(false);
49546         vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
49549 @@ -733,6 +736,12 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
49550                 desc = NULL;
49551                 fifo->vring[is_rx] = NULL;
49553 +               /*
49554 +                * Make sure the loads and stores are ordered before
49555 +                * returning to virtio.
49556 +                */
49557 +               virtio_mb(false);
49559                 /* Notify upper layer that packet is done. */
49560                 spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
49561                 vring_interrupt(0, vring->vq);
49562 diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
49563 index 5bcb59ed579d..89761d3e1a47 100644
49564 --- a/drivers/platform/surface/aggregator/controller.c
49565 +++ b/drivers/platform/surface/aggregator/controller.c
49566 @@ -1040,7 +1040,7 @@ static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
49567         union acpi_object *obj;
49568         u64 val;
49570 -       if (!(funcs & BIT(func)))
49571 +       if (!(funcs & BIT_ULL(func)))
49572                 return 0; /* Not supported, leave *ret at its default value */
49574         obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
49575 diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
49576 index 461ec61530eb..205a096e9cee 100644
49577 --- a/drivers/platform/x86/Kconfig
49578 +++ b/drivers/platform/x86/Kconfig
49579 @@ -688,7 +688,7 @@ config INTEL_HID_EVENT
49581  config INTEL_INT0002_VGPIO
49582         tristate "Intel ACPI INT0002 Virtual GPIO driver"
49583 -       depends on GPIOLIB && ACPI
49584 +       depends on GPIOLIB && ACPI && PM_SLEEP
49585         select GPIOLIB_IRQCHIP
49586         help
49587           Some peripherals on Bay Trail and Cherry Trail platforms signal a
49588 diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
49589 index 27a298b7c541..c97bd4a45242 100644
49590 --- a/drivers/platform/x86/dell/dell-smbios-wmi.c
49591 +++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
49592 @@ -271,7 +271,8 @@ int init_dell_smbios_wmi(void)
49594  void exit_dell_smbios_wmi(void)
49596 -       wmi_driver_unregister(&dell_smbios_wmi_driver);
49597 +       if (wmi_supported)
49598 +               wmi_driver_unregister(&dell_smbios_wmi_driver);
49601  MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table);
49602 diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
49603 index 7410ccae650c..a90ae6ba4a73 100644
49604 --- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
49605 +++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
49606 @@ -399,6 +399,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
49607         union acpi_object *obj = NULL;
49608         union acpi_object *elements;
49609         struct kset *tmp_set;
49610 +       int min_elements;
49612         /* instance_id needs to be reset for each type GUID
49613          * also, instance IDs are unique within GUID but not across
49614 @@ -409,14 +410,38 @@ static int init_bios_attributes(int attr_type, const char *guid)
49615         retval = alloc_attributes_data(attr_type);
49616         if (retval)
49617                 return retval;
49619 +       switch (attr_type) {
49620 +       case ENUM:      min_elements = 8;       break;
49621 +       case INT:       min_elements = 9;       break;
49622 +       case STR:       min_elements = 8;       break;
49623 +       case PO:        min_elements = 4;       break;
49624 +       default:
49625 +               pr_err("Error: Unknown attr_type: %d\n", attr_type);
49626 +               return -EINVAL;
49627 +       }
49629         /* need to use specific instance_id and guid combination to get right data */
49630         obj = get_wmiobj_pointer(instance_id, guid);
49631 -       if (!obj || obj->type != ACPI_TYPE_PACKAGE)
49632 +       if (!obj)
49633                 return -ENODEV;
49634 -       elements = obj->package.elements;
49636         mutex_lock(&wmi_priv.mutex);
49637 -       while (elements) {
49638 +       while (obj) {
49639 +               if (obj->type != ACPI_TYPE_PACKAGE) {
49640 +                       pr_err("Error: Expected ACPI-package type, got: %d\n", obj->type);
49641 +                       retval = -EIO;
49642 +                       goto err_attr_init;
49643 +               }
49645 +               if (obj->package.count < min_elements) {
49646 +                       pr_err("Error: ACPI-package does not have enough elements: %d < %d\n",
49647 +                              obj->package.count, min_elements);
49648 +                       goto nextobj;
49649 +               }
49651 +               elements = obj->package.elements;
49653                 /* sanity checking */
49654                 if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
49655                         pr_debug("incorrect element type\n");
49656 @@ -481,7 +506,6 @@ static int init_bios_attributes(int attr_type, const char *guid)
49657                 kfree(obj);
49658                 instance_id++;
49659                 obj = get_wmiobj_pointer(instance_id, guid);
49660 -               elements = obj ? obj->package.elements : NULL;
49661         }
49663         mutex_unlock(&wmi_priv.mutex);
49664 diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
49665 index 6cb5ad4be231..387817290921 100644
49666 --- a/drivers/platform/x86/ideapad-laptop.c
49667 +++ b/drivers/platform/x86/ideapad-laptop.c
49668 @@ -57,8 +57,8 @@ enum {
49669  };
49671  enum {
49672 -       SMBC_CONSERVATION_ON  = 3,
49673 -       SMBC_CONSERVATION_OFF = 5,
49674 +       SBMC_CONSERVATION_ON  = 3,
49675 +       SBMC_CONSERVATION_OFF = 5,
49676  };
49678  enum {
49679 @@ -182,9 +182,9 @@ static int eval_gbmd(acpi_handle handle, unsigned long *res)
49680         return eval_int(handle, "GBMD", res);
49683 -static int exec_smbc(acpi_handle handle, unsigned long arg)
49684 +static int exec_sbmc(acpi_handle handle, unsigned long arg)
49686 -       return exec_simple_method(handle, "SMBC", arg);
49687 +       return exec_simple_method(handle, "SBMC", arg);
49690  static int eval_hals(acpi_handle handle, unsigned long *res)
49691 @@ -477,7 +477,7 @@ static ssize_t conservation_mode_store(struct device *dev,
49692         if (err)
49693                 return err;
49695 -       err = exec_smbc(priv->adev->handle, state ? SMBC_CONSERVATION_ON : SMBC_CONSERVATION_OFF);
49696 +       err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
49697         if (err)
49698                 return err;
49700 @@ -809,6 +809,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
49702         struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
49703         struct ideapad_private *priv = dytc->priv;
49704 +       unsigned long output;
49705         int err;
49707         err = mutex_lock_interruptible(&dytc->mutex);
49708 @@ -829,7 +830,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
49710                 /* Determine if we are in CQL mode. This alters the commands we do */
49711                 err = dytc_cql_command(priv, DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
49712 -                                      NULL);
49713 +                                      &output);
49714                 if (err)
49715                         goto unlock;
49716         }
49717 diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
49718 index 289c6655d425..569342aa8926 100644
49719 --- a/drivers/platform/x86/intel_int0002_vgpio.c
49720 +++ b/drivers/platform/x86/intel_int0002_vgpio.c
49721 @@ -51,6 +51,12 @@
49722  #define GPE0A_STS_PORT                 0x420
49723  #define GPE0A_EN_PORT                  0x428
49725 +struct int0002_data {
49726 +       struct gpio_chip chip;
49727 +       int parent_irq;
49728 +       int wake_enable_count;
49731  /*
49732   * As this is not a real GPIO at all, but just a hack to model an event in
49733   * ACPI the get / set functions are dummy functions.
49734 @@ -98,14 +104,16 @@ static void int0002_irq_mask(struct irq_data *data)
49735  static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
49737         struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
49738 -       struct platform_device *pdev = to_platform_device(chip->parent);
49739 -       int irq = platform_get_irq(pdev, 0);
49740 +       struct int0002_data *int0002 = container_of(chip, struct int0002_data, chip);
49742 -       /* Propagate to parent irq */
49743 +       /*
49744 +        * Applying the wakeup flag to our parent IRQ is delayed until system
49745 +        * suspend, because we only want to do this when using s2idle.
49746 +        */
49747         if (on)
49748 -               enable_irq_wake(irq);
49749 +               int0002->wake_enable_count++;
49750         else
49751 -               disable_irq_wake(irq);
49752 +               int0002->wake_enable_count--;
49754         return 0;
49756 @@ -135,7 +143,7 @@ static bool int0002_check_wake(void *data)
49757         return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
49760 -static struct irq_chip int0002_byt_irqchip = {
49761 +static struct irq_chip int0002_irqchip = {
49762         .name                   = DRV_NAME,
49763         .irq_ack                = int0002_irq_ack,
49764         .irq_mask               = int0002_irq_mask,
49765 @@ -143,21 +151,9 @@ static struct irq_chip int0002_byt_irqchip = {
49766         .irq_set_wake           = int0002_irq_set_wake,
49767  };
49769 -static struct irq_chip int0002_cht_irqchip = {
49770 -       .name                   = DRV_NAME,
49771 -       .irq_ack                = int0002_irq_ack,
49772 -       .irq_mask               = int0002_irq_mask,
49773 -       .irq_unmask             = int0002_irq_unmask,
49774 -       /*
49775 -        * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI
49776 -        * and we don't want to mess with the ACPI SCI irq settings.
49777 -        */
49778 -       .flags                  = IRQCHIP_SKIP_SET_WAKE,
49781  static const struct x86_cpu_id int0002_cpu_ids[] = {
49782 -       X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,     &int0002_byt_irqchip),
49783 -       X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,        &int0002_cht_irqchip),
49784 +       X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
49785 +       X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
49786         {}
49787  };
49789 @@ -172,8 +168,9 @@ static int int0002_probe(struct platform_device *pdev)
49791         struct device *dev = &pdev->dev;
49792         const struct x86_cpu_id *cpu_id;
49793 -       struct gpio_chip *chip;
49794 +       struct int0002_data *int0002;
49795         struct gpio_irq_chip *girq;
49796 +       struct gpio_chip *chip;
49797         int irq, ret;
49799         /* Menlow has a different INT0002 device? <sigh> */
49800 @@ -185,10 +182,13 @@ static int int0002_probe(struct platform_device *pdev)
49801         if (irq < 0)
49802                 return irq;
49804 -       chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
49805 -       if (!chip)
49806 +       int0002 = devm_kzalloc(dev, sizeof(*int0002), GFP_KERNEL);
49807 +       if (!int0002)
49808                 return -ENOMEM;
49810 +       int0002->parent_irq = irq;
49812 +       chip = &int0002->chip;
49813         chip->label = DRV_NAME;
49814         chip->parent = dev;
49815         chip->owner = THIS_MODULE;
49816 @@ -214,7 +214,7 @@ static int int0002_probe(struct platform_device *pdev)
49817         }
49819         girq = &chip->irq;
49820 -       girq->chip = (struct irq_chip *)cpu_id->driver_data;
49821 +       girq->chip = &int0002_irqchip;
49822         /* This let us handle the parent IRQ in the driver */
49823         girq->parent_handler = NULL;
49824         girq->num_parents = 0;
49825 @@ -230,6 +230,7 @@ static int int0002_probe(struct platform_device *pdev)
49827         acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
49828         device_init_wakeup(dev, true);
49829 +       dev_set_drvdata(dev, int0002);
49830         return 0;
49833 @@ -240,6 +241,36 @@ static int int0002_remove(struct platform_device *pdev)
49834         return 0;
49837 +static int int0002_suspend(struct device *dev)
49839 +       struct int0002_data *int0002 = dev_get_drvdata(dev);
49841 +       /*
49842 +        * The INT0002 parent IRQ is often shared with the ACPI GPE IRQ; don't
49843 +        * muck with it when firmware-based suspend is used, otherwise we may
49844 +        * cause spurious wakeups from firmware-managed suspend.
49845 +        */
49846 +       if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
49847 +               enable_irq_wake(int0002->parent_irq);
49849 +       return 0;
49852 +static int int0002_resume(struct device *dev)
49854 +       struct int0002_data *int0002 = dev_get_drvdata(dev);
49856 +       if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
49857 +               disable_irq_wake(int0002->parent_irq);
49859 +       return 0;
49862 +static const struct dev_pm_ops int0002_pm_ops = {
49863 +       .suspend = int0002_suspend,
49864 +       .resume = int0002_resume,
49867  static const struct acpi_device_id int0002_acpi_ids[] = {
49868         { "INT0002", 0 },
49869         { },
49870 @@ -250,6 +281,7 @@ static struct platform_driver int0002_driver = {
49871         .driver = {
49872                 .name                   = DRV_NAME,
49873                 .acpi_match_table       = int0002_acpi_ids,
49874 +               .pm                     = &int0002_pm_ops,
49875         },
49876         .probe  = int0002_probe,
49877         .remove = int0002_remove,
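/*
 * Sketch of the deferred-wake pattern the int0002 hunks above implement,
 * with hypothetical demo_* names (not part of the patch): ->irq_set_wake()
 * only counts requests, and the shared parent IRQ is armed from the PM
 * suspend callback, and only when suspending via s2idle rather than
 * through firmware.
 */
struct demo_data {
	int parent_irq;
	int wake_enable_count;
};

static int demo_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct demo_data *d = irq_data_get_irq_chip_data(data);

	if (on)
		d->wake_enable_count++;	/* remember the request ... */
	else
		d->wake_enable_count--;

	return 0;
}

static int demo_suspend(struct device *dev)
{
	struct demo_data *d = dev_get_drvdata(dev);

	/* ... and act on it only now; firmware suspend (S3) is left alone */
	if (!pm_suspend_via_firmware() && d->wake_enable_count)
		enable_irq_wake(d->parent_irq);

	return 0;
}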
49878 diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
49879 index bffe548187ee..c2918ee3e100 100644
49880 --- a/drivers/platform/x86/intel_ips.c
49881 +++ b/drivers/platform/x86/intel_ips.c
49882 @@ -798,7 +798,7 @@ static int ips_adjust(void *data)
49883                         ips_gpu_lower(ips);
49885  sleep:
49886 -               schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
49887 +               schedule_msec_hrtimeout_interruptible(IPS_ADJUST_PERIOD);
49888         } while (!kthread_should_stop());
49890         dev_dbg(ips->dev, "ips-adjust thread stopped\n");
49891 @@ -974,7 +974,7 @@ static int ips_monitor(void *data)
49892         seqno_timestamp = get_jiffies_64();
49894         old_cpu_power = thm_readl(THM_CEC);
49895 -       schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
49896 +       schedule_msec_hrtimeout_interruptible(IPS_SAMPLE_PERIOD);
49898         /* Collect an initial average */
49899         for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
49900 @@ -1001,7 +1001,7 @@ static int ips_monitor(void *data)
49901                         mchp_samples[i] = mchp;
49902                 }
49904 -               schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
49905 +               schedule_msec_hrtimeout_interruptible(IPS_SAMPLE_PERIOD);
49906                 if (kthread_should_stop())
49907                         break;
49908         }
49909 @@ -1028,7 +1028,7 @@ static int ips_monitor(void *data)
49910          * us to reduce the sample frequency if the CPU and GPU are idle.
49911          */
49912         old_cpu_power = thm_readl(THM_CEC);
49913 -       schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
49914 +       schedule_msec_hrtimeout_interruptible(IPS_SAMPLE_PERIOD);
49915         last_sample_period = IPS_SAMPLE_PERIOD;
49917         timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
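/*
 * Editor's note on the conversion above: schedule_msec_hrtimeout_interruptible()
 * is supplied by the hrtimeout changes carried elsewhere in this patch. Unlike
 * schedule_timeout_interruptible(msecs_to_jiffies(ms)), which rounds the sleep
 * up to whole jiffies, it takes a plain millisecond count and sleeps on a
 * high-resolution timer, so short polling intervals (down to the 1 ms waits in
 * rtc-wm8350.c further below) are not inflated at low HZ.
 */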
49918 diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
49919 index b5888aeb4bcf..260d49dca1ad 100644
49920 --- a/drivers/platform/x86/intel_pmc_core.c
49921 +++ b/drivers/platform/x86/intel_pmc_core.c
49922 @@ -1186,9 +1186,15 @@ static const struct pci_device_id pmc_pci_ids[] = {
49923   * the platform BIOS enforces 24Mhz crystal to shutdown
49924   * before PMC can assert SLP_S0#.
49925   */
49926 +static bool xtal_ignore;
49927  static int quirk_xtal_ignore(const struct dmi_system_id *id)
49929 -       struct pmc_dev *pmcdev = &pmc;
49930 +       xtal_ignore = true;
49931 +       return 0;
49934 +static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
49936         u32 value;
49938         value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
49939 @@ -1197,7 +1203,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id)
49940         /* Low Voltage Mode Enable */
49941         value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
49942         pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
49943 -       return 0;
49946  static const struct dmi_system_id pmc_core_dmi_table[]  = {
49947 @@ -1212,6 +1217,14 @@ static const struct dmi_system_id pmc_core_dmi_table[]  = {
49948         {}
49949  };
49951 +static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
49953 +       dmi_check_system(pmc_core_dmi_table);
49955 +       if (xtal_ignore)
49956 +               pmc_core_xtal_ignore(pmcdev);
49959  static int pmc_core_probe(struct platform_device *pdev)
49961         static bool device_initialized;
49962 @@ -1253,7 +1266,7 @@ static int pmc_core_probe(struct platform_device *pdev)
49963         mutex_init(&pmcdev->lock);
49964         platform_set_drvdata(pdev, pmcdev);
49965         pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
49966 -       dmi_check_system(pmc_core_dmi_table);
49967 +       pmc_core_do_dmi_quirks(pmcdev);
49969         /*
49970          * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
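/*
 * The intel_pmc_core hunks above split the quirk in two because a DMI
 * callback runs with no handle on the dynamically allocated pmc_dev.
 * A sketch of the pattern, with hypothetical demo_* names:
 */
static bool demo_quirk_wanted;

static int demo_dmi_cb(const struct dmi_system_id *id)
{
	demo_quirk_wanted = true;	/* no device context available here */
	return 0;
}

static void demo_do_dmi_quirks(struct demo_dev *ddev)
{
	dmi_check_system(demo_dmi_table);	/* may latch the flag above */
	if (demo_quirk_wanted)
		demo_apply_xtal_quirk(ddev);	/* device is fully set up now */
}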
49971 diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
49972 index a2a2d923e60c..df1fc6c719f3 100644
49973 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
49974 +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
49975 @@ -21,12 +21,16 @@
49976  #define PUNIT_MAILBOX_BUSY_BIT         31
49978  /*
49979 - * The average time to complete some commands is about 40us. The current
49980 - * count is enough to satisfy 40us. But when the firmware is very busy, this
49981 - * causes timeout occasionally.  So increase to deal with some worst case
49982 - * scenarios. Most of the command still complete in few us.
49983 + * The average time to complete mailbox commands is less than 40us. Most
49984 + * commands complete in a few microseconds, but the same firmware handles
49985 + * requests from all power management features.
49986 + * We can create a scenario where we flood the firmware with requests and
49987 + * the mailbox response is then delayed by hundreds of microseconds. So
49988 + * define two timeouts: one for the average case and one for the long case.
49989 + * If the firmware takes longer than average, just call cond_resched().
49990   */
49991 -#define OS_MAILBOX_RETRY_COUNT         100
49992 +#define OS_MAILBOX_TIMEOUT_AVG_US      40
49993 +#define OS_MAILBOX_TIMEOUT_MAX_US      1000
49995  struct isst_if_device {
49996         struct mutex mutex;
49997 @@ -35,11 +39,13 @@ struct isst_if_device {
49998  static int isst_if_mbox_cmd(struct pci_dev *pdev,
49999                             struct isst_if_mbox_cmd *mbox_cmd)
50001 -       u32 retries, data;
50002 +       s64 tm_delta = 0;
50003 +       ktime_t tm;
50004 +       u32 data;
50005         int ret;
50007         /* Poll for rb bit == 0 */
50008 -       retries = OS_MAILBOX_RETRY_COUNT;
50009 +       tm = ktime_get();
50010         do {
50011                 ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
50012                                             &data);
50013 @@ -48,11 +54,14 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
50015                 if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
50016                         ret = -EBUSY;
50017 +                       tm_delta = ktime_us_delta(ktime_get(), tm);
50018 +                       if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
50019 +                               cond_resched();
50020                         continue;
50021                 }
50022                 ret = 0;
50023                 break;
50024 -       } while (--retries);
50025 +       } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
50027         if (ret)
50028                 return ret;
50029 @@ -74,7 +83,8 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
50030                 return ret;
50032         /* Poll for rb bit == 0 */
50033 -       retries = OS_MAILBOX_RETRY_COUNT;
50034 +       tm_delta = 0;
50035 +       tm = ktime_get();
50036         do {
50037                 ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
50038                                             &data);
50039 @@ -83,6 +93,9 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
50041                 if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
50042                         ret = -EBUSY;
50043 +                       tm_delta = ktime_us_delta(ktime_get(), tm);
50044 +                       if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
50045 +                               cond_resched();
50046                         continue;
50047                 }
50049 @@ -96,7 +109,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
50050                 mbox_cmd->resp_data = data;
50051                 ret = 0;
50052                 break;
50053 -       } while (--retries);
50054 +       } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
50056         return ret;
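/*
 * Condensed sketch of the two-tier polling loop both hunks above now use
 * (DEMO_* names are hypothetical): busy-poll up to the average completion
 * time, then keep polling but yield the CPU until the hard timeout.
 */
static int demo_wait_not_busy(struct pci_dev *pdev)
{
	s64 tm_delta = 0;
	ktime_t tm = ktime_get();
	u32 data;
	int ret;

	do {
		ret = pci_read_config_dword(pdev, DEMO_MBOX_IFACE, &data);
		if (ret)
			return ret;

		if (data & BIT(DEMO_BUSY_BIT)) {
			ret = -EBUSY;
			tm_delta = ktime_us_delta(ktime_get(), tm);
			if (tm_delta > DEMO_TIMEOUT_AVG_US)
				cond_resched();	/* past the average: be polite */
			continue;
		}
		return 0;	/* busy bit cleared */
	} while (tm_delta < DEMO_TIMEOUT_MAX_US);

	return ret;	/* still -EBUSY: hard timeout */
}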
50058 diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
50059 index ca684ed760d1..a9d2a4b98e57 100644
50060 --- a/drivers/platform/x86/pmc_atom.c
50061 +++ b/drivers/platform/x86/pmc_atom.c
50062 @@ -393,34 +393,10 @@ static const struct dmi_system_id critclk_systems[] = {
50063         },
50064         {
50065                 /* pmc_plt_clk* - are used for ethernet controllers */
50066 -               .ident = "Beckhoff CB3163",
50067 +               .ident = "Beckhoff Baytrail",
50068                 .matches = {
50069                         DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
50070 -                       DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
50071 -               },
50072 -       },
50073 -       {
50074 -               /* pmc_plt_clk* - are used for ethernet controllers */
50075 -               .ident = "Beckhoff CB4063",
50076 -               .matches = {
50077 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
50078 -                       DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
50079 -               },
50080 -       },
50081 -       {
50082 -               /* pmc_plt_clk* - are used for ethernet controllers */
50083 -               .ident = "Beckhoff CB6263",
50084 -               .matches = {
50085 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
50086 -                       DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
50087 -               },
50088 -       },
50089 -       {
50090 -               /* pmc_plt_clk* - are used for ethernet controllers */
50091 -               .ident = "Beckhoff CB6363",
50092 -               .matches = {
50093 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
50094 -                       DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
50095 +                       DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
50096                 },
50097         },
50098         {
50099 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50100 index 0d9e2ddbf904..61f1c91c62de 100644
50101 --- a/drivers/platform/x86/thinkpad_acpi.c
50102 +++ b/drivers/platform/x86/thinkpad_acpi.c
50103 @@ -6260,6 +6260,7 @@ enum thermal_access_mode {
50104  enum { /* TPACPI_THERMAL_TPEC_* */
50105         TP_EC_THERMAL_TMP0 = 0x78,      /* ACPI EC regs TMP 0..7 */
50106         TP_EC_THERMAL_TMP8 = 0xC0,      /* ACPI EC regs TMP 8..15 */
50107 +       TP_EC_FUNCREV      = 0xEF,      /* ACPI EC Functional revision */
50108         TP_EC_THERMAL_TMP_NA = -128,    /* ACPI EC sensor not available */
50110         TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
50111 @@ -6458,7 +6459,7 @@ static const struct attribute_group thermal_temp_input8_group = {
50113  static int __init thermal_init(struct ibm_init_struct *iibm)
50115 -       u8 t, ta1, ta2;
50116 +       u8 t, ta1, ta2, ver = 0;
50117         int i;
50118         int acpi_tmp7;
50119         int res;
50120 @@ -6473,7 +6474,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
50121                  * 0x78-0x7F, 0xC0-0xC7.  Registers return 0x00 for
50122                  * non-implemented, thermal sensors return 0x80 when
50123                  * not available
50124 +                * The above rule is unfortunately flawed: this has been seen with
50125 +                * 0xC2 (power supply ID) causing thermal control problems.
50126 +                * The EC version can be determined from offset 0xEF, and at least for
50127 +                * version 3 the Lenovo firmware team has confirmed that registers
50128 +                * 0xC0-0xC7 are not thermal registers.
50129                  */
50130 +               if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
50131 +                       pr_warn("Thinkpad ACPI EC unable to access EC version\n");
50133                 ta1 = ta2 = 0;
50134                 for (i = 0; i < 8; i++) {
50135 @@ -6483,11 +6491,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
50136                                 ta1 = 0;
50137                                 break;
50138                         }
50139 -                       if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
50140 -                               ta2 |= t;
50141 -                       } else {
50142 -                               ta1 = 0;
50143 -                               break;
50144 +                       if (ver < 3) {
50145 +                               if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
50146 +                                       ta2 |= t;
50147 +                               } else {
50148 +                                       ta1 = 0;
50149 +                                       break;
50150 +                               }
50151                         }
50152                 }
50153                 if (ta1 == 0) {
50154 @@ -6500,9 +6510,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
50155                                 thermal_read_mode = TPACPI_THERMAL_NONE;
50156                         }
50157                 } else {
50158 -                       thermal_read_mode =
50159 -                           (ta2 != 0) ?
50160 -                           TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
50161 +                       if (ver >= 3)
50162 +                               thermal_read_mode = TPACPI_THERMAL_TPEC_8;
50163 +                       else
50164 +                               thermal_read_mode =
50165 +                                       (ta2 != 0) ?
50166 +                                       TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
50167                 }
50168         } else if (acpi_tmp7) {
50169                 if (tpacpi_is_ibm() &&
50170 diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
50171 index 530ff4025b31..0008c229fd9c 100644
50172 --- a/drivers/power/supply/bq25980_charger.c
50173 +++ b/drivers/power/supply/bq25980_charger.c
50174 @@ -606,33 +606,6 @@ static int bq25980_get_state(struct bq25980_device *bq,
50175         return 0;
50178 -static int bq25980_set_battery_property(struct power_supply *psy,
50179 -                               enum power_supply_property psp,
50180 -                               const union power_supply_propval *val)
50182 -       struct bq25980_device *bq = power_supply_get_drvdata(psy);
50183 -       int ret = 0;
50185 -       switch (psp) {
50186 -       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
50187 -               ret = bq25980_set_const_charge_curr(bq, val->intval);
50188 -               if (ret)
50189 -                       return ret;
50190 -               break;
50192 -       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
50193 -               ret = bq25980_set_const_charge_volt(bq, val->intval);
50194 -               if (ret)
50195 -                       return ret;
50196 -               break;
50198 -       default:
50199 -               return -EINVAL;
50200 -       }
50202 -       return ret;
50205  static int bq25980_get_battery_property(struct power_supply *psy,
50206                                 enum power_supply_property psp,
50207                                 union power_supply_propval *val)
50208 @@ -701,6 +674,18 @@ static int bq25980_set_charger_property(struct power_supply *psy,
50209                         return ret;
50210                 break;
50212 +       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
50213 +               ret = bq25980_set_const_charge_curr(bq, val->intval);
50214 +               if (ret)
50215 +                       return ret;
50216 +               break;
50218 +       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
50219 +               ret = bq25980_set_const_charge_volt(bq, val->intval);
50220 +               if (ret)
50221 +                       return ret;
50222 +               break;
50224         default:
50225                 return -EINVAL;
50226         }
50227 @@ -922,7 +907,6 @@ static struct power_supply_desc bq25980_battery_desc = {
50228         .name                   = "bq25980-battery",
50229         .type                   = POWER_SUPPLY_TYPE_BATTERY,
50230         .get_property           = bq25980_get_battery_property,
50231 -       .set_property           = bq25980_set_battery_property,
50232         .properties             = bq25980_battery_props,
50233         .num_properties         = ARRAY_SIZE(bq25980_battery_props),
50234         .property_is_writeable  = bq25980_property_is_writeable,
50235 diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
50236 index 4c4a7b1c64c5..20e1dc8a87cf 100644
50237 --- a/drivers/power/supply/bq27xxx_battery.c
50238 +++ b/drivers/power/supply/bq27xxx_battery.c
50239 @@ -1661,27 +1661,6 @@ static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg)
50240         return tval * 60;
50244 - * Read an average power register.
50245 - * Return < 0 if something fails.
50246 - */
50247 -static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
50249 -       int tval;
50251 -       tval = bq27xxx_read(di, BQ27XXX_REG_AP, false);
50252 -       if (tval < 0) {
50253 -               dev_err(di->dev, "error reading average power register  %02x: %d\n",
50254 -                       BQ27XXX_REG_AP, tval);
50255 -               return tval;
50256 -       }
50258 -       if (di->opts & BQ27XXX_O_ZERO)
50259 -               return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
50260 -       else
50261 -               return tval;
50264  /*
50265   * Returns true if a battery over temperature condition is detected
50266   */
50267 @@ -1769,8 +1748,6 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
50268                 }
50269                 if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
50270                         cache.cycle_count = bq27xxx_battery_read_cyct(di);
50271 -               if (di->regs[BQ27XXX_REG_AP] != INVALID_REG_ADDR)
50272 -                       cache.power_avg = bq27xxx_battery_read_pwr_avg(di);
50274                 /* We only have to read charge design full once */
50275                 if (di->charge_design_full <= 0)
50276 @@ -1827,9 +1804,35 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
50277                 val->intval = curr * BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS;
50278         } else {
50279                 /* Other gauges return signed value */
50280 -               val->intval = -(int)((s16)curr) * 1000;
50281 +               val->intval = (int)((s16)curr) * 1000;
50282 +       }
50284 +       return 0;
50288 + * Get the average power in µW
50289 + * Return < 0 if something fails.
50290 + */
50291 +static int bq27xxx_battery_pwr_avg(struct bq27xxx_device_info *di,
50292 +                                  union power_supply_propval *val)
50294 +       int power;
50296 +       power = bq27xxx_read(di, BQ27XXX_REG_AP, false);
50297 +       if (power < 0) {
50298 +               dev_err(di->dev,
50299 +                       "error reading average power register %02x: %d\n",
50300 +                       BQ27XXX_REG_AP, power);
50301 +               return power;
50302         }
50304 +       if (di->opts & BQ27XXX_O_ZERO)
50305 +               val->intval = (power * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
50306 +       else
50307 +               /* Other gauges return a signed value in units of 10mW */
50308 +               val->intval = (int)((s16)power) * 10000;
50310         return 0;
50313 @@ -2020,7 +2023,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
50314                 ret = bq27xxx_simple_value(di->cache.energy, val);
50315                 break;
50316         case POWER_SUPPLY_PROP_POWER_AVG:
50317 -               ret = bq27xxx_simple_value(di->cache.power_avg, val);
50318 +               ret = bq27xxx_battery_pwr_avg(di, val);
50319                 break;
50320         case POWER_SUPPLY_PROP_HEALTH:
50321                 ret = bq27xxx_simple_value(di->cache.health, val);
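/*
 * Unit check for bq27xxx_battery_pwr_avg() above: POWER_AVG is reported
 * in microwatts, and non-ZERO gauges return a signed raw value in units
 * of 10 mW. A raw reading of 0xfff4 therefore decodes as
 *
 *	(int)((s16)0xfff4) * 10000 = -12 * 10000 = -120000 uW = -0.12 W
 */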
50322 diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
50323 index 6d5bcdb9f45d..a3fc0084cda0 100644
50324 --- a/drivers/power/supply/cpcap-battery.c
50325 +++ b/drivers/power/supply/cpcap-battery.c
50326 @@ -786,7 +786,7 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
50327                         break;
50328         }
50330 -       if (!d)
50331 +       if (list_entry_is_head(d, &ddata->irq_list, node))
50332                 return IRQ_NONE;
50334         latest = cpcap_battery_latest(ddata);
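/*
 * Sketch of the iterator fix above (hypothetical demo_* types): after
 * list_for_each_entry() the cursor is never NULL — if nothing matched,
 * it points at the head sentinel cast to the entry type, so the "not
 * found" test must be list_entry_is_head(), not a NULL check.
 */
struct demo_item {
	struct list_head node;
	int id;
};

static struct demo_item *demo_find(struct list_head *list, int id)
{
	struct demo_item *it;

	list_for_each_entry(it, list, node)
		if (it->id == id)
			break;

	if (list_entry_is_head(it, list, node))
		return NULL;	/* loop ran off the end of the list */

	return it;
}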
50335 diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
50336 index 641dcad1133f..2a8915c3e73e 100644
50337 --- a/drivers/power/supply/cpcap-charger.c
50338 +++ b/drivers/power/supply/cpcap-charger.c
50339 @@ -318,7 +318,7 @@ static int cpcap_charger_current_to_regval(int microamp)
50340                 return CPCAP_REG_CRM_ICHRG(0x0);
50341         if (miliamp < 177)
50342                 return CPCAP_REG_CRM_ICHRG(0x1);
50343 -       if (miliamp > 1596)
50344 +       if (miliamp >= 1596)
50345                 return CPCAP_REG_CRM_ICHRG(0xe);
50347         res = microamp / 88666;
50348 @@ -668,6 +668,9 @@ static void cpcap_usb_detect(struct work_struct *work)
50349                 return;
50350         }
50352 +       /* Delay for 80ms to avoid vbus bouncing when the USB cable is plugged in */
50353 +       usleep_range(80000, 120000);
50355         /* Throttle chrgcurr2 interrupt for charger done and retry */
50356         switch (ddata->status) {
50357         case POWER_SUPPLY_STATUS_CHARGING:
50358 diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
50359 index 0032069fbc2b..66039c665dd1 100644
50360 --- a/drivers/power/supply/generic-adc-battery.c
50361 +++ b/drivers/power/supply/generic-adc-battery.c
50362 @@ -373,7 +373,7 @@ static int gab_remove(struct platform_device *pdev)
50363         }
50365         kfree(adc_bat->psy_desc.properties);
50366 -       cancel_delayed_work(&adc_bat->bat_work);
50367 +       cancel_delayed_work_sync(&adc_bat->bat_work);
50368         return 0;
50371 diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
50372 index e7931ffb7151..397e5a03b7d9 100644
50373 --- a/drivers/power/supply/lp8788-charger.c
50374 +++ b/drivers/power/supply/lp8788-charger.c
50375 @@ -501,7 +501,7 @@ static int lp8788_set_irqs(struct platform_device *pdev,
50377                 ret = request_threaded_irq(virq, NULL,
50378                                         lp8788_charger_irq_thread,
50379 -                                       0, name, pchg);
50380 +                                       IRQF_ONESHOT, name, pchg);
50381                 if (ret)
50382                         break;
50383         }
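/*
 * The IRQF_ONESHOT additions in the charger drivers in this stretch
 * (lp8788 above; pm2301, tps65090 and tps65217 below) all fix the same
 * pattern: request_threaded_irq() with a NULL primary handler is
 * rejected by the IRQ core unless IRQF_ONESHOT is set, since the line
 * must stay masked until the IRQ thread has run. Sketch with
 * hypothetical demo names:
 */
ret = request_threaded_irq(irq,
			   NULL,		/* no hard-IRQ handler */
			   demo_irq_thread,	/* all work done in the thread */
			   IRQF_ONESHOT,	/* keep line masked until the thread completes */
			   "demo", ddata);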
50384 diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
50385 index ac06ecf7fc9c..a3bfb9612b17 100644
50386 --- a/drivers/power/supply/pm2301_charger.c
50387 +++ b/drivers/power/supply/pm2301_charger.c
50388 @@ -1089,7 +1089,7 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
50389         ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number),
50390                                 NULL,
50391                                 pm2xxx_charger_irq[0].isr,
50392 -                               pm2->pdata->irq_type,
50393 +                               pm2->pdata->irq_type | IRQF_ONESHOT,
50394                                 pm2xxx_charger_irq[0].name, pm2);
50396         if (ret != 0) {
50397 diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
50398 index a2addc24ee8b..3e3a598f114d 100644
50399 --- a/drivers/power/supply/s3c_adc_battery.c
50400 +++ b/drivers/power/supply/s3c_adc_battery.c
50401 @@ -395,7 +395,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev)
50402         if (main_bat.charge_finished)
50403                 free_irq(gpiod_to_irq(main_bat.charge_finished), NULL);
50405 -       cancel_delayed_work(&bat_work);
50406 +       cancel_delayed_work_sync(&bat_work);
50408         if (pdata->exit)
50409                 pdata->exit();
50410 diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
50411 index 6b0098e5a88b..0990b2fa6cd8 100644
50412 --- a/drivers/power/supply/tps65090-charger.c
50413 +++ b/drivers/power/supply/tps65090-charger.c
50414 @@ -301,7 +301,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
50416         if (irq != -ENXIO) {
50417                 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
50418 -                       tps65090_charger_isr, 0, "tps65090-charger", cdata);
50419 +                       tps65090_charger_isr, IRQF_ONESHOT, "tps65090-charger", cdata);
50420                 if (ret) {
50421                         dev_err(cdata->dev,
50422                                 "Unable to register irq %d err %d\n", irq,
50423 diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
50424 index 814c2b81fdfe..ba33d1617e0b 100644
50425 --- a/drivers/power/supply/tps65217_charger.c
50426 +++ b/drivers/power/supply/tps65217_charger.c
50427 @@ -238,7 +238,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
50428         for (i = 0; i < NUM_CHARGER_IRQS; i++) {
50429                 ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
50430                                                 tps65217_charger_irq,
50431 -                                               0, "tps65217-charger",
50432 +                                               IRQF_ONESHOT, "tps65217-charger",
50433                                                 charger);
50434                 if (ret) {
50435                         dev_err(charger->dev,
50436 diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
50437 index fdda2a737186..58ecdad26cca 100644
50438 --- a/drivers/powercap/intel_rapl_common.c
50439 +++ b/drivers/powercap/intel_rapl_common.c
50440 @@ -1454,7 +1454,7 @@ static int __init rapl_init(void)
50442         id = x86_match_cpu(rapl_ids);
50443         if (!id) {
50444 -               pr_err("driver does not support CPU family %d model %d\n",
50445 +               pr_info("driver does not support CPU family %d model %d\n",
50446                        boot_cpu_data.x86, boot_cpu_data.x86_model);
50448                 return -ENODEV;
50449 diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
50450 index 5813339b597b..3292158157b6 100644
50451 --- a/drivers/pwm/pwm-atmel.c
50452 +++ b/drivers/pwm/pwm-atmel.c
50453 @@ -319,7 +319,7 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
50455                 cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
50456                                           atmel_pwm->data->regs.duty);
50457 -               tmp = (u64)cdty * NSEC_PER_SEC;
50458 +               tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;
50459                 tmp <<= pres;
50460                 state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);
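/*
 * Worked example for the .get_state() fix above: the CDTY register holds
 * the inactive ticks (the fix implies .apply() programs it as cprd minus
 * the duty ticks), so the active time is (cprd - cdty) ticks of the
 * prescaled clock:
 *
 *	duty_ns = (cprd - cdty) * NSEC_PER_SEC * 2^pres / rate
 *
 * e.g. cprd = 1000, cdty = 250, pres = 0, rate = 1 MHz:
 *
 *	duty_ns = 750 * 1e9 / 1e6 = 750000 ns of a 1 ms period (75%)
 */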
50462 diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
50463 index 50ec53d67a4c..db4c265287ae 100644
50464 --- a/drivers/rapidio/rio_cm.c
50465 +++ b/drivers/rapidio/rio_cm.c
50466 @@ -2127,6 +2127,14 @@ static int riocm_add_mport(struct device *dev,
50467                 return -ENODEV;
50468         }
50470 +       cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
50471 +       if (!cm->rx_wq) {
50472 +               rio_release_inb_mbox(mport, cmbox);
50473 +               rio_release_outb_mbox(mport, cmbox);
50474 +               kfree(cm);
50475 +               return -ENOMEM;
50476 +       }
50478         /*
50479          * Allocate and register inbound messaging buffers to be ready
50480          * to receive channel and system management requests
50481 @@ -2137,15 +2145,6 @@ static int riocm_add_mport(struct device *dev,
50482         cm->rx_slots = RIOCM_RX_RING_SIZE;
50483         mutex_init(&cm->rx_lock);
50484         riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
50485 -       cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
50486 -       if (!cm->rx_wq) {
50487 -               riocm_error("failed to allocate IBMBOX_%d on %s",
50488 -                           cmbox, mport->name);
50489 -               rio_release_outb_mbox(mport, cmbox);
50490 -               kfree(cm);
50491 -               return -ENOMEM;
50492 -       }
50494         INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
50496         cm->tx_slot = 0;
50497 diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
50498 index a8b5832a5a1b..204a2da054f5 100644
50499 --- a/drivers/regulator/bd9576-regulator.c
50500 +++ b/drivers/regulator/bd9576-regulator.c
50501 @@ -206,7 +206,7 @@ static int bd957x_probe(struct platform_device *pdev)
50503         struct regmap *regmap;
50504         struct regulator_config config = { 0 };
50505 -       int i, err;
50506 +       int i;
50507         bool vout_mode, ddr_sel;
50508         const struct bd957x_regulator_data *reg_data = &bd9576_regulators[0];
50509         unsigned int num_reg_data = ARRAY_SIZE(bd9576_regulators);
50510 @@ -279,8 +279,7 @@ static int bd957x_probe(struct platform_device *pdev)
50511                 break;
50512         default:
50513                 dev_err(&pdev->dev, "Unsupported chip type\n");
50514 -               err = -EINVAL;
50515 -               goto err;
50516 +               return -EINVAL;
50517         }
50519         config.dev = pdev->dev.parent;
50520 @@ -300,8 +299,7 @@ static int bd957x_probe(struct platform_device *pdev)
50521                         dev_err(&pdev->dev,
50522                                 "failed to register %s regulator\n",
50523                                 desc->name);
50524 -                       err = PTR_ERR(rdev);
50525 -                       goto err;
50526 +                       return PTR_ERR(rdev);
50527                 }
50528                 /*
50529                  * Clear the VOUT1 GPIO setting - rest of the regulators do not
50530 @@ -310,8 +308,7 @@ static int bd957x_probe(struct platform_device *pdev)
50531                 config.ena_gpiod = NULL;
50532         }
50534 -err:
50535 -       return err;
50536 +       return 0;
50539  static const struct platform_device_id bd957x_pmic_id[] = {
50540 diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
50541 index a2ede7d7897e..08cbf688e14d 100644
50542 --- a/drivers/regulator/da9121-regulator.c
50543 +++ b/drivers/regulator/da9121-regulator.c
50544 @@ -40,6 +40,7 @@ struct da9121 {
50545         unsigned int passive_delay;
50546         int chip_irq;
50547         int variant_id;
50548 +       int subvariant_id;
50549  };
50551  /* Define ranges for different variants, enabling translation to/from
50552 @@ -812,7 +813,6 @@ static struct regmap_config da9121_2ch_regmap_config = {
50553  static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
50555         u32 device_id;
50556 -       u8 chip_id = chip->variant_id;
50557         u32 variant_id;
50558         u8 variant_mrc, variant_vrc;
50559         char *type;
50560 @@ -839,22 +839,34 @@ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
50562         variant_vrc = variant_id & DA9121_MASK_OTP_VARIANT_ID_VRC;
50564 -       switch (variant_vrc) {
50565 -       case DA9121_VARIANT_VRC:
50566 -               type = "DA9121/DA9130";
50567 -               config_match = (chip_id == DA9121_TYPE_DA9121_DA9130);
50568 +       switch (chip->subvariant_id) {
50569 +       case DA9121_SUBTYPE_DA9121:
50570 +               type = "DA9121";
50571 +               config_match = (variant_vrc == DA9121_VARIANT_VRC);
50572                 break;
50573 -       case DA9220_VARIANT_VRC:
50574 -               type = "DA9220/DA9132";
50575 -               config_match = (chip_id == DA9121_TYPE_DA9220_DA9132);
50576 +       case DA9121_SUBTYPE_DA9130:
50577 +               type = "DA9130";
50578 +               config_match = (variant_vrc == DA9130_VARIANT_VRC);
50579                 break;
50580 -       case DA9122_VARIANT_VRC:
50581 -               type = "DA9122/DA9131";
50582 -               config_match = (chip_id == DA9121_TYPE_DA9122_DA9131);
50583 +       case DA9121_SUBTYPE_DA9220:
50584 +               type = "DA9220";
50585 +               config_match = (variant_vrc == DA9220_VARIANT_VRC);
50586                 break;
50587 -       case DA9217_VARIANT_VRC:
50588 +       case DA9121_SUBTYPE_DA9132:
50589 +               type = "DA9132";
50590 +               config_match = (variant_vrc == DA9132_VARIANT_VRC);
50591 +               break;
50592 +       case DA9121_SUBTYPE_DA9122:
50593 +               type = "DA9122";
50594 +               config_match = (variant_vrc == DA9122_VARIANT_VRC);
50595 +               break;
50596 +       case DA9121_SUBTYPE_DA9131:
50597 +               type = "DA9131";
50598 +               config_match = (variant_vrc == DA9131_VARIANT_VRC);
50599 +               break;
50600 +       case DA9121_SUBTYPE_DA9217:
50601                 type = "DA9217";
50602 -               config_match = (chip_id == DA9121_TYPE_DA9217);
50603 +               config_match = (variant_vrc == DA9217_VARIANT_VRC);
50604                 break;
50605         default:
50606                 type = "Unknown";
50607 @@ -892,15 +904,27 @@ static int da9121_assign_chip_model(struct i2c_client *i2c,
50609         chip->dev = &i2c->dev;
50611 -       switch (chip->variant_id) {
50612 -       case DA9121_TYPE_DA9121_DA9130:
50613 -               fallthrough;
50614 -       case DA9121_TYPE_DA9217:
50615 +       /* Use configured subtype to select the regulator descriptor index and
50616 +        * register map, common to both consumer and automotive-grade variants
50617 +        */
50618 +       switch (chip->subvariant_id) {
50619 +       case DA9121_SUBTYPE_DA9121:
50620 +       case DA9121_SUBTYPE_DA9130:
50621 +               chip->variant_id = DA9121_TYPE_DA9121_DA9130;
50622                 regmap = &da9121_1ch_regmap_config;
50623                 break;
50624 -       case DA9121_TYPE_DA9122_DA9131:
50625 -               fallthrough;
50626 -       case DA9121_TYPE_DA9220_DA9132:
50627 +       case DA9121_SUBTYPE_DA9217:
50628 +               chip->variant_id = DA9121_TYPE_DA9217;
50629 +               regmap = &da9121_1ch_regmap_config;
50630 +               break;
50631 +       case DA9121_SUBTYPE_DA9122:
50632 +       case DA9121_SUBTYPE_DA9131:
50633 +               chip->variant_id = DA9121_TYPE_DA9122_DA9131;
50634 +               regmap = &da9121_2ch_regmap_config;
50635 +               break;
50636 +       case DA9121_SUBTYPE_DA9220:
50637 +       case DA9121_SUBTYPE_DA9132:
50638 +               chip->variant_id = DA9121_TYPE_DA9220_DA9132;
50639                 regmap = &da9121_2ch_regmap_config;
50640                 break;
50641         }
50642 @@ -975,13 +999,13 @@ static int da9121_config_irq(struct i2c_client *i2c,
50645  static const struct of_device_id da9121_dt_ids[] = {
50646 -       { .compatible = "dlg,da9121", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
50647 -       { .compatible = "dlg,da9130", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
50648 -       { .compatible = "dlg,da9217", .data = (void *) DA9121_TYPE_DA9217 },
50649 -       { .compatible = "dlg,da9122", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
50650 -       { .compatible = "dlg,da9131", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
50651 -       { .compatible = "dlg,da9220", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
50652 -       { .compatible = "dlg,da9132", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
50653 +       { .compatible = "dlg,da9121", .data = (void *) DA9121_SUBTYPE_DA9121 },
50654 +       { .compatible = "dlg,da9130", .data = (void *) DA9121_SUBTYPE_DA9130 },
50655 +       { .compatible = "dlg,da9217", .data = (void *) DA9121_SUBTYPE_DA9217 },
50656 +       { .compatible = "dlg,da9122", .data = (void *) DA9121_SUBTYPE_DA9122 },
50657 +       { .compatible = "dlg,da9131", .data = (void *) DA9121_SUBTYPE_DA9131 },
50658 +       { .compatible = "dlg,da9220", .data = (void *) DA9121_SUBTYPE_DA9220 },
50659 +       { .compatible = "dlg,da9132", .data = (void *) DA9121_SUBTYPE_DA9132 },
50660         { }
50661  };
50662  MODULE_DEVICE_TABLE(of, da9121_dt_ids);
50663 @@ -1011,7 +1035,7 @@ static int da9121_i2c_probe(struct i2c_client *i2c,
50664         }
50666         chip->pdata = i2c->dev.platform_data;
50667 -       chip->variant_id = da9121_of_get_id(&i2c->dev);
50668 +       chip->subvariant_id = da9121_of_get_id(&i2c->dev);
50670         ret = da9121_assign_chip_model(i2c, chip);
50671         if (ret < 0)
50672 diff --git a/drivers/regulator/da9121-regulator.h b/drivers/regulator/da9121-regulator.h
50673 index 3c34cb889ca8..357f416e17c1 100644
50674 --- a/drivers/regulator/da9121-regulator.h
50675 +++ b/drivers/regulator/da9121-regulator.h
50676 @@ -29,6 +29,16 @@ enum da9121_variant {
50677         DA9121_TYPE_DA9217
50678  };
50680 +enum da9121_subvariant {
50681 +       DA9121_SUBTYPE_DA9121,
50682 +       DA9121_SUBTYPE_DA9130,
50683 +       DA9121_SUBTYPE_DA9220,
50684 +       DA9121_SUBTYPE_DA9132,
50685 +       DA9121_SUBTYPE_DA9122,
50686 +       DA9121_SUBTYPE_DA9131,
50687 +       DA9121_SUBTYPE_DA9217
50690  /* Minimum, maximum and default polling millisecond periods are provided
50691   * here as an example. It is expected that any final implementation will
50692   * include a modification of these settings to match the required
50693 @@ -279,6 +289,9 @@ enum da9121_variant {
50694  #define DA9220_VARIANT_VRC     0x0
50695  #define DA9122_VARIANT_VRC     0x2
50696  #define DA9217_VARIANT_VRC     0x7
50697 +#define DA9130_VARIANT_VRC     0x0
50698 +#define DA9131_VARIANT_VRC     0x1
50699 +#define DA9132_VARIANT_VRC     0x2
50701  /* DA9121_REG_OTP_CUSTOMER_ID */
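/*
 * Why the subvariant enum is needed at all: per the VRC defines above,
 * the read-back codes collide across families (DA9130 and DA9220 are
 * both 0x0; DA9132 and DA9122 are both 0x2), so the OTP variant ID alone
 * cannot identify the part. Probe therefore starts from the DT
 * compatible (the subvariant) and uses the VRC read-back only as a
 * consistency check.
 */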
50703 diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
50704 index dcb380e868df..549ed3fed625 100644
50705 --- a/drivers/remoteproc/pru_rproc.c
50706 +++ b/drivers/remoteproc/pru_rproc.c
50707 @@ -266,12 +266,17 @@ static void pru_rproc_create_debug_entries(struct rproc *rproc)
50709  static void pru_dispose_irq_mapping(struct pru_rproc *pru)
50711 -       while (pru->evt_count--) {
50712 +       if (!pru->mapped_irq)
50713 +               return;
50715 +       while (pru->evt_count) {
50716 +               pru->evt_count--;
50717                 if (pru->mapped_irq[pru->evt_count] > 0)
50718                         irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
50719         }
50721         kfree(pru->mapped_irq);
50722 +       pru->mapped_irq = NULL;
50725  /*
50726 @@ -284,7 +289,7 @@ static int pru_handle_intrmap(struct rproc *rproc)
50727         struct pru_rproc *pru = rproc->priv;
50728         struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
50729         struct irq_fwspec fwspec;
50730 -       struct device_node *irq_parent;
50731 +       struct device_node *parent, *irq_parent;
50732         int i, ret = 0;
50734         /* not having pru_interrupt_map is not an error */
50735 @@ -307,16 +312,31 @@ static int pru_handle_intrmap(struct rproc *rproc)
50736         pru->evt_count = rsc->num_evts;
50737         pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
50738                                   GFP_KERNEL);
50739 -       if (!pru->mapped_irq)
50740 +       if (!pru->mapped_irq) {
50741 +               pru->evt_count = 0;
50742                 return -ENOMEM;
50743 +       }
50745         /*
50746          * parse and fill in system event to interrupt channel and
50747 -        * channel-to-host mapping
50748 +        * channel-to-host mapping. The interrupt controller to be used
50749 +        * for these mappings for a given PRU remoteproc is always its
50750 +        * corresponding sibling PRUSS INTC node.
50751          */
50752 -       irq_parent = of_irq_find_parent(pru->dev->of_node);
50753 +       parent = of_get_parent(dev_of_node(pru->dev));
50754 +       if (!parent) {
50755 +               kfree(pru->mapped_irq);
50756 +               pru->mapped_irq = NULL;
50757 +               pru->evt_count = 0;
50758 +               return -ENODEV;
50759 +       }
50761 +       irq_parent = of_get_child_by_name(parent, "interrupt-controller");
50762 +       of_node_put(parent);
50763         if (!irq_parent) {
50764                 kfree(pru->mapped_irq);
50765 +               pru->mapped_irq = NULL;
50766 +               pru->evt_count = 0;
50767                 return -ENODEV;
50768         }
50770 @@ -332,16 +352,20 @@ static int pru_handle_intrmap(struct rproc *rproc)
50772                 pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
50773                 if (!pru->mapped_irq[i]) {
50774 -                       dev_err(dev, "failed to get virq\n");
50775 -                       ret = pru->mapped_irq[i];
50776 +                       dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
50777 +                               i, fwspec.param[0], fwspec.param[1],
50778 +                               fwspec.param[2]);
50779 +                       ret = -EINVAL;
50780                         goto map_fail;
50781                 }
50782         }
50783 +       of_node_put(irq_parent);
50785         return ret;
50787  map_fail:
50788         pru_dispose_irq_mapping(pru);
50789 +       of_node_put(irq_parent);
50791         return ret;
50793 @@ -387,8 +411,7 @@ static int pru_rproc_stop(struct rproc *rproc)
50794         pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
50796         /* dispose irq mapping - new firmware can provide new mapping */
50797 -       if (pru->mapped_irq)
50798 -               pru_dispose_irq_mapping(pru);
50799 +       pru_dispose_irq_mapping(pru);
50801         return 0;
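/*
 * Shape of the idempotent cleanup pru_dispose_irq_mapping() gains above
 * (hypothetical demo_* names): guard on the pointer, consume the count
 * while walking, and reset the pointer so the stop path and the probe
 * error path can both call it safely.
 */
static void demo_dispose(struct demo *d)
{
	if (!d->mapped_irq)
		return;			/* nothing mapped, or already freed */

	while (d->evt_count) {
		d->evt_count--;
		if (d->mapped_irq[d->evt_count] > 0)
			irq_dispose_mapping(d->mapped_irq[d->evt_count]);
	}

	kfree(d->mapped_irq);
	d->mapped_irq = NULL;		/* make a second call a no-op */
}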
50803 diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
50804 index 66106ba25ba3..14e0ce5f18f5 100644
50805 --- a/drivers/remoteproc/qcom_q6v5_mss.c
50806 +++ b/drivers/remoteproc/qcom_q6v5_mss.c
50807 @@ -1210,6 +1210,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
50808                         goto release_firmware;
50809                 }
50811 +               if (phdr->p_filesz > phdr->p_memsz) {
50812 +                       dev_err(qproc->dev,
50813 +                               "refusing to load segment %d with p_filesz > p_memsz\n",
50814 +                               i);
50815 +                       ret = -EINVAL;
50816 +                       goto release_firmware;
50817 +               }
50819                 ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
50820                 if (!ptr) {
50821                         dev_err(qproc->dev,
50822 @@ -1241,6 +1249,16 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
50823                                 goto release_firmware;
50824                         }
50826 +                       if (seg_fw->size != phdr->p_filesz) {
50827 +                               dev_err(qproc->dev,
50828 +                                       "failed to load segment %d from truncated file %s\n",
50829 +                                       i, fw_name);
50830 +                               ret = -EINVAL;
50831 +                               release_firmware(seg_fw);
50832 +                               memunmap(ptr);
50833 +                               goto release_firmware;
50834 +                       }
50836                         release_firmware(seg_fw);
50837                 }
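/*
 * The two checks added above follow the usual rule for loading ELF
 * program headers from untrusted firmware: a segment may be zero-padded
 * (p_filesz < p_memsz) but never the reverse, and the per-segment file
 * must contain exactly p_filesz bytes. Condensed, they are:
 */
if (phdr->p_filesz > phdr->p_memsz)
	return -EINVAL;		/* file data would overflow the segment */

if (seg_fw->size != phdr->p_filesz)
	return -EINVAL;		/* truncated firmware file */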
50839 diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
50840 index 27a05167c18c..4840886532ff 100644
50841 --- a/drivers/rpmsg/qcom_glink_native.c
50842 +++ b/drivers/rpmsg/qcom_glink_native.c
50843 @@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
50844                         dev_err(glink->dev,
50845                                 "no intent found for channel %s intent %d",
50846                                 channel->name, liid);
50847 +                       ret = -ENOENT;
50848                         goto advance_rx;
50849                 }
50850         }
50851 diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50852 index cd8e438bc9c4..8752620d8e34 100644
50853 --- a/drivers/rtc/rtc-ds1307.c
50854 +++ b/drivers/rtc/rtc-ds1307.c
50855 @@ -296,7 +296,11 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
50856         t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
50857         tmp = regs[DS1307_REG_HOUR] & 0x3f;
50858         t->tm_hour = bcd2bin(tmp);
50859 -       t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
50860 +       /* rx8130 is bit position, not BCD */
50861 +       if (ds1307->type == rx_8130)
50862 +               t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);
50863 +       else
50864 +               t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
50865         t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
50866         tmp = regs[DS1307_REG_MONTH] & 0x1f;
50867         t->tm_mon = bcd2bin(tmp) - 1;
50868 @@ -343,7 +347,11 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
50869         regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
50870         regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
50871         regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
50872 -       regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
50873 +       /* rx8130 is bit position, not BCD */
50874 +       if (ds1307->type == rx_8130)
50875 +               regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
50876 +       else
50877 +               regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
50878         regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
50879         regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
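/*
 * Encoding note for the rx8130 branches above: its weekday register is
 * one-hot (bit N set = day N) rather than BCD. For Wednesday
 * (tm_wday == 3) the two write paths produce:
 *
 *	one-hot: 1 << 3         = 0x08   (rx8130)
 *	BCD:     bin2bcd(3 + 1) = 0x04   (all other supported chips)
 */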
50881 diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
50882 index 57cc09d0a806..c0df49fb978c 100644
50883 --- a/drivers/rtc/rtc-fsl-ftm-alarm.c
50884 +++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
50885 @@ -310,6 +310,7 @@ static const struct of_device_id ftm_rtc_match[] = {
50886         { .compatible = "fsl,lx2160a-ftm-alarm", },
50887         { },
50888  };
50889 +MODULE_DEVICE_TABLE(of, ftm_rtc_match);
50891  static const struct acpi_device_id ftm_imx_acpi_ids[] = {
50892         {"NXP0014",},
50893 diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
50894 index aef6c1ee8bb0..82becae14229 100644
50895 --- a/drivers/rtc/rtc-pcf85063.c
50896 +++ b/drivers/rtc/rtc-pcf85063.c
50897 @@ -478,6 +478,7 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
50899         struct clk *clk;
50900         struct clk_init_data init;
50901 +       struct device_node *node = pcf85063->rtc->dev.parent->of_node;
50903         init.name = "pcf85063-clkout";
50904         init.ops = &pcf85063_clkout_ops;
50905 @@ -487,15 +488,13 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
50906         pcf85063->clkout_hw.init = &init;
50908         /* optional override of the clockname */
50909 -       of_property_read_string(pcf85063->rtc->dev.of_node,
50910 -                               "clock-output-names", &init.name);
50911 +       of_property_read_string(node, "clock-output-names", &init.name);
50913         /* register the clock */
50914         clk = devm_clk_register(&pcf85063->rtc->dev, &pcf85063->clkout_hw);
50916         if (!IS_ERR(clk))
50917 -               of_clk_add_provider(pcf85063->rtc->dev.of_node,
50918 -                                   of_clk_src_simple_get, clk);
50919 +               of_clk_add_provider(node, of_clk_src_simple_get, clk);
50921         return clk;
50923 diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
50924 index 288abb1abdb8..bc89c62ccb9b 100644
50925 --- a/drivers/rtc/rtc-tps65910.c
50926 +++ b/drivers/rtc/rtc-tps65910.c
50927 @@ -18,6 +18,7 @@
50928  #include <linux/rtc.h>
50929  #include <linux/bcd.h>
50930  #include <linux/math64.h>
50931 +#include <linux/property.h>
50932  #include <linux/platform_device.h>
50933  #include <linux/interrupt.h>
50934  #include <linux/mfd/tps65910.h>
50935 diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
50936 index 2018614f258f..fc19b312c345 100644
50937 --- a/drivers/rtc/rtc-wm8350.c
50938 +++ b/drivers/rtc/rtc-wm8350.c
50939 @@ -114,7 +114,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
50940         /* Wait until confirmation of stopping */
50941         do {
50942                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
50943 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
50944 +               schedule_msec_hrtimeout_uninterruptible(1);
50945         } while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
50947         if (!retries) {
50948 @@ -197,7 +197,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
50949         /* Wait until confirmation of stopping */
50950         do {
50951                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
50952 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
50953 +               schedule_msec_hrtimeout_uninterruptible(1);
50954         } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
50956         if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
50957 @@ -220,7 +220,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
50958         /* Wait until confirmation */
50959         do {
50960                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
50961 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
50962 +               schedule_msec_hrtimeout_uninterruptible(1);
50963         } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
50965         if (rtc_ctrl & WM8350_RTC_ALMSTS)
50966 diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
50967 index 3f026021e95e..84f659cafe76 100644
50968 --- a/drivers/s390/cio/device.c
50969 +++ b/drivers/s390/cio/device.c
50970 @@ -1532,8 +1532,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
50971         switch (action) {
50972         case IO_SCH_ORPH_UNREG:
50973         case IO_SCH_UNREG:
50974 -               if (!cdev)
50975 -                       css_sch_device_unregister(sch);
50976 +               css_sch_device_unregister(sch);
50977                 break;
50978         case IO_SCH_ORPH_ATTACH:
50979         case IO_SCH_UNREG_ATTACH:
50980 diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
50981 index 34bf2f197c71..0e0044d70844 100644
50982 --- a/drivers/s390/cio/qdio.h
50983 +++ b/drivers/s390/cio/qdio.h
50984 @@ -181,12 +181,6 @@ struct qdio_input_q {
50985  struct qdio_output_q {
50986         /* PCIs are enabled for the queue */
50987         int pci_out_enabled;
50988 -       /* cq: use asynchronous output buffers */
50989 -       int use_cq;
50990 -       /* cq: aobs used for particual SBAL */
50991 -       struct qaob **aobs;
50992 -       /* cq: sbal state related to asynchronous operation */
50993 -       struct qdio_outbuf_state *sbal_state;
50994         /* timer to check for more outbound work */
50995         struct timer_list timer;
50996         /* tasklet to check for completions */
50997 @@ -379,12 +373,8 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
50998  void qdio_shutdown_irq(struct qdio_irq *irq);
50999  void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
51000  void qdio_free_queues(struct qdio_irq *irq_ptr);
51001 -void qdio_free_async_data(struct qdio_irq *irq_ptr);
51002  int qdio_setup_init(void);
51003  void qdio_setup_exit(void);
51004 -int qdio_enable_async_operation(struct qdio_output_q *q);
51005 -void qdio_disable_async_operation(struct qdio_output_q *q);
51006 -struct qaob *qdio_allocate_aob(void);
51008  int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
51009                         unsigned char *state);
51010 diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
51011 index 03a011619908..307ce7ff5ca4 100644
51012 --- a/drivers/s390/cio/qdio_main.c
51013 +++ b/drivers/s390/cio/qdio_main.c
51014 @@ -517,24 +517,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
51015         return 1;
51018 -static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
51019 -                                       int bufnr)
51021 -       unsigned long phys_aob = 0;
51023 -       if (!q->aobs[bufnr]) {
51024 -               struct qaob *aob = qdio_allocate_aob();
51025 -               q->aobs[bufnr] = aob;
51026 -       }
51027 -       if (q->aobs[bufnr]) {
51028 -               q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
51029 -               phys_aob = virt_to_phys(q->aobs[bufnr]);
51030 -               WARN_ON_ONCE(phys_aob & 0xFF);
51031 -       }
51033 -       return phys_aob;
51036  static inline int qdio_tasklet_schedule(struct qdio_q *q)
51038         if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
51039 @@ -548,7 +530,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
51040                                         unsigned int *error)
51042         unsigned char state = 0;
51043 -       unsigned int i;
51044         int count;
51046         q->timestamp = get_tod_clock_fast();
51047 @@ -570,10 +551,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
51049         switch (state) {
51050         case SLSB_P_OUTPUT_PENDING:
51051 -               /* detach the utilized QAOBs: */
51052 -               for (i = 0; i < count; i++)
51053 -                       q->u.out.aobs[QDIO_BUFNR(start + i)] = NULL;
51055                 *error = QDIO_ERROR_SLSB_PENDING;
51056                 fallthrough;
51057         case SLSB_P_OUTPUT_EMPTY:
51058 @@ -999,7 +976,6 @@ int qdio_free(struct ccw_device *cdev)
51059         cdev->private->qdio_data = NULL;
51060         mutex_unlock(&irq_ptr->setup_mutex);
51062 -       qdio_free_async_data(irq_ptr);
51063         qdio_free_queues(irq_ptr);
51064         free_page((unsigned long) irq_ptr->qdr);
51065         free_page(irq_ptr->chsc_page);
51066 @@ -1075,28 +1051,6 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
51068  EXPORT_SYMBOL_GPL(qdio_allocate);
51070 -static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
51072 -       struct qdio_q *q = irq_ptr->input_qs[0];
51073 -       int i, use_cq = 0;
51075 -       if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
51076 -               use_cq = 1;
51078 -       for_each_output_queue(irq_ptr, q, i) {
51079 -               if (use_cq) {
51080 -                       if (multicast_outbound(q))
51081 -                               continue;
51082 -                       if (qdio_enable_async_operation(&q->u.out) < 0) {
51083 -                               use_cq = 0;
51084 -                               continue;
51085 -                       }
51086 -               } else
51087 -                       qdio_disable_async_operation(&q->u.out);
51088 -       }
51089 -       DBF_EVENT("use_cq:%d", use_cq);
51092  static void qdio_trace_init_data(struct qdio_irq *irq,
51093                                  struct qdio_initialize *data)
51095 @@ -1191,8 +1145,6 @@ int qdio_establish(struct ccw_device *cdev,
51097         qdio_setup_ssqd_info(irq_ptr);
51099 -       qdio_detect_hsicq(irq_ptr);
51101         /* qebsm is now setup if available, initialize buffer states */
51102         qdio_init_buf_states(irq_ptr);
51104 @@ -1297,9 +1249,11 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
51105   * @callflags: flags
51106   * @bufnr: first buffer to process
51107   * @count: how many buffers are filled
51108 + * @aob: asynchronous operation block
51109   */
51110  static int handle_outbound(struct qdio_q *q, unsigned int callflags,
51111 -                          unsigned int bufnr, unsigned int count)
51112 +                          unsigned int bufnr, unsigned int count,
51113 +                          struct qaob *aob)
51115         const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
51116         unsigned char state = 0;
51117 @@ -1320,11 +1274,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
51118                 q->u.out.pci_out_enabled = 0;
51120         if (queue_type(q) == QDIO_IQDIO_QFMT) {
51121 -               unsigned long phys_aob = 0;
51123 -               if (q->u.out.use_cq && count == 1)
51124 -                       phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
51125 +               unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
51127 +               WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
51128                 rc = qdio_kick_outbound_q(q, count, phys_aob);
51129         } else if (need_siga_sync(q)) {
51130                 rc = qdio_siga_sync_q(q);
51131 @@ -1359,9 +1311,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
51132   * @q_nr: queue number
51133   * @bufnr: buffer number
51134   * @count: how many buffers to process
51135 + * @aob: asynchronous operation block (outbound only)
51136   */
51137  int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
51138 -           int q_nr, unsigned int bufnr, unsigned int count)
51139 +           int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob)
51141         struct qdio_irq *irq_ptr = cdev->private->qdio_data;
51143 @@ -1383,7 +1336,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
51144                                       callflags, bufnr, count);
51145         else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
51146                 return handle_outbound(irq_ptr->output_qs[q_nr],
51147 -                                      callflags, bufnr, count);
51148 +                                      callflags, bufnr, count, aob);
51149         return -EINVAL;
51151  EXPORT_SYMBOL_GPL(do_QDIO);
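Taken together, the qdio hunks above invert QAOB ownership: the qdio layer no longer allocates or caches asynchronous operation blocks per output queue, and do_QDIO() instead receives one from its caller (only meaningful when flushing a single buffer on an IQD output queue; all other call sites pass NULL). A minimal caller-side sketch of the new contract, with my_cookie as a hypothetical placeholder:

	struct qaob *aob = qdio_allocate_aob();	/* zeroed; 256-byte aligned per the WARN above */

	if (aob)
		aob->user1 = (u64)my_cookie;	/* echoed back at async completion */

	/* IQD output: hand the QAOB (or NULL) straight to the doorbell. */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, q_nr, bufnr, 1, aob);

	/* Input paths are unaffected beyond the extra NULL argument. */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count, NULL);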
51152 diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
51153 index c8b9620bc688..da67e4979402 100644
51154 --- a/drivers/s390/cio/qdio_setup.c
51155 +++ b/drivers/s390/cio/qdio_setup.c
51156 @@ -30,6 +30,7 @@ struct qaob *qdio_allocate_aob(void)
51158         return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
51160 +EXPORT_SYMBOL_GPL(qdio_allocate_aob);
51162  void qdio_release_aob(struct qaob *aob)
51164 @@ -247,8 +248,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
51165                          struct qdio_initialize *qdio_init)
51167         struct qdio_q *q;
51168 -       struct qdio_outbuf_state *output_sbal_state_array =
51169 -                                 qdio_init->output_sbal_state_array;
51170         int i;
51172         for_each_input_queue(irq_ptr, q, i) {
51173 @@ -265,9 +264,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
51174                 DBF_EVENT("outq:%1d", i);
51175                 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
51177 -               q->u.out.sbal_state = output_sbal_state_array;
51178 -               output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
51180                 q->is_input_q = 0;
51181                 setup_storage_lists(q, irq_ptr,
51182                                     qdio_init->output_sbal_addr_array[i], i);
51183 @@ -372,30 +368,6 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
51184         DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
51187 -void qdio_free_async_data(struct qdio_irq *irq_ptr)
51189 -       struct qdio_q *q;
51190 -       int i;
51192 -       for (i = 0; i < irq_ptr->max_output_qs; i++) {
51193 -               q = irq_ptr->output_qs[i];
51194 -               if (q->u.out.use_cq) {
51195 -                       unsigned int n;
51197 -                       for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; n++) {
51198 -                               struct qaob *aob = q->u.out.aobs[n];
51200 -                               if (aob) {
51201 -                                       qdio_release_aob(aob);
51202 -                                       q->u.out.aobs[n] = NULL;
51203 -                               }
51204 -                       }
51206 -                       qdio_disable_async_operation(&q->u.out);
51207 -               }
51208 -       }
51211  static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
51213         desc->sliba = virt_to_phys(queue->slib);
51214 @@ -545,25 +517,6 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
51215         printk(KERN_INFO "%s", s);
51218 -int qdio_enable_async_operation(struct qdio_output_q *outq)
51220 -       outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
51221 -                            GFP_KERNEL);
51222 -       if (!outq->aobs) {
51223 -               outq->use_cq = 0;
51224 -               return -ENOMEM;
51225 -       }
51226 -       outq->use_cq = 1;
51227 -       return 0;
51230 -void qdio_disable_async_operation(struct qdio_output_q *q)
51232 -       kfree(q->aobs);
51233 -       q->aobs = NULL;
51234 -       q->use_cq = 0;
51237  int __init qdio_setup_init(void)
51239         int rc;
51240 diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
51241 index 1ffdd411201c..6946a7e26eff 100644
51242 --- a/drivers/s390/crypto/vfio_ap_ops.c
51243 +++ b/drivers/s390/crypto/vfio_ap_ops.c
51244 @@ -294,6 +294,19 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
51245         matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
51246                                    struct ap_matrix_mdev, pqap_hook);
51248 +       /*
51249 +        * If the KVM pointer is in the process of being set, wait until the
51250 +        * process has completed.
51251 +        */
51252 +       wait_event_cmd(matrix_mdev->wait_for_kvm,
51253 +                      !matrix_mdev->kvm_busy,
51254 +                      mutex_unlock(&matrix_dev->lock),
51255 +                      mutex_lock(&matrix_dev->lock));
51257 +       /* If there is no guest using the mdev, there is nothing to do */
51258 +       if (!matrix_mdev->kvm)
51259 +               goto out_unlock;
51261         q = vfio_ap_get_queue(matrix_mdev, apqn);
51262         if (!q)
51263                 goto out_unlock;
51264 @@ -337,6 +350,7 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
51266         matrix_mdev->mdev = mdev;
51267         vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
51268 +       init_waitqueue_head(&matrix_mdev->wait_for_kvm);
51269         mdev_set_drvdata(mdev, matrix_mdev);
51270         matrix_mdev->pqap_hook.hook = handle_pqap;
51271         matrix_mdev->pqap_hook.owner = THIS_MODULE;
51272 @@ -351,17 +365,23 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
51274         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51276 -       if (matrix_mdev->kvm)
51277 +       mutex_lock(&matrix_dev->lock);
51279 +       /*
51280 +        * If the KVM pointer is in flux or the guest is running, disallow
51281 +        * removal of the mdev.
51282 +        */
51283 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51284 +               mutex_unlock(&matrix_dev->lock);
51285                 return -EBUSY;
51286 +       }
51288 -       mutex_lock(&matrix_dev->lock);
51289         vfio_ap_mdev_reset_queues(mdev);
51290         list_del(&matrix_mdev->node);
51291 -       mutex_unlock(&matrix_dev->lock);
51293         kfree(matrix_mdev);
51294         mdev_set_drvdata(mdev, NULL);
51295         atomic_inc(&matrix_dev->available_instances);
51296 +       mutex_unlock(&matrix_dev->lock);
51298         return 0;
51300 @@ -606,24 +626,31 @@ static ssize_t assign_adapter_store(struct device *dev,
51301         struct mdev_device *mdev = mdev_from_dev(dev);
51302         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51304 -       /* If the guest is running, disallow assignment of adapter */
51305 -       if (matrix_mdev->kvm)
51306 -               return -EBUSY;
51307 +       mutex_lock(&matrix_dev->lock);
51309 +       /*
51310 +        * If the KVM pointer is in flux or the guest is running, disallow
51311 +        * assignment of adapter
51312 +        */
51313 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51314 +               ret = -EBUSY;
51315 +               goto done;
51316 +       }
51318         ret = kstrtoul(buf, 0, &apid);
51319         if (ret)
51320 -               return ret;
51321 +               goto done;
51323 -       if (apid > matrix_mdev->matrix.apm_max)
51324 -               return -ENODEV;
51325 +       if (apid > matrix_mdev->matrix.apm_max) {
51326 +               ret = -ENODEV;
51327 +               goto done;
51328 +       }
51330         /*
51331          * Set the bit in the AP mask (APM) corresponding to the AP adapter
51332          * number (APID). The bits in the mask, from most significant to least
51333          * significant bit, correspond to APIDs 0-255.
51334          */
51335 -       mutex_lock(&matrix_dev->lock);
51337         ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
51338         if (ret)
51339                 goto done;
51340 @@ -672,22 +699,31 @@ static ssize_t unassign_adapter_store(struct device *dev,
51341         struct mdev_device *mdev = mdev_from_dev(dev);
51342         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51344 -       /* If the guest is running, disallow un-assignment of adapter */
51345 -       if (matrix_mdev->kvm)
51346 -               return -EBUSY;
51347 +       mutex_lock(&matrix_dev->lock);
51349 +       /*
51350 +        * If the KVM pointer is in flux or the guest is running, disallow
51351 +        * un-assignment of adapter
51352 +        */
51353 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51354 +               ret = -EBUSY;
51355 +               goto done;
51356 +       }
51358         ret = kstrtoul(buf, 0, &apid);
51359         if (ret)
51360 -               return ret;
51361 +               goto done;
51363 -       if (apid > matrix_mdev->matrix.apm_max)
51364 -               return -ENODEV;
51365 +       if (apid > matrix_mdev->matrix.apm_max) {
51366 +               ret = -ENODEV;
51367 +               goto done;
51368 +       }
51370 -       mutex_lock(&matrix_dev->lock);
51371         clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
51372 +       ret = count;
51373 +done:
51374         mutex_unlock(&matrix_dev->lock);
51376 -       return count;
51377 +       return ret;
51379  static DEVICE_ATTR_WO(unassign_adapter);
51381 @@ -753,17 +789,24 @@ static ssize_t assign_domain_store(struct device *dev,
51382         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51383         unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
51385 -       /* If the guest is running, disallow assignment of domain */
51386 -       if (matrix_mdev->kvm)
51387 -               return -EBUSY;
51388 +       mutex_lock(&matrix_dev->lock);
51390 +       /*
51391 +        * If the KVM pointer is in flux or the guest is running, disallow
51392 +        * assignment of domain
51393 +        */
51394 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51395 +               ret = -EBUSY;
51396 +               goto done;
51397 +       }
51399         ret = kstrtoul(buf, 0, &apqi);
51400         if (ret)
51401 -               return ret;
51402 -       if (apqi > max_apqi)
51403 -               return -ENODEV;
51405 -       mutex_lock(&matrix_dev->lock);
51406 +               goto done;
51407 +       if (apqi > max_apqi) {
51408 +               ret = -ENODEV;
51409 +               goto done;
51410 +       }
51412         ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
51413         if (ret)
51414 @@ -814,22 +857,32 @@ static ssize_t unassign_domain_store(struct device *dev,
51415         struct mdev_device *mdev = mdev_from_dev(dev);
51416         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51418 -       /* If the guest is running, disallow un-assignment of domain */
51419 -       if (matrix_mdev->kvm)
51420 -               return -EBUSY;
51421 +       mutex_lock(&matrix_dev->lock);
51423 +       /*
51424 +        * If the KVM pointer is in flux or the guest is running, disallow
51425 +        * un-assignment of domain
51426 +        */
51427 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51428 +               ret = -EBUSY;
51429 +               goto done;
51430 +       }
51432         ret = kstrtoul(buf, 0, &apqi);
51433         if (ret)
51434 -               return ret;
51435 +               goto done;
51437 -       if (apqi > matrix_mdev->matrix.aqm_max)
51438 -               return -ENODEV;
51439 +       if (apqi > matrix_mdev->matrix.aqm_max) {
51440 +               ret = -ENODEV;
51441 +               goto done;
51442 +       }
51444 -       mutex_lock(&matrix_dev->lock);
51445         clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
51446 -       mutex_unlock(&matrix_dev->lock);
51447 +       ret = count;
51449 -       return count;
51450 +done:
51451 +       mutex_unlock(&matrix_dev->lock);
51452 +       return ret;
51454  static DEVICE_ATTR_WO(unassign_domain);
51456 @@ -858,27 +911,36 @@ static ssize_t assign_control_domain_store(struct device *dev,
51457         struct mdev_device *mdev = mdev_from_dev(dev);
51458         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51460 -       /* If the guest is running, disallow assignment of control domain */
51461 -       if (matrix_mdev->kvm)
51462 -               return -EBUSY;
51463 +       mutex_lock(&matrix_dev->lock);
51465 +       /*
51466 +        * If the KVM pointer is in flux or the guest is running, disallow
51467 +        * assignment of control domain.
51468 +        */
51469 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51470 +               ret = -EBUSY;
51471 +               goto done;
51472 +       }
51474         ret = kstrtoul(buf, 0, &id);
51475         if (ret)
51476 -               return ret;
51477 +               goto done;
51479 -       if (id > matrix_mdev->matrix.adm_max)
51480 -               return -ENODEV;
51481 +       if (id > matrix_mdev->matrix.adm_max) {
51482 +               ret = -ENODEV;
51483 +               goto done;
51484 +       }
51486         /* Set the bit in the ADM (bitmask) corresponding to the AP control
51487          * domain number (id). The bits in the mask, from most significant to
51488          * least significant, correspond to IDs 0 up to the one less than the
51489          * number of control domains that can be assigned.
51490          */
51491 -       mutex_lock(&matrix_dev->lock);
51492         set_bit_inv(id, matrix_mdev->matrix.adm);
51493 +       ret = count;
51494 +done:
51495         mutex_unlock(&matrix_dev->lock);
51497 -       return count;
51498 +       return ret;
51500  static DEVICE_ATTR_WO(assign_control_domain);
51502 @@ -908,21 +970,30 @@ static ssize_t unassign_control_domain_store(struct device *dev,
51503         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51504         unsigned long max_domid =  matrix_mdev->matrix.adm_max;
51506 -       /* If the guest is running, disallow un-assignment of control domain */
51507 -       if (matrix_mdev->kvm)
51508 -               return -EBUSY;
51509 +       mutex_lock(&matrix_dev->lock);
51511 +       /*
51512 +        * If the KVM pointer is in flux or the guest is running, disallow
51513 +        * un-assignment of control domain.
51514 +        */
51515 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
51516 +               ret = -EBUSY;
51517 +               goto done;
51518 +       }
51520         ret = kstrtoul(buf, 0, &domid);
51521         if (ret)
51522 -               return ret;
51523 -       if (domid > max_domid)
51524 -               return -ENODEV;
51525 +               goto done;
51526 +       if (domid > max_domid) {
51527 +               ret = -ENODEV;
51528 +               goto done;
51529 +       }
51531 -       mutex_lock(&matrix_dev->lock);
51532         clear_bit_inv(domid, matrix_mdev->matrix.adm);
51533 +       ret = count;
51534 +done:
51535         mutex_unlock(&matrix_dev->lock);
51537 -       return count;
51538 +       return ret;
51540  static DEVICE_ATTR_WO(unassign_control_domain);
51542 @@ -1027,8 +1098,15 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
51543   * @matrix_mdev: a mediated matrix device
51544   * @kvm: reference to KVM instance
51545   *
51546 - * Verifies no other mediated matrix device has @kvm and sets a reference to
51547 - * it in @matrix_mdev->kvm.
51548 + * Sets all data for @matrix_mdev that are needed to manage AP resources
51549 + * for the guest whose state is represented by @kvm.
51550 + *
51551 + * Note: The matrix_dev->lock must be taken prior to calling
51552 + * this function; however, the lock will be temporarily released while the
51553 + * guest's AP configuration is set to avoid a potential lockdep splat.
51554 + * The kvm->lock is taken to set the guest's AP configuration which, under
51555 + * certain circumstances, will result in a circular lock dependency if this is
51556 +        * done under the @matrix_dev->lock.
51557   *
51558   * Return 0 if no other mediated matrix device has a reference to @kvm;
51559   * otherwise, returns an -EPERM.
51560 @@ -1038,14 +1116,25 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
51562         struct ap_matrix_mdev *m;
51564 -       list_for_each_entry(m, &matrix_dev->mdev_list, node) {
51565 -               if ((m != matrix_mdev) && (m->kvm == kvm))
51566 -                       return -EPERM;
51567 -       }
51568 +       if (kvm->arch.crypto.crycbd) {
51569 +               list_for_each_entry(m, &matrix_dev->mdev_list, node) {
51570 +                       if (m != matrix_mdev && m->kvm == kvm)
51571 +                               return -EPERM;
51572 +               }
51574 -       matrix_mdev->kvm = kvm;
51575 -       kvm_get_kvm(kvm);
51576 -       kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
51577 +               kvm_get_kvm(kvm);
51578 +               matrix_mdev->kvm_busy = true;
51579 +               mutex_unlock(&matrix_dev->lock);
51580 +               kvm_arch_crypto_set_masks(kvm,
51581 +                                         matrix_mdev->matrix.apm,
51582 +                                         matrix_mdev->matrix.aqm,
51583 +                                         matrix_mdev->matrix.adm);
51584 +               mutex_lock(&matrix_dev->lock);
51585 +               kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
51586 +               matrix_mdev->kvm = kvm;
51587 +               matrix_mdev->kvm_busy = false;
51588 +               wake_up_all(&matrix_mdev->wait_for_kvm);
51589 +       }
51591         return 0;
51593 @@ -1079,51 +1168,65 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
51594         return NOTIFY_DONE;
51597 +/**
51598 + * vfio_ap_mdev_unset_kvm
51599 + *
51600 + * @matrix_mdev: a matrix mediated device
51601 + *
51602 + * Performs clean-up of resources no longer needed by @matrix_mdev.
51603 + *
51604 + * Note: The matrix_dev->lock must be taken prior to calling
51605 + * this function; however, the lock will be temporarily released while the
51606 + * guest's AP configuration is cleared to avoid a potential lockdep splat.
51607 + * The kvm->lock is taken to clear the guest's AP configuration which, under
51608 + * certain circumstances, will result in a circular lock dependency if this is
51609 +        * done under the @matrix_dev->lock.
51610 + *
51611 + */
51612  static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
51614 -       kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
51615 -       matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
51616 -       vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
51617 -       kvm_put_kvm(matrix_mdev->kvm);
51618 -       matrix_mdev->kvm = NULL;
51619 +       /*
51620 +        * If the KVM pointer is in the process of being set, wait until the
51621 +        * process has completed.
51622 +        */
51623 +       wait_event_cmd(matrix_mdev->wait_for_kvm,
51624 +                      !matrix_mdev->kvm_busy,
51625 +                      mutex_unlock(&matrix_dev->lock),
51626 +                      mutex_lock(&matrix_dev->lock));
51628 +       if (matrix_mdev->kvm) {
51629 +               matrix_mdev->kvm_busy = true;
51630 +               mutex_unlock(&matrix_dev->lock);
51631 +               kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
51632 +               mutex_lock(&matrix_dev->lock);
51633 +               vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
51634 +               matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
51635 +               kvm_put_kvm(matrix_mdev->kvm);
51636 +               matrix_mdev->kvm = NULL;
51637 +               matrix_mdev->kvm_busy = false;
51638 +               wake_up_all(&matrix_mdev->wait_for_kvm);
51639 +       }
51642  static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
51643                                        unsigned long action, void *data)
51645 -       int ret, notify_rc = NOTIFY_OK;
51646 +       int notify_rc = NOTIFY_OK;
51647         struct ap_matrix_mdev *matrix_mdev;
51649         if (action != VFIO_GROUP_NOTIFY_SET_KVM)
51650                 return NOTIFY_OK;
51652 -       matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
51653         mutex_lock(&matrix_dev->lock);
51654 +       matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
51656 -       if (!data) {
51657 -               if (matrix_mdev->kvm)
51658 -                       vfio_ap_mdev_unset_kvm(matrix_mdev);
51659 -               goto notify_done;
51660 -       }
51662 -       ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
51663 -       if (ret) {
51664 -               notify_rc = NOTIFY_DONE;
51665 -               goto notify_done;
51666 -       }
51668 -       /* If there is no CRYCB pointer, then we can't copy the masks */
51669 -       if (!matrix_mdev->kvm->arch.crypto.crycbd) {
51670 +       if (!data)
51671 +               vfio_ap_mdev_unset_kvm(matrix_mdev);
51672 +       else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
51673                 notify_rc = NOTIFY_DONE;
51674 -               goto notify_done;
51675 -       }
51677 -       kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
51678 -                                 matrix_mdev->matrix.aqm,
51679 -                                 matrix_mdev->matrix.adm);
51681 -notify_done:
51682         mutex_unlock(&matrix_dev->lock);
51684         return notify_rc;
51687 @@ -1258,8 +1361,7 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev)
51688         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
51690         mutex_lock(&matrix_dev->lock);
51691 -       if (matrix_mdev->kvm)
51692 -               vfio_ap_mdev_unset_kvm(matrix_mdev);
51693 +       vfio_ap_mdev_unset_kvm(matrix_mdev);
51694         mutex_unlock(&matrix_dev->lock);
51696         vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
51697 @@ -1293,6 +1395,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
51698                                     unsigned int cmd, unsigned long arg)
51700         int ret;
51701 +       struct ap_matrix_mdev *matrix_mdev;
51703         mutex_lock(&matrix_dev->lock);
51704         switch (cmd) {
51705 @@ -1300,6 +1403,21 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
51706                 ret = vfio_ap_mdev_get_device_info(arg);
51707                 break;
51708         case VFIO_DEVICE_RESET:
51709 +               matrix_mdev = mdev_get_drvdata(mdev);
51710 +               if (WARN(!matrix_mdev, "Driver data missing from mdev!!")) {
51711 +                       ret = -EINVAL;
51712 +                       break;
51713 +               }
51715 +               /*
51716 +                * If the KVM pointer is in the process of being set, wait until
51717 +                * the process has completed.
51718 +                */
51719 +               wait_event_cmd(matrix_mdev->wait_for_kvm,
51720 +                              !matrix_mdev->kvm_busy,
51721 +                              mutex_unlock(&matrix_dev->lock),
51722 +                              mutex_lock(&matrix_dev->lock));
51724                 ret = vfio_ap_mdev_reset_queues(mdev);
51725                 break;
51726         default:
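The recurring pattern in the vfio_ap hunks above is a busy-flag handshake: matrix_dev->lock must not be held while kvm->lock is taken, so the set/unset paths flag the mdev as busy, drop the mutex around the kvm_arch_crypto_*_masks() call, and every other path needing a stable KVM pointer sleeps until the flag clears. A condensed sketch of the two sides, using only names from the patch (apm/aqm/adm abbreviate the matrix_mdev->matrix.* masks):

	/* Writer: publish "busy", then call into KVM with the mutex dropped. */
	matrix_mdev->kvm_busy = true;
	mutex_unlock(&matrix_dev->lock);
	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);	/* takes kvm->lock */
	mutex_lock(&matrix_dev->lock);
	matrix_mdev->kvm = kvm;
	matrix_mdev->kvm_busy = false;
	wake_up_all(&matrix_mdev->wait_for_kvm);

	/* Reader: wait_event_cmd() runs the third argument before sleeping
	 * and the fourth after waking, so the mutex is never held while
	 * blocked and the writer can always make progress.
	 */
	wait_event_cmd(matrix_mdev->wait_for_kvm,
		       !matrix_mdev->kvm_busy,
		       mutex_unlock(&matrix_dev->lock),
		       mutex_lock(&matrix_dev->lock));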
51727 diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
51728 index 28e9d9989768..f82a6396acae 100644
51729 --- a/drivers/s390/crypto/vfio_ap_private.h
51730 +++ b/drivers/s390/crypto/vfio_ap_private.h
51731 @@ -83,6 +83,8 @@ struct ap_matrix_mdev {
51732         struct ap_matrix matrix;
51733         struct notifier_block group_notifier;
51734         struct notifier_block iommu_notifier;
51735 +       bool kvm_busy;
51736 +       wait_queue_head_t wait_for_kvm;
51737         struct kvm *kvm;
51738         struct kvm_s390_module_hook pqap_hook;
51739         struct mdev_device *mdev;
51740 diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
51741 index 33b23884b133..09fe6bb8880b 100644
51742 --- a/drivers/s390/crypto/zcrypt_card.c
51743 +++ b/drivers/s390/crypto/zcrypt_card.c
51744 @@ -192,5 +192,6 @@ void zcrypt_card_unregister(struct zcrypt_card *zc)
51745         spin_unlock(&zcrypt_list_lock);
51746         sysfs_remove_group(&zc->card->ap_dev.device.kobj,
51747                            &zcrypt_card_attr_group);
51748 +       zcrypt_card_put(zc);
51750  EXPORT_SYMBOL(zcrypt_card_unregister);
51751 diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
51752 index 5062eae73d4a..c3ffbd26b73f 100644
51753 --- a/drivers/s390/crypto/zcrypt_queue.c
51754 +++ b/drivers/s390/crypto/zcrypt_queue.c
51755 @@ -223,5 +223,6 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
51756         sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
51757                            &zcrypt_queue_attr_group);
51758         zcrypt_card_put(zc);
51759 +       zcrypt_queue_put(zq);
51761  EXPORT_SYMBOL(zcrypt_queue_unregister);
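Both zcrypt hunks are one-line reference-count fixes: unregister removed the sysfs group but never dropped the reference keeping the zcrypt_card or zcrypt_queue object alive, so hot-unplugged devices leaked their structures. The added puts restore the usual kref pairing; as I read it, the matching reference is taken earlier in each object's life, outside the hunks shown here:

	zcrypt_card_put(zc);	/* card may now be released */
	zcrypt_queue_put(zq);	/* queue may now be released */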
51762 diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
51763 index 91acff493612..fd9b869d278e 100644
51764 --- a/drivers/s390/net/qeth_core.h
51765 +++ b/drivers/s390/net/qeth_core.h
51766 @@ -437,6 +437,7 @@ struct qeth_qdio_out_buffer {
51768         struct qeth_qdio_out_q *q;
51769         struct list_head list_entry;
51770 +       struct qaob *aob;
51771  };
51773  struct qeth_card;
51774 @@ -499,7 +500,6 @@ struct qeth_out_q_stats {
51775  struct qeth_qdio_out_q {
51776         struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
51777         struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
51778 -       struct qdio_outbuf_state *bufstates; /* convenience pointer */
51779         struct list_head pending_bufs;
51780         struct qeth_out_q_stats stats;
51781         spinlock_t lock;
51782 @@ -563,7 +563,6 @@ struct qeth_qdio_info {
51783         /* output */
51784         unsigned int no_out_queues;
51785         struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];
51786 -       struct qdio_outbuf_state *out_bufstates;
51788         /* priority queueing */
51789         int do_prio_queueing;
51790 diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
51791 index a814698387bc..175b82b98f36 100644
51792 --- a/drivers/s390/net/qeth_core_main.c
51793 +++ b/drivers/s390/net/qeth_core_main.c
51794 @@ -369,8 +369,7 @@ static int qeth_cq_init(struct qeth_card *card)
51795                                    QDIO_MAX_BUFFERS_PER_Q);
51796                 card->qdio.c_q->next_buf_to_init = 127;
51797                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
51798 -                            card->qdio.no_in_queues - 1, 0,
51799 -                            127);
51800 +                            card->qdio.no_in_queues - 1, 0, 127, NULL);
51801                 if (rc) {
51802                         QETH_CARD_TEXT_(card, 2, "1err%d", rc);
51803                         goto out;
51804 @@ -383,48 +382,22 @@ static int qeth_cq_init(struct qeth_card *card)
51806  static int qeth_alloc_cq(struct qeth_card *card)
51808 -       int rc;
51810         if (card->options.cq == QETH_CQ_ENABLED) {
51811 -               int i;
51812 -               struct qdio_outbuf_state *outbuf_states;
51814                 QETH_CARD_TEXT(card, 2, "cqon");
51815                 card->qdio.c_q = qeth_alloc_qdio_queue();
51816                 if (!card->qdio.c_q) {
51817 -                       rc = -1;
51818 -                       goto kmsg_out;
51819 +                       dev_err(&card->gdev->dev, "Failed to create completion queue\n");
51820 +                       return -ENOMEM;
51821                 }
51823                 card->qdio.no_in_queues = 2;
51824 -               card->qdio.out_bufstates =
51825 -                       kcalloc(card->qdio.no_out_queues *
51826 -                                       QDIO_MAX_BUFFERS_PER_Q,
51827 -                               sizeof(struct qdio_outbuf_state),
51828 -                               GFP_KERNEL);
51829 -               outbuf_states = card->qdio.out_bufstates;
51830 -               if (outbuf_states == NULL) {
51831 -                       rc = -1;
51832 -                       goto free_cq_out;
51833 -               }
51834 -               for (i = 0; i < card->qdio.no_out_queues; ++i) {
51835 -                       card->qdio.out_qs[i]->bufstates = outbuf_states;
51836 -                       outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
51837 -               }
51838         } else {
51839                 QETH_CARD_TEXT(card, 2, "nocq");
51840                 card->qdio.c_q = NULL;
51841                 card->qdio.no_in_queues = 1;
51842         }
51843         QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
51844 -       rc = 0;
51845 -out:
51846 -       return rc;
51847 -free_cq_out:
51848 -       qeth_free_qdio_queue(card->qdio.c_q);
51849 -       card->qdio.c_q = NULL;
51850 -kmsg_out:
51851 -       dev_err(&card->gdev->dev, "Failed to create completion queue\n");
51852 -       goto out;
51853 +       return 0;
51856  static void qeth_free_cq(struct qeth_card *card)
51857 @@ -434,8 +407,6 @@ static void qeth_free_cq(struct qeth_card *card)
51858                 qeth_free_qdio_queue(card->qdio.c_q);
51859                 card->qdio.c_q = NULL;
51860         }
51861 -       kfree(card->qdio.out_bufstates);
51862 -       card->qdio.out_bufstates = NULL;
51865  static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
51866 @@ -487,12 +458,12 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
51867         switch (atomic_xchg(&buffer->state, new_state)) {
51868         case QETH_QDIO_BUF_PRIMED:
51869                 /* Faster than TX completion code, let it handle the async
51870 -                * completion for us.
51871 +                * completion for us. It will also recycle the QAOB.
51872                  */
51873                 break;
51874         case QETH_QDIO_BUF_PENDING:
51875                 /* TX completion code is active and will handle the async
51876 -                * completion for us.
51877 +                * completion for us. It will also recycle the QAOB.
51878                  */
51879                 break;
51880         case QETH_QDIO_BUF_NEED_QAOB:
51881 @@ -501,7 +472,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
51882                 qeth_notify_skbs(buffer->q, buffer, notification);
51884                 /* Free dangling allocations. The attached skbs are handled by
51885 -                * qeth_tx_complete_pending_bufs().
51886 +                * qeth_tx_complete_pending_bufs(), and so is the QAOB.
51887                  */
51888                 for (i = 0;
51889                      i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
51890 @@ -520,8 +491,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
51891         default:
51892                 WARN_ON_ONCE(1);
51893         }
51895 -       qdio_release_aob(aob);
51898  static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
51899 @@ -1451,6 +1420,13 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
51900         atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
51903 +static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
51905 +       if (buf->aob)
51906 +               qdio_release_aob(buf->aob);
51907 +       kmem_cache_free(qeth_qdio_outbuf_cache, buf);
51910  static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
51911                                           struct qeth_qdio_out_q *queue,
51912                                           bool drain)
51913 @@ -1468,7 +1444,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
51914                         qeth_tx_complete_buf(buf, drain, 0);
51916                         list_del(&buf->list_entry);
51917 -                       kmem_cache_free(qeth_qdio_outbuf_cache, buf);
51918 +                       qeth_free_out_buf(buf);
51919                 }
51920         }
51922 @@ -1485,7 +1461,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
51924                 qeth_clear_output_buffer(q, q->bufs[j], true, 0);
51925                 if (free) {
51926 -                       kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
51927 +                       qeth_free_out_buf(q->bufs[j]);
51928                         q->bufs[j] = NULL;
51929                 }
51930         }
51931 @@ -2637,7 +2613,7 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
51933  err_out_bufs:
51934         while (i > 0)
51935 -               kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]);
51936 +               qeth_free_out_buf(q->bufs[--i]);
51937         qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
51938  err_qdio_bufs:
51939         kfree(q);
51940 @@ -3024,7 +3000,8 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
51941         }
51943         card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
51944 -       rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
51945 +       rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
51946 +                    NULL);
51947         if (rc) {
51948                 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
51949                 return rc;
51950 @@ -3516,7 +3493,7 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
51951                 }
51953                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
51954 -                            queue->next_buf_to_init, count);
51955 +                            queue->next_buf_to_init, count, NULL);
51956                 if (rc) {
51957                         QETH_CARD_TEXT(card, 2, "qinberr");
51958                 }
51959 @@ -3625,6 +3602,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
51960         struct qeth_qdio_out_buffer *buf = queue->bufs[index];
51961         unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
51962         struct qeth_card *card = queue->card;
51963 +       struct qaob *aob = NULL;
51964         int rc;
51965         int i;
51967 @@ -3637,16 +3615,24 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
51968                                 SBAL_EFLAGS_LAST_ENTRY;
51969                 queue->coalesced_frames += buf->frames;
51971 -               if (queue->bufstates)
51972 -                       queue->bufstates[bidx].user = buf;
51974                 if (IS_IQD(card)) {
51975                         skb_queue_walk(&buf->skb_list, skb)
51976                                 skb_tx_timestamp(skb);
51977                 }
51978         }
51980 -       if (!IS_IQD(card)) {
51981 +       if (IS_IQD(card)) {
51982 +               if (card->options.cq == QETH_CQ_ENABLED &&
51983 +                   !qeth_iqd_is_mcast_queue(card, queue) &&
51984 +                   count == 1) {
51985 +                       if (!buf->aob)
51986 +                               buf->aob = qdio_allocate_aob();
51987 +                       if (buf->aob) {
51988 +                               aob = buf->aob;
51989 +                               aob->user1 = (u64) buf;
51990 +                       }
51991 +               }
51992 +       } else {
51993                 if (!queue->do_pack) {
51994                         if ((atomic_read(&queue->used_buffers) >=
51995                                 (QETH_HIGH_WATERMARK_PACK -
51996 @@ -3677,8 +3663,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
51997         }
51999         QETH_TXQ_STAT_INC(queue, doorbell);
52000 -       rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
52001 -                    queue->queue_no, index, count);
52002 +       rc = do_QDIO(CARD_DDEV(card), qdio_flags, queue->queue_no, index, count,
52003 +                    aob);
52005         switch (rc) {
52006         case 0:
52007 @@ -3814,8 +3800,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
52008                 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
52009         }
52010         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
52011 -                   card->qdio.c_q->next_buf_to_init,
52012 -                   count);
52013 +                    cq->next_buf_to_init, count, NULL);
52014         if (rc) {
52015                 dev_warn(&card->gdev->dev,
52016                         "QDIO reported an error, rc=%i\n", rc);
52017 @@ -5270,7 +5255,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
52018         init_data.int_parm               = (unsigned long) card;
52019         init_data.input_sbal_addr_array  = in_sbal_ptrs;
52020         init_data.output_sbal_addr_array = out_sbal_ptrs;
52021 -       init_data.output_sbal_state_array = card->qdio.out_bufstates;
52022         init_data.scan_threshold         = IS_IQD(card) ? 0 : 32;
52024         if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
52025 @@ -6069,7 +6053,15 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
52026         bool error = !!qdio_error;
52028         if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
52029 -               WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
52030 +               struct qaob *aob = buffer->aob;
52032 +               if (!aob) {
52033 +                       netdev_WARN_ONCE(card->dev,
52034 +                                        "Pending TX buffer %#x without QAOB on TX queue %u\n",
52035 +                                        bidx, queue->queue_no);
52036 +                       qeth_schedule_recovery(card);
52037 +                       return;
52038 +               }
52040                 QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
52042 @@ -6125,6 +6117,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
52043                 default:
52044                         WARN_ON_ONCE(1);
52045                 }
52047 +               memset(aob, 0, sizeof(*aob));
52048         } else if (card->options.cq == QETH_CQ_ENABLED) {
52049                 qeth_notify_skbs(queue, buffer,
52050                                  qeth_compute_cq_notification(sflags, 0));
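With the qdio layer out of the QAOB business, qeth now ties exactly one QAOB to each outbound buffer: allocated lazily the first time a lone IQD buffer is flushed with the completion queue enabled, recycled in place after a pending completion, and released only when the buffer itself is freed. The lifecycle, condensed from the hunks above:

	/* qeth_flush_buffers(): allocate on first use, attach a back-pointer. */
	if (!buf->aob)
		buf->aob = qdio_allocate_aob();
	if (buf->aob) {
		aob = buf->aob;
		aob->user1 = (u64)buf;	/* lets the CQ handler find the buffer */
	}

	/* qeth_iqd_tx_complete(): once the PENDING completion is fully
	 * handled, wipe the QAOB so the same buffer can reuse it.
	 */
	memset(aob, 0, sizeof(*aob));

	/* qeth_free_out_buf(): the QAOB dies with its buffer. */
	if (buf->aob)
		qdio_release_aob(buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);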
52051 diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
52052 index 23ab16d65f2a..049596cbfb5d 100644
52053 --- a/drivers/s390/scsi/zfcp_qdio.c
52054 +++ b/drivers/s390/scsi/zfcp_qdio.c
52055 @@ -128,7 +128,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
52056         /*
52057          * put SBALs back to response queue
52058          */
52059 -       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
52060 +       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count, NULL))
52061                 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
52064 @@ -298,7 +298,7 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
52065         atomic_sub(sbal_number, &qdio->req_q_free);
52067         retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
52068 -                        q_req->sbal_first, sbal_number);
52069 +                        q_req->sbal_first, sbal_number, NULL);
52071         if (unlikely(retval)) {
52072                 /* Failed to submit the IO, roll back our modifications. */
52073 @@ -463,7 +463,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
52074                 sbale->addr = 0;
52075         }
52077 -       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
52078 +       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q,
52079 +                   NULL))
52080                 goto failed_qdio;
52082         /* set index of first available SBALS / number of available SBALS */
52083 diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
52084 index ea436a14087f..5eff3368143d 100644
52085 --- a/drivers/scsi/device_handler/scsi_dh_alua.c
52086 +++ b/drivers/scsi/device_handler/scsi_dh_alua.c
52087 @@ -573,10 +573,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
52088                  * even though it shouldn't according to T10.
52089                  * The retry without rtpg_ext_hdr_req set
52090                  * handles this.
52091 +                * Note: some arrays return a sense key of ILLEGAL_REQUEST
52092 +                * with ASC 00h if they don't support the extended header.
52093                  */
52094                 if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
52095 -                   sense_hdr.sense_key == ILLEGAL_REQUEST &&
52096 -                   sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
52097 +                   sense_hdr.sense_key == ILLEGAL_REQUEST) {
52098                         pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
52099                         goto retry;
52100                 }
52101 diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
52102 index 36744968378f..09e49e21deb6 100644
52103 --- a/drivers/scsi/fnic/fnic_scsi.c
52104 +++ b/drivers/scsi/fnic/fnic_scsi.c
52105 @@ -217,7 +217,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
52107         /* wait for io cmpl */
52108         while (atomic_read(&fnic->in_flight))
52109 -               schedule_timeout(msecs_to_jiffies(1));
52110 +               schedule_msec_hrtimeout((1));
52112         spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
52114 @@ -2277,7 +2277,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
52115                 }
52116         }
52118 -       schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
52119 +       schedule_msec_hrtimeout(2 * fnic->config.ed_tov);
52121         /* walk again to check, if IOs are still pending in fw */
52122         if (fnic_is_abts_pending(fnic, lr_sc))
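The two fnic hunks above swap jiffies-based sleeps for schedule_msec_hrtimeout(), the hrtimer-backed helper carried elsewhere in this xanmod/cacule patch (an assumption on my part; the helper's definition is not in the hunks shown). The motivation is sleep granularity: schedule_timeout() rounds up to whole jiffies, so a 1 ms request becomes 10 ms at HZ=100, while the hrtimeout variant sleeps close to the requested time regardless of HZ:

	schedule_timeout(msecs_to_jiffies(1));	/* >= 1 jiffy, so HZ-dependent */
	schedule_msec_hrtimeout(1);		/* ~1 ms, backed by hrtimers */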
52123 diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
52124 index 7451377c4cb6..3e359ac752fd 100644
52125 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
52126 +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
52127 @@ -1646,7 +1646,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
52128                 idx = i * HISI_SAS_PHY_INT_NR;
52129                 for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
52130                         irq = platform_get_irq(pdev, idx);
52131 -                       if (!irq) {
52132 +                       if (irq < 0) {
52133                                 dev_err(dev, "irq init: fail map phy interrupt %d\n",
52134                                         idx);
52135                                 return -ENOENT;
52136 @@ -1665,7 +1665,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
52137         idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
52138         for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
52139                 irq = platform_get_irq(pdev, idx);
52140 -               if (!irq) {
52141 +               if (irq < 0) {
52142                         dev_err(dev, "irq init: could not map cq interrupt %d\n",
52143                                 idx);
52144                         return -ENOENT;
52145 @@ -1683,7 +1683,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
52146         idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
52147         for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
52148                 irq = platform_get_irq(pdev, idx);
52149 -               if (!irq) {
52150 +               if (irq < 0) {
52151                         dev_err(dev, "irq init: could not map fatal interrupt %d\n",
52152                                 idx);
52153                         return -ENOENT;
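All three hisi_sas hunks (and the jazz_esp hunk further below) fix the same bug class: platform_get_irq() returns a negative errno on failure and is documented never to return 0, so checking "if (!irq)" misses every real error and then feeds a negative number to request_irq(). The idiomatic shape, with my_isr, "mydev" and priv as hypothetical placeholders:

	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagates e.g. -ENXIO or -EPROBE_DEFER */

	ret = request_irq(irq, my_isr, IRQF_SHARED, "mydev", priv);
	if (ret)
		return ret;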
52154 diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
52155 index 61831f2fdb30..d6675a25719d 100644
52156 --- a/drivers/scsi/ibmvscsi/ibmvfc.c
52157 +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
52158 @@ -603,8 +603,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
52159                 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
52160                         vhost->action = action;
52161                 break;
52162 +       case IBMVFC_HOST_ACTION_REENABLE:
52163 +       case IBMVFC_HOST_ACTION_RESET:
52164 +               vhost->action = action;
52165 +               break;
52166         case IBMVFC_HOST_ACTION_INIT:
52167         case IBMVFC_HOST_ACTION_TGT_DEL:
52168 +       case IBMVFC_HOST_ACTION_LOGO:
52169 +       case IBMVFC_HOST_ACTION_QUERY_TGTS:
52170 +       case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
52171 +       case IBMVFC_HOST_ACTION_NONE:
52172 +       default:
52173                 switch (vhost->action) {
52174                 case IBMVFC_HOST_ACTION_RESET:
52175                 case IBMVFC_HOST_ACTION_REENABLE:
52176 @@ -614,15 +623,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
52177                         break;
52178                 }
52179                 break;
52180 -       case IBMVFC_HOST_ACTION_LOGO:
52181 -       case IBMVFC_HOST_ACTION_QUERY_TGTS:
52182 -       case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
52183 -       case IBMVFC_HOST_ACTION_NONE:
52184 -       case IBMVFC_HOST_ACTION_RESET:
52185 -       case IBMVFC_HOST_ACTION_REENABLE:
52186 -       default:
52187 -               vhost->action = action;
52188 -               break;
52189         }
52192 @@ -5373,30 +5373,49 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
52193         case IBMVFC_HOST_ACTION_INIT_WAIT:
52194                 break;
52195         case IBMVFC_HOST_ACTION_RESET:
52196 -               vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
52197                 list_splice_init(&vhost->purge, &purge);
52198                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
52199                 ibmvfc_complete_purge(&purge);
52200                 rc = ibmvfc_reset_crq(vhost);
52202                 spin_lock_irqsave(vhost->host->host_lock, flags);
52203 -               if (rc == H_CLOSED)
52204 +               if (!rc || rc == H_CLOSED)
52205                         vio_enable_interrupts(to_vio_dev(vhost->dev));
52206 -               if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
52207 -                   (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
52208 -                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
52209 -                       dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
52210 +               if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
52211 +                       /*
52212 +                        * The only action we could have changed to would have
52213 +                        * been reenable, in which case, we skip the rest of
52214 +                        * this path and wait until we've done the re-enable
52215 +                        * before sending the crq init.
52216 +                        */
52217 +                       vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
52219 +                       if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
52220 +                           (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
52221 +                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
52222 +                               dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
52223 +                       }
52224                 }
52225                 break;
52226         case IBMVFC_HOST_ACTION_REENABLE:
52227 -               vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
52228                 list_splice_init(&vhost->purge, &purge);
52229                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
52230                 ibmvfc_complete_purge(&purge);
52231                 rc = ibmvfc_reenable_crq_queue(vhost);
52233                 spin_lock_irqsave(vhost->host->host_lock, flags);
52234 -               if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
52235 -                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
52236 -                       dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
52237 +               if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
52238 +                       /*
52239 +                        * The only action we could have changed to would have
52240 +                        * been reset, in which case, we skip the rest of this
52241 +                        * path and wait until we've done the reset before
52242 +                        * sending the crq init.
52243 +                        */
52244 +                       vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
52245 +                       if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
52246 +                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
52247 +                               dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
52248 +                       }
52249                 }
52250                 break;
52251         case IBMVFC_HOST_ACTION_LOGO:
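Both ibmvfc hunks guard against the same race: host_lock is dropped while the CRQ reset or re-enable call runs, so another path may queue the opposite action in the meantime. Re-checking vhost->action after retaking the lock ensures the CRQ init is only sent once the final queued action has actually completed. Condensed shape of the reset branch:

	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	rc = ibmvfc_reset_crq(vhost);		/* slow; lock dropped */
	spin_lock_irqsave(vhost->host->host_lock, flags);

	if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
		/* no re-enable was queued meanwhile: safe to init the CRQ */
		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
		if (rc || (rc = ibmvfc_send_crq_init(vhost)))
			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
	}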
52252 diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
52253 index f0ed6863cc70..60a88a95a8e2 100644
52254 --- a/drivers/scsi/jazz_esp.c
52255 +++ b/drivers/scsi/jazz_esp.c
52256 @@ -143,7 +143,9 @@ static int esp_jazz_probe(struct platform_device *dev)
52257         if (!esp->command_block)
52258                 goto fail_unmap_regs;
52260 -       host->irq = platform_get_irq(dev, 0);
52261 +       host->irq = err = platform_get_irq(dev, 0);
52262 +       if (err < 0)
52263 +               goto fail_unmap_command_block;
52264         err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
52265         if (err < 0)
52266                 goto fail_unmap_command_block;
52267 diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
52268 index 22826544da7e..9989669beec3 100644
52269 --- a/drivers/scsi/libfc/fc_lport.c
52270 +++ b/drivers/scsi/libfc/fc_lport.c
52271 @@ -1731,7 +1731,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
52273         if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
52274                 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
52275 -                            "lport->mfs:%hu\n", mfs, lport->mfs);
52276 +                            "lport->mfs:%u\n", mfs, lport->mfs);
52277                 fc_lport_error(lport, fp);
52278                 goto out;
52279         }
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index bdd9a29f4201..0496a60735ef 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1687,8 +1687,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                "0071 Set trunk mode failed with status: %d",
                                rc);
-       if (rc != MBX_TIMEOUT)
-               mempool_free(mbox, phba->mbox_mem_pool);
+       mempool_free(mbox, phba->mbox_mem_pool);

        return 0;
 }
@@ -6793,15 +6792,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
        pmboxq->ctx_buf = NULL;
        pmboxq->vport = vport;

-       if (vport->fc_flag & FC_OFFLINE_MODE)
+       if (vport->fc_flag & FC_OFFLINE_MODE) {
                rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
-       else
-               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
-       if (rc != MBX_SUCCESS) {
-               if (rc != MBX_TIMEOUT)
+               if (rc != MBX_SUCCESS) {
                        mempool_free(pmboxq, phba->mbox_mem_pool);
-               return NULL;
+                       return NULL;
+               }
+       } else {
+               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+               if (rc != MBX_SUCCESS) {
+                       if (rc != MBX_TIMEOUT)
+                               mempool_free(pmboxq, phba->mbox_mem_pool);
+                       return NULL;
+               }
        }

        memset(hs, 0, sizeof (struct fc_host_statistics));
@@ -6825,15 +6828,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
        pmboxq->ctx_buf = NULL;
        pmboxq->vport = vport;

-       if (vport->fc_flag & FC_OFFLINE_MODE)
+       if (vport->fc_flag & FC_OFFLINE_MODE) {
                rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
-       else
-               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
-       if (rc != MBX_SUCCESS) {
-               if (rc != MBX_TIMEOUT)
+               if (rc != MBX_SUCCESS) {
                        mempool_free(pmboxq, phba->mbox_mem_pool);
-               return NULL;
+                       return NULL;
+               }
+       } else {
+               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+               if (rc != MBX_SUCCESS) {
+                       if (rc != MBX_TIMEOUT)
+                               mempool_free(pmboxq, phba->mbox_mem_pool);
+                       return NULL;
+               }
        }

        hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
@@ -6906,15 +6913,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
        pmboxq->vport = vport;

        if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-               (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+               (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
                rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
-       else
-               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
-       if (rc != MBX_SUCCESS) {
-               if (rc != MBX_TIMEOUT)
+               if (rc != MBX_SUCCESS) {
                        mempool_free(pmboxq, phba->mbox_mem_pool);
-               return;
+                       return;
+               }
+       } else {
+               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+               if (rc != MBX_SUCCESS) {
+                       if (rc != MBX_TIMEOUT)
+                               mempool_free(pmboxq, phba->mbox_mem_pool);
+                       return;
+               }
        }

        memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
@@ -6924,15 +6935,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
        pmboxq->vport = vport;

        if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-           (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+           (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
                rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
-       else
+               if (rc != MBX_SUCCESS) {
+                       mempool_free(pmboxq, phba->mbox_mem_pool);
+                       return;
+               }
+       } else {
                rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-
-       if (rc != MBX_SUCCESS) {
-               if (rc != MBX_TIMEOUT)
-                       mempool_free( pmboxq, phba->mbox_mem_pool);
-               return;
+               if (rc != MBX_SUCCESS) {
+                       if (rc != MBX_TIMEOUT)
+                               mempool_free(pmboxq, phba->mbox_mem_pool);
+                       return;
+               }
        }

        lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
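The lpfc_attr.c hunks above all encode the same mailbox-ownership rule: a polled command (MBX_POLL) is always back in the caller's hands when lpfc_sli_issue_mbox() returns, so the job can be freed unconditionally on failure, whereas on the waited path an MBX_TIMEOUT means the completion handler may still fire, so the job must not be freed here. A sketch of the resulting shape; this helper is hypothetical glue for illustration, not driver code:

	static int issue_stats_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
				    bool polled)
	{
		int rc;

		if (polled) {
			rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
			if (rc != MBX_SUCCESS) {
				/* caller owns the job: always free */
				mempool_free(pmboxq, phba->mbox_mem_pool);
				return -EIO;
			}
		} else {
			rc = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						      phba->fc_ratov * 2);
			if (rc != MBX_SUCCESS) {
				/* on timeout the job may still complete later */
				if (rc != MBX_TIMEOUT)
					mempool_free(pmboxq,
						     phba->mbox_mem_pool);
				return -EIO;
			}
		}
		return 0;
	}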
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index a0aad4896a45..763b1eeb0ca8 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,9 +55,6 @@ void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
 void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
 void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
-void lpfc_supported_pages(struct lpfcMboxq *);
-void lpfc_pc_sli4_params(struct lpfcMboxq *);
-int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
                           uint16_t, uint16_t, bool);
 int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -351,8 +348,8 @@ int lpfc_sli_hbq_size(void);
 int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
                               struct lpfc_iocbq *, void *);
 int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
-int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
-                       uint64_t, lpfc_ctx_cmd);
+int lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
+                       lpfc_ctx_cmd abort_cmd);
 int
 lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
                        uint16_t, uint64_t, lpfc_ctx_cmd);
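With the ring argument gone from lpfc_sli_abort_iocb(), ring selection becomes the callee's job and the call sites simplify accordingly, as the lpfc_hbadisc.c and lpfc_nportdisc.c hunks further down show:

	/* before */
	lpfc_sli_abort_iocb(vport, &vport->phba->sli.sli3_ring[LPFC_FCP_RING],
			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);

	/* after */
	lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);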
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f0a758138ae8..3dd22da3153f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -1600,7 +1600,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
        struct lpfc_nodelist *new_ndlp;
        struct serv_parm *sp;
        uint8_t  name[sizeof(struct lpfc_name)];
-       uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
+       uint32_t keepDID = 0, keep_nlp_flag = 0;
        uint32_t keep_new_nlp_flag = 0;
        uint16_t keep_nlp_state;
        u32 keep_nlp_fc4_type = 0;
@@ -1622,7 +1622,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
        new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

        /* return immediately if the WWPN matches ndlp */
-       if (new_ndlp == ndlp)
+       if (!new_ndlp || (new_ndlp == ndlp))
                return ndlp;

        if (phba->sli_rev == LPFC_SLI_REV4) {
@@ -1641,30 +1641,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
                         (new_ndlp ? new_ndlp->nlp_flag : 0),
                         (new_ndlp ? new_ndlp->nlp_fc4_type : 0));

-       if (!new_ndlp) {
-               rc = memcmp(&ndlp->nlp_portname, name,
-                           sizeof(struct lpfc_name));
-               if (!rc) {
-                       if (active_rrqs_xri_bitmap)
-                               mempool_free(active_rrqs_xri_bitmap,
-                                            phba->active_rrq_pool);
-                       return ndlp;
-               }
-               new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
-               if (!new_ndlp) {
-                       if (active_rrqs_xri_bitmap)
-                               mempool_free(active_rrqs_xri_bitmap,
-                                            phba->active_rrq_pool);
-                       return ndlp;
-               }
-       } else {
-               keepDID = new_ndlp->nlp_DID;
-               if (phba->sli_rev == LPFC_SLI_REV4 &&
-                   active_rrqs_xri_bitmap)
-                       memcpy(active_rrqs_xri_bitmap,
-                              new_ndlp->active_rrqs_xri_bitmap,
-                              phba->cfg_rrq_xri_bitmap_sz);
-       }
+       keepDID = new_ndlp->nlp_DID;
+
+       if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
+               memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
+                      phba->cfg_rrq_xri_bitmap_sz);

        /* At this point in this routine, we know new_ndlp will be
         * returned. however, any previous GID_FTs that were done
@@ -2063,13 +2044,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  * This routine issues a Port Login (PLOGI) command to a remote N_Port
  * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
  * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
- * This routine constructs the proper feilds of the PLOGI IOCB and invokes
+ * This routine constructs the proper fields of the PLOGI IOCB and invokes
  * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PLOGI ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding
+ * the ndlp and the reference to ndlp will be stored into the context1 field
+ * of the IOCB for the completion callback function to the PLOGI ELS command.
 *
  * Return code
  *   0 - Successfully issued a plogi for @vport
@@ -2087,29 +2067,28 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
        int ret;

        ndlp = lpfc_findnode_did(vport, did);
+       if (!ndlp)
+               return 1;
+
-       if (ndlp) {
-               /* Defer the processing of the issue PLOGI until after the
-                * outstanding UNREG_RPI mbox command completes, unless we
-                * are going offline. This logic does not apply for Fabric DIDs
-                */
-               if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
-                   ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
-                   !(vport->fc_flag & FC_OFFLINE_MODE)) {
-                       lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-                                        "4110 Issue PLOGI x%x deferred "
-                                        "on NPort x%x rpi x%x Data: x%px\n",
-                                        ndlp->nlp_defer_did, ndlp->nlp_DID,
-                                        ndlp->nlp_rpi, ndlp);
-
-                       /* We can only defer 1st PLOGI */
-                       if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
-                               ndlp->nlp_defer_did = did;
-                       return 0;
-               }
+       /* Defer the processing of the issue PLOGI until after the
+        * outstanding UNREG_RPI mbox command completes, unless we
+        * are going offline. This logic does not apply for Fabric DIDs
+        */
+       if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+           ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+           !(vport->fc_flag & FC_OFFLINE_MODE)) {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+                                "4110 Issue PLOGI x%x deferred "
+                                "on NPort x%x rpi x%x Data: x%px\n",
+                                ndlp->nlp_defer_did, ndlp->nlp_DID,
+                                ndlp->nlp_rpi, ndlp);
+
+               /* We can only defer 1st PLOGI */
+               if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
+                       ndlp->nlp_defer_did = did;
+               return 0;
        }

-       /* If ndlp is not NULL, we will bump the reference count on it */
        cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
        elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
                                     ELS_CMD_PLOGI);
@@ -3829,7 +3808,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                did = irsp->un.elsreq64.remoteID;
                ndlp = lpfc_findnode_did(vport, did);
                if (!ndlp && (cmd != ELS_CMD_PLOGI))
-                       return 1;
+                       return 0;
        }

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -4473,10 +4452,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
  * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
  * field in the command IOCB is not NULL, the referred mailbox command will
  * be send out, and then invokes the lpfc_els_free_iocb() routine to release
- * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
- * link down event occurred during the discovery, the lpfc_nlp_not_used()
- * routine shall be invoked trying to release the ndlp if no other threads
- * are currently referring it.
+ * the IOCB.
 **/
 static void
 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
@@ -4486,10 +4462,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
        struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
        IOCB_t  *irsp;
-       uint8_t *pcmd;
        LPFC_MBOXQ_t *mbox = NULL;
        struct lpfc_dmabuf *mp = NULL;
-       uint32_t ls_rjt = 0;

        irsp = &rspiocb->iocb;

@@ -4501,18 +4475,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        if (cmdiocb->context_un.mbox)
                mbox = cmdiocb->context_un.mbox;

-       /* First determine if this is a LS_RJT cmpl. Note, this callback
-        * function can have cmdiocb->contest1 (ndlp) field set to NULL.
-        */
-       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
-       if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
-               /* A LS_RJT associated with Default RPI cleanup has its own
-                * separate code path.
-                */
-               if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
-                       ls_rjt = 1;
-       }
-
        /* Check to see if link went down during discovery */
        if (!ndlp || lpfc_els_chk_latt(vport)) {
                if (mbox) {
@@ -4523,15 +4485,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        }
                        mempool_free(mbox, phba->mbox_mem_pool);
                }
-               if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
-                       if (lpfc_nlp_not_used(ndlp)) {
-                               ndlp = NULL;
-                               /* Indicate the node has already released,
-                                * should not reference to it from within
-                                * the routine lpfc_els_free_iocb.
-                                */
-                               cmdiocb->context1 = NULL;
-                       }
                goto out;
        }

@@ -4609,29 +4562,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                "Data: x%x x%x x%x\n",
                                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
                                ndlp->nlp_rpi);
-
-                       if (lpfc_nlp_not_used(ndlp)) {
-                               ndlp = NULL;
-                               /* Indicate node has already been released,
-                                * should not reference to it from within
-                                * the routine lpfc_els_free_iocb.
-                                */
-                               cmdiocb->context1 = NULL;
-                       }
-               } else {
-                       /* Do not drop node for lpfc_els_abort'ed ELS cmds */
-                       if (!lpfc_error_lost_link(irsp) &&
-                           ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
-                               if (lpfc_nlp_not_used(ndlp)) {
-                                       ndlp = NULL;
-                                       /* Indicate node has already been
-                                        * released, should not reference
-                                        * to it from within the routine
-                                        * lpfc_els_free_iocb.
-                                        */
-                                       cmdiocb->context1 = NULL;
-                               }
-                       }
                }
                mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
                if (mp) {
@@ -4647,19 +4577,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
                ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
                spin_unlock_irq(&ndlp->lock);
-
-               /* If the node is not being used by another discovery thread,
-                * and we are sending a reject, we are done with it.
-                * Release driver reference count here and free associated
-                * resources.
-                */
-               if (ls_rjt)
-                       if (lpfc_nlp_not_used(ndlp))
-                               /* Indicate node has already been released,
-                                * should not reference to it from within
-                                * the routine lpfc_els_free_iocb.
-                                */
-                               cmdiocb->context1 = NULL;
        }

        /* Release the originating I/O reference. */
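Two themes run through the lpfc_els.c hunks: lpfc_plogi_confirm_nport() no longer fabricates a fallback node when the WWPN lookup misses, and lpfc_cmpl_els_rsp() drops all of its lpfc_nlp_not_used() node-release paths (the ls_rjt/pcmd bookkeeping existed only to feed them). Condensed from the hunks above, the new entry logic is simply:

	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	/* return immediately if the WWPN matches ndlp */
	if (!new_ndlp || (new_ndlp == ndlp))
		return ndlp;

	keepDID = new_ndlp->nlp_DID;
	if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
		memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
		       phba->cfg_rrq_xri_bitmap_sz);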
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 48ca4a612f80..c5176f406386 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -140,11 +140,8 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
                              "rport terminate: sid:x%x did:x%x flg:x%x",
                              ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

-       if (ndlp->nlp_sid != NLP_NO_SID) {
-               lpfc_sli_abort_iocb(vport,
-                                   &vport->phba->sli.sli3_ring[LPFC_FCP_RING],
-                                   ndlp->nlp_sid, 0, LPFC_CTX_TGT);
-       }
+       if (ndlp->nlp_sid != NLP_NO_SID)
+               lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
 }

 /*
@@ -299,8 +296,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)

        if (ndlp->nlp_sid != NLP_NO_SID) {
                warn_on = 1;
-               lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
-                                   ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+               lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }

        if (warn_on) {
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 541b9aef6bfe..f5bc2c32a817 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -124,6 +124,7 @@ struct lpfc_sli_intf {
 /* Define SLI4 Alignment requirements. */
 #define LPFC_ALIGN_16_BYTE     16
 #define LPFC_ALIGN_64_BYTE     64
+#define SLI4_PAGE_SIZE         4096

 /* Define SLI4 specific definitions. */
 #define LPFC_MQ_CQE_BYTE_OFFSET        256
@@ -2976,62 +2977,6 @@ struct lpfc_mbx_request_features {
 #define lpfc_mbx_rq_ftr_rsp_mrqp_WORD          word3
 };

-struct lpfc_mbx_supp_pages {
-       uint32_t word1;
-#define qs_SHIFT                               0
-#define qs_MASK                                        0x00000001
-#define qs_WORD                                        word1
-#define wr_SHIFT                               1
-#define wr_MASK                                0x00000001
-#define wr_WORD                                        word1
-#define pf_SHIFT                               8
-#define pf_MASK                                        0x000000ff
-#define pf_WORD                                        word1
-#define cpn_SHIFT                              16
-#define cpn_MASK                               0x000000ff
-#define cpn_WORD                               word1
-       uint32_t word2;
-#define list_offset_SHIFT                      0
-#define list_offset_MASK                       0x000000ff
-#define list_offset_WORD                       word2
-#define next_offset_SHIFT                      8
-#define next_offset_MASK                       0x000000ff
-#define next_offset_WORD                       word2
-#define elem_cnt_SHIFT                         16
-#define elem_cnt_MASK                          0x000000ff
-#define elem_cnt_WORD                          word2
-       uint32_t word3;
-#define pn_0_SHIFT                             24
-#define pn_0_MASK                              0x000000ff
-#define pn_0_WORD                              word3
-#define pn_1_SHIFT                             16
-#define pn_1_MASK                              0x000000ff
-#define pn_1_WORD                              word3
-#define pn_2_SHIFT                             8
-#define pn_2_MASK                              0x000000ff
-#define pn_2_WORD                              word3
-#define pn_3_SHIFT                             0
-#define pn_3_MASK                              0x000000ff
-#define pn_3_WORD                              word3
-       uint32_t word4;
-#define pn_4_SHIFT                             24
-#define pn_4_MASK                              0x000000ff
-#define pn_4_WORD                              word4
-#define pn_5_SHIFT                             16
-#define pn_5_MASK                              0x000000ff
-#define pn_5_WORD                              word4
-#define pn_6_SHIFT                             8
-#define pn_6_MASK                              0x000000ff
-#define pn_6_WORD                              word4
-#define pn_7_SHIFT                             0
-#define pn_7_MASK                              0x000000ff
-#define pn_7_WORD                              word4
-       uint32_t rsvd[27];
-#define LPFC_SUPP_PAGES                        0
-#define LPFC_BLOCK_GUARD_PROFILES      1
-#define LPFC_SLI4_PARAMETERS           2
-};

 struct lpfc_mbx_memory_dump_type3 {
        uint32_t word1;
 #define lpfc_mbx_memory_dump_type3_type_SHIFT    0
@@ -3248,121 +3193,6 @@ struct user_eeprom {
        uint8_t reserved191[57];
 };

-struct lpfc_mbx_pc_sli4_params {
-       uint32_t word1;
-#define qs_SHIFT                               0
-#define qs_MASK                                        0x00000001
-#define qs_WORD                                        word1
-#define wr_SHIFT                               1
-#define wr_MASK                                        0x00000001
-#define wr_WORD                                        word1
-#define pf_SHIFT                               8
-#define pf_MASK                                        0x000000ff
-#define pf_WORD                                        word1
-#define cpn_SHIFT                              16
-#define cpn_MASK                               0x000000ff
-#define cpn_WORD                               word1
-       uint32_t word2;
-#define if_type_SHIFT                          0
-#define if_type_MASK                           0x00000007
-#define if_type_WORD                           word2
-#define sli_rev_SHIFT                          4
-#define sli_rev_MASK                           0x0000000f
-#define sli_rev_WORD                           word2
-#define sli_family_SHIFT                       8
-#define sli_family_MASK                                0x000000ff
-#define sli_family_WORD                                word2
-#define featurelevel_1_SHIFT                   16
-#define featurelevel_1_MASK                    0x000000ff
-#define featurelevel_1_WORD                    word2
-#define featurelevel_2_SHIFT                   24
-#define featurelevel_2_MASK                    0x0000001f
-#define featurelevel_2_WORD                    word2
-       uint32_t word3;
-#define fcoe_SHIFT                             0
-#define fcoe_MASK                              0x00000001
-#define fcoe_WORD                              word3
-#define fc_SHIFT                               1
-#define fc_MASK                                        0x00000001
-#define fc_WORD                                        word3
-#define nic_SHIFT                              2
-#define nic_MASK                               0x00000001
-#define nic_WORD                               word3
-#define iscsi_SHIFT                            3
-#define iscsi_MASK                             0x00000001
-#define iscsi_WORD                             word3
-#define rdma_SHIFT                             4
-#define rdma_MASK                              0x00000001
-#define rdma_WORD                              word3
-       uint32_t sge_supp_len;
-#define SLI4_PAGE_SIZE 4096
-       uint32_t word5;
-#define if_page_sz_SHIFT                       0
-#define if_page_sz_MASK                                0x0000ffff
-#define if_page_sz_WORD                                word5
-#define loopbk_scope_SHIFT                     24
-#define loopbk_scope_MASK                      0x0000000f
-#define loopbk_scope_WORD                      word5
-#define rq_db_window_SHIFT                     28
-#define rq_db_window_MASK                      0x0000000f
-#define rq_db_window_WORD                      word5
-       uint32_t word6;
-#define eq_pages_SHIFT                         0
-#define eq_pages_MASK                          0x0000000f
-#define eq_pages_WORD                          word6
-#define eqe_size_SHIFT                         8
-#define eqe_size_MASK                          0x000000ff
-#define eqe_size_WORD                          word6
-       uint32_t word7;
-#define cq_pages_SHIFT                         0
-#define cq_pages_MASK                          0x0000000f
-#define cq_pages_WORD                          word7
-#define cqe_size_SHIFT                         8
-#define cqe_size_MASK                          0x000000ff
-#define cqe_size_WORD                          word7
-       uint32_t word8;
-#define mq_pages_SHIFT                         0
-#define mq_pages_MASK                          0x0000000f
-#define mq_pages_WORD                          word8
-#define mqe_size_SHIFT                         8
-#define mqe_size_MASK                          0x000000ff
-#define mqe_size_WORD                          word8
-#define mq_elem_cnt_SHIFT                      16
-#define mq_elem_cnt_MASK                       0x000000ff
-#define mq_elem_cnt_WORD                       word8
-       uint32_t word9;
-#define wq_pages_SHIFT                         0
-#define wq_pages_MASK                          0x0000ffff
-#define wq_pages_WORD                          word9
-#define wqe_size_SHIFT                         8
-#define wqe_size_MASK                          0x000000ff
-#define wqe_size_WORD                          word9
-       uint32_t word10;
-#define rq_pages_SHIFT                         0
-#define rq_pages_MASK                          0x0000ffff
-#define rq_pages_WORD                          word10
-#define rqe_size_SHIFT                         8
-#define rqe_size_MASK                          0x000000ff
-#define rqe_size_WORD                          word10
-       uint32_t word11;
-#define hdr_pages_SHIFT                                0
-#define hdr_pages_MASK                         0x0000000f
-#define hdr_pages_WORD                         word11
-#define hdr_size_SHIFT                         8
-#define hdr_size_MASK                          0x0000000f
-#define hdr_size_WORD                          word11
-#define hdr_pp_align_SHIFT                     16
-#define hdr_pp_align_MASK                      0x0000ffff
-#define hdr_pp_align_WORD                      word11
-       uint32_t word12;
-#define sgl_pages_SHIFT                                0
-#define sgl_pages_MASK                         0x0000000f
-#define sgl_pages_WORD                         word12
-#define sgl_pp_align_SHIFT                     16
-#define sgl_pp_align_MASK                      0x0000ffff
-#define sgl_pp_align_WORD                      word12
-       uint32_t rsvd_13_63[51];
-};

#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
                               &(~((SLI4_PAGE_SIZE)-1)))

@@ -3994,8 +3824,6 @@ struct lpfc_mqe {
                struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
                struct lpfc_mbx_query_fw_config query_fw_cfg;
                struct lpfc_mbx_set_beacon_config beacon_config;
-               struct lpfc_mbx_supp_pages supp_pages;
-               struct lpfc_mbx_pc_sli4_params sli4_params;
                struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
                struct lpfc_mbx_set_link_diag_state link_diag_state;
                struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
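SLI4_PAGE_SIZE previously lived inside the now-deleted lpfc_mbx_pc_sli4_params definition; hoisting it up next to the other alignment defines keeps the surviving SLI4_PAGE_ALIGN() macro compiling. Its arithmetic is the usual round-up-to-a-power-of-two:

	#define SLI4_PAGE_SIZE         4096
	#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
				       &(~((SLI4_PAGE_SIZE)-1)))

	/* SLI4_PAGE_ALIGN(4096) == 4096, SLI4_PAGE_ALIGN(4100) == 8192 */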
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 71f340dd4fbd..a67051ba3f12 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6573,8 +6573,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        LPFC_MBOXQ_t *mboxq;
        MAILBOX_t *mb;
        int rc, i, max_buf_size;
-       uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
-       struct lpfc_mqe *mqe;
        int longs;
        int extra;
        uint64_t wwn;
@@ -6808,32 +6806,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)

        lpfc_nvme_mod_param_dep(phba);

-       /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
-       lpfc_supported_pages(mboxq);
-       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-       if (!rc) {
-               mqe = &mboxq->u.mqe;
-               memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
-                      LPFC_MAX_SUPPORTED_PAGES);
-               for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
-                       switch (pn_page[i]) {
-                       case LPFC_SLI4_PARAMETERS:
-                               phba->sli4_hba.pc_sli4_params.supported = 1;
-                               break;
-                       default:
-                               break;
-                       }
-               }
-               /* Read the port's SLI4 Parameters capabilities if supported. */
-               if (phba->sli4_hba.pc_sli4_params.supported)
-                       rc = lpfc_pc_sli4_params_get(phba, mboxq);
-               if (rc) {
-                       mempool_free(mboxq, phba->mbox_mem_pool);
-                       rc = -EIO;
-                       goto out_free_bsmbx;
-               }
-       }
-
        /*
         * Get sli4 parameters that override parameters from Port capabilities.
         * If this call fails, it isn't critical unless the SLI4 parameters come
@@ -9660,8 +9632,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                                "3250 QUERY_FW_CFG mailbox failed with status "
                                "x%x add_status x%x, mbx status x%x\n",
                                shdr_status, shdr_add_status, rc);
-               if (rc != MBX_TIMEOUT)
-                       mempool_free(mboxq, phba->mbox_mem_pool);
+               mempool_free(mboxq, phba->mbox_mem_pool);
                rc = -ENXIO;
                goto out_error;
        }
@@ -9677,8 +9648,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
                        phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);

-       if (rc != MBX_TIMEOUT)
-               mempool_free(mboxq, phba->mbox_mem_pool);
+       mempool_free(mboxq, phba->mbox_mem_pool);

        /*
         * Set up HBA Event Queues (EQs)
@@ -10276,8 +10246,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
                shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
                shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
                                         &shdr->response);
-               if (rc != MBX_TIMEOUT)
-                       mempool_free(mboxq, phba->mbox_mem_pool);
+               mempool_free(mboxq, phba->mbox_mem_pool);
                if (shdr_status || shdr_add_status || rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0495 SLI_FUNCTION_RESET mailbox "
@@ -12075,78 +12044,6 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
                phba->pport->work_port_events = 0;
 }

- /**
- * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
- * @phba: Pointer to HBA context object.
- * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
- *
- * This function is called in the SLI4 code path to read the port's
- * sli4 capabilities.
- *
- * This function may be be called from any context that can block-wait
- * for the completion.  The expectation is that this routine is called
- * typically from probe_one or from the online routine.
- **/
-int
-lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
-{
-       int rc;
-       struct lpfc_mqe *mqe;
-       struct lpfc_pc_sli4_params *sli4_params;
-       uint32_t mbox_tmo;
-
-       rc = 0;
-       mqe = &mboxq->u.mqe;
-
-       /* Read the port's SLI4 Parameters port capabilities */
-       lpfc_pc_sli4_params(mboxq);
-       if (!phba->sli4_hba.intr_enable)
-               rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-       else {
-               mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
-               rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
-       }
-
-       if (unlikely(rc))
-               return 1;
-
-       sli4_params = &phba->sli4_hba.pc_sli4_params;
-       sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
-       sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
-       sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
-       sli4_params->featurelevel_1 = bf_get(featurelevel_1,
-                                            &mqe->un.sli4_params);
-       sli4_params->featurelevel_2 = bf_get(featurelevel_2,
-                                            &mqe->un.sli4_params);
-       sli4_params->proto_types = mqe->un.sli4_params.word3;
-       sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
-       sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
-       sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
-       sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
-       sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
-       sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
-       sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
-       sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
-       sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
-       sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
-       sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
-       sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
-       sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
-       sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
-       sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
-       sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
-       sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
-       sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
-       sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
-       sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
-
-       /* Make sure that sge_supp_len can be handled by the driver */
-       if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
-               sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
-
-       return rc;
-}
-
 /**
  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
  * @phba: Pointer to HBA context object.
@@ -12205,7 +12102,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        else
                phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
        sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
-       sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+       sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
+                                          mbx_sli4_parameters);
        sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
        sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
        sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
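The rename to cfg_loopbk_scope is forced by the struct deletion above: bf_get() pastes its field name into _SHIFT/_MASK/_WORD macro triples, and the plain loopbk_scope_* triple was defined inside the removed lpfc_mbx_pc_sli4_params. Roughly, the accessor works like this (paraphrasing the lpfc_hw4.h macro):

	#define bf_get(name, ptr) \
		((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

so bf_get(cfg_loopbk_scope, mbx_sli4_parameters) resolves against the cfg_loopbk_scope_{SHIFT,MASK,WORD} defines that still exist.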
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index c03a7f12dd65..72dd22ad5dcc 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2624,39 +2624,3 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
        resume_rpi->event_tag = ndlp->phba->fc_eventTag;
 }

-/**
- * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
- *                        mailbox command.
- * @mbox: pointer to lpfc mbox command to initialize.
- *
- * The PORT_CAPABILITIES supported pages mailbox command is issued to
- * retrieve the particular feature pages supported by the port.
- **/
-void
-lpfc_supported_pages(struct lpfcMboxq *mbox)
-{
-       struct lpfc_mbx_supp_pages *supp_pages;
-
-       memset(mbox, 0, sizeof(*mbox));
-       supp_pages = &mbox->u.mqe.un.supp_pages;
-       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
-       bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
-}
-
-/**
- * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
- * @mbox: pointer to lpfc mbox command to initialize.
- *
- * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
- * retrieve the particular SLI4 features supported by the port.
- **/
-void
-lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
-{
-       struct lpfc_mbx_pc_sli4_params *sli4_params;
-
-       memset(mbox, 0, sizeof(*mbox));
-       sli4_params = &mbox->u.mqe.un.sli4_params;
-       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
-       bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
-}
53121 diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
53122 index 135d8e8a42ba..9f05f5e329c6 100644
53123 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c
53124 +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
53125 @@ -279,106 +279,43 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
53126         lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
53129 -/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
53130 +/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
53131   * @phba: pointer to lpfc hba data structure.
53132 - * @link_mbox: pointer to CONFIG_LINK mailbox object
53133 + * @login_mbox: pointer to REG_RPI mailbox object
53134   *
53135 - * This routine is only called if we are SLI3, direct connect pt2pt
53136 - * mode and the remote NPort issues the PLOGI after link up.
53137 + * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
53138   */
53139  static void
53140 -lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
53141 +lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
53143 -       LPFC_MBOXQ_t *login_mbox;
53144 -       MAILBOX_t *mb = &link_mbox->u.mb;
53145         struct lpfc_iocbq *save_iocb;
53146         struct lpfc_nodelist *ndlp;
53147 +       MAILBOX_t *mb = &login_mbox->u.mb;
53149         int rc;
53151 -       ndlp = link_mbox->ctx_ndlp;
53152 -       login_mbox = link_mbox->context3;
53153 +       ndlp = login_mbox->ctx_ndlp;
53154         save_iocb = login_mbox->context3;
53155 -       link_mbox->context3 = NULL;
53156 -       login_mbox->context3 = NULL;
53158 -       /* Check for CONFIG_LINK error */
53159 -       if (mb->mbxStatus) {
53160 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53161 -                               "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
53162 -                               mb->mbxStatus);
53163 -               mempool_free(login_mbox, phba->mbox_mem_pool);
53164 -               mempool_free(link_mbox, phba->mbox_mem_pool);
53165 -               kfree(save_iocb);
53166 -               return;
53167 -       }
53169 -       /* Now that CONFIG_LINK completed, and our SID is configured,
53170 -        * we can now proceed with sending the PLOGI ACC.
53171 -        */
53172 -       rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
53173 -                             save_iocb, ndlp, login_mbox);
53174 -       if (rc) {
53175 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53176 -                               "4576 PLOGI ACC fails pt2pt discovery: %x\n",
53177 -                               rc);
53178 -               mempool_free(login_mbox, phba->mbox_mem_pool);
53179 +       if (mb->mbxStatus == MBX_SUCCESS) {
53180 +               /* Now that REG_RPI completed successfully,
53181 +                * we can now proceed with sending the PLOGI ACC.
53182 +                */
53183 +               rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
53184 +                                     save_iocb, ndlp, NULL);
53185 +               if (rc) {
53186 +                       lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53187 +                                       "4576 PLOGI ACC fails pt2pt discovery: "
53188 +                                       "DID %x Data: %x\n", ndlp->nlp_DID, rc);
53189 +               }
53190         }
53192 -       mempool_free(link_mbox, phba->mbox_mem_pool);
53193 +       /* Now process the REG_RPI cmpl */
53194 +       lpfc_mbx_cmpl_reg_login(phba, login_mbox);
53195 +       ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
53196         kfree(save_iocb);
53199 -/**
53200 - * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler
53201 - * @phba: Pointer to HBA context object.
53202 - * @pmb: Pointer to mailbox object.
53203 - *
53204 - * This function provides the unreg rpi mailbox completion handler for a tgt.
53205 - * The routine frees the memory resources associated with the completed
53206 - * mailbox command and transmits the ELS ACC.
53207 - *
53208 - * This routine is only called if we are SLI4, acting in target
53209 - * mode and the remote NPort issues the PLOGI after link up.
53210 - **/
53211 -static void
53212 -lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
53214 -       struct lpfc_vport *vport = pmb->vport;
53215 -       struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
53216 -       LPFC_MBOXQ_t *mbox = pmb->context3;
53217 -       struct lpfc_iocbq *piocb = NULL;
53218 -       int rc;
53220 -       if (mbox) {
53221 -               pmb->context3 = NULL;
53222 -               piocb = mbox->context3;
53223 -               mbox->context3 = NULL;
53224 -       }
53226 -       /*
53227 -        * Complete the unreg rpi mbx request, and update flags.
53228 -        * This will also restart any deferred events.
53229 -        */
53230 -       lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
53232 -       if (!piocb) {
53233 -               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
53234 -                                "4578 PLOGI ACC fail\n");
53235 -               if (mbox)
53236 -                       mempool_free(mbox, phba->mbox_mem_pool);
53237 -               return;
53238 -       }
53240 -       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
53241 -       if (rc) {
53242 -               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
53243 -                                "4579 PLOGI ACC fail %x\n", rc);
53244 -               if (mbox)
53245 -                       mempool_free(mbox, phba->mbox_mem_pool);
53246 -       }
53247 -       kfree(piocb);
53250  static int
53251  lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53252                struct lpfc_iocbq *cmdiocb)
53253 @@ -395,8 +332,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53254         struct lpfc_iocbq *save_iocb;
53255         struct ls_rjt stat;
53256         uint32_t vid, flag;
53257 -       u16 rpi;
53258 -       int rc, defer_acc;
53259 +       int rc;
53261         memset(&stat, 0, sizeof (struct ls_rjt));
53262         pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
53263 @@ -445,7 +381,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53264         else
53265                 ndlp->nlp_fcp_info |= CLASS3;
53267 -       defer_acc = 0;
53268         ndlp->nlp_class_sup = 0;
53269         if (sp->cls1.classValid)
53270                 ndlp->nlp_class_sup |= FC_COS_CLASS1;
53271 @@ -539,27 +474,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53273                 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
53275 -               /* Issue config_link / reg_vfi to account for updated TOV's */
53277 +               /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
53278 +                * to account for updated TOV's / parameters
53279 +                */
53280                 if (phba->sli_rev == LPFC_SLI_REV4)
53281                         lpfc_issue_reg_vfi(vport);
53282                 else {
53283 -                       defer_acc = 1;
53284                         link_mbox = mempool_alloc(phba->mbox_mem_pool,
53285                                                   GFP_KERNEL);
53286                         if (!link_mbox)
53287                                 goto out;
53288                         lpfc_config_link(phba, link_mbox);
53289 -                       link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
53290 +                       link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
53291                         link_mbox->vport = vport;
53292                         link_mbox->ctx_ndlp = ndlp;
53294 -                       save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
53295 -                       if (!save_iocb)
53296 +                       rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
53297 +                       if (rc == MBX_NOT_FINISHED) {
53298 +                               mempool_free(link_mbox, phba->mbox_mem_pool);
53299                                 goto out;
53300 -                       /* Save info from cmd IOCB used in rsp */
53301 -                       memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
53302 -                              sizeof(struct lpfc_iocbq));
53303 +                       }
53304                 }
53306                 lpfc_can_disctmo(vport);
53307 @@ -578,59 +512,28 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53308         if (!login_mbox)
53309                 goto out;
53311 -       /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
53312 -       if (phba->nvmet_support && !defer_acc) {
53313 -               link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
53314 -               if (!link_mbox)
53315 -                       goto out;
53317 -               /* As unique identifiers such as iotag would be overwritten
53318 -                * with those from the cmdiocb, allocate separate temporary
53319 -                * storage for the copy.
53320 -                */
53321 -               save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
53322 -               if (!save_iocb)
53323 -                       goto out;
53325 -               /* Unreg RPI is required for SLI4. */
53326 -               rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
53327 -               lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
53328 -               link_mbox->vport = vport;
53329 -               link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
53330 -               if (!link_mbox->ctx_ndlp)
53331 -                       goto out;
53333 -               link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
53335 -               if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
53336 -                   (!(vport->fc_flag & FC_OFFLINE_MODE)))
53337 -                       ndlp->nlp_flag |= NLP_UNREG_INP;
53338 +       save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
53339 +       if (!save_iocb)
53340 +               goto out;
53342 -               /* Save info from cmd IOCB used in rsp */
53343 -               memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
53344 +       /* Save info from cmd IOCB to be used in rsp after all mbox completes */
53345 +       memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
53346 +              sizeof(struct lpfc_iocbq));
53348 -               /* Delay sending ACC till unreg RPI completes. */
53349 -               defer_acc = 1;
53350 -       } else if (phba->sli_rev == LPFC_SLI_REV4)
53351 +       /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
53352 +       if (phba->sli_rev == LPFC_SLI_REV4)
53353                 lpfc_unreg_rpi(vport, ndlp);
53355 +       /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
53356 +        * always be deferring the ACC.
53357 +        */
53358         rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
53359                             (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
53360         if (rc)
53361                 goto out;
53363 -       /* ACC PLOGI rsp command needs to execute first,
53364 -        * queue this login_mbox command to be processed later.
53365 -        */
53366         login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
53367 -       /*
53368 -        * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
53369 -        * command issued in lpfc_cmpl_els_acc().
53370 -        */
53371         login_mbox->vport = vport;
53372 -       spin_lock_irq(&ndlp->lock);
53373 -       ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
53374 -       spin_unlock_irq(&ndlp->lock);
53376         /*
53377          * If there is an outstanding PLOGI issued, abort it before
53378 @@ -660,7 +563,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53379                  * to register, then unregister the RPI.
53380                  */
53381                 spin_lock_irq(&ndlp->lock);
53382 -               ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
53383 +               ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
53384 +                                  NLP_RCV_PLOGI);
53385                 spin_unlock_irq(&ndlp->lock);
53386                 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
53387                 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
53388 @@ -670,42 +574,39 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53389                         mempool_free(login_mbox, phba->mbox_mem_pool);
53390                 return 1;
53391         }
53392 -       if (defer_acc) {
53393 -               /* So the order here should be:
53394 -                * SLI3 pt2pt
53395 -                *   Issue CONFIG_LINK mbox
53396 -                *   CONFIG_LINK cmpl
53397 -                * SLI4 tgt
53398 -                *   Issue UNREG RPI mbx
53399 -                *   UNREG RPI cmpl
53400 -                * Issue PLOGI ACC
53401 -                * PLOGI ACC cmpl
53402 -                * Issue REG_LOGIN mbox
53403 -                */
53405 -               /* Save the REG_LOGIN mbox for and rcv IOCB copy later */
53406 -               link_mbox->context3 = login_mbox;
53407 -               login_mbox->context3 = save_iocb;
53408 +       /* So the order here should be:
53409 +        * SLI3 pt2pt
53410 +        *   Issue CONFIG_LINK mbox
53411 +        *   CONFIG_LINK cmpl
53412 +        * SLI4 pt2pt
53413 +        *   Issue REG_VFI mbox
53414 +        *   REG_VFI cmpl
53415 +        * SLI4
53416 +        *   Issue UNREG RPI mbx
53417 +        *   UNREG RPI cmpl
53418 +        * Issue REG_RPI mbox
53419 +        * REG RPI cmpl
53420 +        * Issue PLOGI ACC
53421 +        * PLOGI ACC cmpl
53422 +        */
53423 +       login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
53424 +       login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
53425 +       login_mbox->context3 = save_iocb; /* For PLOGI ACC */
53427 -               /* Start the ball rolling by issuing CONFIG_LINK here */
53428 -               rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
53429 -               if (rc == MBX_NOT_FINISHED)
53430 -                       goto out;
53431 -               return 1;
53432 -       }
53433 +       spin_lock_irq(&ndlp->lock);
53434 +       ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
53435 +       spin_unlock_irq(&ndlp->lock);
53437 +       /* Start the ball rolling by issuing REG_LOGIN here */
53438 +       rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
53439 +       if (rc == MBX_NOT_FINISHED)
53440 +               goto out;
53441 +       lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
53443 -       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
53444 -       if (rc)
53445 -               mempool_free(login_mbox, phba->mbox_mem_pool);
53446         return 1;
53447  out:
53448 -       if (defer_acc)
53449 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53450 -                               "4577 discovery failure: %p %p %p\n",
53451 -                               save_iocb, link_mbox, login_mbox);
53452         kfree(save_iocb);
53453 -       if (link_mbox)
53454 -               mempool_free(link_mbox, phba->mbox_mem_pool);
53455         if (login_mbox)
53456                 mempool_free(login_mbox, phba->mbox_mem_pool);
53458 @@ -913,9 +814,14 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53459                 }
53460         } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
53461                 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
53462 -               !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
53463 +               (ndlp->nlp_type & NLP_NVME_TARGET) ||
53464 +               (vport->fc_flag & FC_PT2PT))) ||
53465                 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
53466 -               /* Only try to re-login if this is NOT a Fabric Node */
53467 +               /* Only try to re-login if this is NOT a Fabric Node
53468 +                * AND the remote NPORT is a FCP/NVME Target or we
53469 +                * AND the remote NPORT is an FCP/NVME Target or we
53470 +                * case for LOGO as a response to ADISC behavior.
53471 +                */
53472                 mod_timer(&ndlp->nlp_delayfunc,
53473                           jiffies + msecs_to_jiffies(1000 * 1));
53474                 spin_lock_irq(&ndlp->lock);
53475 @@ -1985,8 +1891,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
53476                 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
53478                 lpfc_issue_els_logo(vport, ndlp, 0);
53479 -               ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
53480 -               lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
53481                 return ndlp->nlp_state;
53482         }
53484 @@ -2633,12 +2537,10 @@ static uint32_t
53485  lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
53486                           void *arg, uint32_t evt)
53488 -       struct lpfc_hba  *phba = vport->phba;
53489         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
53491         /* flush the target */
53492 -       lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
53493 -                           ndlp->nlp_sid, 0, LPFC_CTX_TGT);
53494 +       lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
53496         /* Treat like rcv logo */
53497         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
53498 diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
53499 index bb2a4a0d1295..a3fd959f7431 100644
53500 --- a/drivers/scsi/lpfc/lpfc_nvmet.c
53501 +++ b/drivers/scsi/lpfc/lpfc_nvmet.c
53502 @@ -3304,7 +3304,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
53503         bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
53505         /* Word 10 */
53506 -       bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
53507         bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
53508         bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
53509                LPFC_WQE_LENLOC_WORD12);
53510 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
53511 index a4d697373c71..fab9ea6fe965 100644
53512 --- a/drivers/scsi/lpfc/lpfc_scsi.c
53513 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
53514 @@ -5815,7 +5815,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
53515                                         tgt_id, lun_id, context);
53516         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
53517         while (time_after(later, jiffies) && cnt) {
53518 -               schedule_timeout_uninterruptible(msecs_to_jiffies(20));
53519 +               schedule_msec_hrtimeout_uninterruptible(20);
53520                 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
53521         }
53522         if (cnt) {
53523 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
53524 index fa1a714a78f0..920cf329268b 100644
53525 --- a/drivers/scsi/lpfc/lpfc_sli.c
53526 +++ b/drivers/scsi/lpfc/lpfc_sli.c
53527 @@ -5683,12 +5683,10 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
53528                         phba->sli4_hba.lnk_info.lnk_no,
53529                         phba->BIOSVersion);
53530  out_free_mboxq:
53531 -       if (rc != MBX_TIMEOUT) {
53532 -               if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
53533 -                       lpfc_sli4_mbox_cmd_free(phba, mboxq);
53534 -               else
53535 -                       mempool_free(mboxq, phba->mbox_mem_pool);
53536 -       }
53537 +       if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
53538 +               lpfc_sli4_mbox_cmd_free(phba, mboxq);
53539 +       else
53540 +               mempool_free(mboxq, phba->mbox_mem_pool);
53541         return rc;
53544 @@ -5789,12 +5787,10 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
53545         }
53547  out_free_mboxq:
53548 -       if (rc != MBX_TIMEOUT) {
53549 -               if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
53550 -                       lpfc_sli4_mbox_cmd_free(phba, mboxq);
53551 -               else
53552 -                       mempool_free(mboxq, phba->mbox_mem_pool);
53553 -       }
53554 +       if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
53555 +               lpfc_sli4_mbox_cmd_free(phba, mboxq);
53556 +       else
53557 +               mempool_free(mboxq, phba->mbox_mem_pool);
53558         return rc;
53561 @@ -11647,7 +11643,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
53562         icmd = &cmdiocb->iocb;
53563         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
53564             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
53565 -           (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
53566 +           cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
53567                 return IOCB_ABORTING;
53569         if (!pring) {
53570 @@ -11811,13 +11807,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
53571                            lpfc_ctx_cmd ctx_cmd)
53573         struct lpfc_io_buf *lpfc_cmd;
53574 +       IOCB_t *icmd = NULL;
53575         int rc = 1;
53577         if (!iocbq || iocbq->vport != vport)
53578                 return rc;
53580 -       if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
53581 -           !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
53582 +       if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
53583 +           !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
53584 +             iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
53585 +               return rc;
53587 +       icmd = &iocbq->iocb;
53588 +       if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
53589 +           icmd->ulpCommand == CMD_CLOSE_XRI_CN)
53590                 return rc;
53592         lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
53593 @@ -11945,7 +11948,6 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
53594  /**
53595   * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
53596   * @vport: Pointer to virtual port.
53597 - * @pring: Pointer to driver SLI ring object.
53598   * @tgt_id: SCSI ID of the target.
53599   * @lun_id: LUN ID of the scsi device.
53600   * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
53601 @@ -11960,18 +11962,22 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
53602   * FCP iocbs associated with SCSI target specified by tgt_id parameter.
53603   * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
53604   * FCP iocbs associated with virtual port.
53605 + * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING]; for SLI4,
53606 + * lpfc_sli4_calc_ring is used.
53607   * This function returns number of iocbs it failed to abort.
53608   * This function is called with no locks held.
53609   **/
53610  int
53611 -lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
53612 -                   uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
53613 +lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
53614 +                   lpfc_ctx_cmd abort_cmd)
53616         struct lpfc_hba *phba = vport->phba;
53617 +       struct lpfc_sli_ring *pring = NULL;
53618         struct lpfc_iocbq *iocbq;
53619         int errcnt = 0, ret_val = 0;
53620         unsigned long iflags;
53621         int i;
53622 +       void *fcp_cmpl = NULL;
53624         /* all I/Os are in process of being flushed */
53625         if (phba->hba_flag & HBA_IOQ_FLUSH)
53626 @@ -11985,8 +11991,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
53627                         continue;
53629                 spin_lock_irqsave(&phba->hbalock, iflags);
53630 +               if (phba->sli_rev == LPFC_SLI_REV3) {
53631 +                       pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
53632 +                       fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
53633 +               } else if (phba->sli_rev == LPFC_SLI_REV4) {
53634 +                       pring = lpfc_sli4_calc_ring(phba, iocbq);
53635 +                       fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
53636 +               }
53637                 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
53638 -                                                    lpfc_sli_abort_fcp_cmpl);
53639 +                                                    fcp_cmpl);
53640                 spin_unlock_irqrestore(&phba->hbalock, iflags);
53641                 if (ret_val != IOCB_SUCCESS)
53642                         errcnt++;
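The loop above now resolves the ring and the completion handler per command from the SLI revision, so callers no longer pass a ring in. A hedged sketch of that dispatch-by-revision idiom, with invented names:

#include <stdio.h>

enum sli_rev { SLI_REV3 = 3, SLI_REV4 = 4 };

static void abort_cmpl_sli3(int tag) { printf("sli3 cmpl, tag %d\n", tag); }
static void abort_cmpl_sli4(int tag) { printf("sli4 cmpl, tag %d\n", tag); }

static void issue_abort(enum sli_rev rev, int tag)
{
        void (*cmpl)(int);

        /* Pick the handler here, once, instead of at every call site. */
        if (rev == SLI_REV4)
                cmpl = abort_cmpl_sli4;
        else
                cmpl = abort_cmpl_sli3;
        cmpl(tag);
}

int main(void)
{
        issue_abort(SLI_REV3, 1);
        issue_abort(SLI_REV4, 2);
        return 0;
}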
53643 @@ -17072,8 +17085,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
53644                                 "2509 RQ_DESTROY mailbox failed with "
53645                                 "status x%x add_status x%x, mbx status x%x\n",
53646                                 shdr_status, shdr_add_status, rc);
53647 -               if (rc != MBX_TIMEOUT)
53648 -                       mempool_free(mbox, hrq->phba->mbox_mem_pool);
53649 +               mempool_free(mbox, hrq->phba->mbox_mem_pool);
53650                 return -ENXIO;
53651         }
53652         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
53653 @@ -17170,7 +17182,9 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
53654         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
53655         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
53656         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
53657 -       if (rc != MBX_TIMEOUT)
53658 +       if (!phba->sli4_hba.intr_enable)
53659 +               mempool_free(mbox, phba->mbox_mem_pool);
53660 +       else if (rc != MBX_TIMEOUT)
53661                 mempool_free(mbox, phba->mbox_mem_pool);
53662         if (shdr_status || shdr_add_status || rc) {
53663                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
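These mailbox-cleanup hunks encode an ownership rule: a command issued in polling mode (interrupts disabled) completes synchronously, so its buffer is always the caller's to free, while in interrupt mode a timed-out command may still be completed later by the ISR, so the free is skipped on MBX_TIMEOUT. A toy version of that rule, all names invented:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MBX_SUCCESS 0
#define MBX_TIMEOUT 1

/* Polled commands finish before this returns; interrupt-driven ones
 * that time out may still be finished later, and the finisher then
 * owns the buffer. */
static int issue_mbox_wait(bool intr_enable, void *mbox)
{
        (void)mbox;
        return intr_enable ? MBX_TIMEOUT : MBX_SUCCESS;
}

int main(void)
{
        bool intr_enable = true;
        void *mbox = malloc(64);
        int rc = issue_mbox_wait(intr_enable, mbox);

        if (!intr_enable)               /* polled: always ours to free */
                free(mbox);
        else if (rc != MBX_TIMEOUT)     /* completed: safe to free */
                free(mbox);
        /* on timeout in interrupt mode the buffer is deliberately
         * left alive for the eventual completion */

        printf("rc=%d\n", rc);
        return 0;
}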
53664 @@ -17367,7 +17381,9 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
53665         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
53666         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
53667         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
53668 -       if (rc != MBX_TIMEOUT)
53669 +       if (!phba->sli4_hba.intr_enable)
53670 +               lpfc_sli4_mbox_cmd_free(phba, mbox);
53671 +       else if (rc != MBX_TIMEOUT)
53672                 lpfc_sli4_mbox_cmd_free(phba, mbox);
53673         if (shdr_status || shdr_add_status || rc) {
53674                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53675 @@ -17480,7 +17496,9 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
53676         shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
53677         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
53678         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
53679 -       if (rc != MBX_TIMEOUT)
53680 +       if (!phba->sli4_hba.intr_enable)
53681 +               lpfc_sli4_mbox_cmd_free(phba, mbox);
53682 +       else if (rc != MBX_TIMEOUT)
53683                 lpfc_sli4_mbox_cmd_free(phba, mbox);
53684         if (shdr_status || shdr_add_status || rc) {
53685                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53686 @@ -18064,7 +18082,6 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
53687         if (cmd_iocbq) {
53688                 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
53689                 lpfc_nlp_put(ndlp);
53690 -               lpfc_nlp_not_used(ndlp);
53691                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
53692         }
53694 @@ -18831,8 +18848,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
53695         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
53696         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
53697         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
53698 -       if (rc != MBX_TIMEOUT)
53699 -               mempool_free(mboxq, phba->mbox_mem_pool);
53700 +       mempool_free(mboxq, phba->mbox_mem_pool);
53701         if (shdr_status || shdr_add_status || rc) {
53702                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53703                                 "2514 POST_RPI_HDR mailbox failed with "
53704 @@ -20076,7 +20092,9 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
53705                         break;
53706                 }
53707         }
53708 -       if (rc != MBX_TIMEOUT)
53709 +       if (!phba->sli4_hba.intr_enable)
53710 +               mempool_free(mbox, phba->mbox_mem_pool);
53711 +       else if (rc != MBX_TIMEOUT)
53712                 mempool_free(mbox, phba->mbox_mem_pool);
53713         if (shdr_status || shdr_add_status || rc) {
53714                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53715 diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
53716 index ac0eef975f17..b6beacfd0f62 100644
53717 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c
53718 +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
53719 @@ -7252,6 +7252,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
53721         ioc_info(ioc, "sending diag reset !!\n");
53723 +       pci_cfg_access_lock(ioc->pdev);
53725         drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
53727         count = 0;
53728 @@ -7342,10 +7344,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
53729                 goto out;
53730         }
53732 +       pci_cfg_access_unlock(ioc->pdev);
53733         ioc_info(ioc, "diag reset: SUCCESS\n");
53734         return 0;
53736   out:
53737 +       pci_cfg_access_unlock(ioc->pdev);
53738         ioc_err(ioc, "diag reset: FAILED\n");
53739         return -EFAULT;
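Taking pci_cfg_access_lock() for the whole diag reset means both the success return and the error path must drop it, as the two added unlock calls above do. The pairing, in a self-contained sketch:

#include <stdio.h>

static void cfg_access_lock(void)   { printf("config space locked\n"); }
static void cfg_access_unlock(void) { printf("config space unlocked\n"); }

static int diag_reset(int fail)
{
        cfg_access_lock();      /* keep config cycles away during reset */

        if (fail)
                goto out;

        cfg_access_unlock();
        printf("diag reset: SUCCESS\n");
        return 0;

out:
        cfg_access_unlock();    /* the error path must unlock too */
        printf("diag reset: FAILED\n");
        return -14;             /* -EFAULT, mirroring the driver */
}

int main(void)
{
        diag_reset(0);
        diag_reset(1);
        return 0;
}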
53741 diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
53742 index 44f9a05db94e..2ec11be62a82 100644
53743 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
53744 +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
53745 @@ -2507,7 +2507,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
53746                     __func__, karg.unique_id);
53747                 return -EPERM;
53748         }
53749 -       memset(&karg.buffer_rel_condition, 0, sizeof(struct htb_rel_query));
53750 +       memset(&karg.rel_query, 0, sizeof(karg.rel_query));
53751         if ((ioc->diag_buffer_status[buffer_type] &
53752             MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
53753                 ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
53754 @@ -2520,8 +2520,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
53755                     __func__, buffer_type);
53756                 return -EPERM;
53757         }
53758 -       memcpy(&karg.buffer_rel_condition, &ioc->htb_rel,
53759 -           sizeof(struct  htb_rel_query));
53760 +       memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query));
53761  out:
53762         if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
53763                 ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
53764 diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
53765 index d2ccdafb8df2..8f6ffb40261c 100644
53766 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
53767 +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
53768 @@ -50,6 +50,8 @@
53769  #include <linux/miscdevice.h>
53770  #endif
53772 +#include "mpt3sas_base.h"
53774  #ifndef MPT2SAS_MINOR
53775  #define MPT2SAS_MINOR          (MPT_MINOR + 1)
53776  #endif
53777 @@ -436,19 +438,13 @@ struct mpt3_diag_read_buffer {
53778   * struct mpt3_addnl_diag_query - diagnostic buffer release reason
53779   * @hdr - generic header
53780   * @unique_id - unique id associated with this buffer.
53781 - * @buffer_rel_condition - Release condition ioctl/sysfs/reset
53782 - * @reserved1
53783 - * @trigger_type - Master/Event/scsi/MPI
53784 - * @trigger_info_dwords - Data Correspondig to trigger type
53785 + * @rel_query - release condition and trigger information.
53786   * @reserved2
53787   */
53788  struct mpt3_addnl_diag_query {
53789         struct mpt3_ioctl_header hdr;
53790         uint32_t unique_id;
53791 -       uint16_t buffer_rel_condition;
53792 -       uint16_t reserved1;
53793 -       uint32_t trigger_type;
53794 -       uint32_t trigger_info_dwords[2];
53795 +       struct htb_rel_query rel_query;
53796         uint32_t reserved2[2];
53797  };
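Replacing the four loose fields with an embedded struct htb_rel_query keeps the ioctl layout and the driver's internal copy in a single definition, so the memset/memcpy in the previous hunk can use sizeof on the member and stay correct if the struct ever grows. A standalone sketch of that refactor (field names abbreviated from the ones above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct htb_rel_query {                  /* layout shared with the core */
        uint16_t rel_condition;
        uint16_t reserved1;
        uint32_t trigger_type;
        uint32_t trigger_info_dwords[2];
};

struct addnl_diag_query {
        uint32_t unique_id;
        struct htb_rel_query rel_query; /* one embedded struct, not loose fields */
        uint32_t reserved2[2];
};

int main(void)
{
        struct addnl_diag_query karg;
        struct htb_rel_query src = { .rel_condition = 2, .trigger_type = 1 };

        /* sizeof on the member tracks the real type if it ever changes */
        memset(&karg.rel_query, 0, sizeof(karg.rel_query));
        memcpy(&karg.rel_query, &src, sizeof(karg.rel_query));

        printf("cond=%u type=%u\n",
               (unsigned)karg.rel_query.rel_condition,
               (unsigned)karg.rel_query.trigger_type);
        return 0;
}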
53799 diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
53800 index 6aa6de729187..ae1973878cc7 100644
53801 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
53802 +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
53803 @@ -6483,6 +6483,9 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
53804                 if (!vphy)
53805                         return NULL;
53807 +               if (!port->vphys_mask)
53808 +                       INIT_LIST_HEAD(&port->vphys_list);
53810                 /*
53811                  * Enable bit corresponding to HBA phy number on its
53812                  * parent hba_port object's vphys_mask field.
53813 @@ -6490,7 +6493,6 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
53814                 port->vphys_mask |= (1 << phy_num);
53815                 vphy->phy_mask |= (1 << phy_num);
53817 -               INIT_LIST_HEAD(&port->vphys_list);
53818                 list_add_tail(&vphy->list, &port->vphys_list);
53820                 ioc_info(ioc,
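Moving INIT_LIST_HEAD() under the !port->vphys_mask check means the list head is set up exactly once, before the first vphy is added; re-initializing it on every allocation would drop the vphys already queued. The same init-once rule in a tiny singly-linked sketch:

#include <stdio.h>

struct vphy { struct vphy *next; int phy; };

struct port {
        unsigned int vphys_mask;
        struct vphy *vphys_list;        /* head of a simple singly list */
};

static void add_vphy(struct port *p, int phy_num, struct vphy *v)
{
        /* Initialize the head only for the first vphy; doing it on
         * every call would lose the entries already queued. */
        if (!p->vphys_mask)
                p->vphys_list = NULL;

        p->vphys_mask |= 1u << phy_num;
        v->phy = phy_num;
        v->next = p->vphys_list;
        p->vphys_list = v;
}

int main(void)
{
        struct port port = { 0 };
        struct vphy a, b;

        add_vphy(&port, 0, &a);
        add_vphy(&port, 3, &b);         /* must not wipe entry 'a' */

        for (struct vphy *v = port.vphys_list; v; v = v->next)
                printf("vphy %d\n", v->phy);
        return 0;
}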
53821 diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
53822 index 31e5455d280c..1b1a57f46989 100644
53823 --- a/drivers/scsi/pm8001/pm8001_hwi.c
53824 +++ b/drivers/scsi/pm8001/pm8001_hwi.c
53825 @@ -643,7 +643,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
53826   */
53827  static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
53829 -       u8 i = 0;
53830 +       u32 i = 0;
53831         u16 deviceid;
53832         pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
53833         /* 8081 controllers need BAR shift to access MPI space
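The pm8001 one-liner widens the loop counter because a u8 wraps from 255 back to 0, so a loop bound above 255 can never be reached and the loop spins forever. The wrap, demonstrated directly (the bound value here is assumed for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t limit = 300;   /* a bound a queue count could plausibly hit */
        uint8_t i8 = 255;

        i8++;                   /* 255 + 1 wraps to 0: "i < 300" never ends */
        printf("u8 after 255+1: %u\n", i8);

        uint32_t n = 0;
        for (uint32_t i = 0; i < limit; i++)    /* u32 counter terminates */
                n++;
        printf("u32 counter finished %u iterations\n", n);
        return 0;
}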
53834 diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
53835 index 84315560e8e1..c6b0834e3806 100644
53836 --- a/drivers/scsi/pm8001/pm80xx_hwi.c
53837 +++ b/drivers/scsi/pm8001/pm80xx_hwi.c
53838 @@ -1502,9 +1502,9 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
53840         /* wait until Inbound DoorBell Clear Register toggled */
53841         if (IS_SPCV_12G(pm8001_ha->pdev)) {
53842 -               max_wait_count = 4 * 1000 * 1000;/* 4 sec */
53843 +               max_wait_count = 30 * 1000 * 1000; /* 30 sec */
53844         } else {
53845 -               max_wait_count = 2 * 1000 * 1000;/* 2 sec */
53846 +               max_wait_count = 15 * 1000 * 1000; /* 15 sec */
53847         }
53848         do {
53849                 udelay(1);
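The larger max_wait_count values simply give the udelay(1) polling loop a bigger microsecond budget (15 s / 30 s instead of 2 s / 4 s) before declaring the doorbell stuck. The loop shape, sketched with invented numbers:

#include <stdio.h>

/* Poll once per "microsecond" until the doorbell clears or the
 * budget runs out; the patch only enlarges the budget. */
static int wait_doorbell_clear(unsigned int budget_us, unsigned int clears_at_us)
{
        unsigned int waited = 0;

        do {
                /* udelay(1) analogue */
                waited++;
                if (waited >= clears_at_us)
                        return 0;               /* register toggled */
        } while (waited < budget_us);

        return -110;                            /* -ETIMEDOUT */
}

int main(void)
{
        printf("%d\n", wait_doorbell_clear(30u * 1000 * 1000, 5000));
        printf("%d\n", wait_doorbell_clear(100, 5000));
        return 0;
}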
53850 diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
53851 index cec27f2ef70d..e5076f09d5ed 100644
53852 --- a/drivers/scsi/qedf/qedf_main.c
53853 +++ b/drivers/scsi/qedf/qedf_main.c
53854 @@ -536,7 +536,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
53855         if (linkmode_intersects(link->supported_caps, sup_caps))
53856                 lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
53858 -       fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
53859 +       if (lport->host && lport->host->shost_data)
53860 +               fc_host_supported_speeds(lport->host) =
53861 +                       lport->link_supported_speeds;
53864  static void qedf_bw_update(void *dev)
53865 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
53866 index 63391c9be05d..3aa9869f6fae 100644
53867 --- a/drivers/scsi/qla2xxx/qla_attr.c
53868 +++ b/drivers/scsi/qla2xxx/qla_attr.c
53869 @@ -2864,6 +2864,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
53870         vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
53872         if (IS_FWI2_CAPABLE(ha)) {
53873 +               int rval;
53875                 stats = dma_alloc_coherent(&ha->pdev->dev,
53876                     sizeof(*stats), &stats_dma, GFP_KERNEL);
53877                 if (!stats) {
53878 @@ -2873,7 +2875,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
53879                 }
53881                 /* reset firmware statistics */
53882 -               qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
53883 +               rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
53884 +               if (rval != QLA_SUCCESS)
53885 +                       ql_log(ql_log_warn, vha, 0x70de,
53886 +                              "Resetting ISP statistics failed: rval = %d\n",
53887 +                              rval);
53889                 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
53890                     stats, stats_dma);
53891 diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
53892 index bee8cf9f8123..aef2f7cc89d3 100644
53893 --- a/drivers/scsi/qla2xxx/qla_bsg.c
53894 +++ b/drivers/scsi/qla2xxx/qla_bsg.c
53895 @@ -25,10 +25,11 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
53896         struct bsg_job *bsg_job = sp->u.bsg_job;
53897         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
53899 +       sp->free(sp);
53901         bsg_reply->result = res;
53902         bsg_job_done(bsg_job, bsg_reply->result,
53903                        bsg_reply->reply_payload_rcv_len);
53904 -       sp->free(sp);
53907  void qla2x00_bsg_sp_free(srb_t *sp)
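Moving sp->free(sp) ahead of bsg_job_done() closes a use-after-free window: completing the job hands it back to the block layer, which may recycle it immediately, so nothing belonging to the command can safely be freed afterwards. The ordering in miniature, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct job { int result; };

/* After this returns the job's owner may recycle it at once, so
 * nothing of ours may be freed later. */
static void job_done(struct job *j)
{
        printf("job done, result=%d\n", j->result);
}

int main(void)
{
        struct job *j = malloc(sizeof(*j));
        int *sp = malloc(sizeof(*sp));  /* per-command state, like srb_t */
        int res = 0;

        free(sp);               /* release our state first ... */
        j->result = res;
        job_done(j);            /* ... then signal completion last */
        free(j);                /* owned by the block layer in the driver */
        return 0;
}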
53908 @@ -2583,6 +2584,10 @@ qla2x00_get_host_stats(struct bsg_job *bsg_job)
53909         }
53911         data = kzalloc(response_len, GFP_KERNEL);
53912 +       if (!data) {
53913 +               kfree(req_data);
53914 +               return -ENOMEM;
53915 +       }
53917         ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
53918                                     data, response_len);
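The added NULL check also has to unwind the earlier allocation; bailing out without freeing req_data would leak it on every failed ioctl. A compact sketch of the two-step allocate/unwind:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int get_stats(size_t req_len, size_t resp_len)
{
        char *req_data = malloc(req_len);
        char *data;

        if (!req_data)
                return -ENOMEM;

        data = calloc(1, resp_len);     /* kzalloc analogue */
        if (!data) {
                free(req_data);         /* don't leak the request buffer */
                return -ENOMEM;
        }

        /* ... fill data, copy it out ... */
        free(data);
        free(req_data);
        return 0;
}

int main(void)
{
        printf("get_stats: %d\n", get_stats(64, 128));
        return 0;
}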
53919 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
53920 index f01f07116bd3..8cb0574cfa91 100644
53921 --- a/drivers/scsi/qla2xxx/qla_init.c
53922 +++ b/drivers/scsi/qla2xxx/qla_init.c
53923 @@ -1194,6 +1194,9 @@ static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
53925         struct qla_work_evt *e;
53927 +       if (vha->host->active_mode == MODE_TARGET)
53928 +               return QLA_FUNCTION_FAILED;
53930         e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
53931         if (!e)
53932                 return QLA_FUNCTION_FAILED;
53933 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
53934 index 5e188375c871..af4831c9edf9 100644
53935 --- a/drivers/scsi/qla2xxx/qla_isr.c
53936 +++ b/drivers/scsi/qla2xxx/qla_isr.c
53937 @@ -4005,11 +4005,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
53938         if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
53939                 /* user wants to control IRQ setting for target mode */
53940                 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
53941 -                   min((u16)ha->msix_count, (u16)num_online_cpus()),
53942 +                   min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
53943                     PCI_IRQ_MSIX);
53944         } else
53945                 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
53946 -                   min((u16)ha->msix_count, (u16)num_online_cpus()),
53947 +                   min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
53948                     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
53949                     &desc);
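The qla_isr change raises the MSI-X ceiling from num_online_cpus() to num_online_cpus() + min_vecs, which keeps the min_vecs non-affinity vectors (apparently the default and response-queue vectors) from eating into the per-CPU set. The arithmetic, as a sketch with assumed numbers:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int msix_count = 32;   /* what the HBA exposes */
        unsigned int online_cpus = 8;
        unsigned int min_vecs = 2;      /* reserved, non-affinity vectors */

        /* Budget the reserved vectors on top of the per-CPU ones. */
        unsigned int nvecs = min_u(msix_count, online_cpus + min_vecs);

        printf("allocating %u MSI-X vectors\n", nvecs);
        return 0;
}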
53951 diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
53952 index 0677295957bc..615e44af1ca6 100644
53953 --- a/drivers/scsi/qla2xxx/qla_nx.c
53954 +++ b/drivers/scsi/qla2xxx/qla_nx.c
53955 @@ -1063,7 +1063,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
53956                 return ret;
53957         }
53959 -       if (qla82xx_flash_set_write_enable(ha))
53960 +       ret = qla82xx_flash_set_write_enable(ha);
53961 +       if (ret < 0)
53962                 goto done_write;
53964         qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
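Capturing the return value, and testing it as a signed errno, both preserves the real failure code for the caller and avoids treating every nonzero value as an error. A sketch of that shape, names invented:

#include <stdio.h>

static int set_write_enable(int fail) { return fail ? -5 /* -EIO */ : 0; }

static int write_flash_dword(int fail)
{
        int ret = set_write_enable(fail);

        if (ret < 0)            /* the old code discarded ret entirely */
                goto done_write;
        /* ... program the word ... */
        ret = 0;
done_write:
        return ret;             /* caller sees the genuine errno */
}

int main(void)
{
        printf("%d %d\n", write_flash_dword(0), write_flash_dword(1));
        return 0;
}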
53965 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
53966 index 074392560f3d..0e07b98dfae8 100644
53967 --- a/drivers/scsi/qla2xxx/qla_os.c
53968 +++ b/drivers/scsi/qla2xxx/qla_os.c
53969 @@ -1013,8 +1013,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
53970         if (rval != QLA_SUCCESS) {
53971                 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
53972                     "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
53973 -               if (rval == QLA_INTERFACE_ERROR)
53974 -                       goto qc24_free_sp_fail_command;
53975                 goto qc24_host_busy_free_sp;
53976         }
53978 @@ -1026,11 +1024,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
53979  qc24_target_busy:
53980         return SCSI_MLQUEUE_TARGET_BUSY;
53982 -qc24_free_sp_fail_command:
53983 -       sp->free(sp);
53984 -       CMD_SP(cmd) = NULL;
53985 -       qla2xxx_rel_qpair_sp(sp->qpair, sp);
53987  qc24_fail_command:
53988         cmd->scsi_done(cmd);
53990 diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
53991 index a1dacb6e993e..c30f6047410f 100644
53992 --- a/drivers/scsi/smartpqi/smartpqi_init.c
53993 +++ b/drivers/scsi/smartpqi/smartpqi_init.c
53994 @@ -5488,6 +5488,8 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
53996                                 list_del(&io_request->request_list_entry);
53997                                 set_host_byte(scmd, DID_RESET);
53998 +                               pqi_free_io_request(io_request);
53999 +                               scsi_dma_unmap(scmd);
54000                                 pqi_scsi_done(scmd);
54001                         }
54003 @@ -5524,6 +5526,8 @@ static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
54005                                 list_del(&io_request->request_list_entry);
54006                                 set_host_byte(scmd, DID_RESET);
54007 +                               pqi_free_io_request(io_request);
54008 +                               scsi_dma_unmap(scmd);
54009                                 pqi_scsi_done(scmd);
54010                         }
54012 @@ -6598,6 +6602,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
54013         shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
54014         shost->unique_id = shost->irq;
54015         shost->nr_hw_queues = ctrl_info->num_queue_groups;
54016 +       shost->host_tagset = 1;
54017         shost->hostdata[0] = (unsigned long)ctrl_info;
54019         rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
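Setting shost->host_tagset = 1 tells the block layer that all hardware queues share one controller-wide tag space instead of each queue owning its own depth. A toy shared-tag allocator conveying the idea (not the blk-mq implementation):

#include <stdio.h>

#define HOST_TAGS 8

static unsigned long tag_bitmap;       /* one pool for the whole host */

static int get_tag(void)
{
        for (int t = 0; t < HOST_TAGS; t++)
                if (!(tag_bitmap & (1ul << t))) {
                        tag_bitmap |= 1ul << t;
                        return t;
                }
        return -1;      /* controller saturated, whichever queue asked */
}

int main(void)
{
        for (int q = 0; q < 3; q++)     /* three "hw queues" */
                printf("queue %d got tag %d\n", q, get_tag());
        return 0;
}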
54020 @@ -8216,6 +8221,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
54021                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54022                                0x152d, 0x8a37)
54023         },
54024 +       {
54025 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54026 +                              0x193d, 0x8460)
54027 +       },
54028         {
54029                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54030                                0x193d, 0x1104)
54031 @@ -8288,6 +8297,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
54032                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54033                                0x1bd4, 0x004f)
54034         },
54035 +       {
54036 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54037 +                              0x1bd4, 0x0051)
54038 +       },
54039 +       {
54040 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54041 +                              0x1bd4, 0x0052)
54042 +       },
54043 +       {
54044 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54045 +                              0x1bd4, 0x0053)
54046 +       },
54047 +       {
54048 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54049 +                              0x1bd4, 0x0054)
54050 +       },
54051         {
54052                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54053                                0x19e5, 0xd227)
54054 @@ -8448,6 +8473,122 @@ static const struct pci_device_id pqi_pci_id_table[] = {
54055                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54056                                PCI_VENDOR_ID_ADAPTEC2, 0x1380)
54057         },
54058 +       {
54059 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54060 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1400)
54061 +       },
54062 +       {
54063 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54064 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1402)
54065 +       },
54066 +       {
54067 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54068 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1410)
54069 +       },
54070 +       {
54071 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54072 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1411)
54073 +       },
54074 +       {
54075 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54076 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1412)
54077 +       },
54078 +       {
54079 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54080 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1420)
54081 +       },
54082 +       {
54083 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54084 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1430)
54085 +       },
54086 +       {
54087 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54088 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1440)
54089 +       },
54090 +       {
54091 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54092 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1441)
54093 +       },
54094 +       {
54095 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54096 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1450)
54097 +       },
54098 +       {
54099 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54100 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1452)
54101 +       },
54102 +       {
54103 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54104 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1460)
54105 +       },
54106 +       {
54107 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54108 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1461)
54109 +       },
54110 +       {
54111 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54112 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1462)
54113 +       },
54114 +       {
54115 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54116 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1470)
54117 +       },
54118 +       {
54119 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54120 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1471)
54121 +       },
54122 +       {
54123 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54124 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1472)
54125 +       },
54126 +       {
54127 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54128 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1480)
54129 +       },
54130 +       {
54131 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54132 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1490)
54133 +       },
54134 +       {
54135 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54136 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1491)
54137 +       },
54138 +       {
54139 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54140 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
54141 +       },
54142 +       {
54143 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54144 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
54145 +       },
54146 +       {
54147 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54148 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
54149 +       },
54150 +       {
54151 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54152 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
54153 +       },
54154 +       {
54155 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54156 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
54157 +       },
54158 +       {
54159 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54160 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
54161 +       },
54162 +       {
54163 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54164 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
54165 +       },
54166 +       {
54167 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54168 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
54169 +       },
54170 +       {
54171 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54172 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
54173 +       },
54174         {
54175                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54176                                PCI_VENDOR_ID_ADVANTECH, 0x8312)
54177 @@ -8512,6 +8653,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
54178                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54179                                PCI_VENDOR_ID_HP, 0x1001)
54180         },
54181 +       {
54182 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54183 +                              PCI_VENDOR_ID_HP, 0x1002)
54184 +       },
54185         {
54186                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54187                                PCI_VENDOR_ID_HP, 0x1100)
54188 @@ -8520,6 +8665,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
54189                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54190                                PCI_VENDOR_ID_HP, 0x1101)
54191         },
54192 +       {
54193 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54194 +                              0x1590, 0x0294)
54195 +       },
54196 +       {
54197 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54198 +                              0x1590, 0x02db)
54199 +       },
54200 +       {
54201 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54202 +                              0x1590, 0x02dc)
54203 +       },
54204 +       {
54205 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54206 +                              0x1590, 0x032e)
54207 +       },
54208         {
54209                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
54210                                0x1d8d, 0x0800)
54211 diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
54212 index 9e2e196bc202..97c6f81b1d2a 100644
54213 --- a/drivers/scsi/sni_53c710.c
54214 +++ b/drivers/scsi/sni_53c710.c
54215 @@ -58,6 +58,7 @@ static int snirm710_probe(struct platform_device *dev)
54216         struct NCR_700_Host_Parameters *hostdata;
54217         struct Scsi_Host *host;
54218         struct  resource *res;
54219 +       int rc;
54221         res = platform_get_resource(dev, IORESOURCE_MEM, 0);
54222         if (!res)
54223 @@ -83,7 +84,9 @@ static int snirm710_probe(struct platform_device *dev)
54224                 goto out_kfree;
54225         host->this_id = 7;
54226         host->base = base;
54227 -       host->irq = platform_get_irq(dev, 0);
54228 +       host->irq = rc = platform_get_irq(dev, 0);
54229 +       if (rc < 0)
54230 +               goto out_put_host;
54231         if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
54232                 printk(KERN_ERR "snirm710: request_irq failed!\n");
54233                 goto out_put_host;
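platform_get_irq() returns either the IRQ number or a negative errno, never 0, so the result must be checked through a signed variable before it reaches request_irq(); the same fix appears again in sun3x_esp below. The probe-side shape of that check, sketched with a stub:

#include <stdio.h>

/* Stub with platform_get_irq()'s contract: irq number or -errno. */
static int platform_get_irq_stub(int present)
{
        return present ? 39 : -6 /* -ENXIO */;
}

static int probe(int present)
{
        int irq, rc;

        irq = rc = platform_get_irq_stub(present);
        if (rc < 0)
                return rc;      /* bail out before request_irq() */

        printf("using irq %d\n", irq);
        return 0;
}

int main(void)
{
        printf("probe ok: %d\n", probe(1));
        printf("probe fail: %d\n", probe(0));
        return 0;
}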
54234 diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
54235 index 6dd0ff188bb4..aedf0b78f622 100644
54236 --- a/drivers/scsi/snic/snic_scsi.c
54237 +++ b/drivers/scsi/snic/snic_scsi.c
54238 @@ -2349,7 +2349,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
54240         /* Wait for all the IOs that are entered in Qcmd */
54241         while (atomic_read(&snic->ios_inflight))
54242 -               schedule_timeout(msecs_to_jiffies(1));
54243 +               schedule_msec_hrtimeout(1);
54245         ret = snic_issue_hba_reset(snic, sc);
54246         if (ret) {
54247 diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
54248 index 7de82f2c9757..d3489ac7ab28 100644
54249 --- a/drivers/scsi/sun3x_esp.c
54250 +++ b/drivers/scsi/sun3x_esp.c
54251 @@ -206,7 +206,9 @@ static int esp_sun3x_probe(struct platform_device *dev)
54252         if (!esp->command_block)
54253                 goto fail_unmap_regs_dma;
54255 -       host->irq = platform_get_irq(dev, 0);
54256 +       host->irq = err = platform_get_irq(dev, 0);
54257 +       if (err < 0)
54258 +               goto fail_unmap_command_block;
54259         err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
54260                           "SUN3X ESP", esp);
54261         if (err < 0)
54262 diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
54263 index 0aa58131e791..d0626773eb38 100644
54264 --- a/drivers/scsi/ufs/ufs-hisi.c
54265 +++ b/drivers/scsi/ufs/ufs-hisi.c
54266 @@ -467,21 +467,24 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
54267         host->hba = hba;
54268         ufshcd_set_variant(hba, host);
54270 -       host->rst  = devm_reset_control_get(dev, "rst");
54271 +       host->rst = devm_reset_control_get(dev, "rst");
54272         if (IS_ERR(host->rst)) {
54273                 dev_err(dev, "%s: failed to get reset control\n", __func__);
54274 -               return PTR_ERR(host->rst);
54275 +               err = PTR_ERR(host->rst);
54276 +               goto error;
54277         }
54279         ufs_hisi_set_pm_lvl(hba);
54281         err = ufs_hisi_get_resource(host);
54282 -       if (err) {
54283 -               ufshcd_set_variant(hba, NULL);
54284 -               return err;
54285 -       }
54286 +       if (err)
54287 +               goto error;
54289         return 0;
54291 +error:
54292 +       ufshcd_set_variant(hba, NULL);
54293 +       return err;
54296  static int ufs_hi3660_init(struct ufs_hba *hba)
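Folding both failure paths into one error: label guarantees ufshcd_set_variant(hba, NULL) runs no matter which step failed, instead of duplicating the cleanup at each early return. The goto-unwind idiom in brief, with stand-in helpers:

#include <stdio.h>

static int get_reset_control(int ok) { return ok ? 0 : -19; /* -ENODEV */ }
static int get_resource(int ok)      { return ok ? 0 : -12; /* -ENOMEM */ }

static void *variant;                   /* ufshcd_set_variant() analogue */

static int init_common(int have_rst, int have_res)
{
        static int host;
        int err;

        variant = &host;                /* bound early, like the driver */

        err = get_reset_control(have_rst);
        if (err)
                goto error;

        err = get_resource(have_res);
        if (err)
                goto error;

        return 0;

error:
        variant = NULL;                 /* one unwind point for all paths */
        return err;
}

int main(void)
{
        printf("%d %d\n", init_common(1, 1), init_common(0, 1));
        return 0;
}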
54297 diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
54298 index 1a69949a4ea1..b56d9b4e5f03 100644
54299 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c
54300 +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
54301 @@ -377,7 +377,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
54303         irq = platform_get_irq(pdev, 0);
54304         if (irq < 0) {
54305 -               err = -ENODEV;
54306 +               err = irq;
54307                 goto out;
54308         }
54310 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
54311 index d3d05e997c13..e1e510882ff4 100644
54312 --- a/drivers/scsi/ufs/ufshcd.c
54313 +++ b/drivers/scsi/ufs/ufshcd.c
54314 @@ -2849,7 +2849,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
54315   * ufshcd_exec_dev_cmd - API for sending device management requests
54316   * @hba: UFS hba
54317   * @cmd_type: specifies the type (NOP, Query...)
54318 - * @timeout: time in seconds
54319 + * @timeout: timeout in milliseconds
54320   *
54321   * NOTE: Since there is only one available tag for device management commands,
54322   * it is expected you hold the hba->dev_cmd.lock mutex.
54323 @@ -2879,6 +2879,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
54324         }
54325         tag = req->tag;
54326         WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
54327 +       /* Set the timeout such that the SCSI error handler is not activated. */
54328 +       req->timeout = msecs_to_jiffies(2 * timeout);
54329 +       blk_mq_start_request(req);
54331         init_completion(&wait);
54332         lrbp = &hba->lrb[tag];
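The kernel-doc correction and the added lines agree that timeout is in milliseconds: the block-layer deadline is set to twice the driver's own wait, converted to jiffies, so the SCSI error handler cannot fire for a command ufshcd is still legitimately waiting on. The conversion, sketched with an assumed HZ:

#include <stdio.h>

#define HZ 250                          /* assumed tick rate */

static unsigned long msecs_to_jiffies(unsigned int ms)
{
        return ((unsigned long)ms * HZ + 999) / 1000;   /* round up */
}

int main(void)
{
        unsigned int timeout_ms = 100;  /* the dev-cmd wait budget */

        /* Twice our own budget, so the block timeout (and with it the
         * SCSI error handler) cannot trigger while we still wait. */
        unsigned long req_timeout = msecs_to_jiffies(2 * timeout_ms);

        printf("req->timeout = %lu jiffies at HZ=%d\n", req_timeout, HZ);
        return 0;
}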
54333 @@ -8599,7 +8602,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
54334         } else if (!ufshcd_is_ufs_dev_active(hba)) {
54335                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
54336                 vcc_off = true;
54337 -               if (!ufshcd_is_link_active(hba)) {
54338 +               if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
54339                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
54340                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
54341                 }
54342 @@ -8621,7 +8624,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
54343             !hba->dev_info.is_lu_power_on_wp) {
54344                 ret = ufshcd_setup_vreg(hba, true);
54345         } else if (!ufshcd_is_ufs_dev_active(hba)) {
54346 -               if (!ret && !ufshcd_is_link_active(hba)) {
54347 +               if (!ufshcd_is_link_active(hba)) {
54348                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
54349                         if (ret)
54350                                 goto vcc_disable;
54351 @@ -8978,10 +8981,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
54352         if (!hba->is_powered)
54353                 return 0;
54355 +       cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
54357         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
54358              hba->curr_dev_pwr_mode) &&
54359             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
54360              hba->uic_link_state) &&
54361 +            pm_runtime_suspended(hba->dev) &&
54362              !hba->dev_info.b_rpm_dev_flush_capable)
54363                 goto out;
54365 diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
54366 index 20acac6342ef..5828f94b8a7d 100644
54367 --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
54368 +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
54369 @@ -95,8 +95,10 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
54370                         return -EINTR;
54371         }
54372         ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
54373 +       if (ret)
54374 +               return ret;
54376 -       return ret ? ret : copied;
54377 +       return copied;
54380  static __poll_t snoop_file_poll(struct file *file,
54381 diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h
54382 index 3e8ee5dabb43..654c717e5467 100644
54383 --- a/drivers/soc/mediatek/mt8173-pm-domains.h
54384 +++ b/drivers/soc/mediatek/mt8173-pm-domains.h
54385 @@ -12,24 +12,28 @@
54387  static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
54388         [MT8173_POWER_DOMAIN_VDEC] = {
54389 +               .name = "vdec",
54390                 .sta_mask = PWR_STATUS_VDEC,
54391                 .ctl_offs = SPM_VDE_PWR_CON,
54392                 .sram_pdn_bits = GENMASK(11, 8),
54393                 .sram_pdn_ack_bits = GENMASK(12, 12),
54394         },
54395         [MT8173_POWER_DOMAIN_VENC] = {
54396 +               .name = "venc",
54397                 .sta_mask = PWR_STATUS_VENC,
54398                 .ctl_offs = SPM_VEN_PWR_CON,
54399                 .sram_pdn_bits = GENMASK(11, 8),
54400                 .sram_pdn_ack_bits = GENMASK(15, 12),
54401         },
54402         [MT8173_POWER_DOMAIN_ISP] = {
54403 +               .name = "isp",
54404                 .sta_mask = PWR_STATUS_ISP,
54405                 .ctl_offs = SPM_ISP_PWR_CON,
54406                 .sram_pdn_bits = GENMASK(11, 8),
54407                 .sram_pdn_ack_bits = GENMASK(13, 12),
54408         },
54409         [MT8173_POWER_DOMAIN_MM] = {
54410 +               .name = "mm",
54411                 .sta_mask = PWR_STATUS_DISP,
54412                 .ctl_offs = SPM_DIS_PWR_CON,
54413                 .sram_pdn_bits = GENMASK(11, 8),
54414 @@ -40,18 +44,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
54415                 },
54416         },
54417         [MT8173_POWER_DOMAIN_VENC_LT] = {
54418 +               .name = "venc_lt",
54419                 .sta_mask = PWR_STATUS_VENC_LT,
54420                 .ctl_offs = SPM_VEN2_PWR_CON,
54421                 .sram_pdn_bits = GENMASK(11, 8),
54422                 .sram_pdn_ack_bits = GENMASK(15, 12),
54423         },
54424         [MT8173_POWER_DOMAIN_AUDIO] = {
54425 +               .name = "audio",
54426                 .sta_mask = PWR_STATUS_AUDIO,
54427                 .ctl_offs = SPM_AUDIO_PWR_CON,
54428                 .sram_pdn_bits = GENMASK(11, 8),
54429                 .sram_pdn_ack_bits = GENMASK(15, 12),
54430         },
54431         [MT8173_POWER_DOMAIN_USB] = {
54432 +               .name = "usb",
54433                 .sta_mask = PWR_STATUS_USB,
54434                 .ctl_offs = SPM_USB_PWR_CON,
54435                 .sram_pdn_bits = GENMASK(11, 8),
54436 @@ -59,18 +66,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
54437                 .caps = MTK_SCPD_ACTIVE_WAKEUP,
54438         },
54439         [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
54440 +               .name = "mfg_async",
54441                 .sta_mask = PWR_STATUS_MFG_ASYNC,
54442                 .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
54443                 .sram_pdn_bits = GENMASK(11, 8),
54444                 .sram_pdn_ack_bits = 0,
54445         },
54446         [MT8173_POWER_DOMAIN_MFG_2D] = {
54447 +               .name = "mfg_2d",
54448                 .sta_mask = PWR_STATUS_MFG_2D,
54449                 .ctl_offs = SPM_MFG_2D_PWR_CON,
54450                 .sram_pdn_bits = GENMASK(11, 8),
54451                 .sram_pdn_ack_bits = GENMASK(13, 12),
54452         },
54453         [MT8173_POWER_DOMAIN_MFG] = {
54454 +               .name = "mfg",
54455                 .sta_mask = PWR_STATUS_MFG,
54456                 .ctl_offs = SPM_MFG_PWR_CON,
54457                 .sram_pdn_bits = GENMASK(13, 8),
54458 diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
54459 index aa5230e6c12f..98a9940d05fb 100644
54460 --- a/drivers/soc/mediatek/mt8183-pm-domains.h
54461 +++ b/drivers/soc/mediatek/mt8183-pm-domains.h
54462 @@ -12,12 +12,14 @@
54464  static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54465         [MT8183_POWER_DOMAIN_AUDIO] = {
54466 +               .name = "audio",
54467                 .sta_mask = PWR_STATUS_AUDIO,
54468                 .ctl_offs = 0x0314,
54469                 .sram_pdn_bits = GENMASK(11, 8),
54470                 .sram_pdn_ack_bits = GENMASK(15, 12),
54471         },
54472         [MT8183_POWER_DOMAIN_CONN] = {
54473 +               .name = "conn",
54474                 .sta_mask = PWR_STATUS_CONN,
54475                 .ctl_offs = 0x032c,
54476                 .sram_pdn_bits = 0,
54477 @@ -28,12 +30,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54478                 },
54479         },
54480         [MT8183_POWER_DOMAIN_MFG_ASYNC] = {
54481 +               .name = "mfg_async",
54482                 .sta_mask = PWR_STATUS_MFG_ASYNC,
54483                 .ctl_offs = 0x0334,
54484                 .sram_pdn_bits = 0,
54485                 .sram_pdn_ack_bits = 0,
54486         },
54487         [MT8183_POWER_DOMAIN_MFG] = {
54488 +               .name = "mfg",
54489                 .sta_mask = PWR_STATUS_MFG,
54490                 .ctl_offs = 0x0338,
54491                 .sram_pdn_bits = GENMASK(8, 8),
54492 @@ -41,18 +45,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54493                 .caps = MTK_SCPD_DOMAIN_SUPPLY,
54494         },
54495         [MT8183_POWER_DOMAIN_MFG_CORE0] = {
54496 +               .name = "mfg_core0",
54497                 .sta_mask = BIT(7),
54498                 .ctl_offs = 0x034c,
54499                 .sram_pdn_bits = GENMASK(8, 8),
54500                 .sram_pdn_ack_bits = GENMASK(12, 12),
54501         },
54502         [MT8183_POWER_DOMAIN_MFG_CORE1] = {
54503 +               .name = "mfg_core1",
54504                 .sta_mask = BIT(20),
54505                 .ctl_offs = 0x0310,
54506                 .sram_pdn_bits = GENMASK(8, 8),
54507                 .sram_pdn_ack_bits = GENMASK(12, 12),
54508         },
54509         [MT8183_POWER_DOMAIN_MFG_2D] = {
54510 +               .name = "mfg_2d",
54511                 .sta_mask = PWR_STATUS_MFG_2D,
54512                 .ctl_offs = 0x0348,
54513                 .sram_pdn_bits = GENMASK(8, 8),
54514 @@ -65,6 +72,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54515                 },
54516         },
54517         [MT8183_POWER_DOMAIN_DISP] = {
54518 +               .name = "disp",
54519                 .sta_mask = PWR_STATUS_DISP,
54520                 .ctl_offs = 0x030c,
54521                 .sram_pdn_bits = GENMASK(8, 8),
54522 @@ -83,6 +91,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54523                 },
54524         },
54525         [MT8183_POWER_DOMAIN_CAM] = {
54526 +               .name = "cam",
54527                 .sta_mask = BIT(25),
54528                 .ctl_offs = 0x0344,
54529                 .sram_pdn_bits = GENMASK(9, 8),
54530 @@ -105,6 +114,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54531                 },
54532         },
54533         [MT8183_POWER_DOMAIN_ISP] = {
54534 +               .name = "isp",
54535                 .sta_mask = PWR_STATUS_ISP,
54536                 .ctl_offs = 0x0308,
54537                 .sram_pdn_bits = GENMASK(9, 8),
54538 @@ -127,6 +137,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54539                 },
54540         },
54541         [MT8183_POWER_DOMAIN_VDEC] = {
54542 +               .name = "vdec",
54543                 .sta_mask = BIT(31),
54544                 .ctl_offs = 0x0300,
54545                 .sram_pdn_bits = GENMASK(8, 8),
54546 @@ -139,6 +150,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54547                 },
54548         },
54549         [MT8183_POWER_DOMAIN_VENC] = {
54550 +               .name = "venc",
54551                 .sta_mask = PWR_STATUS_VENC,
54552                 .ctl_offs = 0x0304,
54553                 .sram_pdn_bits = GENMASK(11, 8),
54554 @@ -151,6 +163,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54555                 },
54556         },
54557         [MT8183_POWER_DOMAIN_VPU_TOP] = {
54558 +               .name = "vpu_top",
54559                 .sta_mask = BIT(26),
54560                 .ctl_offs = 0x0324,
54561                 .sram_pdn_bits = GENMASK(8, 8),
54562 @@ -177,6 +190,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54563                 },
54564         },
54565         [MT8183_POWER_DOMAIN_VPU_CORE0] = {
54566 +               .name = "vpu_core0",
54567                 .sta_mask = BIT(27),
54568                 .ctl_offs = 0x33c,
54569                 .sram_pdn_bits = GENMASK(11, 8),
54570 @@ -194,6 +208,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
54571                 .caps = MTK_SCPD_SRAM_ISO,
54572         },
54573         [MT8183_POWER_DOMAIN_VPU_CORE1] = {
54574 +               .name = "vpu_core1",
54575                 .sta_mask = BIT(28),
54576                 .ctl_offs = 0x0340,
54577                 .sram_pdn_bits = GENMASK(11, 8),
54578 diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
54579 index 0fdf6dc6231f..543dda70de01 100644
54580 --- a/drivers/soc/mediatek/mt8192-pm-domains.h
54581 +++ b/drivers/soc/mediatek/mt8192-pm-domains.h
54582 @@ -12,6 +12,7 @@
54584  static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54585         [MT8192_POWER_DOMAIN_AUDIO] = {
54586 +               .name = "audio",
54587                 .sta_mask = BIT(21),
54588                 .ctl_offs = 0x0354,
54589                 .sram_pdn_bits = GENMASK(8, 8),
54590 @@ -24,6 +25,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54591                 },
54592         },
54593         [MT8192_POWER_DOMAIN_CONN] = {
54594 +               .name = "conn",
54595                 .sta_mask = PWR_STATUS_CONN,
54596                 .ctl_offs = 0x0304,
54597                 .sram_pdn_bits = 0,
54598 @@ -45,12 +47,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54599                 .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
54600         },
54601         [MT8192_POWER_DOMAIN_MFG0] = {
54602 +               .name = "mfg0",
54603                 .sta_mask = BIT(2),
54604                 .ctl_offs = 0x0308,
54605                 .sram_pdn_bits = GENMASK(8, 8),
54606                 .sram_pdn_ack_bits = GENMASK(12, 12),
54607         },
54608         [MT8192_POWER_DOMAIN_MFG1] = {
54609 +               .name = "mfg1",
54610                 .sta_mask = BIT(3),
54611                 .ctl_offs = 0x030c,
54612                 .sram_pdn_bits = GENMASK(8, 8),
54613 @@ -75,36 +79,42 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54614                 },
54615         },
54616         [MT8192_POWER_DOMAIN_MFG2] = {
54617 +               .name = "mfg2",
54618                 .sta_mask = BIT(4),
54619                 .ctl_offs = 0x0310,
54620                 .sram_pdn_bits = GENMASK(8, 8),
54621                 .sram_pdn_ack_bits = GENMASK(12, 12),
54622         },
54623         [MT8192_POWER_DOMAIN_MFG3] = {
54624 +               .name = "mfg3",
54625                 .sta_mask = BIT(5),
54626                 .ctl_offs = 0x0314,
54627                 .sram_pdn_bits = GENMASK(8, 8),
54628                 .sram_pdn_ack_bits = GENMASK(12, 12),
54629         },
54630         [MT8192_POWER_DOMAIN_MFG4] = {
54631 +               .name = "mfg4",
54632                 .sta_mask = BIT(6),
54633                 .ctl_offs = 0x0318,
54634                 .sram_pdn_bits = GENMASK(8, 8),
54635                 .sram_pdn_ack_bits = GENMASK(12, 12),
54636         },
54637         [MT8192_POWER_DOMAIN_MFG5] = {
54638 +               .name = "mfg5",
54639                 .sta_mask = BIT(7),
54640                 .ctl_offs = 0x031c,
54641                 .sram_pdn_bits = GENMASK(8, 8),
54642                 .sram_pdn_ack_bits = GENMASK(12, 12),
54643         },
54644         [MT8192_POWER_DOMAIN_MFG6] = {
54645 +               .name = "mfg6",
54646                 .sta_mask = BIT(8),
54647                 .ctl_offs = 0x0320,
54648                 .sram_pdn_bits = GENMASK(8, 8),
54649                 .sram_pdn_ack_bits = GENMASK(12, 12),
54650         },
54651         [MT8192_POWER_DOMAIN_DISP] = {
54652 +               .name = "disp",
54653                 .sta_mask = BIT(20),
54654                 .ctl_offs = 0x0350,
54655                 .sram_pdn_bits = GENMASK(8, 8),
54656 @@ -133,6 +143,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54657                 },
54658         },
54659         [MT8192_POWER_DOMAIN_IPE] = {
54660 +               .name = "ipe",
54661                 .sta_mask = BIT(14),
54662                 .ctl_offs = 0x0338,
54663                 .sram_pdn_bits = GENMASK(8, 8),
54664 @@ -149,6 +160,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54665                 },
54666         },
54667         [MT8192_POWER_DOMAIN_ISP] = {
54668 +               .name = "isp",
54669                 .sta_mask = BIT(12),
54670                 .ctl_offs = 0x0330,
54671                 .sram_pdn_bits = GENMASK(8, 8),
54672 @@ -165,6 +177,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54673                 },
54674         },
54675         [MT8192_POWER_DOMAIN_ISP2] = {
54676 +               .name = "isp2",
54677                 .sta_mask = BIT(13),
54678                 .ctl_offs = 0x0334,
54679                 .sram_pdn_bits = GENMASK(8, 8),
54680 @@ -181,6 +194,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54681                 },
54682         },
54683         [MT8192_POWER_DOMAIN_MDP] = {
54684 +               .name = "mdp",
54685                 .sta_mask = BIT(19),
54686                 .ctl_offs = 0x034c,
54687                 .sram_pdn_bits = GENMASK(8, 8),
54688 @@ -197,6 +211,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54689                 },
54690         },
54691         [MT8192_POWER_DOMAIN_VENC] = {
54692 +               .name = "venc",
54693                 .sta_mask = BIT(17),
54694                 .ctl_offs = 0x0344,
54695                 .sram_pdn_bits = GENMASK(8, 8),
54696 @@ -213,6 +228,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54697                 },
54698         },
54699         [MT8192_POWER_DOMAIN_VDEC] = {
54700 +               .name = "vdec",
54701                 .sta_mask = BIT(15),
54702                 .ctl_offs = 0x033c,
54703                 .sram_pdn_bits = GENMASK(8, 8),
54704 @@ -229,12 +245,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54705                 },
54706         },
54707         [MT8192_POWER_DOMAIN_VDEC2] = {
54708 +               .name = "vdec2",
54709                 .sta_mask = BIT(16),
54710                 .ctl_offs = 0x0340,
54711                 .sram_pdn_bits = GENMASK(8, 8),
54712                 .sram_pdn_ack_bits = GENMASK(12, 12),
54713         },
54714         [MT8192_POWER_DOMAIN_CAM] = {
54715 +               .name = "cam",
54716                 .sta_mask = BIT(23),
54717                 .ctl_offs = 0x035c,
54718                 .sram_pdn_bits = GENMASK(8, 8),
54719 @@ -263,18 +281,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
54720                 },
54721         },
54722         [MT8192_POWER_DOMAIN_CAM_RAWA] = {
54723 +               .name = "cam_rawa",
54724                 .sta_mask = BIT(24),
54725                 .ctl_offs = 0x0360,
54726                 .sram_pdn_bits = GENMASK(8, 8),
54727                 .sram_pdn_ack_bits = GENMASK(12, 12),
54728         },
54729         [MT8192_POWER_DOMAIN_CAM_RAWB] = {
54730 +               .name = "cam_rawb",
54731                 .sta_mask = BIT(25),
54732                 .ctl_offs = 0x0364,
54733                 .sram_pdn_bits = GENMASK(8, 8),
54734                 .sram_pdn_ack_bits = GENMASK(12, 12),
54735         },
54736         [MT8192_POWER_DOMAIN_CAM_RAWC] = {
54737 +               .name = "cam_rawc",
54738                 .sta_mask = BIT(26),
54739                 .ctl_offs = 0x0368,
54740                 .sram_pdn_bits = GENMASK(8, 8),
54741 diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
54742 index b7f697666bdd..0af00efa0ef8 100644
54743 --- a/drivers/soc/mediatek/mtk-pm-domains.c
54744 +++ b/drivers/soc/mediatek/mtk-pm-domains.c
54745 @@ -438,7 +438,11 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
54746                 goto err_unprepare_subsys_clocks;
54747         }
54749 -       pd->genpd.name = node->name;
54750 +       if (!pd->data->name)
54751 +               pd->genpd.name = node->name;
54752 +       else
54753 +               pd->genpd.name = pd->data->name;
54755         pd->genpd.power_off = scpsys_power_off;
54756         pd->genpd.power_on = scpsys_power_on;
54758 @@ -487,8 +491,9 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
54760                 child_pd = scpsys_add_one_domain(scpsys, child);
54761                 if (IS_ERR(child_pd)) {
54762 -                       dev_err_probe(scpsys->dev, PTR_ERR(child_pd),
54763 -                                     "%pOF: failed to get child domain id\n", child);
54764 +                       ret = PTR_ERR(child_pd);
54765 +                       dev_err_probe(scpsys->dev, ret, "%pOF: failed to get child domain id\n",
54766 +                                     child);
54767                         goto err_put_node;
54768                 }
54770 diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
54771 index 141dc76054e6..21a4e113bbec 100644
54772 --- a/drivers/soc/mediatek/mtk-pm-domains.h
54773 +++ b/drivers/soc/mediatek/mtk-pm-domains.h
54774 @@ -76,6 +76,7 @@ struct scpsys_bus_prot_data {
54776  /**
54777   * struct scpsys_domain_data - scp domain data for power on/off flow
54778 + * @name: The name of the power domain.
54779   * @sta_mask: The mask for power on/off status bit.
54780   * @ctl_offs: The offset for main power control register.
54781   * @sram_pdn_bits: The mask for sram power control bits.
54782 @@ -85,6 +86,7 @@ struct scpsys_bus_prot_data {
54783   * @bp_smi: bus protection for smi subsystem
54784   */
54785  struct scpsys_domain_data {
54786 +       const char *name;
54787         u32 sta_mask;
54788         int ctl_offs;
54789         u32 sram_pdn_bits;
54790 diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
54791 index 24cd193dec55..eba7f76f9d61 100644
54792 --- a/drivers/soc/qcom/mdt_loader.c
54793 +++ b/drivers/soc/qcom/mdt_loader.c
54794 @@ -230,6 +230,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
54795                         break;
54796                 }
54798 +               if (phdr->p_filesz > phdr->p_memsz) {
54799 +                       dev_err(dev,
54800 +                               "refusing to load segment %d with p_filesz > p_memsz\n",
54801 +                               i);
54802 +                       ret = -EINVAL;
54803 +                       break;
54804 +               }
54806                 ptr = mem_region + offset;
54808                 if (phdr->p_filesz && phdr->p_offset < fw->size) {
54809 @@ -253,6 +261,15 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
54810                                 break;
54811                         }
54813 +                       if (seg_fw->size != phdr->p_filesz) {
54814 +                               dev_err(dev,
54815 +                                       "failed to load segment %d from truncated file %s\n",
54816 +                                       i, fw_name);
54817 +                               release_firmware(seg_fw);
54818 +                               ret = -EINVAL;
54819 +                               break;
54820 +                       }
54822                         release_firmware(seg_fw);
54823                 }
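
/*
 * Note on the two hunks above: they harden __qcom_mdt_load() against
 * malformed firmware images. A minimal sketch of the same checks in
 * isolation, assuming standard ELF program-header semantics (the
 * helper name is hypothetical, not the driver's API):
 */

#include <linux/errno.h>
#include <linux/types.h>

static int mdt_segment_sane(u64 p_filesz, u64 p_memsz, u64 loaded)
{
        /* a segment may be zero-padded up to p_memsz, never larger */
        if (p_filesz > p_memsz)
                return -EINVAL;

        /* the bytes actually read must match what the header promised */
        if (loaded != p_filesz)
                return -EINVAL;

        return 0;
}
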
54825 diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
54826 index 209dcdca923f..915d5bc3d46e 100644
54827 --- a/drivers/soc/qcom/pdr_interface.c
54828 +++ b/drivers/soc/qcom/pdr_interface.c
54829 @@ -153,7 +153,7 @@ static int pdr_register_listener(struct pdr_handle *pdr,
54830         if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
54831                 pr_err("PDR: %s register listener failed: 0x%x\n",
54832                        pds->service_path, resp.resp.error);
54833 -               return ret;
54834 +               return -EREMOTEIO;
54835         }
54837         pds->state = resp.curr_state;
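
/*
 * Note on the fix above: by this point the earlier qmi_txn_wait() has
 * returned 0, so "return ret" reported success even though the remote
 * rejected the listener registration. The corrected shape:
 */

ret = qmi_txn_wait(&txn, 5 * HZ);       /* 0 here: transport succeeded */
if (ret < 0)
        return ret;
if (resp.resp.result != QMI_RESULT_SUCCESS_V01)
        return -EREMOTEIO;              /* remote failure gets its own errno */
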
54838 diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
54839 index df9a5ca8c99c..0118bd986f90 100644
54840 --- a/drivers/soc/tegra/pmc.c
54841 +++ b/drivers/soc/tegra/pmc.c
54842 @@ -317,6 +317,8 @@ struct tegra_pmc_soc {
54843                                    bool invert);
54844         int (*irq_set_wake)(struct irq_data *data, unsigned int on);
54845         int (*irq_set_type)(struct irq_data *data, unsigned int type);
54846 +       int (*powergate_set)(struct tegra_pmc *pmc, unsigned int id,
54847 +                            bool new_state);
54849         const char * const *reset_sources;
54850         unsigned int num_reset_sources;
54851 @@ -517,6 +519,63 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
54852         return -ENODEV;
54855 +static int tegra20_powergate_set(struct tegra_pmc *pmc, unsigned int id,
54856 +                                bool new_state)
54858 +       unsigned int retries = 100;
54859 +       bool status;
54860 +       int ret;
54862 +       /*
54863 +        * As per TRM documentation, the toggle command will be dropped by PMC
54864 +        * if there is contention with a HW-initiated toggling (i.e. CPU core
54865 +        * power-gated), the command should be retried in that case.
54866 +        */
54867 +       do {
54868 +               tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
54870 +               /* wait for PMC to execute the command */
54871 +               ret = readx_poll_timeout(tegra_powergate_state, id, status,
54872 +                                        status == new_state, 1, 10);
54873 +       } while (ret == -ETIMEDOUT && retries--);
54875 +       return ret;
54878 +static inline bool tegra_powergate_toggle_ready(struct tegra_pmc *pmc)
54880 +       return !(tegra_pmc_readl(pmc, PWRGATE_TOGGLE) & PWRGATE_TOGGLE_START);
54883 +static int tegra114_powergate_set(struct tegra_pmc *pmc, unsigned int id,
54884 +                                 bool new_state)
54886 +       bool status;
54887 +       int err;
54889 +       /* wait while PMC power gating is contended */
54890 +       err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
54891 +                                status == true, 1, 100);
54892 +       if (err)
54893 +               return err;
54895 +       tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
54897 +       /* wait for PMC to accept the command */
54898 +       err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
54899 +                                status == true, 1, 100);
54900 +       if (err)
54901 +               return err;
54903 +       /* wait for PMC to execute the command */
54904 +       err = readx_poll_timeout(tegra_powergate_state, id, status,
54905 +                                status == new_state, 10, 100000);
54906 +       if (err)
54907 +               return err;
54909 +       return 0;
54912  /**
54913   * tegra_powergate_set() - set the state of a partition
54914   * @pmc: power management controller
54915 @@ -526,7 +585,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
54916  static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
54917                                bool new_state)
54919 -       bool status;
54920         int err;
54922         if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
54923 @@ -539,10 +597,7 @@ static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
54924                 return 0;
54925         }
54927 -       tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
54929 -       err = readx_poll_timeout(tegra_powergate_state, id, status,
54930 -                                status == new_state, 10, 100000);
54931 +       err = pmc->soc->powergate_set(pmc, id, new_state);
54933         mutex_unlock(&pmc->powergates_lock);
54935 @@ -2699,6 +2754,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
54936         .regs = &tegra20_pmc_regs,
54937         .init = tegra20_pmc_init,
54938         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
54939 +       .powergate_set = tegra20_powergate_set,
54940         .reset_sources = NULL,
54941         .num_reset_sources = 0,
54942         .reset_levels = NULL,
54943 @@ -2757,6 +2813,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
54944         .regs = &tegra20_pmc_regs,
54945         .init = tegra20_pmc_init,
54946         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
54947 +       .powergate_set = tegra20_powergate_set,
54948         .reset_sources = tegra30_reset_sources,
54949         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
54950         .reset_levels = NULL,
54951 @@ -2811,6 +2868,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
54952         .regs = &tegra20_pmc_regs,
54953         .init = tegra20_pmc_init,
54954         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
54955 +       .powergate_set = tegra114_powergate_set,
54956         .reset_sources = tegra30_reset_sources,
54957         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
54958         .reset_levels = NULL,
54959 @@ -2925,6 +2983,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
54960         .regs = &tegra20_pmc_regs,
54961         .init = tegra20_pmc_init,
54962         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
54963 +       .powergate_set = tegra114_powergate_set,
54964         .reset_sources = tegra30_reset_sources,
54965         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
54966         .reset_levels = NULL,
54967 @@ -3048,6 +3107,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
54968         .regs = &tegra20_pmc_regs,
54969         .init = tegra20_pmc_init,
54970         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
54971 +       .powergate_set = tegra114_powergate_set,
54972         .irq_set_wake = tegra210_pmc_irq_set_wake,
54973         .irq_set_type = tegra210_pmc_irq_set_type,
54974         .reset_sources = tegra210_reset_sources,
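
/*
 * Note: both new callbacks above poll through readx_poll_timeout()
 * from <linux/iopoll.h>. A simplified sketch of what
 * readx_poll_timeout(op, args, val, cond, sleep_us, timeout_us)
 * expands to (the real macro has a few more details):
 */

timeout = ktime_add_us(ktime_get(), timeout_us);
for (;;) {
        val = op(args);                 /* e.g. tegra_powergate_state(id) */
        if (cond)                       /* e.g. val == new_state */
                break;
        if (ktime_compare(ktime_get(), timeout) > 0) {
                val = op(args);         /* one last read after timing out */
                break;
        }
        usleep_range(sleep_us / 2, sleep_us);
}
return cond ? 0 : -ETIMEDOUT;

/*
 * With a 10us budget per attempt, tegra20_powergate_set() retries up
 * to 100 times because the PMC silently drops a toggle that races
 * with a hardware-initiated one.
 */
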
54975 diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
54976 index 7f21f31de09d..0e776b20f625 100644
54977 --- a/drivers/soc/tegra/regulators-tegra30.c
54978 +++ b/drivers/soc/tegra/regulators-tegra30.c
54979 @@ -178,7 +178,7 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
54980          * survive the voltage drop if it's running on a higher frequency.
54981          */
54982         if (!cpu_min_uV_consumers)
54983 -               cpu_min_uV = cpu_uV;
54984 +               cpu_min_uV = max(cpu_uV, cpu_min_uV);
54986         /*
54987          * Bootloader shall set up voltages correctly, but if it
54988 diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
54989 index 46885429928a..4ec29338ce9a 100644
54990 --- a/drivers/soundwire/bus.c
54991 +++ b/drivers/soundwire/bus.c
54992 @@ -705,7 +705,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
54993         struct sdw_slave *slave, *_s;
54994         struct sdw_slave_id id;
54995         struct sdw_msg msg;
54996 -       bool found = false;
54997 +       bool found;
54998         int count = 0, ret;
54999         u64 addr;
55001 @@ -737,6 +737,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
55003                 sdw_extract_slave_id(bus, addr, &id);
55005 +               found = false;
55006                 /* Now compare with entries */
55007                 list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
55008                         if (sdw_compare_devid(slave, id) == 0) {
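
/*
 * Note on the hunks above: "found" was previously initialised once,
 * outside the loop that walks each newly attached device, so a match
 * for one device left the flag set for the next and unknown devices
 * went unreported. Reduced to its essentials (outer loop hypothetical):
 */

bool found;                             /* was: bool found = false; once */

while (another_device_reported()) {
        found = false;                  /* the fix: reset per device */
        list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
                if (sdw_compare_devid(slave, id) == 0)
                        found = true;
        }
        if (!found)
                dev_warn(bus->dev, "attached device not in slave list\n");
}
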
55009 diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
55010 index d05442e646a3..57c59a33ce61 100644
55011 --- a/drivers/soundwire/cadence_master.c
55012 +++ b/drivers/soundwire/cadence_master.c
55013 @@ -1450,10 +1450,12 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
55014         }
55016         /* Prepare slaves for clock stop */
55017 -       ret = sdw_bus_prep_clk_stop(&cdns->bus);
55018 -       if (ret < 0) {
55019 -               dev_err(cdns->dev, "prepare clock stop failed %d", ret);
55020 -               return ret;
55021 +       if (slave_present) {
55022 +               ret = sdw_bus_prep_clk_stop(&cdns->bus);
55023 +               if (ret < 0 && ret != -ENODATA) {
55024 +                       dev_err(cdns->dev, "prepare clock stop failed %d\n", ret);
55025 +                       return ret;
55026 +               }
55027         }
55029         /*
55030 diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
55031 index 1099b5d1262b..a418c3c7001c 100644
55032 --- a/drivers/soundwire/stream.c
55033 +++ b/drivers/soundwire/stream.c
55034 @@ -1375,8 +1375,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
55035         }
55037         ret = sdw_config_stream(&slave->dev, stream, stream_config, true);
55038 -       if (ret)
55039 +       if (ret) {
55040 +               /*
55041 +                * sdw_release_master_stream() will release any s_rt on slave_rt_list
55042 +                * in the stream_error case, but s_rt is only added to slave_rt_list
55043 +                * once sdw_config_stream() succeeds, so free s_rt explicitly when
55044 +                * sdw_config_stream() fails.
55045 +                */
55046 +               kfree(s_rt);
55047                 goto stream_error;
55048 +       }
55050         list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list);
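
/*
 * Note: the comment above encodes the usual ownership rule for error
 * paths - free only what the unwind path will not. In outline
 * (allocator name hypothetical):
 */

s_rt = sdw_slave_rt_alloc();            /* local: the caller owns it */
ret = sdw_config_stream(&slave->dev, stream, stream_config, true);
if (ret) {
        kfree(s_rt);                    /* not on slave_rt_list yet */
        goto stream_error;              /* unwind frees only list members */
}
list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list); /* list owns it */
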
55052 diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
55053 index eb9a243e9526..98ace748cd98 100644
55054 --- a/drivers/spi/spi-ath79.c
55055 +++ b/drivers/spi/spi-ath79.c
55056 @@ -156,8 +156,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
55058         master->use_gpio_descriptors = true;
55059         master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
55060 -       master->setup = spi_bitbang_setup;
55061 -       master->cleanup = spi_bitbang_cleanup;
55062 +       master->flags = SPI_MASTER_GPIO_SS;
55063         if (pdata) {
55064                 master->bus_num = pdata->bus_num;
55065                 master->num_chipselect = pdata->num_chipselect;
55066 diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
55067 index 75b33d7d14b0..9a4d942fafcf 100644
55068 --- a/drivers/spi/spi-dln2.c
55069 +++ b/drivers/spi/spi-dln2.c
55070 @@ -780,7 +780,7 @@ static int dln2_spi_probe(struct platform_device *pdev)
55072  static int dln2_spi_remove(struct platform_device *pdev)
55074 -       struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
55075 +       struct spi_master *master = platform_get_drvdata(pdev);
55076         struct dln2_spi *dln2 = spi_master_get_devdata(master);
55078         pm_runtime_disable(&pdev->dev);
55079 diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
55080 index a2886ee44e4c..5d98611dd999 100644
55081 --- a/drivers/spi/spi-fsl-lpspi.c
55082 +++ b/drivers/spi/spi-fsl-lpspi.c
55083 @@ -200,7 +200,7 @@ static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
55084                                 spi_controller_get_devdata(controller);
55085         int ret;
55087 -       ret = pm_runtime_get_sync(fsl_lpspi->dev);
55088 +       ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
55089         if (ret < 0) {
55090                 dev_err(fsl_lpspi->dev, "failed to enable clock\n");
55091                 return ret;
55092 diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
55093 index e4a8d203f940..d0e5aa18b7ba 100644
55094 --- a/drivers/spi/spi-fsl-spi.c
55095 +++ b/drivers/spi/spi-fsl-spi.c
55096 @@ -707,6 +707,11 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
55097         struct resource mem;
55098         int irq, type;
55099         int ret;
55100 +       bool spisel_boot = false;
55101 +#if IS_ENABLED(CONFIG_FSL_SOC)
55102 +       struct mpc8xxx_spi_probe_info *pinfo = NULL;
55103 +#endif
55106         ret = of_mpc8xxx_spi_probe(ofdev);
55107         if (ret)
55108 @@ -715,9 +720,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
55109         type = fsl_spi_get_type(&ofdev->dev);
55110         if (type == TYPE_FSL) {
55111                 struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
55112 -               bool spisel_boot = false;
55113  #if IS_ENABLED(CONFIG_FSL_SOC)
55114 -               struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
55115 +               pinfo = to_of_pinfo(pdata);
55117                 spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
55118                 if (spisel_boot) {
55119 @@ -746,15 +750,24 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
55121         ret = of_address_to_resource(np, 0, &mem);
55122         if (ret)
55123 -               return ret;
55124 +               goto unmap_out;
55126         irq = platform_get_irq(ofdev, 0);
55127 -       if (irq < 0)
55128 -               return irq;
55129 +       if (irq < 0) {
55130 +               ret = irq;
55131 +               goto unmap_out;
55132 +       }
55134         master = fsl_spi_probe(dev, &mem, irq);
55136         return PTR_ERR_OR_ZERO(master);
55138 +unmap_out:
55139 +#if IS_ENABLED(CONFIG_FSL_SOC)
55140 +       if (spisel_boot)
55141 +               iounmap(pinfo->immr_spi_cs);
55142 +#endif
55143 +       return ret;
55146  static int of_fsl_spi_remove(struct platform_device *ofdev)
55147 diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
55148 index 36a4922a134a..ccd817ee4917 100644
55149 --- a/drivers/spi/spi-omap-100k.c
55150 +++ b/drivers/spi/spi-omap-100k.c
55151 @@ -424,7 +424,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
55153  static int omap1_spi100k_remove(struct platform_device *pdev)
55155 -       struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
55156 +       struct spi_master *master = platform_get_drvdata(pdev);
55157         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
55159         pm_runtime_disable(&pdev->dev);
55160 @@ -438,7 +438,7 @@ static int omap1_spi100k_remove(struct platform_device *pdev)
55161  #ifdef CONFIG_PM
55162  static int omap1_spi100k_runtime_suspend(struct device *dev)
55164 -       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
55165 +       struct spi_master *master = dev_get_drvdata(dev);
55166         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
55168         clk_disable_unprepare(spi100k->ick);
55169 @@ -449,7 +449,7 @@ static int omap1_spi100k_runtime_suspend(struct device *dev)
55171  static int omap1_spi100k_runtime_resume(struct device *dev)
55173 -       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
55174 +       struct spi_master *master = dev_get_drvdata(dev);
55175         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
55176         int ret;
55178 diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
55179 index 8dcb2e70735c..d39dec6d1c91 100644
55180 --- a/drivers/spi/spi-qup.c
55181 +++ b/drivers/spi/spi-qup.c
55182 @@ -1263,7 +1263,7 @@ static int spi_qup_remove(struct platform_device *pdev)
55183         struct spi_qup *controller = spi_master_get_devdata(master);
55184         int ret;
55186 -       ret = pm_runtime_get_sync(&pdev->dev);
55187 +       ret = pm_runtime_resume_and_get(&pdev->dev);
55188         if (ret < 0)
55189                 return ret;
55191 diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
55192 index 936ef54e0903..0d75080da648 100644
55193 --- a/drivers/spi/spi-rockchip.c
55194 +++ b/drivers/spi/spi-rockchip.c
55195 @@ -476,7 +476,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
55196         return 1;
55199 -static void rockchip_spi_config(struct rockchip_spi *rs,
55200 +static int rockchip_spi_config(struct rockchip_spi *rs,
55201                 struct spi_device *spi, struct spi_transfer *xfer,
55202                 bool use_dma, bool slave_mode)
55204 @@ -521,7 +521,9 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
55205                  * ctlr->bits_per_word_mask, so this shouldn't
55206                  * happen
55207                  */
55208 -               unreachable();
55209 +               dev_err(rs->dev, "unknown bits per word: %d\n",
55210 +                       xfer->bits_per_word);
55211 +               return -EINVAL;
55212         }
55214         if (use_dma) {
55215 @@ -554,6 +556,8 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
55216          */
55217         writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
55218                         rs->regs + ROCKCHIP_SPI_BAUDR);
55220 +       return 0;
55223  static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
55224 @@ -577,6 +581,7 @@ static int rockchip_spi_transfer_one(
55225                 struct spi_transfer *xfer)
55227         struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
55228 +       int ret;
55229         bool use_dma;
55231         WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
55232 @@ -596,7 +601,9 @@ static int rockchip_spi_transfer_one(
55234         use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
55236 -       rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
55237 +       ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
55238 +       if (ret)
55239 +               return ret;
55241         if (use_dma)
55242                 return rockchip_spi_prepare_dma(rs, ctlr, xfer);
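
/*
 * Note: the change above demotes an "impossible" case from
 * unreachable() - undefined behaviour if the assumption ever breaks -
 * to a reported, recoverable error. The resulting shape:
 */

switch (xfer->bits_per_word) {
case 8:
        /* configure an 8-bit frame */
        break;
case 16:
        /* configure a 16-bit frame */
        break;
default:
        /* was unreachable(); now fail the transfer loudly instead */
        dev_err(rs->dev, "unknown bits per word: %d\n", xfer->bits_per_word);
        return -EINVAL;
}
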
55243 diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
55244 index 947e6b9dc9f4..2786470a5201 100644
55245 --- a/drivers/spi/spi-stm32-qspi.c
55246 +++ b/drivers/spi/spi-stm32-qspi.c
55247 @@ -727,21 +727,31 @@ static int __maybe_unused stm32_qspi_suspend(struct device *dev)
55249         pinctrl_pm_select_sleep_state(dev);
55251 -       return 0;
55252 +       return pm_runtime_force_suspend(dev);
55255  static int __maybe_unused stm32_qspi_resume(struct device *dev)
55257         struct stm32_qspi *qspi = dev_get_drvdata(dev);
55258 +       int ret;
55260 +       ret = pm_runtime_force_resume(dev);
55261 +       if (ret < 0)
55262 +               return ret;
55264         pinctrl_pm_select_default_state(dev);
55265 -       clk_prepare_enable(qspi->clk);
55267 +       ret = pm_runtime_get_sync(dev);
55268 +       if (ret < 0) {
55269 +               pm_runtime_put_noidle(dev);
55270 +               return ret;
55271 +       }
55273         writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
55274         writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
55276 -       pm_runtime_mark_last_busy(qspi->dev);
55277 -       pm_runtime_put_autosuspend(qspi->dev);
55278 +       pm_runtime_mark_last_busy(dev);
55279 +       pm_runtime_put_autosuspend(dev);
55281         return 0;
55283 diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
55284 index 25c076461011..7f0244a246e9 100644
55285 --- a/drivers/spi/spi-stm32.c
55286 +++ b/drivers/spi/spi-stm32.c
55287 @@ -1803,7 +1803,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
55288         struct reset_control *rst;
55289         int ret;
55291 -       master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
55292 +       master = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
55293         if (!master) {
55294                 dev_err(&pdev->dev, "spi master allocation failed\n");
55295                 return -ENOMEM;
55296 @@ -1821,18 +1821,16 @@ static int stm32_spi_probe(struct platform_device *pdev)
55298         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
55299         spi->base = devm_ioremap_resource(&pdev->dev, res);
55300 -       if (IS_ERR(spi->base)) {
55301 -               ret = PTR_ERR(spi->base);
55302 -               goto err_master_put;
55303 -       }
55304 +       if (IS_ERR(spi->base))
55305 +               return PTR_ERR(spi->base);
55307         spi->phys_addr = (dma_addr_t)res->start;
55309         spi->irq = platform_get_irq(pdev, 0);
55310 -       if (spi->irq <= 0) {
55311 -               ret = dev_err_probe(&pdev->dev, spi->irq, "failed to get irq\n");
55312 -               goto err_master_put;
55313 -       }
55314 +       if (spi->irq <= 0)
55315 +               return dev_err_probe(&pdev->dev, spi->irq,
55316 +                                    "failed to get irq\n");
55318         ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
55319                                         spi->cfg->irq_handler_event,
55320                                         spi->cfg->irq_handler_thread,
55321 @@ -1840,20 +1838,20 @@ static int stm32_spi_probe(struct platform_device *pdev)
55322         if (ret) {
55323                 dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
55324                         ret);
55325 -               goto err_master_put;
55326 +               return ret;
55327         }
55329         spi->clk = devm_clk_get(&pdev->dev, NULL);
55330         if (IS_ERR(spi->clk)) {
55331                 ret = PTR_ERR(spi->clk);
55332                 dev_err(&pdev->dev, "clk get failed: %d\n", ret);
55333 -               goto err_master_put;
55334 +               return ret;
55335         }
55337         ret = clk_prepare_enable(spi->clk);
55338         if (ret) {
55339                 dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
55340 -               goto err_master_put;
55341 +               return ret;
55342         }
55343         spi->clk_rate = clk_get_rate(spi->clk);
55344         if (!spi->clk_rate) {
55345 @@ -1929,7 +1927,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
55346         pm_runtime_set_active(&pdev->dev);
55347         pm_runtime_enable(&pdev->dev);
55349 -       ret = devm_spi_register_master(&pdev->dev, master);
55350 +       ret = spi_register_master(master);
55351         if (ret) {
55352                 dev_err(&pdev->dev, "spi master registration failed: %d\n",
55353                         ret);
55354 @@ -1949,8 +1947,6 @@ static int stm32_spi_probe(struct platform_device *pdev)
55355                 dma_release_channel(spi->dma_rx);
55356  err_clk_disable:
55357         clk_disable_unprepare(spi->clk);
55358 -err_master_put:
55359 -       spi_master_put(master);
55361         return ret;
55363 @@ -1960,6 +1956,7 @@ static int stm32_spi_remove(struct platform_device *pdev)
55364         struct spi_master *master = platform_get_drvdata(pdev);
55365         struct stm32_spi *spi = spi_master_get_devdata(master);
55367 +       spi_unregister_master(master);
55368         spi->cfg->disable(spi);
55370         if (master->dma_tx)
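
/*
 * Note on the conversion above: devm_spi_alloc_master() ties the
 * controller's lifetime to the probing device, so the early error
 * paths collapse to plain returns and the err_master_put label goes
 * away. The trade-off is that registration becomes explicit -
 * spi_register_master() in probe, paired with spi_unregister_master()
 * at the top of remove() - so the controller is unregistered before
 * its clocks and DMA channels are released:
 */

master = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
if (!master)
        return -ENOMEM;         /* no spi_master_put() needed on failure */
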
55371 diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
55372 index 9417385c0921..e06aafe169e0 100644
55373 --- a/drivers/spi/spi-ti-qspi.c
55374 +++ b/drivers/spi/spi-ti-qspi.c
55375 @@ -733,6 +733,17 @@ static int ti_qspi_runtime_resume(struct device *dev)
55376         return 0;
55379 +static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
55381 +       if (qspi->rx_bb_addr)
55382 +               dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
55383 +                                 qspi->rx_bb_addr,
55384 +                                 qspi->rx_bb_dma_addr);
55386 +       if (qspi->rx_chan)
55387 +               dma_release_channel(qspi->rx_chan);
55390  static const struct of_device_id ti_qspi_match[] = {
55391         {.compatible = "ti,dra7xxx-qspi" },
55392         {.compatible = "ti,am4372-qspi" },
55393 @@ -886,6 +897,8 @@ static int ti_qspi_probe(struct platform_device *pdev)
55394         if (!ret)
55395                 return 0;
55397 +       ti_qspi_dma_cleanup(qspi);
55399         pm_runtime_disable(&pdev->dev);
55400  free_master:
55401         spi_master_put(master);
55402 @@ -904,12 +917,7 @@ static int ti_qspi_remove(struct platform_device *pdev)
55403         pm_runtime_put_sync(&pdev->dev);
55404         pm_runtime_disable(&pdev->dev);
55406 -       if (qspi->rx_bb_addr)
55407 -               dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
55408 -                                 qspi->rx_bb_addr,
55409 -                                 qspi->rx_bb_dma_addr);
55410 -       if (qspi->rx_chan)
55411 -               dma_release_channel(qspi->rx_chan);
55412 +       ti_qspi_dma_cleanup(qspi);
55414         return 0;
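
/*
 * Note: before this change, a late probe failure leaked the DMA
 * bounce buffer and channel that only remove() knew how to free.
 * Factoring the teardown into ti_qspi_dma_cleanup() is the usual cure
 * when a probe error path and remove() must undo the same setup:
 */

ti_qspi_dma_cleanup(qspi);      /* now shared by probe's error path
                                 * and remove(), so they cannot drift */
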
55416 diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
55417 index c8fa6ee18ae7..7162387b9f96 100644
55418 --- a/drivers/spi/spi-zynqmp-gqspi.c
55419 +++ b/drivers/spi/spi-zynqmp-gqspi.c
55420 @@ -157,6 +157,7 @@ enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
55421   * @data_completion:   completion structure
55422   */
55423  struct zynqmp_qspi {
55424 +       struct spi_controller *ctlr;
55425         void __iomem *regs;
55426         struct clk *refclk;
55427         struct clk *pclk;
55428 @@ -173,6 +174,7 @@ struct zynqmp_qspi {
55429         u32 genfifoentry;
55430         enum mode_type mode;
55431         struct completion data_completion;
55432 +       struct mutex op_lock;
55433  };
55435  /**
55436 @@ -486,24 +488,10 @@ static int zynqmp_qspi_setup_op(struct spi_device *qspi)
55438         struct spi_controller *ctlr = qspi->master;
55439         struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
55440 -       struct device *dev = &ctlr->dev;
55441 -       int ret;
55443         if (ctlr->busy)
55444                 return -EBUSY;
55446 -       ret = clk_enable(xqspi->refclk);
55447 -       if (ret) {
55448 -               dev_err(dev, "Cannot enable device clock.\n");
55449 -               return ret;
55450 -       }
55452 -       ret = clk_enable(xqspi->pclk);
55453 -       if (ret) {
55454 -               dev_err(dev, "Cannot enable APB clock.\n");
55455 -               clk_disable(xqspi->refclk);
55456 -               return ret;
55457 -       }
55458         zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
55460         return 0;
55461 @@ -520,7 +508,7 @@ static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
55463         u32 count = 0, intermediate;
55465 -       while ((xqspi->bytes_to_transfer > 0) && (count < size)) {
55466 +       while ((xqspi->bytes_to_transfer > 0) && (count < size) && (xqspi->txbuf)) {
55467                 memcpy(&intermediate, xqspi->txbuf, 4);
55468                 zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
55470 @@ -579,7 +567,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
55471                 genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
55472                 genfifoentry |= GQSPI_GENFIFO_TX;
55473                 transfer_len = xqspi->bytes_to_transfer;
55474 -       } else {
55475 +       } else if (xqspi->rxbuf) {
55476                 genfifoentry &= ~GQSPI_GENFIFO_TX;
55477                 genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
55478                 genfifoentry |= GQSPI_GENFIFO_RX;
55479 @@ -587,6 +575,11 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
55480                         transfer_len = xqspi->dma_rx_bytes;
55481                 else
55482                         transfer_len = xqspi->bytes_to_receive;
55483 +       } else {
55484 +               /* Send dummy cycles here */
55485 +               genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
55486 +               genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
55487 +               transfer_len = xqspi->bytes_to_transfer;
55488         }
55489         genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
55490         xqspi->genfifoentry = genfifoentry;
55491 @@ -738,7 +731,7 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
55492   * zynqmp_qspi_setuprxdma - This function sets up the RX DMA operation
55493   * @xqspi:     xqspi is a pointer to the GQSPI instance.
55494   */
55495 -static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
55496 +static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
55498         u32 rx_bytes, rx_rem, config_reg;
55499         dma_addr_t addr;
55500 @@ -752,7 +745,7 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
55501                 zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
55502                 xqspi->mode = GQSPI_MODE_IO;
55503                 xqspi->dma_rx_bytes = 0;
55504 -               return;
55505 +               return 0;
55506         }
55508         rx_rem = xqspi->bytes_to_receive % 4;
55509 @@ -760,8 +753,10 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
55511         addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
55512                               rx_bytes, DMA_FROM_DEVICE);
55513 -       if (dma_mapping_error(xqspi->dev, addr))
55514 +       if (dma_mapping_error(xqspi->dev, addr)) {
55515                 dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
55516 +               return -ENOMEM;
55517 +       }
55519         xqspi->dma_rx_bytes = rx_bytes;
55520         xqspi->dma_addr = addr;
55521 @@ -782,6 +777,8 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
55523         /* Write the number of bytes to transfer */
55524         zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
55526 +       return 0;
55529  /**
55530 @@ -818,11 +815,17 @@ static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
55531   * @genfifoentry:      genfifoentry is pointer to the variable in which
55532   *                     GENFIFO mask is returned to calling function
55533   */
55534 -static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
55535 +static int zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
55536                                 u32 genfifoentry)
55538 +       int ret;
55540 +       ret = zynqmp_qspi_setuprxdma(xqspi);
55541 +       if (ret)
55542 +               return ret;
55543         zynqmp_qspi_fillgenfifo(xqspi, rx_nbits, genfifoentry);
55544 -       zynqmp_qspi_setuprxdma(xqspi);
55546 +       return 0;
55549  /**
55550 @@ -835,10 +838,13 @@ static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
55551   */
55552  static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
55554 -       struct spi_controller *ctlr = dev_get_drvdata(dev);
55555 -       struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
55556 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
55557 +       struct spi_controller *ctlr = xqspi->ctlr;
55558 +       int ret;
55560 -       spi_controller_suspend(ctlr);
55561 +       ret = spi_controller_suspend(ctlr);
55562 +       if (ret)
55563 +               return ret;
55565         zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
55567 @@ -856,27 +862,13 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
55568   */
55569  static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
55571 -       struct spi_controller *ctlr = dev_get_drvdata(dev);
55572 -       struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
55573 -       int ret = 0;
55575 -       ret = clk_enable(xqspi->pclk);
55576 -       if (ret) {
55577 -               dev_err(dev, "Cannot enable APB clock.\n");
55578 -               return ret;
55579 -       }
55580 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
55581 +       struct spi_controller *ctlr = xqspi->ctlr;
55583 -       ret = clk_enable(xqspi->refclk);
55584 -       if (ret) {
55585 -               dev_err(dev, "Cannot enable device clock.\n");
55586 -               clk_disable(xqspi->pclk);
55587 -               return ret;
55588 -       }
55589 +       zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
55591         spi_controller_resume(ctlr);
55593 -       clk_disable(xqspi->refclk);
55594 -       clk_disable(xqspi->pclk);
55595         return 0;
55598 @@ -890,10 +882,10 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
55599   */
55600  static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
55602 -       struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
55603 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
55605 -       clk_disable(xqspi->refclk);
55606 -       clk_disable(xqspi->pclk);
55607 +       clk_disable_unprepare(xqspi->refclk);
55608 +       clk_disable_unprepare(xqspi->pclk);
55610         return 0;
55612 @@ -908,19 +900,19 @@ static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
55613   */
55614  static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
55616 -       struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
55617 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
55618         int ret;
55620 -       ret = clk_enable(xqspi->pclk);
55621 +       ret = clk_prepare_enable(xqspi->pclk);
55622         if (ret) {
55623                 dev_err(dev, "Cannot enable APB clock.\n");
55624                 return ret;
55625         }
55627 -       ret = clk_enable(xqspi->refclk);
55628 +       ret = clk_prepare_enable(xqspi->refclk);
55629         if (ret) {
55630                 dev_err(dev, "Cannot enable device clock.\n");
55631 -               clk_disable(xqspi->pclk);
55632 +               clk_disable_unprepare(xqspi->pclk);
55633                 return ret;
55634         }
55636 @@ -944,25 +936,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55637         struct zynqmp_qspi *xqspi = spi_controller_get_devdata
55638                                     (mem->spi->master);
55639         int err = 0, i;
55640 -       u8 *tmpbuf;
55641         u32 genfifoentry = 0;
55642 +       u16 opcode = op->cmd.opcode;
55643 +       u64 opaddr;
55645         dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
55646                 op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
55647                 op->dummy.buswidth, op->data.buswidth);
55649 +       mutex_lock(&xqspi->op_lock);
55650         zynqmp_qspi_config_op(xqspi, mem->spi);
55651         zynqmp_qspi_chipselect(mem->spi, false);
55652         genfifoentry |= xqspi->genfifocs;
55653         genfifoentry |= xqspi->genfifobus;
55655         if (op->cmd.opcode) {
55656 -               tmpbuf = kzalloc(op->cmd.nbytes, GFP_KERNEL | GFP_DMA);
55657 -               if (!tmpbuf)
55658 -                       return -ENOMEM;
55659 -               tmpbuf[0] = op->cmd.opcode;
55660                 reinit_completion(&xqspi->data_completion);
55661 -               xqspi->txbuf = tmpbuf;
55662 +               xqspi->txbuf = &opcode;
55663                 xqspi->rxbuf = NULL;
55664                 xqspi->bytes_to_transfer = op->cmd.nbytes;
55665                 xqspi->bytes_to_receive = 0;
55666 @@ -973,16 +963,15 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55667                 zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
55668                                    GQSPI_IER_GENFIFOEMPTY_MASK |
55669                                    GQSPI_IER_TXNOT_FULL_MASK);
55670 -               if (!wait_for_completion_interruptible_timeout
55671 +               if (!wait_for_completion_timeout
55672                     (&xqspi->data_completion, msecs_to_jiffies(1000))) {
55673                         err = -ETIMEDOUT;
55674 -                       kfree(tmpbuf);
55675                         goto return_err;
55676                 }
55677 -               kfree(tmpbuf);
55678         }
55680         if (op->addr.nbytes) {
55681 +               xqspi->txbuf = &opaddr;
55682                 for (i = 0; i < op->addr.nbytes; i++) {
55683                         *(((u8 *)xqspi->txbuf) + i) = op->addr.val >>
55684                                         (8 * (op->addr.nbytes - i - 1));
55685 @@ -1001,7 +990,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55686                                    GQSPI_IER_TXEMPTY_MASK |
55687                                    GQSPI_IER_GENFIFOEMPTY_MASK |
55688                                    GQSPI_IER_TXNOT_FULL_MASK);
55689 -               if (!wait_for_completion_interruptible_timeout
55690 +               if (!wait_for_completion_timeout
55691                     (&xqspi->data_completion, msecs_to_jiffies(1000))) {
55692                         err = -ETIMEDOUT;
55693                         goto return_err;
55694 @@ -1009,32 +998,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55695         }
55697         if (op->dummy.nbytes) {
55698 -               tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL | GFP_DMA);
55699 -               if (!tmpbuf)
55700 -                       return -ENOMEM;
55701 -               memset(tmpbuf, 0xff, op->dummy.nbytes);
55702 -               reinit_completion(&xqspi->data_completion);
55703 -               xqspi->txbuf = tmpbuf;
55704 +               xqspi->txbuf = NULL;
55705                 xqspi->rxbuf = NULL;
55706 -               xqspi->bytes_to_transfer = op->dummy.nbytes;
55707 +               /*
55708 +                * xqspi->bytes_to_transfer here represents the number of dummy
55709 +                * cycles that need to be sent.
55710 +                */
55711 +               xqspi->bytes_to_transfer = op->dummy.nbytes * 8 / op->dummy.buswidth;
55712                 xqspi->bytes_to_receive = 0;
55713 -               zynqmp_qspi_write_op(xqspi, op->dummy.buswidth,
55714 +               /*
55715 +                * Use op->data.buswidth instead of op->dummy.buswidth here, since
55716 +                * that is the width needed to configure the correct SPI mode.
55717 +                */
55718 +               zynqmp_qspi_write_op(xqspi, op->data.buswidth,
55719                                      genfifoentry);
55720                 zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
55721                                    zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
55722                                    GQSPI_CFG_START_GEN_FIFO_MASK);
55723 -               zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
55724 -                                  GQSPI_IER_TXEMPTY_MASK |
55725 -                                  GQSPI_IER_GENFIFOEMPTY_MASK |
55726 -                                  GQSPI_IER_TXNOT_FULL_MASK);
55727 -               if (!wait_for_completion_interruptible_timeout
55728 -                   (&xqspi->data_completion, msecs_to_jiffies(1000))) {
55729 -                       err = -ETIMEDOUT;
55730 -                       kfree(tmpbuf);
55731 -                       goto return_err;
55732 -               }
55734 -               kfree(tmpbuf);
55735         }
55737         if (op->data.nbytes) {
55738 @@ -1059,8 +1039,11 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55739                         xqspi->rxbuf = (u8 *)op->data.buf.in;
55740                         xqspi->bytes_to_receive = op->data.nbytes;
55741                         xqspi->bytes_to_transfer = 0;
55742 -                       zynqmp_qspi_read_op(xqspi, op->data.buswidth,
55743 +                       err = zynqmp_qspi_read_op(xqspi, op->data.buswidth,
55744                                             genfifoentry);
55745 +                       if (err)
55746 +                               goto return_err;
55748                         zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
55749                                            zynqmp_gqspi_read
55750                                            (xqspi, GQSPI_CONFIG_OFST) |
55751 @@ -1076,7 +1059,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55752                                                    GQSPI_IER_RXEMPTY_MASK);
55753                         }
55754                 }
55755 -               if (!wait_for_completion_interruptible_timeout
55756 +               if (!wait_for_completion_timeout
55757                     (&xqspi->data_completion, msecs_to_jiffies(1000)))
55758                         err = -ETIMEDOUT;
55759         }
55760 @@ -1084,6 +1067,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
55761  return_err:
55763         zynqmp_qspi_chipselect(mem->spi, true);
55764 +       mutex_unlock(&xqspi->op_lock);
55766         return err;
55768 @@ -1120,6 +1104,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55770         xqspi = spi_controller_get_devdata(ctlr);
55771         xqspi->dev = dev;
55772 +       xqspi->ctlr = ctlr;
55773         platform_set_drvdata(pdev, xqspi);
55775         xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
55776 @@ -1135,13 +1120,11 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55777                 goto remove_master;
55778         }
55780 -       init_completion(&xqspi->data_completion);
55782         xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
55783         if (IS_ERR(xqspi->refclk)) {
55784                 dev_err(dev, "ref_clk clock not found.\n");
55785                 ret = PTR_ERR(xqspi->refclk);
55786 -               goto clk_dis_pclk;
55787 +               goto remove_master;
55788         }
55790         ret = clk_prepare_enable(xqspi->pclk);
55791 @@ -1156,15 +1139,24 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55792                 goto clk_dis_pclk;
55793         }
55795 +       init_completion(&xqspi->data_completion);
55797 +       mutex_init(&xqspi->op_lock);
55799         pm_runtime_use_autosuspend(&pdev->dev);
55800         pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
55801         pm_runtime_set_active(&pdev->dev);
55802         pm_runtime_enable(&pdev->dev);
55804 +       ret = pm_runtime_get_sync(&pdev->dev);
55805 +       if (ret < 0) {
55806 +               dev_err(&pdev->dev, "Failed to pm_runtime_get_sync: %d\n", ret);
55807 +               goto clk_dis_all;
55808 +       }
55810         /* QSPI controller initializations */
55811         zynqmp_qspi_init_hw(xqspi);
55813 -       pm_runtime_mark_last_busy(&pdev->dev);
55814 -       pm_runtime_put_autosuspend(&pdev->dev);
55815         xqspi->irq = platform_get_irq(pdev, 0);
55816         if (xqspi->irq <= 0) {
55817                 ret = -ENXIO;
55818 @@ -1178,6 +1170,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55819                 goto clk_dis_all;
55820         }
55822 +       dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
55823         ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
55824         ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
55825         ctlr->mem_ops = &zynqmp_qspi_mem_ops;
55826 @@ -1187,6 +1180,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55827         ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
55828                             SPI_TX_DUAL | SPI_TX_QUAD;
55829         ctlr->dev.of_node = np;
55830 +       ctlr->auto_runtime_pm = true;
55832         ret = devm_spi_register_controller(&pdev->dev, ctlr);
55833         if (ret) {
55834 @@ -1194,9 +1188,13 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
55835                 goto clk_dis_all;
55836         }
55838 +       pm_runtime_mark_last_busy(&pdev->dev);
55839 +       pm_runtime_put_autosuspend(&pdev->dev);
55841         return 0;
55843  clk_dis_all:
55844 +       pm_runtime_put_sync(&pdev->dev);
55845         pm_runtime_set_suspended(&pdev->dev);
55846         pm_runtime_disable(&pdev->dev);
55847         clk_disable_unprepare(xqspi->refclk);
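
/*
 * Note: three waits above drop the _interruptible_ variant. A signal
 * delivered to the calling task could previously abort the wait in
 * the middle of a flash operation; the plain variant cannot be
 * interrupted, so only a genuine timeout ends it:
 */

/* may return early (-ERESTARTSYS) when the task catches a signal */
ret = wait_for_completion_interruptible_timeout(&done, msecs_to_jiffies(1000));

/* immune to signals; returns 0 only after the full second elapses */
if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000)))
        err = -ETIMEDOUT;
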
55848 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
55849 index b08efe88ccd6..8da4fe475b84 100644
55850 --- a/drivers/spi/spi.c
55851 +++ b/drivers/spi/spi.c
55852 @@ -795,7 +795,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
55854  /*-------------------------------------------------------------------------*/
55856 -static void spi_set_cs(struct spi_device *spi, bool enable)
55857 +static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
55859         bool enable1 = enable;
55861 @@ -803,7 +803,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
55862          * Avoid calling into the driver (or doing delays) if the chip select
55863          * isn't actually changing from the last time this was called.
55864          */
55865 -       if ((spi->controller->last_cs_enable == enable) &&
55866 +       if (!force && (spi->controller->last_cs_enable == enable) &&
55867             (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
55868                 return;
55870 @@ -1253,7 +1253,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
55871         struct spi_statistics *statm = &ctlr->statistics;
55872         struct spi_statistics *stats = &msg->spi->statistics;
55874 -       spi_set_cs(msg->spi, true);
55875 +       spi_set_cs(msg->spi, true, false);
55877         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
55878         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
55879 @@ -1321,9 +1321,9 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
55880                                          &msg->transfers)) {
55881                                 keep_cs = true;
55882                         } else {
55883 -                               spi_set_cs(msg->spi, false);
55884 +                               spi_set_cs(msg->spi, false, false);
55885                                 _spi_transfer_cs_change_delay(msg, xfer);
55886 -                               spi_set_cs(msg->spi, true);
55887 +                               spi_set_cs(msg->spi, true, false);
55888                         }
55889                 }
55891 @@ -1332,7 +1332,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
55893  out:
55894         if (ret != 0 || !keep_cs)
55895 -               spi_set_cs(msg->spi, false);
55896 +               spi_set_cs(msg->spi, false, false);
55898         if (msg->status == -EINPROGRESS)
55899                 msg->status = ret;
55900 @@ -2496,6 +2496,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
55902         ctlr = __spi_alloc_controller(dev, size, slave);
55903         if (ctlr) {
55904 +               ctlr->devm_allocated = true;
55905                 *ptr = ctlr;
55906                 devres_add(dev, ptr);
55907         } else {
55908 @@ -2842,11 +2843,6 @@ int devm_spi_register_controller(struct device *dev,
55910  EXPORT_SYMBOL_GPL(devm_spi_register_controller);
55912 -static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
55914 -       return *(struct spi_controller **)res == ctlr;
55917  static int __unregister(struct device *dev, void *null)
55919         spi_unregister_device(to_spi_device(dev));
55920 @@ -2893,8 +2889,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
55921         /* Release the last reference on the controller if its driver
55922          * has not yet been converted to devm_spi_alloc_master/slave().
55923          */
55924 -       if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
55925 -                        devm_spi_match_controller, ctlr))
55926 +       if (!ctlr->devm_allocated)
55927                 put_device(&ctlr->dev);
55929         /* free bus id */
55930 @@ -3423,11 +3418,11 @@ int spi_setup(struct spi_device *spi)
55931                  */
55932                 status = 0;
55934 -               spi_set_cs(spi, false);
55935 +               spi_set_cs(spi, false, true);
55936                 pm_runtime_mark_last_busy(spi->controller->dev.parent);
55937                 pm_runtime_put_autosuspend(spi->controller->dev.parent);
55938         } else {
55939 -               spi_set_cs(spi, false);
55940 +               spi_set_cs(spi, false, true);
55941         }
55943         mutex_unlock(&spi->controller->io_mutex);
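
/*
 * Note on the new spi_set_cs() argument: caching last_cs_enable is an
 * optimisation for back-to-back transfers, but spi_setup() needs a
 * guaranteed deassert even when the cache already says "off":
 */

spi_set_cs(spi, false, false);  /* may be skipped via the cached state */
spi_set_cs(spi, false, true);   /* always reaches the driver/GPIO, so
                                 * setup leaves CS in a known state */
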
55944 diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
55945 index 70498adb1575..5c35653ed36d 100644
55946 --- a/drivers/staging/android/Kconfig
55947 +++ b/drivers/staging/android/Kconfig
55948 @@ -4,7 +4,7 @@ menu "Android"
55949  if ANDROID
55951  config ASHMEM
55952 -       bool "Enable the Anonymous Shared Memory Subsystem"
55953 +       tristate "Enable the Anonymous Shared Memory Subsystem"
55954         depends on SHMEM
55955         help
55956           The ashmem subsystem is a new shared memory allocator, similar to
55957 diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
55958 index e9a55a5e6529..3d794218dd4b 100644
55959 --- a/drivers/staging/android/Makefile
55960 +++ b/drivers/staging/android/Makefile
55961 @@ -1,4 +1,5 @@
55962  # SPDX-License-Identifier: GPL-2.0
55963  ccflags-y += -I$(src)                  # needed for trace events
55965 -obj-$(CONFIG_ASHMEM)                   += ashmem.o
55966 +ashmem_linux-y                         += ashmem.o
55967 +obj-$(CONFIG_ASHMEM)                   += ashmem_linux.o
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index d66a64e42273..e28d9a2ce7f1 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -19,6 +19,7 @@
 #include <linux/security.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
+#include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/personality.h>
 #include <linux/bitops.h>
@@ -964,4 +965,18 @@ static int __init ashmem_init(void)
 out:
        return ret;
 }
-device_initcall(ashmem_init);
+
+static void __exit ashmem_exit(void)
+{
+       misc_deregister(&ashmem_misc);
+       unregister_shrinker(&ashmem_shrinker);
+       kmem_cache_destroy(ashmem_range_cachep);
+       kmem_cache_destroy(ashmem_area_cachep);
+}
+
+module_init(ashmem_init);
+module_exit(ashmem_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("Driver for Android shared memory device");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 4f80a4991f95..c164c8524909 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -4747,7 +4747,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
                if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
                        break;
                set_current_state(TASK_INTERRUPTIBLE);
-               if (schedule_timeout(1))
+               if (schedule_min_hrtimeout())
                        return -EIO;
        }
        if (i == timeout) {
diff --git a/drivers/staging/comedi/drivers/tests/ni_routes_test.c b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
index 4061b3b5f8e9..68defeb53de4 100644
--- a/drivers/staging/comedi/drivers/tests/ni_routes_test.c
+++ b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
@@ -217,7 +217,8 @@ void test_ni_assign_device_routes(void)
        const u8 *table, *oldtable;
 
        init_pci_6070e();
-       ni_assign_device_routes(ni_eseries, pci_6070e, &private.routing_tables);
+       ni_assign_device_routes(ni_eseries, pci_6070e, NULL,
+                               &private.routing_tables);
        devroutes = private.routing_tables.valid_routes;
        table = private.routing_tables.route_values;
 
@@ -253,7 +254,8 @@ void test_ni_assign_device_routes(void)
        olddevroutes = devroutes;
        oldtable = table;
        init_pci_6220();
-       ni_assign_device_routes(ni_mseries, pci_6220, &private.routing_tables);
+       ni_assign_device_routes(ni_mseries, pci_6220, NULL,
+                               &private.routing_tables);
        devroutes = private.routing_tables.valid_routes;
        table = private.routing_tables.route_values;
 
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index c368082aae1a..0f4655d7d520 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -1218,13 +1218,12 @@ static int get_serial_info(struct tty_struct *tty,
        struct fwtty_port *port = tty->driver_data;
 
        mutex_lock(&port->port.mutex);
-       ss->type =  PORT_UNKNOWN;
-       ss->line =  port->port.tty->index;
-       ss->flags = port->port.flags;
-       ss->xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN;
+       ss->line = port->index;
        ss->baud_base = 400000000;
-       ss->close_delay = port->port.close_delay;
+       ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10;
+       ss->closing_wait = 3000;
        mutex_unlock(&port->port.mutex);
 
        return 0;
 }
 
@@ -1232,20 +1231,20 @@ static int set_serial_info(struct tty_struct *tty,
                           struct serial_struct *ss)
 {
        struct fwtty_port *port = tty->driver_data;
+       unsigned int cdelay;
 
-       if (ss->irq != 0 || ss->port != 0 || ss->custom_divisor != 0 ||
-           ss->baud_base != 400000000)
-               return -EPERM;
+       cdelay = msecs_to_jiffies(ss->close_delay * 10);
 
        mutex_lock(&port->port.mutex);
        if (!capable(CAP_SYS_ADMIN)) {
-               if (((ss->flags & ~ASYNC_USR_MASK) !=
+               if (cdelay != port->port.close_delay ||
+                   ((ss->flags & ~ASYNC_USR_MASK) !=
                     (port->port.flags & ~ASYNC_USR_MASK))) {
                        mutex_unlock(&port->port.mutex);
                        return -EPERM;
                }
        }
-       port->port.close_delay = ss->close_delay * HZ / 100;
+       port->port.close_delay = cdelay;
        mutex_unlock(&port->port.mutex);
 
        return 0;
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 607378bfebb7..a520f7f213db 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -614,10 +614,12 @@ static int get_serial_info(struct tty_struct *tty,
        ss->line = gb_tty->minor;
        ss->xmit_fifo_size = 16;
        ss->baud_base = 9600;
-       ss->close_delay = gb_tty->port.close_delay / 10;
+       ss->close_delay = jiffies_to_msecs(gb_tty->port.close_delay) / 10;
        ss->closing_wait =
                gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
-               ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10;
+               ASYNC_CLOSING_WAIT_NONE :
+               jiffies_to_msecs(gb_tty->port.closing_wait) / 10;
 
        return 0;
 }
 
@@ -629,17 +631,16 @@ static int set_serial_info(struct tty_struct *tty,
        unsigned int close_delay;
        int retval = 0;
 
-       close_delay = ss->close_delay * 10;
+       close_delay = msecs_to_jiffies(ss->close_delay * 10);
        closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
-                       ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
+                       ASYNC_CLOSING_WAIT_NONE :
+                       msecs_to_jiffies(ss->closing_wait * 10);
 
        mutex_lock(&gb_tty->port.mutex);
        if (!capable(CAP_SYS_ADMIN)) {
                if ((close_delay != gb_tty->port.close_delay) ||
                    (closing_wait != gb_tty->port.closing_wait))
                        retval = -EPERM;
-               else
-                       retval = -EOPNOTSUPP;
        } else {
                gb_tty->port.close_delay = close_delay;
                gb_tty->port.closing_wait = closing_wait;
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
index 7ca7378b1859..0ab67b2aec67 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
@@ -843,8 +843,10 @@ static int lm3554_probe(struct i2c_client *client)
                return -ENOMEM;
 
        flash->pdata = lm3554_platform_data_func(client);
-       if (IS_ERR(flash->pdata))
-               return PTR_ERR(flash->pdata);
+       if (IS_ERR(flash->pdata)) {
+               err = PTR_ERR(flash->pdata);
+               goto fail1;
+       }
 
        v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
        flash->sd.internal_ops = &lm3554_internal_ops;
@@ -856,7 +858,7 @@ static int lm3554_probe(struct i2c_client *client)
                                   ARRAY_SIZE(lm3554_controls));
        if (ret) {
                dev_err(&client->dev, "error initialize a ctrl_handler.\n");
-               goto fail2;
+               goto fail3;
        }
 
        for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++)
@@ -865,14 +867,14 @@ static int lm3554_probe(struct i2c_client *client)
 
        if (flash->ctrl_handler.error) {
                dev_err(&client->dev, "ctrl_handler error.\n");
-               goto fail2;
+               goto fail3;
        }
 
        flash->sd.ctrl_handler = &flash->ctrl_handler;
        err = media_entity_pads_init(&flash->sd.entity, 0, NULL);
        if (err) {
                dev_err(&client->dev, "error initialize a media entity.\n");
-               goto fail1;
+               goto fail2;
        }
 
        flash->sd.entity.function = MEDIA_ENT_F_FLASH;
@@ -884,14 +886,15 @@ static int lm3554_probe(struct i2c_client *client)
        err = lm3554_gpio_init(client);
        if (err) {
                dev_err(&client->dev, "gpio request/direction_output fail");
-               goto fail2;
+               goto fail3;
        }
        return atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
-fail2:
+fail3:
        media_entity_cleanup(&flash->sd.entity);
        v4l2_ctrl_handler_free(&flash->ctrl_handler);
-fail1:
+fail2:
        v4l2_device_unregister_subdev(&flash->sd);
+fail1:
        kfree(flash);
 
        return err;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
index 453bb6913550..f1e6b2597853 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
@@ -221,6 +221,9 @@ int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
        unsigned long irqflags;
        int err = 0;
 
+       if (WARN_ON(css_pipe_id >= IA_CSS_PIPE_ID_NUM))
+               return -EINVAL;
+
        while (pipe->buffers_in_css < ATOMISP_CSS_Q_DEPTH) {
                struct videobuf_buffer *vb;
 
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 2ae50decfc8b..9da82855552d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -948,10 +948,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
                dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
                while (count--) {
                        dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
-                       if (!dis_buf) {
-                               kfree(s3a_buf);
+                       if (!dis_buf)
                                goto error;
-                       }
                        if (atomisp_css_allocate_stat_buffers(
                                asd, stream_id, NULL, dis_buf, NULL)) {
                                kfree(dis_buf);
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index f13af2329f48..0168f9839c90 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -857,16 +857,17 @@ static void free_private_pages(struct hmm_buffer_object *bo,
        kfree(bo->page_obj);
 }
 
-static void free_user_pages(struct hmm_buffer_object *bo)
+static void free_user_pages(struct hmm_buffer_object *bo,
+                           unsigned int page_nr)
 {
        int i;
 
        hmm_mem_stat.usr_size -= bo->pgnr;
 
        if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
-               unpin_user_pages(bo->pages, bo->pgnr);
+               unpin_user_pages(bo->pages, page_nr);
        } else {
-               for (i = 0; i < bo->pgnr; i++)
+               for (i = 0; i < page_nr; i++)
                        put_page(bo->pages[i]);
        }
        kfree(bo->pages);
@@ -942,6 +943,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
                dev_err(atomisp_dev,
                        "get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
                        bo->pgnr, page_nr);
+               if (page_nr < 0)
+                       page_nr = 0;
                goto out_of_mem;
        }
 
@@ -954,7 +957,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 
 out_of_mem:
 
-       free_user_pages(bo);
+       free_user_pages(bo, page_nr);
 
        return -ENOMEM;
 }
@@ -1037,7 +1040,7 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
        if (bo->type == HMM_BO_PRIVATE)
                free_private_pages(bo, &dynamic_pool, &reserved_pool);
        else if (bo->type == HMM_BO_USER)
-               free_user_pages(bo);
+               free_user_pages(bo, bo->pgnr);
        else
                dev_err(atomisp_dev, "invalid buffer type.\n");
        mutex_unlock(&bo->mutex);
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index e10ce103a5b4..94a0467d673b 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -557,7 +557,7 @@ static int capture_validate_fmt(struct capture_priv *priv)
                priv->vdev.fmt.fmt.pix.height != f.fmt.pix.height ||
                priv->vdev.cc->cs != cc->cs ||
                priv->vdev.compose.width != compose.width ||
-               priv->vdev.compose.height != compose.height) ? -EINVAL : 0;
+               priv->vdev.compose.height != compose.height) ? -EPIPE : 0;
 }
 
 static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 60aa02eb7d2a..6d9c49b39531 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -686,6 +686,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
 
        dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id);
 
+       css_q = imgu_node_to_queue(node);
        for (i = 0; i < IPU3_CSS_QUEUES; i++) {
                unsigned int inode = imgu_map_node(imgu, i);
 
@@ -693,6 +694,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
                if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
                        continue;
 
+               /* CSS expects some format on OUT queue */
+               if (i != IPU3_CSS_QUEUE_OUT &&
+                   !imgu_pipe->nodes[inode].enabled) {
+                       fmts[i] = NULL;
+                       continue;
+               }
+
+               if (i == css_q) {
+                       fmts[i] = &f->fmt.pix_mp;
+                       continue;
+               }
+
                if (try) {
                        fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp,
                                          sizeof(struct v4l2_pix_format_mplane),
@@ -705,10 +718,6 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
                        fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
                }
 
-               /* CSS expects some format on OUT queue */
-               if (i != IPU3_CSS_QUEUE_OUT &&
-                   !imgu_pipe->nodes[inode].enabled)
-                       fmts[i] = NULL;
        }
 
        if (!try) {
@@ -725,16 +734,10 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
                rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
        }
 
-       /*
-        * imgu doesn't set the node to the value given by user
-        * before we return success from this function, so set it here.
-        */
-       css_q = imgu_node_to_queue(node);
        if (!fmts[css_q]) {
                ret = -EINVAL;
                goto out;
        }
-       *fmts[css_q] = f->fmt.pix_mp;
 
        if (try)
                ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
@@ -745,15 +748,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
        if (ret < 0)
                goto out;
 
-       if (try)
-               f->fmt.pix_mp = *fmts[css_q];
-       else
-               f->fmt = imgu_pipe->nodes[node].vdev_fmt.fmt;
+       /*
+        * imgu doesn't set the node to the value given by user
+        * before we return success from this function, so set it here.
+        */
+       if (!try)
+               imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp;
 
 out:
        if (try) {
                for (i = 0; i < IPU3_CSS_QUEUES; i++)
-                       kfree(fmts[i]);
+                       if (i != css_q)
+                               kfree(fmts[i]);
        }
 
        return ret;
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index dae9073e7d3c..085397045b36 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -1236,8 +1236,10 @@ static int iss_probe(struct platform_device *pdev)
        if (ret < 0)
                goto error;
 
-       if (!omap4iss_get(iss))
+       if (!omap4iss_get(iss)) {
+               ret = -EINVAL;
                goto error;
+       }
 
        ret = iss_reset(iss);
        if (ret < 0)
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index d3eb81ee8dc2..d821661d30f3 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -55,16 +55,13 @@ static const struct v4l2_ctrl_ops rkvdec_ctrl_ops = {
 
 static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
        {
-               .mandatory = true,
                .cfg.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
        },
        {
-               .mandatory = true,
                .cfg.id = V4L2_CID_STATELESS_H264_SPS,
                .cfg.ops = &rkvdec_ctrl_ops,
        },
        {
-               .mandatory = true,
                .cfg.id = V4L2_CID_STATELESS_H264_PPS,
        },
        {
@@ -585,25 +582,7 @@ static const struct vb2_ops rkvdec_queue_ops = {
 
 static int rkvdec_request_validate(struct media_request *req)
 {
-       struct media_request_object *obj;
-       const struct rkvdec_ctrls *ctrls;
-       struct v4l2_ctrl_handler *hdl;
-       struct rkvdec_ctx *ctx = NULL;
-       unsigned int count, i;
-       int ret;
-
-       list_for_each_entry(obj, &req->objects, list) {
-               if (vb2_request_object_is_buffer(obj)) {
-                       struct vb2_buffer *vb;
-
-                       vb = container_of(obj, struct vb2_buffer, req_obj);
-                       ctx = vb2_get_drv_priv(vb->vb2_queue);
-                       break;
-               }
-       }
-
-       if (!ctx)
-               return -EINVAL;
+       unsigned int count;
 
        count = vb2_request_buffer_cnt(req);
        if (!count)
@@ -611,31 +590,6 @@ static int rkvdec_request_validate(struct media_request *req)
        else if (count > 1)
                return -EINVAL;
 
-       hdl = v4l2_ctrl_request_hdl_find(req, &ctx->ctrl_hdl);
-       if (!hdl)
-               return -ENOENT;
-
-       ret = 0;
-       ctrls = ctx->coded_fmt_desc->ctrls;
-       for (i = 0; ctrls && i < ctrls->num_ctrls; i++) {
-               u32 id = ctrls->ctrls[i].cfg.id;
-               struct v4l2_ctrl *ctrl;
-
-               if (!ctrls->ctrls[i].mandatory)
-                       continue;
-
-               ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, id);
-               if (!ctrl) {
-                       ret = -ENOENT;
-                       break;
-               }
-       }
-
-       v4l2_ctrl_request_hdl_put(hdl);
-
-       if (ret)
-               return ret;
-
        return vb2_request_validate(req);
 }
 
@@ -1118,7 +1072,7 @@ static struct platform_driver rkvdec_driver = {
        .remove = rkvdec_remove,
        .driver = {
                   .name = "rkvdec",
-                  .of_match_table = of_match_ptr(of_rkvdec_match),
+                  .of_match_table = of_rkvdec_match,
                   .pm = &rkvdec_pm_ops,
        },
 };
diff --git a/drivers/staging/media/rkvdec/rkvdec.h b/drivers/staging/media/rkvdec/rkvdec.h
index 77a137cca88e..52ac3874c5e5 100644
--- a/drivers/staging/media/rkvdec/rkvdec.h
+++ b/drivers/staging/media/rkvdec/rkvdec.h
@@ -25,7 +25,6 @@
 struct rkvdec_ctx;
 
 struct rkvdec_ctrl_desc {
-       u32 mandatory : 1;
        struct v4l2_ctrl_config cfg;
 };
 
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index 7718c561823f..92ace87c1c7d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -443,16 +443,17 @@
 #define VE_DEC_H265_STATUS_STCD_BUSY           BIT(21)
 #define VE_DEC_H265_STATUS_WB_BUSY             BIT(20)
 #define VE_DEC_H265_STATUS_BS_DMA_BUSY         BIT(19)
-#define VE_DEC_H265_STATUS_IQIT_BUSY           BIT(18)
+#define VE_DEC_H265_STATUS_IT_BUSY             BIT(18)
 #define VE_DEC_H265_STATUS_INTER_BUSY          BIT(17)
 #define VE_DEC_H265_STATUS_MORE_DATA           BIT(16)
-#define VE_DEC_H265_STATUS_VLD_BUSY            BIT(14)
-#define VE_DEC_H265_STATUS_DEBLOCKING_BUSY     BIT(13)
-#define VE_DEC_H265_STATUS_DEBLOCKING_DRAM_BUSY        BIT(12)
-#define VE_DEC_H265_STATUS_INTRA_BUSY          BIT(11)
-#define VE_DEC_H265_STATUS_SAO_BUSY            BIT(10)
-#define VE_DEC_H265_STATUS_MVP_BUSY            BIT(9)
-#define VE_DEC_H265_STATUS_SWDEC_BUSY          BIT(8)
+#define VE_DEC_H265_STATUS_DBLK_BUSY           BIT(15)
+#define VE_DEC_H265_STATUS_IREC_BUSY           BIT(14)
+#define VE_DEC_H265_STATUS_INTRA_BUSY          BIT(13)
+#define VE_DEC_H265_STATUS_MCRI_BUSY           BIT(12)
+#define VE_DEC_H265_STATUS_IQIT_BUSY           BIT(11)
+#define VE_DEC_H265_STATUS_MVP_BUSY            BIT(10)
+#define VE_DEC_H265_STATUS_IS_BUSY             BIT(9)
+#define VE_DEC_H265_STATUS_VLD_BUSY            BIT(8)
 #define VE_DEC_H265_STATUS_OVER_TIME           BIT(3)
 #define VE_DEC_H265_STATUS_VLD_DATA_REQ                BIT(2)
 #define VE_DEC_H265_STATUS_ERROR               BIT(1)
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 5516be3af898..c1d52190e1bd 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -4550,7 +4550,7 @@ static int qlge_probe(struct pci_dev *pdev,
        struct net_device *ndev = NULL;
        struct devlink *devlink;
        static int cards_found;
-       int err = 0;
+       int err;
 
        devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter));
        if (!devlink)
@@ -4561,8 +4561,10 @@ static int qlge_probe(struct pci_dev *pdev,
        ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
                                 min(MAX_CPUS,
                                     netif_get_num_default_rss_queues()));
-       if (!ndev)
+       if (!ndev) {
+               err = -ENOMEM;
                goto devlink_free;
+       }
 
        ndev_priv = netdev_priv(ndev);
        ndev_priv->qdev = qdev;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 9fc4adc83d77..b5a313649f44 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -3210,7 +3210,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
                             u32 *TotalRxDataNum)
 {
        u16                     SlotIndex;
-       u8                      i;
+       u16                     i;
 
        *TotalRxBcnNum = 0;
        *TotalRxDataNum = 0;
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 898add4d1fc8..0aa9dd467349 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -477,7 +477,7 @@ static int rtsx_polling_thread(void *__dev)
 
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
+               schedule_msec_hrtimeout((POLLING_INTERVAL));
 
                /* lock the device pointers */
                mutex_lock(&dev->dev_mutex);
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 0433536930a9..d8726f28843f 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -549,7 +549,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
                }
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               wait += schedule_timeout(msecs_to_jiffies(10));
+               wait += schedule_msec_hrtimeout((10));
                spin_lock_irqsave(&devdata->priv_lock, flags);
        }
 
@@ -560,7 +560,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
                while (1) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irqrestore(&devdata->priv_lock, flags);
-                       schedule_timeout(msecs_to_jiffies(10));
+                       schedule_msec_hrtimeout((10));
                        spin_lock_irqsave(&devdata->priv_lock, flags);
                        if (atomic_read(&devdata->usage))
                                break;
@@ -714,7 +714,7 @@ static int visornic_enable_with_timeout(struct net_device *netdev,
                }
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
-               wait += schedule_timeout(msecs_to_jiffies(10));
+               wait += schedule_msec_hrtimeout((10));
                spin_lock_irqsave(&devdata->priv_lock, flags);
        }
 
diff --git a/drivers/staging/wimax/i2400m/op-rfkill.c b/drivers/staging/wimax/i2400m/op-rfkill.c
index fbddf2e18c14..44698a1aae87 100644
--- a/drivers/staging/wimax/i2400m/op-rfkill.c
+++ b/drivers/staging/wimax/i2400m/op-rfkill.c
@@ -86,7 +86,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
        if (cmd == NULL)
                goto error_alloc;
        cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
-       cmd->hdr.length = sizeof(cmd->sw_rf);
+       cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf));
        cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
        cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
        cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 9ee797b8cb7e..508b49b0eaf5 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -620,8 +620,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
                        unsigned char *buf;
 
                        buf = transport_kmap_data_sg(cmd);
-                       if (!buf)
+                       if (!buf) {
                                ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
+                       }
 
                        if (cdb[0] == MODE_SENSE_10) {
                                if (!(buf[3] & 0x80))
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index bf73cd5f4b04..6809c970be03 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1377,7 +1377,7 @@ static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
        return 1;
 }
 
-static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
+static bool tcmu_handle_completions(struct tcmu_dev *udev)
 {
        struct tcmu_mailbox *mb;
        struct tcmu_cmd *cmd;
@@ -1420,7 +1420,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
                        pr_err("cmd_id %u not found, ring is broken\n",
                               entry->hdr.cmd_id);
                        set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
-                       break;
+                       return false;
                }
 
                tcmu_handle_completion(cmd, entry);
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
index 337c8d82f74e..6d0f7062bb87 100644
--- a/drivers/tee/amdtee/amdtee_private.h
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -21,6 +21,7 @@
 #define TEEC_SUCCESS                   0x00000000
 #define TEEC_ERROR_GENERIC             0xFFFF0000
 #define TEEC_ERROR_BAD_PARAMETERS      0xFFFF0006
+#define TEEC_ERROR_OUT_OF_MEMORY       0xFFFF000C
 #define TEEC_ERROR_COMMUNICATION       0xFFFF000E
 
 #define TEEC_ORIGIN_COMMS              0x00000002
@@ -93,6 +94,18 @@ struct amdtee_shm_data {
        u32     buf_id;
 };
 
+/**
+ * struct amdtee_ta_data - Keeps track of all TAs loaded in AMD Secure
+ *                        Processor
+ * @ta_handle: Handle to TA loaded in TEE
+ * @refcount:  Reference count for the loaded TA
+ */
+struct amdtee_ta_data {
+       struct list_head list_node;
+       u32 ta_handle;
+       u32 refcount;
+};
+
 #define LOWER_TWO_BYTE_MASK    0x0000FFFF
 
 /**
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
index 096dd4d92d39..07f36ac834c8 100644
--- a/drivers/tee/amdtee/call.c
+++ b/drivers/tee/amdtee/call.c
@@ -121,15 +121,69 @@ static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
        return ret;
 }
 
+static DEFINE_MUTEX(ta_refcount_mutex);
+static struct list_head ta_list = LIST_HEAD_INIT(ta_list);
+
+static u32 get_ta_refcount(u32 ta_handle)
+{
+       struct amdtee_ta_data *ta_data;
+       u32 count = 0;
+
+       /* Caller must hold a mutex */
+       list_for_each_entry(ta_data, &ta_list, list_node)
+               if (ta_data->ta_handle == ta_handle)
+                       return ++ta_data->refcount;
+
+       ta_data = kzalloc(sizeof(*ta_data), GFP_KERNEL);
+       if (ta_data) {
+               ta_data->ta_handle = ta_handle;
+               ta_data->refcount = 1;
+               count = ta_data->refcount;
+               list_add(&ta_data->list_node, &ta_list);
+       }
+
+       return count;
+}
+
+static u32 put_ta_refcount(u32 ta_handle)
+{
+       struct amdtee_ta_data *ta_data;
+       u32 count = 0;
+
+       /* Caller must hold a mutex */
+       list_for_each_entry(ta_data, &ta_list, list_node)
+               if (ta_data->ta_handle == ta_handle) {
+                       count = --ta_data->refcount;
+                       if (count == 0) {
+                               list_del(&ta_data->list_node);
+                               kfree(ta_data);
+                               break;
+                       }
+               }
+
+       return count;
+}
+
 int handle_unload_ta(u32 ta_handle)
 {
        struct tee_cmd_unload_ta cmd = {0};
-       u32 status;
+       u32 status, count;
        int ret;
 
        if (!ta_handle)
                return -EINVAL;
 
+       mutex_lock(&ta_refcount_mutex);
+
+       count = put_ta_refcount(ta_handle);
+
+       if (count) {
+               pr_debug("unload ta: not unloading %u count %u\n",
+                        ta_handle, count);
+               ret = -EBUSY;
+               goto unlock;
+       }
+
        cmd.ta_handle = ta_handle;
 
        ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
@@ -137,8 +191,12 @@ int handle_unload_ta(u32 ta_handle)
        if (!ret && status != 0) {
                pr_err("unload ta: status = 0x%x\n", status);
                ret = -EBUSY;
+       } else {
+               pr_debug("unloaded ta handle %u\n", ta_handle);
        }
 
+unlock:
+       mutex_unlock(&ta_refcount_mutex);
        return ret;
 }
 
@@ -340,7 +398,8 @@ int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
 
 int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
 {
-       struct tee_cmd_load_ta cmd = {0};
+       struct tee_cmd_unload_ta unload_cmd = {};
+       struct tee_cmd_load_ta load_cmd = {};
        phys_addr_t blob;
        int ret;
 
@@ -353,21 +412,36 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
                return -EINVAL;
        }
 
-       cmd.hi_addr = upper_32_bits(blob);
-       cmd.low_addr = lower_32_bits(blob);
-       cmd.size = size;
+       load_cmd.hi_addr = upper_32_bits(blob);
+       load_cmd.low_addr = lower_32_bits(blob);
+       load_cmd.size = size;
 
-       ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
-                                 sizeof(cmd), &arg->ret);
+       mutex_lock(&ta_refcount_mutex);
+
+       ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&load_cmd,
+                                 sizeof(load_cmd), &arg->ret);
        if (ret) {
                arg->ret_origin = TEEC_ORIGIN_COMMS;
                arg->ret = TEEC_ERROR_COMMUNICATION;
-       } else {
-               set_session_id(cmd.ta_handle, 0, &arg->session);
+       } else if (arg->ret == TEEC_SUCCESS) {
+               ret = get_ta_refcount(load_cmd.ta_handle);
+               if (!ret) {
+                       arg->ret_origin = TEEC_ORIGIN_COMMS;
+                       arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+
+                       /* Unload the TA on error */
+                       unload_cmd.ta_handle = load_cmd.ta_handle;
+                       psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
+                                           (void *)&unload_cmd,
+                                           sizeof(unload_cmd), &ret);
+               } else {
+                       set_session_id(load_cmd.ta_handle, 0, &arg->session);
+               }
        }
+       mutex_unlock(&ta_refcount_mutex);
 
        pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
-                cmd.ta_handle, arg->ret_origin, arg->ret);
+                load_cmd.ta_handle, arg->ret_origin, arg->ret);
 
        return 0;
 }
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 8a6a8f30bb42..da6b88e80dc0 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -59,10 +59,9 @@ static void release_session(struct amdtee_session *sess)
                        continue;
 
                handle_close_session(sess->ta_handle, sess->session_info[i]);
+               handle_unload_ta(sess->ta_handle);
        }
 
-       /* Unload Trusted Application once all sessions are closed */
-       handle_unload_ta(sess->ta_handle);
        kfree(sess);
 }
 
@@ -224,8 +223,6 @@ static void destroy_session(struct kref *ref)
        struct amdtee_session *sess = container_of(ref, struct amdtee_session,
                                                   refcount);
 
-       /* Unload the TA from TEE */
-       handle_unload_ta(sess->ta_handle);
        mutex_lock(&session_list_mutex);
        list_del(&sess->list_node);
        mutex_unlock(&session_list_mutex);
@@ -238,7 +235,7 @@ int amdtee_open_session(struct tee_context *ctx,
 {
        struct amdtee_context_data *ctxdata = ctx->data;
        struct amdtee_session *sess = NULL;
-       u32 session_info;
+       u32 session_info, ta_handle;
        size_t ta_size;
        int rc, i;
        void *ta;
@@ -259,11 +256,14 @@ int amdtee_open_session(struct tee_context *ctx,
        if (arg->ret != TEEC_SUCCESS)
                goto out;
 
+       ta_handle = get_ta_handle(arg->session);
+
        mutex_lock(&session_list_mutex);
        sess = alloc_session(ctxdata, arg->session);
        mutex_unlock(&session_list_mutex);
 
        if (!sess) {
+               handle_unload_ta(ta_handle);
                rc = -ENOMEM;
                goto out;
        }
@@ -277,6 +277,7 @@ int amdtee_open_session(struct tee_context *ctx,
 
        if (i >= TEE_NUM_SESSIONS) {
                pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+               handle_unload_ta(ta_handle);
                kref_put(&sess->refcount, destroy_session);
                rc = -ENOMEM;
                goto out;
@@ -289,12 +290,13 @@ int amdtee_open_session(struct tee_context *ctx,
                spin_lock(&sess->lock);
                clear_bit(i, sess->sess_mask);
                spin_unlock(&sess->lock);
+               handle_unload_ta(ta_handle);
                kref_put(&sess->refcount, destroy_session);
                goto out;
        }
 
        sess->session_info[i] = session_info;
-       set_session_id(sess->ta_handle, i, &arg->session);
+       set_session_id(ta_handle, i, &arg->session);
 out:
        free_pages((u64)ta, get_order(ta_size));
        return rc;
@@ -329,6 +331,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
 
        /* Close the session */
        handle_close_session(ta_handle, session_info);
+       handle_unload_ta(ta_handle);
 
        kref_put(&sess->refcount, destroy_session);
 
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 319a1e701163..ddb8f9ecf307 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -79,16 +79,6 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
                                return rc;
                        p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
                        p->u.memref.shm = shm;
 
-                       /* Check that the memref is covered by the shm object */
-                       if (p->u.memref.size) {
-                               size_t o = p->u.memref.shm_offs +
-                                          p->u.memref.size - 1;
-
-                               rc = tee_shm_get_pa(shm, o, NULL);
-                               if (rc)
-                                       return rc;
-                       }
                        break;
                case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
                case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 10af3341e5ea..6956581ed7a4 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -125,7 +125,7 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
 {
        int i;
 
-       for (i = cpufreq_cdev->max_level; i >= 0; i--) {
+       for (i = cpufreq_cdev->max_level; i > 0; i--) {
                if (power >= cpufreq_cdev->em->table[i].power)
                        break;
        }
diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
index aaa07180ab48..645432ce6365 100644
--- a/drivers/thermal/gov_fair_share.c
+++ b/drivers/thermal/gov_fair_share.c
@@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
        int total_instance = 0;
        int cur_trip_level = get_trip_level(tz);
 
+       mutex_lock(&tz->lock);
+
        list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
                if (instance->trip != trip)
                        continue;
@@ -110,6 +112,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
                mutex_unlock(&instance->cdev->lock);
                thermal_cdev_update(cdev);
        }
 
+       mutex_unlock(&tz->lock);
+
        return 0;
 }
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index d8ce3a687b80..3c4c0516e58a 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -755,8 +755,10 @@ int __init init_common(struct tsens_priv *priv)
                for (i = VER_MAJOR; i <= VER_STEP; i++) {
                        priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map,
                                                              priv->fields[i]);
-                       if (IS_ERR(priv->rf[i]))
-                               return PTR_ERR(priv->rf[i]);
+                       if (IS_ERR(priv->rf[i])) {
+                               ret = PTR_ERR(priv->rf[i]);
+                               goto err_put_device;
+                       }
                }
                ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor);
                if (ret)
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 69ef12f852b7..5b76f9a1280d 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -704,14 +704,17 @@ static int thermal_of_populate_bind_params(struct device_node *np,
 
        count = of_count_phandle_with_args(np, "cooling-device",
                                           "#cooling-cells");
-       if (!count) {
+       if (count <= 0) {
                pr_err("Add a cooling_device property with at least one device\n");
+               ret = -ENOENT;
                goto end;
        }
 
        __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
-       if (!__tcbp)
+       if (!__tcbp) {
+               ret = -ENOMEM;
                goto end;
+       }
 
        for (i = 0; i < count; i++) {
                ret = of_parse_phandle_with_args(np, "cooling-device",
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 18b78ea110ef..ecda5e18d23f 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -970,6 +970,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
        if (!serial_isroot()) {
                if ((ss->baud_base != state->baud_base) ||
                    (ss->close_delay != port->close_delay) ||
+                   (ss->closing_wait != port->closing_wait) ||
                    (ss->xmit_fifo_size != state->xmit_fifo_size) ||
                    ((ss->flags & ~ASYNC_USR_MASK) !=
                     (port->flags & ~ASYNC_USR_MASK))) {
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 9f13f7d49dd7..f9f14104bd2c 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -2040,7 +2040,7 @@ static int moxa_get_serial_info(struct tty_struct *tty,
        ss->line = info->port.tty->index,
        ss->flags = info->port.flags,
        ss->baud_base = 921600,
-       ss->close_delay = info->port.close_delay;
+       ss->close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
        mutex_unlock(&info->port.mutex);
        return 0;
 }
@@ -2050,6 +2050,7 @@ static int moxa_set_serial_info(struct tty_struct *tty,
                struct serial_struct *ss)
 {
        struct moxa_port *info = tty->driver_data;
+       unsigned int close_delay;
 
        if (tty->index == MAX_PORTS)
                return -EINVAL;
@@ -2061,19 +2062,24 @@ static int moxa_set_serial_info(struct tty_struct *tty,
                        ss->baud_base != 921600)
                return -EPERM;
 
+       close_delay = msecs_to_jiffies(ss->close_delay * 10);
+
        mutex_lock(&info->port.mutex);
        if (!capable(CAP_SYS_ADMIN)) {
-               if (((ss->flags & ~ASYNC_USR_MASK) !=
+               if (close_delay != info->port.close_delay ||
+                   ss->type != info->type ||
+                   ((ss->flags & ~ASYNC_USR_MASK) !=
                     (info->port.flags & ~ASYNC_USR_MASK))) {
                        mutex_unlock(&info->port.mutex);
                        return -EPERM;
                }
-       }
-       info->port.close_delay = ss->close_delay * HZ / 100;
+       } else {
+               info->port.close_delay = close_delay;
 
-       MoxaSetFifo(info, ss->type == PORT_16550A);
+               MoxaSetFifo(info, ss->type == PORT_16550A);
 
-       info->type = ss->type;
+               info->type = ss->type;
+       }
        mutex_unlock(&info->port.mutex);
        return 0;
 }
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 4203b64bccdb..2d8e76263a25 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -1208,19 +1208,26 @@ static int mxser_get_serial_info(struct tty_struct *tty,
 {
        struct mxser_port *info = tty->driver_data;
        struct tty_port *port = &info->port;
+       unsigned int closing_wait, close_delay;
 
        if (tty->index == MXSER_PORTS)
                return -ENOTTY;
 
        mutex_lock(&port->mutex);
+
+       close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
+       closing_wait = info->port.closing_wait;
+       if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
+               closing_wait = jiffies_to_msecs(closing_wait) / 10;
+
        ss->type = info->type,
        ss->line = tty->index,
        ss->port = info->ioaddr,
        ss->irq = info->board->irq,
        ss->flags = info->port.flags,
        ss->baud_base = info->baud_base,
-       ss->close_delay = info->port.close_delay,
-       ss->closing_wait = info->port.closing_wait,
+       ss->close_delay = close_delay;
+       ss->closing_wait = closing_wait;
        ss->custom_divisor = info->custom_divisor,
        mutex_unlock(&port->mutex);
        return 0;
@@ -1233,7 +1240,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
        struct tty_port *port = &info->port;
        speed_t baud;
        unsigned long sl_flags;
-       unsigned int flags;
+       unsigned int flags, close_delay, closing_wait;
        int retval = 0;
 
        if (tty->index == MXSER_PORTS)
@@ -1255,9 +1262,15 @@ static int mxser_set_serial_info(struct tty_struct *tty,
 
        flags = port->flags & ASYNC_SPD_MASK;
 
+       close_delay = msecs_to_jiffies(ss->close_delay * 10);
+       closing_wait = ss->closing_wait;
+       if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
+               closing_wait = msecs_to_jiffies(closing_wait * 10);
+
        if (!capable(CAP_SYS_ADMIN)) {
                if ((ss->baud_base != info->baud_base) ||
-                               (ss->close_delay != info->port.close_delay) ||
+                               (close_delay != info->port.close_delay) ||
+                               (closing_wait != info->port.closing_wait) ||
                                ((ss->flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK))) {
                        mutex_unlock(&port->mutex);
                        return -EPERM;
@@ -1271,8 +1284,8 @@ static int mxser_set_serial_info(struct tty_struct *tty,
                 */
                port->flags = ((port->flags & ~ASYNC_FLAGS) |
                                (ss->flags & ASYNC_FLAGS));
-               port->close_delay = ss->close_delay * HZ / 100;
-               port->closing_wait = ss->closing_wait * HZ / 100;
+               port->close_delay = close_delay;
+               port->closing_wait = closing_wait;
                if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
                                (ss->baud_base != info->baud_base ||
                                ss->custom_divisor !=
@@ -1284,11 +1297,11 @@ static int mxser_set_serial_info(struct tty_struct *tty,
                        baud = ss->baud_base / ss->custom_divisor;
                        tty_encode_baud_rate(tty, baud, baud);
                }
-       }
-
-       info->type = ss->type;
+               info->type = ss->type;
 
-       process_txrx_fifo(info);
+               process_txrx_fifo(info);
+       }
 
        if (tty_port_initialized(port)) {
                if (flags != (port->flags & ASYNC_SPD_MASK)) {
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 51dafc06f541..2406653d38b7 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2384,8 +2384,18 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
                /* Don't register device 0 - this is the control channel and not
                   a usable tty interface */
                base = mux_num_to_base(gsm); /* Base for this MUX */
-               for (i = 1; i < NUM_DLCI; i++)
-                       tty_register_device(gsm_tty_driver, base + i, NULL);
+               for (i = 1; i < NUM_DLCI; i++) {
+                       struct device *dev;
+
+                       dev = tty_register_device(gsm_tty_driver,
+                                                       base + i, NULL);
+                       if (IS_ERR(dev)) {
+                               for (i--; i >= 1; i--)
+                                       tty_unregister_device(gsm_tty_driver,
+                                                               base + i);
+                               return PTR_ERR(dev);
+                       }
+               }
        }
        return ret;
 }
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index 64842f3539e1..0b06770642cb 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -270,8 +270,8 @@ static int liteuart_probe(struct platform_device *pdev)
 
        /* get membase */
        port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
-       if (!port->membase)
-               return -ENXIO;
+       if (IS_ERR(port->membase))
+               return PTR_ERR(port->membase);
 
        /* values not from device tree */
        port->dev = &pdev->dev;
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index e0c00a1b0763..51b0ecabf2ec 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -818,9 +818,6 @@ static int mvebu_uart_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       if (!match)
-               return -ENODEV;
-
        /* Assume that all UART ports have a DT alias or none has */
        id = of_alias_get_id(pdev->dev.of_node, "serial");
        if (!pdev->dev.of_node || id < 0)
57197 diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
57198 index 76b94d0ff586..84e8158088cd 100644
57199 --- a/drivers/tty/serial/omap-serial.c
57200 +++ b/drivers/tty/serial/omap-serial.c
57201 @@ -159,6 +159,8 @@ struct uart_omap_port {
57202         u32                     calc_latency;
57203         struct work_struct      qos_work;
57204         bool                    is_suspending;
57206 +       unsigned int            rs485_tx_filter_count;
57207  };
57209  #define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
57210 @@ -302,7 +304,8 @@ static void serial_omap_stop_tx(struct uart_port *port)
57211                         serial_out(up, UART_OMAP_SCR, up->scr);
57212                         res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
57213                                 1 : 0;
57214 -                       if (gpiod_get_value(up->rts_gpiod) != res) {
57215 +                       if (up->rts_gpiod &&
57216 +                           gpiod_get_value(up->rts_gpiod) != res) {
57217                                 if (port->rs485.delay_rts_after_send > 0)
57218                                         mdelay(
57219                                         port->rs485.delay_rts_after_send);
57220 @@ -328,19 +331,6 @@ static void serial_omap_stop_tx(struct uart_port *port)
57221                 serial_out(up, UART_IER, up->ier);
57222         }
57224 -       if ((port->rs485.flags & SER_RS485_ENABLED) &&
57225 -           !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
57226 -               /*
57227 -                * Empty the RX FIFO, we are not interested in anything
57228 -                * received during the half-duplex transmission.
57229 -                */
57230 -               serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_RCVR);
57231 -               /* Re-enable RX interrupts */
57232 -               up->ier |= UART_IER_RLSI | UART_IER_RDI;
57233 -               up->port.read_status_mask |= UART_LSR_DR;
57234 -               serial_out(up, UART_IER, up->ier);
57235 -       }
57237         pm_runtime_mark_last_busy(up->dev);
57238         pm_runtime_put_autosuspend(up->dev);
57240 @@ -366,6 +356,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
57241                 serial_out(up, UART_TX, up->port.x_char);
57242                 up->port.icount.tx++;
57243                 up->port.x_char = 0;
57244 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
57245 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
57246 +                       up->rs485_tx_filter_count++;
57248                 return;
57249         }
57250         if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
57251 @@ -377,6 +371,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
57252                 serial_out(up, UART_TX, xmit->buf[xmit->tail]);
57253                 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
57254                 up->port.icount.tx++;
57255 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
57256 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
57257 +                       up->rs485_tx_filter_count++;
57259                 if (uart_circ_empty(xmit))
57260                         break;
57261         } while (--count > 0);
57262 @@ -411,7 +409,7 @@ static void serial_omap_start_tx(struct uart_port *port)
57264                 /* if rts not already enabled */
57265                 res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
57266 -               if (gpiod_get_value(up->rts_gpiod) != res) {
57267 +               if (up->rts_gpiod && gpiod_get_value(up->rts_gpiod) != res) {
57268                         gpiod_set_value(up->rts_gpiod, res);
57269                         if (port->rs485.delay_rts_before_send > 0)
57270                                 mdelay(port->rs485.delay_rts_before_send);
57271 @@ -420,7 +418,7 @@ static void serial_omap_start_tx(struct uart_port *port)
57273         if ((port->rs485.flags & SER_RS485_ENABLED) &&
57274             !(port->rs485.flags & SER_RS485_RX_DURING_TX))
57275 -               serial_omap_stop_rx(port);
57276 +               up->rs485_tx_filter_count = 0;
57278         serial_omap_enable_ier_thri(up);
57279         pm_runtime_mark_last_busy(up->dev);
57280 @@ -491,8 +489,13 @@ static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
57281          * Read one data character out to avoid stalling the receiver according
57282          * to the table 23-246 of the omap4 TRM.
57283          */
57284 -       if (likely(lsr & UART_LSR_DR))
57285 +       if (likely(lsr & UART_LSR_DR)) {
57286                 serial_in(up, UART_RX);
57287 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
57288 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
57289 +                   up->rs485_tx_filter_count)
57290 +                       up->rs485_tx_filter_count--;
57291 +       }
57293         up->port.icount.rx++;
57294         flag = TTY_NORMAL;
57295 @@ -543,6 +546,13 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
57296                 return;
57298         ch = serial_in(up, UART_RX);
57299 +       if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
57300 +           !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
57301 +           up->rs485_tx_filter_count) {
57302 +               up->rs485_tx_filter_count--;
57303 +               return;
57304 +       }
57306         flag = TTY_NORMAL;
57307         up->port.icount.rx++;
57309 @@ -1407,18 +1417,13 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
57310         /* store new config */
57311         port->rs485 = *rs485;
57313 -       /*
57314 -        * Just as a precaution, only allow rs485
57315 -        * to be enabled if the gpio pin is valid
57316 -        */
57317         if (up->rts_gpiod) {
57318                 /* enable / disable rts */
57319                 val = (port->rs485.flags & SER_RS485_ENABLED) ?
57320                         SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
57321                 val = (port->rs485.flags & val) ? 1 : 0;
57322                 gpiod_set_value(up->rts_gpiod, val);
57323 -       } else
57324 -               port->rs485.flags &= ~SER_RS485_ENABLED;
57325 +       }
57327         /* Enable interrupts */
57328         up->ier = mode;
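
A minimal user-space sketch of the echo-filter scheme the omap-serial hunks above introduce: on a half-duplex RS-485 link every transmitted byte is echoed back on RX, so instead of disabling the receiver (the old serial_omap_stop_rx()/FIFO-flush approach) the driver counts bytes as they enter the TX FIFO and silently drops that many received characters. All identifiers below are hypothetical; only the counting idea is taken from the patch.

#include <stdio.h>

struct half_duplex_port {
	unsigned int tx_filter_count;	/* echoes still expected on RX */
	int rx_during_tx;		/* 0: filter our own echo out */
};

static void port_send(struct half_duplex_port *p, char c)
{
	(void)c;			/* hardware would transmit here */
	if (!p->rx_during_tx)
		p->tx_filter_count++;	/* one more echo to discard */
}

static int port_receive(struct half_duplex_port *p, char c)
{
	if (!p->rx_during_tx && p->tx_filter_count) {
		p->tx_filter_count--;	/* this byte is our own echo */
		return -1;		/* drop it */
	}
	putchar(c);			/* real remote data */
	return 0;
}

int main(void)
{
	struct half_duplex_port p = { 0, 0 };

	port_send(&p, 'A');	/* queues one echo for discard */
	port_receive(&p, 'A');	/* echo: dropped */
	port_receive(&p, 'B');	/* remote byte: delivered */
	return 0;
}

Unlike the removed FIFO flush, this keeps the receiver enabled throughout, so data arriving right after the transmission completes is no longer flushed away with the echo.
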
57329 diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
57330 index f86ec2d2635b..9adb8362578c 100644
57331 --- a/drivers/tty/serial/sc16is7xx.c
57332 +++ b/drivers/tty/serial/sc16is7xx.c
57333 @@ -1196,7 +1196,7 @@ static int sc16is7xx_probe(struct device *dev,
57334         ret = regmap_read(regmap,
57335                           SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
57336         if (ret < 0)
57337 -               return ret;
57338 +               return -EPROBE_DEFER;
57340         /* Alloc port structure */
57341         s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
57342 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
57343 index ba31e97d3d96..43f02ed055d5 100644
57344 --- a/drivers/tty/serial/serial_core.c
57345 +++ b/drivers/tty/serial/serial_core.c
57346 @@ -1305,7 +1305,7 @@ static int uart_set_rs485_config(struct uart_port *port,
57347         unsigned long flags;
57349         if (!port->rs485_config)
57350 -               return -ENOIOCTLCMD;
57351 +               return -ENOTTY;
57353         if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
57354                 return -EFAULT;
57355 @@ -1329,7 +1329,7 @@ static int uart_get_iso7816_config(struct uart_port *port,
57356         struct serial_iso7816 aux;
57358         if (!port->iso7816_config)
57359 -               return -ENOIOCTLCMD;
57360 +               return -ENOTTY;
57362         spin_lock_irqsave(&port->lock, flags);
57363         aux = port->iso7816;
57364 @@ -1349,7 +1349,7 @@ static int uart_set_iso7816_config(struct uart_port *port,
57365         unsigned long flags;
57367         if (!port->iso7816_config)
57368 -               return -ENOIOCTLCMD;
57369 +               return -ENOTTY;
57371         if (copy_from_user(&iso7816, iso7816_user, sizeof(*iso7816_user)))
57372                 return -EFAULT;
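
The serial_core hunks swap -ENOIOCTLCMD for -ENOTTY when a port has no rs485_config/iso7816_config hook. -ENOIOCTLCMD is a kernel-internal sentinel whose translation varies by call path, while -ENOTTY is the errno userspace conventionally gets for "inappropriate ioctl for device", so returning it directly gives a consistent result. A hedged sketch of the pattern (struct and handler are invented):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct port_ops {
	int (*rs485_config)(void *port, const void *cfg);
};

static int set_rs485(const struct port_ops *ops, void *port, const void *cfg)
{
	if (!ops->rs485_config)
		return -ENOTTY;		/* feature not implemented */
	return ops->rs485_config(port, cfg);
}

int main(void)
{
	struct port_ops ops = { .rs485_config = NULL };
	int ret = set_rs485(&ops, NULL, NULL);

	printf("ret=%d (%s)\n", ret, strerror(-ret));
	return 0;
}
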
57373 diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
57374 index b3675cf25a69..99dfa884cbef 100644
57375 --- a/drivers/tty/serial/stm32-usart.c
57376 +++ b/drivers/tty/serial/stm32-usart.c
57377 @@ -214,12 +214,14 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
57378         struct tty_port *tport = &port->state->port;
57379         struct stm32_port *stm32_port = to_stm32_port(port);
57380         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
57381 -       unsigned long c;
57382 +       unsigned long c, flags;
57383         u32 sr;
57384         char flag;
57386 -       if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
57387 -               pm_wakeup_event(tport->tty->dev, 0);
57388 +       if (threaded)
57389 +               spin_lock_irqsave(&port->lock, flags);
57390 +       else
57391 +               spin_lock(&port->lock);
57393         while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
57394                                       threaded)) {
57395 @@ -276,9 +278,12 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
57396                 uart_insert_char(port, sr, USART_SR_ORE, c, flag);
57397         }
57399 -       spin_unlock(&port->lock);
57400 +       if (threaded)
57401 +               spin_unlock_irqrestore(&port->lock, flags);
57402 +       else
57403 +               spin_unlock(&port->lock);
57405         tty_flip_buffer_push(tport);
57406 -       spin_lock(&port->lock);
57409  static void stm32_usart_tx_dma_complete(void *arg)
57410 @@ -286,12 +291,16 @@ static void stm32_usart_tx_dma_complete(void *arg)
57411         struct uart_port *port = arg;
57412         struct stm32_port *stm32port = to_stm32_port(port);
57413         const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
57414 +       unsigned long flags;
57416 +       dmaengine_terminate_async(stm32port->tx_ch);
57417         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
57418         stm32port->tx_dma_busy = false;
57420         /* Let's see if we have pending data to send */
57421 +       spin_lock_irqsave(&port->lock, flags);
57422         stm32_usart_transmit_chars(port);
57423 +       spin_unlock_irqrestore(&port->lock, flags);
57426  static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
57427 @@ -455,29 +464,34 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
57428  static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
57430         struct uart_port *port = ptr;
57431 +       struct tty_port *tport = &port->state->port;
57432         struct stm32_port *stm32_port = to_stm32_port(port);
57433         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
57434         u32 sr;
57436 -       spin_lock(&port->lock);
57438         sr = readl_relaxed(port->membase + ofs->isr);
57440         if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
57441                 writel_relaxed(USART_ICR_RTOCF,
57442                                port->membase + ofs->icr);
57444 -       if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG)
57445 +       if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
57446 +               /* Clear wake up flag and disable wake up interrupt */
57447                 writel_relaxed(USART_ICR_WUCF,
57448                                port->membase + ofs->icr);
57449 +               stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
57450 +               if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
57451 +                       pm_wakeup_event(tport->tty->dev, 0);
57452 +       }
57454         if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
57455                 stm32_usart_receive_chars(port, false);
57457 -       if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch))
57458 +       if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
57459 +               spin_lock(&port->lock);
57460                 stm32_usart_transmit_chars(port);
57462 -       spin_unlock(&port->lock);
57463 +               spin_unlock(&port->lock);
57464 +       }
57466         if (stm32_port->rx_ch)
57467                 return IRQ_WAKE_THREAD;
57468 @@ -490,13 +504,9 @@ static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
57469         struct uart_port *port = ptr;
57470         struct stm32_port *stm32_port = to_stm32_port(port);
57472 -       spin_lock(&port->lock);
57474         if (stm32_port->rx_ch)
57475                 stm32_usart_receive_chars(port, true);
57477 -       spin_unlock(&port->lock);
57479         return IRQ_HANDLED;
57482 @@ -505,7 +515,10 @@ static unsigned int stm32_usart_tx_empty(struct uart_port *port)
57483         struct stm32_port *stm32_port = to_stm32_port(port);
57484         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
57486 -       return readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE;
57487 +       if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
57488 +               return TIOCSER_TEMT;
57490 +       return 0;
57493  static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
57494 @@ -634,6 +647,7 @@ static int stm32_usart_startup(struct uart_port *port)
57496         struct stm32_port *stm32_port = to_stm32_port(port);
57497         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
57498 +       const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
57499         const char *name = to_platform_device(port->dev)->name;
57500         u32 val;
57501         int ret;
57502 @@ -646,21 +660,10 @@ static int stm32_usart_startup(struct uart_port *port)
57504         /* RX FIFO Flush */
57505         if (ofs->rqr != UNDEF_REG)
57506 -               stm32_usart_set_bits(port, ofs->rqr, USART_RQR_RXFRQ);
57508 -       /* Tx and RX FIFO configuration */
57509 -       if (stm32_port->fifoen) {
57510 -               val = readl_relaxed(port->membase + ofs->cr3);
57511 -               val &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
57512 -               val |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
57513 -               val |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
57514 -               writel_relaxed(val, port->membase + ofs->cr3);
57515 -       }
57516 +               writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
57518 -       /* RX FIFO enabling */
57519 -       val = stm32_port->cr1_irq | USART_CR1_RE;
57520 -       if (stm32_port->fifoen)
57521 -               val |= USART_CR1_FIFOEN;
57522 +       /* RX enabling */
57523 +       val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
57524         stm32_usart_set_bits(port, ofs->cr1, val);
57526         return 0;
57527 @@ -691,6 +694,11 @@ static void stm32_usart_shutdown(struct uart_port *port)
57528         if (ret)
57529                 dev_err(port->dev, "Transmission is not complete\n");
57531 +       /* flush RX & TX FIFO */
57532 +       if (ofs->rqr != UNDEF_REG)
57533 +               writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
57534 +                              port->membase + ofs->rqr);
57536         stm32_usart_clr_bits(port, ofs->cr1, val);
57538         free_irq(port->irq, port);
57539 @@ -737,8 +745,9 @@ static void stm32_usart_set_termios(struct uart_port *port,
57540         unsigned int baud, bits;
57541         u32 usartdiv, mantissa, fraction, oversampling;
57542         tcflag_t cflag = termios->c_cflag;
57543 -       u32 cr1, cr2, cr3;
57544 +       u32 cr1, cr2, cr3, isr;
57545         unsigned long flags;
57546 +       int ret;
57548         if (!stm32_port->hw_flow_control)
57549                 cflag &= ~CRTSCTS;
57550 @@ -747,21 +756,36 @@ static void stm32_usart_set_termios(struct uart_port *port,
57552         spin_lock_irqsave(&port->lock, flags);
57554 +       ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
57555 +                                               isr,
57556 +                                               (isr & USART_SR_TC),
57557 +                                               10, 100000);
57559 +       /* Send the TC error message only when ISR_TC is not set. */
57560 +       if (ret)
57561 +               dev_err(port->dev, "Transmission is not complete\n");
57563         /* Stop serial port and reset value */
57564         writel_relaxed(0, port->membase + ofs->cr1);
57566         /* flush RX & TX FIFO */
57567         if (ofs->rqr != UNDEF_REG)
57568 -               stm32_usart_set_bits(port, ofs->rqr,
57569 -                                    USART_RQR_TXFRQ | USART_RQR_RXFRQ);
57570 +               writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
57571 +                              port->membase + ofs->rqr);
57573         cr1 = USART_CR1_TE | USART_CR1_RE;
57574         if (stm32_port->fifoen)
57575                 cr1 |= USART_CR1_FIFOEN;
57576         cr2 = 0;
57578 +       /* Tx and RX FIFO configuration */
57579         cr3 = readl_relaxed(port->membase + ofs->cr3);
57580 -       cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTCFG_MASK | USART_CR3_RXFTIE
57581 -               | USART_CR3_TXFTCFG_MASK;
57582 +       cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
57583 +       if (stm32_port->fifoen) {
57584 +               cr3 &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
57585 +               cr3 |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
57586 +               cr3 |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
57587 +       }
57589         if (cflag & CSTOPB)
57590                 cr2 |= USART_CR2_STOP_2B;
57591 @@ -817,12 +841,6 @@ static void stm32_usart_set_termios(struct uart_port *port,
57592                 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
57593         }
57595 -       /* Handle modem control interrupts */
57596 -       if (UART_ENABLE_MS(port, termios->c_cflag))
57597 -               stm32_usart_enable_ms(port);
57598 -       else
57599 -               stm32_usart_disable_ms(port);
57601         usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
57603         /*
57604 @@ -892,12 +910,24 @@ static void stm32_usart_set_termios(struct uart_port *port,
57605                 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
57606         }
57608 +       /* Configure wake up from low power on start bit detection */
57609 +       if (stm32_port->wakeirq > 0) {
57610 +               cr3 &= ~USART_CR3_WUS_MASK;
57611 +               cr3 |= USART_CR3_WUS_START_BIT;
57612 +       }
57614         writel_relaxed(cr3, port->membase + ofs->cr3);
57615         writel_relaxed(cr2, port->membase + ofs->cr2);
57616         writel_relaxed(cr1, port->membase + ofs->cr1);
57618         stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
57619         spin_unlock_irqrestore(&port->lock, flags);
57621 +       /* Handle modem control interrupts */
57622 +       if (UART_ENABLE_MS(port, termios->c_cflag))
57623 +               stm32_usart_enable_ms(port);
57624 +       else
57625 +               stm32_usart_disable_ms(port);
57628  static const char *stm32_usart_type(struct uart_port *port)
57629 @@ -1252,10 +1282,6 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
57630                 device_set_wakeup_enable(&pdev->dev, false);
57631         }
57633 -       ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
57634 -       if (ret)
57635 -               goto err_wirq;
57637         ret = stm32_usart_of_dma_rx_probe(stm32port, pdev);
57638         if (ret)
57639                 dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");
57640 @@ -1269,11 +1295,40 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
57641         pm_runtime_get_noresume(&pdev->dev);
57642         pm_runtime_set_active(&pdev->dev);
57643         pm_runtime_enable(&pdev->dev);
57645 +       ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
57646 +       if (ret)
57647 +               goto err_port;
57649         pm_runtime_put_sync(&pdev->dev);
57651         return 0;
57653 -err_wirq:
57654 +err_port:
57655 +       pm_runtime_disable(&pdev->dev);
57656 +       pm_runtime_set_suspended(&pdev->dev);
57657 +       pm_runtime_put_noidle(&pdev->dev);
57659 +       if (stm32port->rx_ch) {
57660 +               dmaengine_terminate_async(stm32port->rx_ch);
57661 +               dma_release_channel(stm32port->rx_ch);
57662 +       }
57664 +       if (stm32port->rx_dma_buf)
57665 +               dma_free_coherent(&pdev->dev,
57666 +                                 RX_BUF_L, stm32port->rx_buf,
57667 +                                 stm32port->rx_dma_buf);
57669 +       if (stm32port->tx_ch) {
57670 +               dmaengine_terminate_async(stm32port->tx_ch);
57671 +               dma_release_channel(stm32port->tx_ch);
57672 +       }
57674 +       if (stm32port->tx_dma_buf)
57675 +               dma_free_coherent(&pdev->dev,
57676 +                                 TX_BUF_L, stm32port->tx_buf,
57677 +                                 stm32port->tx_dma_buf);
57679         if (stm32port->wakeirq > 0)
57680                 dev_pm_clear_wake_irq(&pdev->dev);
57682 @@ -1295,11 +1350,20 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
57683         int err;
57685         pm_runtime_get_sync(&pdev->dev);
57686 +       err = uart_remove_one_port(&stm32_usart_driver, port);
57687 +       if (err)
57688 +               return(err);
57690 +       pm_runtime_disable(&pdev->dev);
57691 +       pm_runtime_set_suspended(&pdev->dev);
57692 +       pm_runtime_put_noidle(&pdev->dev);
57694         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
57696 -       if (stm32_port->rx_ch)
57697 +       if (stm32_port->rx_ch) {
57698 +               dmaengine_terminate_async(stm32_port->rx_ch);
57699                 dma_release_channel(stm32_port->rx_ch);
57700 +       }
57702         if (stm32_port->rx_dma_buf)
57703                 dma_free_coherent(&pdev->dev,
57704 @@ -1308,8 +1372,10 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
57706         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
57708 -       if (stm32_port->tx_ch)
57709 +       if (stm32_port->tx_ch) {
57710 +               dmaengine_terminate_async(stm32_port->tx_ch);
57711                 dma_release_channel(stm32_port->tx_ch);
57712 +       }
57714         if (stm32_port->tx_dma_buf)
57715                 dma_free_coherent(&pdev->dev,
57716 @@ -1323,12 +1389,7 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
57718         stm32_usart_deinit_port(stm32_port);
57720 -       err = uart_remove_one_port(&stm32_usart_driver, port);
57722 -       pm_runtime_disable(&pdev->dev);
57723 -       pm_runtime_put_noidle(&pdev->dev);
57725 -       return err;
57726 +       return 0;
57729  #ifdef CONFIG_SERIAL_STM32_CONSOLE
57730 @@ -1436,23 +1497,20 @@ static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
57732         struct stm32_port *stm32_port = to_stm32_port(port);
57733         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
57734 -       const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
57735 -       u32 val;
57737         if (stm32_port->wakeirq <= 0)
57738                 return;
57740 +       /*
57741 +        * Enable low-power wake-up and wake-up irq if argument is set to
57742 +        * "enable", disable low-power wake-up and wake-up irq otherwise
57743 +        */
57744         if (enable) {
57745 -               stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
57746                 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
57747 -               val = readl_relaxed(port->membase + ofs->cr3);
57748 -               val &= ~USART_CR3_WUS_MASK;
57749 -               /* Enable Wake up interrupt from low power on start bit */
57750 -               val |= USART_CR3_WUS_START_BIT | USART_CR3_WUFIE;
57751 -               writel_relaxed(val, port->membase + ofs->cr3);
57752 -               stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
57753 +               stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
57754         } else {
57755                 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
57756 +               stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
57757         }
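
Among the stm32-usart changes, the tx_empty hunk switches the "transmitter empty" test from TXE to TC. TXE only means the transmit data register can accept another byte while a frame may still be shifting out; TC additionally covers the shift register, which is what TIOCSER_TEMT is supposed to report. A sketch with made-up register bits:

#include <stdio.h>

#define SR_TXE	(1u << 7)	/* TX data register empty */
#define SR_TC	(1u << 6)	/* transmission fully complete */

static int tx_empty(unsigned int isr)
{
	return (isr & SR_TC) ? 1 : 0;	/* empty only when drained */
}

int main(void)
{
	printf("%d\n", tx_empty(SR_TXE));		/* 0: still shifting */
	printf("%d\n", tx_empty(SR_TXE | SR_TC));	/* 1: truly idle */
	return 0;
}
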
57760 diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
57761 index cb4f327c46db..94b568aa46bb 100644
57762 --- a/drivers/tty/serial/stm32-usart.h
57763 +++ b/drivers/tty/serial/stm32-usart.h
57764 @@ -127,9 +127,6 @@ struct stm32_usart_info stm32h7_info = {
57765  /* Dummy bits */
57766  #define USART_SR_DUMMY_RX      BIT(16)
57768 -/* USART_ICR (F7) */
57769 -#define USART_CR_TC            BIT(6)
57771  /* USART_DR */
57772  #define USART_DR_MASK          GENMASK(8, 0)
57774 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
57775 index 391bada4cedb..adbcbfa11b29 100644
57776 --- a/drivers/tty/tty_io.c
57777 +++ b/drivers/tty/tty_io.c
57778 @@ -2530,14 +2530,14 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
57779   *     @p: pointer to result
57780   *
57781   *     Obtain the modem status bits from the tty driver if the feature
57782 - *     is supported. Return -EINVAL if it is not available.
57783 + *     is supported. Return -ENOTTY if it is not available.
57784   *
57785   *     Locking: none (up to the driver)
57786   */
57788  static int tty_tiocmget(struct tty_struct *tty, int __user *p)
57790 -       int retval = -EINVAL;
57791 +       int retval = -ENOTTY;
57793         if (tty->ops->tiocmget) {
57794                 retval = tty->ops->tiocmget(tty);
57795 @@ -2555,7 +2555,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
57796   *     @p: pointer to desired bits
57797   *
57798   *     Set the modem status bits from the tty driver if the feature
57799 - *     is supported. Return -EINVAL if it is not available.
57800 + *     is supported. Return -ENOTTY if it is not available.
57801   *
57802   *     Locking: none (up to the driver)
57803   */
57804 @@ -2567,7 +2567,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
57805         unsigned int set, clear, val;
57807         if (tty->ops->tiocmset == NULL)
57808 -               return -EINVAL;
57809 +               return -ENOTTY;
57811         retval = get_user(val, p);
57812         if (retval)
57813 @@ -2607,7 +2607,7 @@ int tty_get_icount(struct tty_struct *tty,
57814         if (tty->ops->get_icount)
57815                 return tty->ops->get_icount(tty, icount);
57816         else
57817 -               return -EINVAL;
57818 +               return -ENOTTY;
57820  EXPORT_SYMBOL_GPL(tty_get_icount);
57822 diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
57823 index 4de1c6ddb8ff..803da2d111c8 100644
57824 --- a/drivers/tty/tty_ioctl.c
57825 +++ b/drivers/tty/tty_ioctl.c
57826 @@ -774,8 +774,8 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
57827         case TCSETX:
57828         case TCSETXW:
57829         case TCSETXF:
57830 -               return -EINVAL;
57831 -#endif         
57832 +               return -ENOTTY;
57833 +#endif
57834         case TIOCGSOFTCAR:
57835                 copy_termios(real_tty, &kterm);
57836                 ret = put_user((kterm.c_cflag & CLOCAL) ? 1 : 0,
57837 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
57838 index 284b07224c55..53cbf2c3f033 100644
57839 --- a/drivers/tty/vt/vt.c
57840 +++ b/drivers/tty/vt/vt.c
57841 @@ -1171,7 +1171,7 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
57842         /* Resizes the resolution of the display adapater */
57843         int err = 0;
57845 -       if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize)
57846 +       if (vc->vc_sw->con_resize)
57847                 err = vc->vc_sw->con_resize(vc, width, height, user);
57849         return err;
57850 @@ -1381,6 +1381,7 @@ struct vc_data *vc_deallocate(unsigned int currcons)
57851                 atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
57852                 vcs_remove_sysfs(currcons);
57853                 visual_deinit(vc);
57854 +               con_free_unimap(vc);
57855                 put_pid(vc->vt_pid);
57856                 vc_uniscr_set(vc, NULL);
57857                 kfree(vc->vc_screenbuf);
57858 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
57859 index 89aeaf3c1bca..0e0cd9e9e589 100644
57860 --- a/drivers/tty/vt/vt_ioctl.c
57861 +++ b/drivers/tty/vt/vt_ioctl.c
57862 @@ -671,21 +671,58 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
57863         if (copy_from_user(&v, cs, sizeof(struct vt_consize)))
57864                 return -EFAULT;
57866 -       if (v.v_vlin)
57867 -               pr_info_once("\"struct vt_consize\"->v_vlin is ignored. Please report if you need this.\n");
57868 -       if (v.v_clin)
57869 -               pr_info_once("\"struct vt_consize\"->v_clin is ignored. Please report if you need this.\n");
57870 +       /* FIXME: Should check the copies properly */
57871 +       if (!v.v_vlin)
57872 +               v.v_vlin = vc->vc_scan_lines;
57874 +       if (v.v_clin) {
57875 +               int rows = v.v_vlin / v.v_clin;
57876 +               if (v.v_rows != rows) {
57877 +                       if (v.v_rows) /* Parameters don't add up */
57878 +                               return -EINVAL;
57879 +                       v.v_rows = rows;
57880 +               }
57881 +       }
57883 +       if (v.v_vcol && v.v_ccol) {
57884 +               int cols = v.v_vcol / v.v_ccol;
57885 +               if (v.v_cols != cols) {
57886 +                       if (v.v_cols)
57887 +                               return -EINVAL;
57888 +                       v.v_cols = cols;
57889 +               }
57890 +       }
57892 +       if (v.v_clin > 32)
57893 +               return -EINVAL;
57895 -       console_lock();
57896         for (i = 0; i < MAX_NR_CONSOLES; i++) {
57897 -               vc = vc_cons[i].d;
57898 +               struct vc_data *vcp;
57900 -               if (vc) {
57901 -                       vc->vc_resize_user = 1;
57902 -                       vc_resize(vc, v.v_cols, v.v_rows);
57903 +               if (!vc_cons[i].d)
57904 +                       continue;
57905 +               console_lock();
57906 +               vcp = vc_cons[i].d;
57907 +               if (vcp) {
57908 +                       int ret;
57909 +                       int save_scan_lines = vcp->vc_scan_lines;
57910 +                       int save_cell_height = vcp->vc_cell_height;
57912 +                       if (v.v_vlin)
57913 +                               vcp->vc_scan_lines = v.v_vlin;
57914 +                       if (v.v_clin)
57915 +                               vcp->vc_cell_height = v.v_clin;
57916 +                       vcp->vc_resize_user = 1;
57917 +                       ret = vc_resize(vcp, v.v_cols, v.v_rows);
57918 +                       if (ret) {
57919 +                               vcp->vc_scan_lines = save_scan_lines;
57920 +                               vcp->vc_cell_height = save_cell_height;
57921 +                               console_unlock();
57922 +                               return ret;
57923 +                       }
57924                 }
57925 +               console_unlock();
57926         }
57927 -       console_unlock();
57929         return 0;
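
The reworked vt_resizex() above validates the derived rows/columns, bounds v_clin, and, per console, saves vc_scan_lines/vc_cell_height so a failed vc_resize() can restore them under console_lock. The save-and-roll-back shape, with invented types and a stubbed resize:

#include <stdio.h>

struct console {
	int scan_lines;
	int cell_height;
};

static int do_resize(struct console *c, int cols, int rows)
{
	(void)c; (void)cols; (void)rows;
	return -1;		/* simulate a resize failure */
}

static int resize_with_rollback(struct console *c, int cols, int rows,
				int vlin, int clin)
{
	int save_scan = c->scan_lines;
	int save_cell = c->cell_height;
	int ret;

	if (vlin)
		c->scan_lines = vlin;
	if (clin)
		c->cell_height = clin;

	ret = do_resize(c, cols, rows);
	if (ret) {
		c->scan_lines = save_scan;	/* undo on error */
		c->cell_height = save_cell;
	}
	return ret;
}

int main(void)
{
	struct console c = { 480, 16 };

	resize_with_rollback(&c, 80, 25, 400, 8);
	printf("%d %d\n", c.scan_lines, c.cell_height);	/* 480 16 */
	return 0;
}

The console is never left with half-applied geometry, which is the whole point of restoring both fields together before returning the error.
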
57931 diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
57932 index 0330ba99730e..652fe2547587 100644
57933 --- a/drivers/uio/uio_hv_generic.c
57934 +++ b/drivers/uio/uio_hv_generic.c
57935 @@ -291,13 +291,15 @@ hv_uio_probe(struct hv_device *dev,
57936         pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
57937         if (pdata->recv_buf == NULL) {
57938                 ret = -ENOMEM;
57939 -               goto fail_close;
57940 +               goto fail_free_ring;
57941         }
57943         ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
57944                                     RECV_BUFFER_SIZE, &pdata->recv_gpadl);
57945 -       if (ret)
57946 +       if (ret) {
57947 +               vfree(pdata->recv_buf);
57948                 goto fail_close;
57949 +       }
57951         /* put Global Physical Address Label in name */
57952         snprintf(pdata->recv_name, sizeof(pdata->recv_name),
57953 @@ -316,8 +318,10 @@ hv_uio_probe(struct hv_device *dev,
57955         ret = vmbus_establish_gpadl(channel, pdata->send_buf,
57956                                     SEND_BUFFER_SIZE, &pdata->send_gpadl);
57957 -       if (ret)
57958 +       if (ret) {
57959 +               vfree(pdata->send_buf);
57960                 goto fail_close;
57961 +       }
57963         snprintf(pdata->send_name, sizeof(pdata->send_name),
57964                  "send:%u", pdata->send_gpadl);
57965 @@ -347,6 +351,8 @@ hv_uio_probe(struct hv_device *dev,
57967  fail_close:
57968         hv_uio_cleanup(dev, pdata);
57969 +fail_free_ring:
57970 +       vmbus_free_ring(dev->channel);
57972         return ret;
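
The hv_uio_probe() fixes are classic error-path accounting: a failed GPADL setup must free the buffer that was just allocated, and an early failure must now unwind the ring as well, each through a label that frees exactly what exists at that point. The shape of that pattern with plain heap allocations (everything here is illustrative):

#include <stdlib.h>

static int probe(void)
{
	void *ring, *recv_buf;
	int ret;

	ring = malloc(4096);
	if (!ring)
		return -1;

	recv_buf = malloc(4096);
	if (!recv_buf) {
		ret = -1;
		goto fail_free_ring;	/* ring exists, buffer does not */
	}

	/* ... later failures free recv_buf first, then unwind the ring ... */

	free(recv_buf);			/* teardown elided to frees here */
	free(ring);
	return 0;

fail_free_ring:
	free(ring);
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}
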
57974 diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
57975 index c7d681fef198..3bb0b0075467 100644
57976 --- a/drivers/uio/uio_pci_generic.c
57977 +++ b/drivers/uio/uio_pci_generic.c
57978 @@ -82,7 +82,7 @@ static int probe(struct pci_dev *pdev,
57979         }
57981         if (pdev->irq && !pci_intx_mask_supported(pdev))
57982 -               return -ENOMEM;
57983 +               return -ENODEV;
57985         gdev = devm_kzalloc(&pdev->dev, sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
57986         if (!gdev)
57987 diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
57988 index d7d4bdd57f46..56707b6b0f57 100644
57989 --- a/drivers/usb/cdns3/cdnsp-gadget.c
57990 +++ b/drivers/usb/cdns3/cdnsp-gadget.c
57991 @@ -727,7 +727,7 @@ int cdnsp_reset_device(struct cdnsp_device *pdev)
57992          * are in Disabled state.
57993          */
57994         for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
57995 -               pdev->eps[i].ep_state |= EP_STOPPED;
57996 +               pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED;
57998         trace_cdnsp_handle_cmd_reset_dev(slot_ctx);
58000 @@ -942,6 +942,7 @@ static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
58002         pep = to_cdnsp_ep(ep);
58003         pdev = pep->pdev;
58004 +       pep->ep_state &= ~EP_UNCONFIGURED;
58006         if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
58007                           "%s is already enabled\n", pep->name))
58008 @@ -1023,9 +1024,13 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
58009                 goto finish;
58010         }
58012 -       cdnsp_cmd_stop_ep(pdev, pep);
58013         pep->ep_state |= EP_DIS_IN_RROGRESS;
58014 -       cdnsp_cmd_flush_ep(pdev, pep);
58016 +       /* Endpoint was unconfigured by Reset Device command. */
58017 +       if (!(pep->ep_state & EP_UNCONFIGURED)) {
58018 +               cdnsp_cmd_stop_ep(pdev, pep);
58019 +               cdnsp_cmd_flush_ep(pdev, pep);
58020 +       }
58022         /* Remove all queued USB requests. */
58023         while (!list_empty(&pep->pending_list)) {
58024 @@ -1043,10 +1048,12 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
58026         cdnsp_endpoint_zero(pdev, pep);
58028 -       ret = cdnsp_update_eps_configuration(pdev, pep);
58029 +       if (!(pep->ep_state & EP_UNCONFIGURED))
58030 +               ret = cdnsp_update_eps_configuration(pdev, pep);
58032         cdnsp_free_endpoint_rings(pdev, pep);
58034 -       pep->ep_state &= ~EP_ENABLED;
58035 +       pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED);
58036         pep->ep_state |= EP_STOPPED;
58038  finish:
58039 diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
58040 index 6bbb26548c04..783ca8ffde00 100644
58041 --- a/drivers/usb/cdns3/cdnsp-gadget.h
58042 +++ b/drivers/usb/cdns3/cdnsp-gadget.h
58043 @@ -835,6 +835,7 @@ struct cdnsp_ep {
58044  #define EP_WEDGE               BIT(4)
58045  #define EP0_HALTED_STATUS      BIT(5)
58046  #define EP_HAS_STREAMS         BIT(6)
58047 +#define EP_UNCONFIGURED                BIT(7)
58049         bool skip;
58050  };
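
The cdnsp change marks every endpoint EP_UNCONFIGURED after a Reset Device command so the later ep_disable path can skip the stop/flush commands, which would otherwise be issued to endpoints the controller has already deconfigured. Minimal flag logic (bit values invented):

#include <stdio.h>

#define EP_ENABLED	(1u << 0)
#define EP_UNCONFIGURED	(1u << 7)

static void ep_disable(unsigned int *state)
{
	if (!(*state & EP_UNCONFIGURED)) {
		/* endpoint still live in hardware: stop and flush it */
		puts("stop+flush");
	}
	*state &= ~(EP_ENABLED | EP_UNCONFIGURED);
}

int main(void)
{
	unsigned int ep = EP_ENABLED | EP_UNCONFIGURED;	/* after reset */

	ep_disable(&ep);	/* prints nothing: commands skipped */
	return 0;
}
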
58051 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
58052 index 3fda1ec961d7..c103961c3fae 100644
58053 --- a/drivers/usb/class/cdc-acm.c
58054 +++ b/drivers/usb/class/cdc-acm.c
58055 @@ -929,8 +929,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
58057         struct acm *acm = tty->driver_data;
58059 -       ss->xmit_fifo_size = acm->writesize;
58060 -       ss->baud_base = le32_to_cpu(acm->line.dwDTERate);
58061 +       ss->line = acm->minor;
58062         ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
58063         ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
58064                                 ASYNC_CLOSING_WAIT_NONE :
58065 @@ -942,7 +941,6 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
58067         struct acm *acm = tty->driver_data;
58068         unsigned int closing_wait, close_delay;
58069 -       unsigned int old_closing_wait, old_close_delay;
58070         int retval = 0;
58072         close_delay = msecs_to_jiffies(ss->close_delay * 10);
58073 @@ -950,20 +948,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
58074                         ASYNC_CLOSING_WAIT_NONE :
58075                         msecs_to_jiffies(ss->closing_wait * 10);
58077 -       /* we must redo the rounding here, so that the values match */
58078 -       old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
58079 -       old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
58080 -                               ASYNC_CLOSING_WAIT_NONE :
58081 -                               jiffies_to_msecs(acm->port.closing_wait) / 10;
58083         mutex_lock(&acm->port.mutex);
58085         if (!capable(CAP_SYS_ADMIN)) {
58086 -               if ((ss->close_delay != old_close_delay) ||
58087 -                   (ss->closing_wait != old_closing_wait))
58088 +               if ((close_delay != acm->port.close_delay) ||
58089 +                   (closing_wait != acm->port.closing_wait))
58090                         retval = -EPERM;
58091 -               else
58092 -                       retval = -EOPNOTSUPP;
58093         } else {
58094                 acm->port.close_delay  = close_delay;
58095                 acm->port.closing_wait = closing_wait;
58096 @@ -1634,12 +1624,13 @@ static int acm_resume(struct usb_interface *intf)
58097         struct urb *urb;
58098         int rv = 0;
58100 -       acm_unpoison_urbs(acm);
58101         spin_lock_irq(&acm->write_lock);
58103         if (--acm->susp_count)
58104                 goto out;
58106 +       acm_unpoison_urbs(acm);
58108         if (tty_port_initialized(&acm->port)) {
58109                 rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
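
In set_serial_info(), cdc-acm now converts the user's close_delay/closing_wait once and compares those jiffies values against the stored port state, instead of round-tripping the stored values back through the lossy ms/10 conversion; the old comparison could misjudge a request purely through rounding. The one-canonical-unit idea (conversion factor illustrative):

#include <stdio.h>

#define HZ 250
static unsigned int ms_to_jiffies(unsigned int ms)
{
	return ms * HZ / 1000;
}

int main(void)
{
	unsigned int stored = ms_to_jiffies(505);	/* current setting */
	unsigned int wanted = ms_to_jiffies(505);	/* same request */

	/* compare canonical forms: rounding cannot split equal values */
	printf("%s\n", wanted == stored ? "allowed" : "-EPERM");
	return 0;
}
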
58111 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
58112 index 508b1c3f8b73..d1e4a7379beb 100644
58113 --- a/drivers/usb/class/cdc-wdm.c
58114 +++ b/drivers/usb/class/cdc-wdm.c
58115 @@ -321,12 +321,23 @@ static void wdm_int_callback(struct urb *urb)
58119 -static void kill_urbs(struct wdm_device *desc)
58120 +static void poison_urbs(struct wdm_device *desc)
58122         /* the order here is essential */
58123 -       usb_kill_urb(desc->command);
58124 -       usb_kill_urb(desc->validity);
58125 -       usb_kill_urb(desc->response);
58126 +       usb_poison_urb(desc->command);
58127 +       usb_poison_urb(desc->validity);
58128 +       usb_poison_urb(desc->response);
58131 +static void unpoison_urbs(struct wdm_device *desc)
58133 +       /*
58134 +        *  the order here is not essential
58135 +        *  it is symmetrical just to be nice
58136 +        */
58137 +       usb_unpoison_urb(desc->response);
58138 +       usb_unpoison_urb(desc->validity);
58139 +       usb_unpoison_urb(desc->command);
58142  static void free_urbs(struct wdm_device *desc)
58143 @@ -741,11 +752,12 @@ static int wdm_release(struct inode *inode, struct file *file)
58144         if (!desc->count) {
58145                 if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
58146                         dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
58147 -                       kill_urbs(desc);
58148 +                       poison_urbs(desc);
58149                         spin_lock_irq(&desc->iuspin);
58150                         desc->resp_count = 0;
58151                         spin_unlock_irq(&desc->iuspin);
58152                         desc->manage_power(desc->intf, 0);
58153 +                       unpoison_urbs(desc);
58154                 } else {
58155                         /* must avoid dev_printk here as desc->intf is invalid */
58156                         pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
58157 @@ -1037,9 +1049,9 @@ static void wdm_disconnect(struct usb_interface *intf)
58158         wake_up_all(&desc->wait);
58159         mutex_lock(&desc->rlock);
58160         mutex_lock(&desc->wlock);
58161 +       poison_urbs(desc);
58162         cancel_work_sync(&desc->rxwork);
58163         cancel_work_sync(&desc->service_outs_intr);
58164 -       kill_urbs(desc);
58165         mutex_unlock(&desc->wlock);
58166         mutex_unlock(&desc->rlock);
58168 @@ -1080,9 +1092,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
58169                 set_bit(WDM_SUSPENDING, &desc->flags);
58170                 spin_unlock_irq(&desc->iuspin);
58171                 /* callback submits work - order is essential */
58172 -               kill_urbs(desc);
58173 +               poison_urbs(desc);
58174                 cancel_work_sync(&desc->rxwork);
58175                 cancel_work_sync(&desc->service_outs_intr);
58176 +               unpoison_urbs(desc);
58177         }
58178         if (!PMSG_IS_AUTO(message)) {
58179                 mutex_unlock(&desc->wlock);
58180 @@ -1140,7 +1153,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
58181         wake_up_all(&desc->wait);
58182         mutex_lock(&desc->rlock);
58183         mutex_lock(&desc->wlock);
58184 -       kill_urbs(desc);
58185 +       poison_urbs(desc);
58186         cancel_work_sync(&desc->rxwork);
58187         cancel_work_sync(&desc->service_outs_intr);
58188         return 0;
58189 @@ -1151,6 +1164,7 @@ static int wdm_post_reset(struct usb_interface *intf)
58190         struct wdm_device *desc = wdm_find_device(intf);
58191         int rv;
58193 +       unpoison_urbs(desc);
58194         clear_bit(WDM_OVERFLOW, &desc->flags);
58195         clear_bit(WDM_RESETTING, &desc->flags);
58196         rv = recover_from_urb_loss(desc);
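
cdc-wdm's switch from kill_urbs() to poison_urbs()/unpoison_urbs() closes a resubmission race: killing an URB only cancels the current submission, so a completion handler running concurrently can submit it again, while poisoning also rejects new submissions until the owner unpoisons on the resume/release/post-reset path. The semantics modeled on a flag (the queue is imaginary; only the behavior mirrors usb_poison_urb()):

#include <stdio.h>

struct req {
	int poisoned;
};

static int submit(struct req *r)
{
	if (r->poisoned)
		return -1;	/* refused while poisoned */
	puts("submitted");
	return 0;
}

int main(void)
{
	struct req r = { 0 };

	r.poisoned = 1;		/* poison: cancel + block resubmit */
	submit(&r);		/* refused */
	r.poisoned = 0;		/* unpoison once it is safe again */
	submit(&r);		/* works again */
	return 0;
}
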
58197 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
58198 index 7f71218cc1e5..13fe37fbbd2c 100644
58199 --- a/drivers/usb/core/hub.c
58200 +++ b/drivers/usb/core/hub.c
58201 @@ -3556,7 +3556,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
58202         u16             portchange, portstatus;
58204         if (!test_and_set_bit(port1, hub->child_usage_bits)) {
58205 -               status = pm_runtime_get_sync(&port_dev->dev);
58206 +               status = pm_runtime_resume_and_get(&port_dev->dev);
58207                 if (status < 0) {
58208                         dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
58209                                         status);
58210 @@ -3593,9 +3593,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
58211                  * sequence.
58212                  */
58213                 status = hub_port_status(hub, port1, &portstatus, &portchange);
58215 -               /* TRSMRCY = 10 msec */
58216 -               msleep(10);
58217         }
58219   SuspendCleared:
58220 @@ -3610,6 +3607,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
58221                                 usb_clear_port_feature(hub->hdev, port1,
58222                                                 USB_PORT_FEAT_C_SUSPEND);
58223                 }
58225 +               /* TRSMRCY = 10 msec */
58226 +               msleep(10);
58227         }
58229         if (udev->persist_enabled)
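
usb_port_resume() now uses pm_runtime_resume_and_get(): plain pm_runtime_get_sync() increments the usage count even when the resume fails, so returning on error without a matching put leaks a reference, whereas the _resume_and_get variant drops its own reference on failure. A counter model of the difference (the counter stands in for the runtime-PM usage count):

#include <stdio.h>

static int usage;

static int get_sync(int resume_ok)
{
	usage++;			/* counted even on failure */
	return resume_ok ? 0 : -1;
}

static int resume_and_get(int resume_ok)
{
	usage++;
	if (!resume_ok) {
		usage--;		/* failure drops the reference */
		return -1;
	}
	return 0;
}

int main(void)
{
	get_sync(0);
	printf("after failed get_sync: %d\n", usage);		/* 1: leaked */
	usage = 0;
	resume_and_get(0);
	printf("after failed resume_and_get: %d\n", usage);	/* 0 */
	return 0;
}
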
58230 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
58231 index 76ac5d6555ae..21e7522655ac 100644
58232 --- a/drivers/usb/core/quirks.c
58233 +++ b/drivers/usb/core/quirks.c
58234 @@ -406,6 +406,7 @@ static const struct usb_device_id usb_quirk_list[] = {
58236         /* Realtek hub in Dell WD19 (Type-C) */
58237         { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
58238 +       { USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME },
58240         /* Generic RTL8153 based ethernet adapters */
58241         { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
58242 @@ -438,6 +439,9 @@ static const struct usb_device_id usb_quirk_list[] = {
58243         { USB_DEVICE(0x17ef, 0xa012), .driver_info =
58244                         USB_QUIRK_DISCONNECT_SUSPEND },
58246 +       /* Lenovo ThinkPad USB-C Dock Gen2 Ethernet (RTL8153 GigE) */
58247 +       { USB_DEVICE(0x17ef, 0xa387), .driver_info = USB_QUIRK_NO_LPM },
58249         /* BUILDWIN Photo Frame */
58250         { USB_DEVICE(0x1908, 0x1315), .driver_info =
58251                         USB_QUIRK_HONOR_BNUMINTERFACES },
58252 diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
58253 index 7161344c6522..641e4251cb7f 100644
58254 --- a/drivers/usb/dwc2/core.h
58255 +++ b/drivers/usb/dwc2/core.h
58256 @@ -112,6 +112,7 @@ struct dwc2_hsotg_req;
58257   * @debugfs: File entry for debugfs file for this endpoint.
58258   * @dir_in: Set to true if this endpoint is of the IN direction, which
58259   *          means that it is sending data to the Host.
58260 + * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
58261   * @index: The index for the endpoint registers.
58262   * @mc: Multi Count - number of transactions per microframe
58263   * @interval: Interval for periodic endpoints, in frames or microframes.
58264 @@ -161,6 +162,7 @@ struct dwc2_hsotg_ep {
58265         unsigned short          fifo_index;
58267         unsigned char           dir_in;
58268 +       unsigned char           map_dir;
58269         unsigned char           index;
58270         unsigned char           mc;
58271         u16                     interval;
58272 diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
58273 index 55f1d14fc414..510fd0572feb 100644
58274 --- a/drivers/usb/dwc2/core_intr.c
58275 +++ b/drivers/usb/dwc2/core_intr.c
58276 @@ -307,6 +307,7 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
58277  static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
58279         int ret;
58280 +       u32 hprt0;
58282         /* Clear interrupt */
58283         dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS);
58284 @@ -327,6 +328,13 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
58285                  * established
58286                  */
58287                 dwc2_hsotg_disconnect(hsotg);
58288 +       } else {
58289 +               /* Turn on the port power bit. */
58290 +               hprt0 = dwc2_read_hprt0(hsotg);
58291 +               hprt0 |= HPRT0_PWR;
58292 +               dwc2_writel(hsotg, hprt0, HPRT0);
58293 +               /* Connect hcd after port power is set. */
58294 +               dwc2_hcd_connect(hsotg);
58295         }
58298 @@ -652,6 +660,71 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
58299                 return 0;
58302 +/**
58303 + * dwc_handle_gpwrdn_disc_det() - Handles the gpwrdn disconnect detect.
58304 + * Exits hibernation without restoring registers.
58305 + *
58306 + * @hsotg: Programming view of DWC_otg controller
58307 + * @gpwrdn: GPWRDN register
58308 + */
58309 +static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
58310 +                                             u32 gpwrdn)
58312 +       u32 gpwrdn_tmp;
58314 +       /* Switch-on voltage to the core */
58315 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58316 +       gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
58317 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58318 +       udelay(5);
58320 +       /* Reset core */
58321 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58322 +       gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
58323 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58324 +       udelay(5);
58326 +       /* Disable Power Down Clamp */
58327 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58328 +       gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
58329 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58330 +       udelay(5);
58332 +       /* Deassert reset core */
58333 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58334 +       gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
58335 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58336 +       udelay(5);
58338 +       /* Disable PMU interrupt */
58339 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58340 +       gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
58341 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58343 +       /* De-assert Wakeup Logic */
58344 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58345 +       gpwrdn_tmp &= ~GPWRDN_PMUACTV;
58346 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58348 +       hsotg->hibernated = 0;
58349 +       hsotg->bus_suspended = 0;
58351 +       if (gpwrdn & GPWRDN_IDSTS) {
58352 +               hsotg->op_state = OTG_STATE_B_PERIPHERAL;
58353 +               dwc2_core_init(hsotg, false);
58354 +               dwc2_enable_global_interrupts(hsotg);
58355 +               dwc2_hsotg_core_init_disconnected(hsotg, false);
58356 +               dwc2_hsotg_core_connect(hsotg);
58357 +       } else {
58358 +               hsotg->op_state = OTG_STATE_A_HOST;
58360 +               /* Initialize the Core for Host mode */
58361 +               dwc2_core_init(hsotg, false);
58362 +               dwc2_enable_global_interrupts(hsotg);
58363 +               dwc2_hcd_start(hsotg);
58364 +       }
58367  /*
58368   * GPWRDN interrupt handler.
58369   *
58370 @@ -673,64 +746,14 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
58372         if ((gpwrdn & GPWRDN_DISCONN_DET) &&
58373             (gpwrdn & GPWRDN_DISCONN_DET_MSK) && !linestate) {
58374 -               u32 gpwrdn_tmp;
58376                 dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__);
58378 -               /* Switch-on voltage to the core */
58379 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58380 -               gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
58381 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58382 -               udelay(10);
58384 -               /* Reset core */
58385 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58386 -               gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
58387 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58388 -               udelay(10);
58390 -               /* Disable Power Down Clamp */
58391 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58392 -               gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
58393 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58394 -               udelay(10);
58396 -               /* Deassert reset core */
58397 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58398 -               gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
58399 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58400 -               udelay(10);
58402 -               /* Disable PMU interrupt */
58403 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58404 -               gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
58405 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58407 -               /* De-assert Wakeup Logic */
58408 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
58409 -               gpwrdn_tmp &= ~GPWRDN_PMUACTV;
58410 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
58412 -               hsotg->hibernated = 0;
58414 -               if (gpwrdn & GPWRDN_IDSTS) {
58415 -                       hsotg->op_state = OTG_STATE_B_PERIPHERAL;
58416 -                       dwc2_core_init(hsotg, false);
58417 -                       dwc2_enable_global_interrupts(hsotg);
58418 -                       dwc2_hsotg_core_init_disconnected(hsotg, false);
58419 -                       dwc2_hsotg_core_connect(hsotg);
58420 -               } else {
58421 -                       hsotg->op_state = OTG_STATE_A_HOST;
58423 -                       /* Initialize the Core for Host mode */
58424 -                       dwc2_core_init(hsotg, false);
58425 -                       dwc2_enable_global_interrupts(hsotg);
58426 -                       dwc2_hcd_start(hsotg);
58427 -               }
58428 -       }
58430 -       if ((gpwrdn & GPWRDN_LNSTSCHG) &&
58431 -           (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
58432 +               /*
58433 +                * Call disconnect detect function to exit from
58434 +                * hibernation
58435 +                */
58436 +               dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
58437 +       } else if ((gpwrdn & GPWRDN_LNSTSCHG) &&
58438 +                  (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
58439                 dev_dbg(hsotg->dev, "%s: GPWRDN_LNSTSCHG\n", __func__);
58440                 if (hsotg->hw_params.hibernation &&
58441                     hsotg->hibernated) {
58442 @@ -741,24 +764,21 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
58443                                 dwc2_exit_hibernation(hsotg, 1, 0, 1);
58444                         }
58445                 }
58446 -       }
58447 -       if ((gpwrdn & GPWRDN_RST_DET) && (gpwrdn & GPWRDN_RST_DET_MSK)) {
58448 +       } else if ((gpwrdn & GPWRDN_RST_DET) &&
58449 +                  (gpwrdn & GPWRDN_RST_DET_MSK)) {
58450                 dev_dbg(hsotg->dev, "%s: GPWRDN_RST_DET\n", __func__);
58451                 if (!linestate && (gpwrdn & GPWRDN_BSESSVLD))
58452                         dwc2_exit_hibernation(hsotg, 0, 1, 0);
58453 -       }
58454 -       if ((gpwrdn & GPWRDN_STS_CHGINT) &&
58455 -           (gpwrdn & GPWRDN_STS_CHGINT_MSK) && linestate) {
58456 +       } else if ((gpwrdn & GPWRDN_STS_CHGINT) &&
58457 +                  (gpwrdn & GPWRDN_STS_CHGINT_MSK)) {
58458                 dev_dbg(hsotg->dev, "%s: GPWRDN_STS_CHGINT\n", __func__);
58459 -               if (hsotg->hw_params.hibernation &&
58460 -                   hsotg->hibernated) {
58461 -                       if (gpwrdn & GPWRDN_IDSTS) {
58462 -                               dwc2_exit_hibernation(hsotg, 0, 0, 0);
58463 -                               call_gadget(hsotg, resume);
58464 -                       } else {
58465 -                               dwc2_exit_hibernation(hsotg, 1, 0, 1);
58466 -                       }
58467 -               }
58468 +               /*
58469 +                * As GPWRDN_STS_CHGINT exit from hibernation flow is
58470 +                * the same as in GPWRDN_DISCONN_DET flow. Call
58471 +                * disconnect detect helper function to exit from
58472 +                * hibernation.
58473 +                */
58474 +               dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
58475         }
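
The core_intr.c rework factors the duplicated exit-from-hibernation register sequence into dwc_handle_gpwrdn_disc_det() and chains the GPWRDN status checks with else-if, so one interrupt takes exactly one recovery path and the STS_CHGINT case reuses the disconnect-detect flow. The shape of that refactor (status bits invented):

#include <stdio.h>

static void handle_disc_det(unsigned int status)
{
	(void)status;
	puts("exit hibernation");	/* shared recovery sequence */
}

static void irq(unsigned int status)
{
	if (status & 0x1)
		handle_disc_det(status);	/* disconnect detect */
	else if (status & 0x2)
		puts("line state change");
	else if (status & 0x4)
		handle_disc_det(status);	/* status change: same flow */
}

int main(void)
{
	irq(0x1);
	irq(0x4);
	return 0;
}
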
58478 diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
58479 index ad4c94366dad..d2f623d83bf7 100644
58480 --- a/drivers/usb/dwc2/gadget.c
58481 +++ b/drivers/usb/dwc2/gadget.c
58482 @@ -422,7 +422,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
58484         struct usb_request *req = &hs_req->req;
58486 -       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
58487 +       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
58490  /*
58491 @@ -1242,6 +1242,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
58493         int ret;
58495 +       hs_ep->map_dir = hs_ep->dir_in;
58496         ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
58497         if (ret)
58498                 goto dma_error;
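
The gadget.c hunks record the DMA mapping direction in a new map_dir field at map time and unmap with that snapshot, because dir_in can change before the unmap happens (ep0 turns around between control phases), and mismatched map/unmap directions are invalid for the DMA API. A sketch of the snapshotting (types invented):

#include <stdio.h>

struct ep {
	int dir_in;	/* current direction, may change */
	int map_dir;	/* direction the live mapping used */
};

static void map_request(struct ep *e)
{
	e->map_dir = e->dir_in;		/* snapshot at map time */
}

static void unmap_request(const struct ep *e)
{
	/* use the snapshot, not e->dir_in */
	printf("unmap as %s\n", e->map_dir ? "IN" : "OUT");
}

int main(void)
{
	struct ep e = { .dir_in = 1, .map_dir = 0 };

	map_request(&e);
	e.dir_in = 0;		/* direction flips before completion */
	unmap_request(&e);	/* still unmaps as IN, matching the map */
	return 0;
}
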
58499 diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
58500 index 1a9789ec5847..6af1dcbc3656 100644
58501 --- a/drivers/usb/dwc2/hcd.c
58502 +++ b/drivers/usb/dwc2/hcd.c
58503 @@ -5580,7 +5580,15 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
58504                 return ret;
58505         }
58507 -       dwc2_hcd_rem_wakeup(hsotg);
58508 +       if (rem_wakeup) {
58509 +               dwc2_hcd_rem_wakeup(hsotg);
58510 +               /*
58511 +                * Change "port_connect_status_change" flag to re-enumerate,
58512 +                * because after exit from hibernation port connection status
58513 +                * is not detected.
58514 +                */
58515 +               hsotg->flags.b.port_connect_status_change = 1;
58516 +       }
58518         hsotg->hibernated = 0;
58519         hsotg->bus_suspended = 0;
58520 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
58521 index f2448d0a9d39..126f0e10b3ef 100644
58522 --- a/drivers/usb/dwc3/core.c
58523 +++ b/drivers/usb/dwc3/core.c
58524 @@ -114,6 +114,8 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
58525         dwc->current_dr_role = mode;
58528 +static int dwc3_core_soft_reset(struct dwc3 *dwc);
58530  static void __dwc3_set_mode(struct work_struct *work)
58532         struct dwc3 *dwc = work_to_dwc(work);
58533 @@ -121,6 +123,8 @@ static void __dwc3_set_mode(struct work_struct *work)
58534         int ret;
58535         u32 reg;
58537 +       mutex_lock(&dwc->mutex);
58539         pm_runtime_get_sync(dwc->dev);
58541         if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
58542 @@ -154,6 +158,25 @@ static void __dwc3_set_mode(struct work_struct *work)
58543                 break;
58544         }
58546 +       /* For DRD host or device mode only */
58547 +       if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) {
58548 +               reg = dwc3_readl(dwc->regs, DWC3_GCTL);
58549 +               reg |= DWC3_GCTL_CORESOFTRESET;
58550 +               dwc3_writel(dwc->regs, DWC3_GCTL, reg);
58552 +               /*
58553 +                * Wait for internal clocks to synchronized. DWC_usb31 and
58554 +                * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
58555 +                * keep it consistent across different IPs, let's wait up to
58556 +                * 100ms before clearing GCTL.CORESOFTRESET.
58557 +                */
58558 +               msleep(100);
58560 +               reg = dwc3_readl(dwc->regs, DWC3_GCTL);
58561 +               reg &= ~DWC3_GCTL_CORESOFTRESET;
58562 +               dwc3_writel(dwc->regs, DWC3_GCTL, reg);
58563 +       }
58565         spin_lock_irqsave(&dwc->lock, flags);
58567         dwc3_set_prtcap(dwc, dwc->desired_dr_role);
58568 @@ -178,6 +201,8 @@ static void __dwc3_set_mode(struct work_struct *work)
58569                 }
58570                 break;
58571         case DWC3_GCTL_PRTCAP_DEVICE:
58572 +               dwc3_core_soft_reset(dwc);
58574                 dwc3_event_buffers_setup(dwc);
58576                 if (dwc->usb2_phy)
58577 @@ -200,6 +225,7 @@ static void __dwc3_set_mode(struct work_struct *work)
58578  out:
58579         pm_runtime_mark_last_busy(dwc->dev);
58580         pm_runtime_put_autosuspend(dwc->dev);
58581 +       mutex_unlock(&dwc->mutex);
58584  void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
58585 @@ -1277,6 +1303,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
58586                                 "snps,usb3_lpm_capable");
58587         dwc->usb2_lpm_disable = device_property_read_bool(dev,
58588                                 "snps,usb2-lpm-disable");
58589 +       dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
58590 +                               "snps,usb2-gadget-lpm-disable");
58591         device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
58592                                 &rx_thr_num_pkt_prd);
58593         device_property_read_u8(dev, "snps,rx-max-burst-prd",
58594 @@ -1543,6 +1571,7 @@ static int dwc3_probe(struct platform_device *pdev)
58595         dwc3_cache_hwparams(dwc);
58597         spin_lock_init(&dwc->lock);
58598 +       mutex_init(&dwc->mutex);
58600         pm_runtime_set_active(dev);
58601         pm_runtime_use_autosuspend(dev);
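
The hunk above serializes mode switching with a mutex and pulses GCTL.CORESOFTRESET around the PRTCAP change. A minimal userspace sketch of that read-modify-write pulse, under stated assumptions: the register offset and the mmio_* helpers are illustrative stand-ins, not the driver's real accessors (only the bit position mirrors DWC3_GCTL_CORESOFTRESET).

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define GCTL			0x10		/* hypothetical offset */
#define GCTL_CORESOFTRESET	(1u << 11)

static uint32_t regs[64];			/* fake register file */

static uint32_t mmio_read(unsigned off)		 { return regs[off / 4]; }
static void mmio_write(unsigned off, uint32_t v) { regs[off / 4] = v; }

static void drd_soft_reset(void)
{
	uint32_t reg = mmio_read(GCTL);

	mmio_write(GCTL, reg | GCTL_CORESOFTRESET);

	/* DWC_usb31/32 may need ~50 ms for clocks to resync; wait 100 ms */
	usleep(100 * 1000);

	reg = mmio_read(GCTL);
	mmio_write(GCTL, reg & ~GCTL_CORESOFTRESET);
}

int main(void)
{
	drd_soft_reset();
	printf("GCTL = %#x\n", (unsigned)mmio_read(GCTL));
	return 0;
}
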
58602 diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
58603 index 052b20d52651..453cfebd4d04 100644
58604 --- a/drivers/usb/dwc3/core.h
58605 +++ b/drivers/usb/dwc3/core.h
58606 @@ -13,6 +13,7 @@
58608  #include <linux/device.h>
58609  #include <linux/spinlock.h>
58610 +#include <linux/mutex.h>
58611  #include <linux/ioport.h>
58612  #include <linux/list.h>
58613  #include <linux/bitops.h>
58614 @@ -946,6 +947,7 @@ struct dwc3_scratchpad_array {
58615   * @scratch_addr: dma address of scratchbuf
58616   * @ep0_in_setup: one control transfer is completed and enter setup phase
58617   * @lock: for synchronizing
58618 + * @mutex: for mode switching
58619   * @dev: pointer to our struct device
58620   * @sysdev: pointer to the DMA-capable device
58621   * @xhci: pointer to our xHCI child
58622 @@ -1034,7 +1036,8 @@ struct dwc3_scratchpad_array {
58623   * @dis_start_transfer_quirk: set if start_transfer failure SW workaround is
58624   *                     not needed for DWC_usb31 version 1.70a-ea06 and below
58625   * @usb3_lpm_capable: set if hardware supports Link Power Management
58626 - * @usb2_lpm_disable: set to disable usb2 lpm
58627 + * @usb2_lpm_disable: set to disable usb2 lpm for host
58628 + * @usb2_gadget_lpm_disable: set to disable usb2 lpm for gadget
58629   * @disable_scramble_quirk: set if we enable the disable scramble quirk
58630   * @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
58631   * @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
58632 @@ -1085,6 +1088,9 @@ struct dwc3 {
58633         /* device lock */
58634         spinlock_t              lock;
58636 +       /* mode switching lock */
58637 +       struct mutex            mutex;
58639         struct device           *dev;
58640         struct device           *sysdev;
58642 @@ -1238,6 +1244,7 @@ struct dwc3 {
58643         unsigned                dis_start_transfer_quirk:1;
58644         unsigned                usb3_lpm_capable:1;
58645         unsigned                usb2_lpm_disable:1;
58646 +       unsigned                usb2_gadget_lpm_disable:1;
58648         unsigned                disable_scramble_quirk:1;
58649         unsigned                u2exit_lfps_quirk:1;
58650 diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
58651 index 75f0042b998b..84c1a4ac2444 100644
58652 --- a/drivers/usb/dwc3/dwc3-imx8mp.c
58653 +++ b/drivers/usb/dwc3/dwc3-imx8mp.c
58654 @@ -167,6 +167,7 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
58656         dwc3_np = of_get_child_by_name(node, "dwc3");
58657         if (!dwc3_np) {
58658 +               err = -ENODEV;
58659                 dev_err(dev, "failed to find dwc3 core child\n");
58660                 goto disable_rpm;
58661         }
58662 diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
58663 index 3db17806e92e..e196673f5c64 100644
58664 --- a/drivers/usb/dwc3/dwc3-omap.c
58665 +++ b/drivers/usb/dwc3/dwc3-omap.c
58666 @@ -437,8 +437,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
58668                 if (extcon_get_state(edev, EXTCON_USB) == true)
58669                         dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
58670 +               else
58671 +                       dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
58673                 if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
58674                         dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
58675 +               else
58676 +                       dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
58678                 omap->edev = edev;
58679         }
58680 diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
58681 index 4c5c6972124a..19789e94bbd0 100644
58682 --- a/drivers/usb/dwc3/dwc3-pci.c
58683 +++ b/drivers/usb/dwc3/dwc3-pci.c
58684 @@ -41,6 +41,7 @@
58685  #define PCI_DEVICE_ID_INTEL_TGPH               0x43ee
58686  #define PCI_DEVICE_ID_INTEL_JSP                        0x4dee
58687  #define PCI_DEVICE_ID_INTEL_ADLP               0x51ee
58688 +#define PCI_DEVICE_ID_INTEL_ADLM               0x54ee
58689  #define PCI_DEVICE_ID_INTEL_ADLS               0x7ae1
58690  #define PCI_DEVICE_ID_INTEL_TGL                        0x9a15
58692 @@ -122,6 +123,7 @@ static const struct property_entry dwc3_pci_mrfld_properties[] = {
58693         PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
58694         PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
58695         PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
58696 +       PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
58697         PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
58698         {}
58699  };
58700 @@ -388,6 +390,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
58701         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLP),
58702           (kernel_ulong_t) &dwc3_pci_intel_swnode, },
58704 +       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLM),
58705 +         (kernel_ulong_t) &dwc3_pci_intel_swnode, },
58707         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
58708           (kernel_ulong_t) &dwc3_pci_intel_swnode, },
58710 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
58711 index c7ef218e7a8c..8585b56d9f2d 100644
58712 --- a/drivers/usb/dwc3/gadget.c
58713 +++ b/drivers/usb/dwc3/gadget.c
58714 @@ -308,13 +308,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
58715         }
58717         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
58718 -               int             needs_wakeup;
58719 +               int link_state;
58721 -               needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
58722 -                               dwc->link_state == DWC3_LINK_STATE_U2 ||
58723 -                               dwc->link_state == DWC3_LINK_STATE_U3);
58725 -               if (unlikely(needs_wakeup)) {
58726 +               link_state = dwc3_gadget_get_link_state(dwc);
58727 +               if (link_state == DWC3_LINK_STATE_U1 ||
58728 +                   link_state == DWC3_LINK_STATE_U2 ||
58729 +                   link_state == DWC3_LINK_STATE_U3) {
58730                         ret = __dwc3_gadget_wakeup(dwc);
58731                         dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
58732                                         ret);
58733 @@ -608,12 +607,14 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
58734                 u8 bInterval_m1;
58736                 /*
58737 -                * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
58738 -                * must be set to 0 when the controller operates in full-speed.
58739 +                * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
58740 +                *
58741 +                * NOTE: The programming guide incorrectly states that bInterval_m1
58742 +                * must be set to 0 when operating in full-speed. Internally the
58743 +                * controller does not have this limitation. See DWC_usb3x
58744 +                * programming guide section 3.2.2.1.
58745                  */
58746                 bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
58747 -               if (dwc->gadget->speed == USB_SPEED_FULL)
58748 -                       bInterval_m1 = 0;
58750                 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
58751                     dwc->gadget->speed == USB_SPEED_FULL)
58752 @@ -1675,7 +1676,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
58753                 }
58754         }
58756 -       return __dwc3_gadget_kick_transfer(dep);
58757 +       __dwc3_gadget_kick_transfer(dep);
58759 +       return 0;
58762  static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
58763 @@ -1973,6 +1976,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
58764         case DWC3_LINK_STATE_RESET:
58765         case DWC3_LINK_STATE_RX_DET:    /* in HS, means Early Suspend */
58766         case DWC3_LINK_STATE_U3:        /* in HS, means SUSPEND */
58767 +       case DWC3_LINK_STATE_U2:        /* in HS, means Sleep (L1) */
58768 +       case DWC3_LINK_STATE_U1:
58769         case DWC3_LINK_STATE_RESUME:
58770                 break;
58771         default:
58772 @@ -2299,6 +2304,10 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
58773         if (DWC3_VER_IS_PRIOR(DWC3, 250A))
58774                 reg |= DWC3_DEVTEN_ULSTCNGEN;
58776 +       /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
58777 +       if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
58778 +               reg |= DWC3_DEVTEN_EOPFEN;
58780         dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
58783 @@ -3322,6 +3331,15 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
58785         u32                     reg;
58787 +       /*
58788 +        * Ideally, dwc3_reset_gadget() would trigger the function
58789 +        * drivers to stop any active transfers through ep disable.
58790 +        * However, for functions which defer ep disable, such as mass
58791 +        * storage, we will need to rely on the call to stop active
58792 +        * transfers here, and avoid allowing of request queuing.
58793 +        */
58794 +       dwc->connected = false;
58796         /*
58797          * WORKAROUND: DWC3 revisions <1.88a have an issue which
58798          * would cause a missing Disconnect Event if there's a
58799 @@ -3460,6 +3478,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
58800         /* Enable USB2 LPM Capability */
58802         if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
58803 +           !dwc->usb2_gadget_lpm_disable &&
58804             (speed != DWC3_DSTS_SUPERSPEED) &&
58805             (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
58806                 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
58807 @@ -3486,6 +3505,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
58809                 dwc3_gadget_dctl_write_safe(dwc, reg);
58810         } else {
58811 +               if (dwc->usb2_gadget_lpm_disable) {
58812 +                       reg = dwc3_readl(dwc->regs, DWC3_DCFG);
58813 +                       reg &= ~DWC3_DCFG_LPM_CAP;
58814 +                       dwc3_writel(dwc->regs, DWC3_DCFG, reg);
58815 +               }
58817                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
58818                 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
58819                 dwc3_gadget_dctl_write_safe(dwc, reg);
58820 @@ -3934,7 +3959,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
58821         dwc->gadget->ssp_rate           = USB_SSP_GEN_UNKNOWN;
58822         dwc->gadget->sg_supported       = true;
58823         dwc->gadget->name               = "dwc3-gadget";
58824 -       dwc->gadget->lpm_capable        = true;
58825 +       dwc->gadget->lpm_capable        = !dwc->usb2_gadget_lpm_disable;
58827         /*
58828          * FIXME We might be setting max_speed to <SUPER, however versions
58829 @@ -4005,8 +4030,9 @@ int dwc3_gadget_init(struct dwc3 *dwc)
58831  void dwc3_gadget_exit(struct dwc3 *dwc)
58833 -       usb_del_gadget_udc(dwc->gadget);
58834 +       usb_del_gadget(dwc->gadget);
58835         dwc3_gadget_free_endpoints(dwc);
58836 +       usb_put_gadget(dwc->gadget);
58837         dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
58838                           dwc->bounce_addr);
58839         kfree(dwc->setup_buf);
58840 diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
58841 index 2d115353424c..8bb25773b61e 100644
58842 --- a/drivers/usb/gadget/config.c
58843 +++ b/drivers/usb/gadget/config.c
58844 @@ -194,9 +194,13 @@ EXPORT_SYMBOL_GPL(usb_assign_descriptors);
58845  void usb_free_all_descriptors(struct usb_function *f)
58847         usb_free_descriptors(f->fs_descriptors);
58848 +       f->fs_descriptors = NULL;
58849         usb_free_descriptors(f->hs_descriptors);
58850 +       f->hs_descriptors = NULL;
58851         usb_free_descriptors(f->ss_descriptors);
58852 +       f->ss_descriptors = NULL;
58853         usb_free_descriptors(f->ssp_descriptors);
58854 +       f->ssp_descriptors = NULL;
58856  EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
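
The config.c hunk above clears each descriptor pointer after freeing it, so a repeated call to usb_free_all_descriptors() cannot double-free. A small self-contained sketch of the same idiom; the struct and function names here are hypothetical, not the gadget API.

#include <stdlib.h>

struct func_descs {
	void *fs, *hs, *ss, *ssp;
};

/* Freeing and then clearing each pointer makes a second call harmless. */
static void free_all_descs(struct func_descs *f)
{
	free(f->fs);  f->fs  = NULL;
	free(f->hs);  f->hs  = NULL;
	free(f->ss);  f->ss  = NULL;
	free(f->ssp); f->ssp = NULL;
}

int main(void)
{
	struct func_descs f = { malloc(4), malloc(4), malloc(4), malloc(4) };

	free_all_descs(&f);
	free_all_descs(&f);	/* safe: free(NULL) is a no-op */
	return 0;
}
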
58858 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
58859 index 801a8b668a35..10a5d9f0f2b9 100644
58860 --- a/drivers/usb/gadget/function/f_fs.c
58861 +++ b/drivers/usb/gadget/function/f_fs.c
58862 @@ -2640,6 +2640,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
58864         do { /* lang_count > 0 so we can use do-while */
58865                 unsigned needed = needed_count;
58866 +               u32 str_per_lang = str_count;
58868                 if (len < 3)
58869                         goto error_free;
58870 @@ -2675,7 +2676,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
58872                         data += length + 1;
58873                         len -= length + 1;
58874 -               } while (--str_count);
58875 +               } while (--str_per_lang);
58877                 s->id = 0;   /* terminator */
58878                 s->s = NULL;
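
The f_fs.c fix above stops the per-language loop from consuming the shared str_count on the first pass by taking a fresh copy each iteration. A compact standalone illustration of that counter-shadowing bug and its fix; the loop shape mirrors the driver but the surrounding names are illustrative.

#include <stdio.h>

int main(void)
{
	unsigned lang_count = 2, str_count = 3;

	for (unsigned l = 0; l < lang_count; l++) {
		unsigned str_per_lang = str_count;	/* fresh copy per language */

		do {	/* without the copy, language 1 would see a zero count */
			printf("lang %u: string %u\n", l, str_count - str_per_lang);
		} while (--str_per_lang);
	}
	return 0;
}
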
58879 diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
58880 index 560382e0a8f3..e65f474ad7b3 100644
58881 --- a/drivers/usb/gadget/function/f_uac1.c
58882 +++ b/drivers/usb/gadget/function/f_uac1.c
58883 @@ -19,6 +19,9 @@
58884  #include "u_audio.h"
58885  #include "u_uac1.h"
58887 +/* UAC1 spec: 3.7.2.3 Audio Channel Cluster Format */
58888 +#define UAC1_CHANNEL_MASK 0x0FFF
58890  struct f_uac1 {
58891         struct g_audio g_audio;
58892         u8 ac_intf, as_in_intf, as_out_intf;
58893 @@ -30,6 +33,11 @@ static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
58894         return container_of(f, struct f_uac1, g_audio.func);
58897 +static inline struct f_uac1_opts *g_audio_to_uac1_opts(struct g_audio *audio)
58899 +       return container_of(audio->func.fi, struct f_uac1_opts, func_inst);
58902  /*
58903   * DESCRIPTORS ... most are static, but strings and full
58904   * configuration descriptors are built on demand.
58905 @@ -505,11 +513,42 @@ static void f_audio_disable(struct usb_function *f)
58907  /*-------------------------------------------------------------------------*/
58909 +static int f_audio_validate_opts(struct g_audio *audio, struct device *dev)
58911 +       struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
58913 +       if (!opts->p_chmask && !opts->c_chmask) {
58914 +               dev_err(dev, "Error: no playback and capture channels\n");
58915 +               return -EINVAL;
58916 +       } else if (opts->p_chmask & ~UAC1_CHANNEL_MASK) {
58917 +               dev_err(dev, "Error: unsupported playback channels mask\n");
58918 +               return -EINVAL;
58919 +       } else if (opts->c_chmask & ~UAC1_CHANNEL_MASK) {
58920 +               dev_err(dev, "Error: unsupported capture channels mask\n");
58921 +               return -EINVAL;
58922 +       } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
58923 +               dev_err(dev, "Error: incorrect playback sample size\n");
58924 +               return -EINVAL;
58925 +       } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
58926 +               dev_err(dev, "Error: incorrect capture sample size\n");
58927 +               return -EINVAL;
58928 +       } else if (!opts->p_srate) {
58929 +               dev_err(dev, "Error: incorrect playback sampling rate\n");
58930 +               return -EINVAL;
58931 +       } else if (!opts->c_srate) {
58932 +               dev_err(dev, "Error: incorrect capture sampling rate\n");
58933 +               return -EINVAL;
58934 +       }
58936 +       return 0;
58939  /* audio function driver setup/binding */
58940  static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
58942         struct usb_composite_dev        *cdev = c->cdev;
58943         struct usb_gadget               *gadget = cdev->gadget;
58944 +       struct device                   *dev = &gadget->dev;
58945         struct f_uac1                   *uac1 = func_to_uac1(f);
58946         struct g_audio                  *audio = func_to_g_audio(f);
58947         struct f_uac1_opts              *audio_opts;
58948 @@ -519,6 +558,10 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
58949         int                             rate;
58950         int                             status;
58952 +       status = f_audio_validate_opts(audio, dev);
58953 +       if (status)
58954 +               return status;
58956         audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
58958         us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
58959 diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
58960 index 6f03e944e0e3..dd960cea642f 100644
58961 --- a/drivers/usb/gadget/function/f_uac2.c
58962 +++ b/drivers/usb/gadget/function/f_uac2.c
58963 @@ -14,6 +14,9 @@
58964  #include "u_audio.h"
58965  #include "u_uac2.h"
58967 +/* UAC2 spec: 4.1 Audio Channel Cluster Descriptor */
58968 +#define UAC2_CHANNEL_MASK 0x07FFFFFF
58970  /*
58971   * The driver implements a simple UAC_2 topology.
58972   * USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture
58973 @@ -604,6 +607,36 @@ static void setup_descriptor(struct f_uac2_opts *opts)
58974         hs_audio_desc[i] = NULL;
58977 +static int afunc_validate_opts(struct g_audio *agdev, struct device *dev)
58979 +       struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
58981 +       if (!opts->p_chmask && !opts->c_chmask) {
58982 +               dev_err(dev, "Error: no playback and capture channels\n");
58983 +               return -EINVAL;
58984 +       } else if (opts->p_chmask & ~UAC2_CHANNEL_MASK) {
58985 +               dev_err(dev, "Error: unsupported playback channels mask\n");
58986 +               return -EINVAL;
58987 +       } else if (opts->c_chmask & ~UAC2_CHANNEL_MASK) {
58988 +               dev_err(dev, "Error: unsupported capture channels mask\n");
58989 +               return -EINVAL;
58990 +       } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
58991 +               dev_err(dev, "Error: incorrect playback sample size\n");
58992 +               return -EINVAL;
58993 +       } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
58994 +               dev_err(dev, "Error: incorrect capture sample size\n");
58995 +               return -EINVAL;
58996 +       } else if (!opts->p_srate) {
58997 +               dev_err(dev, "Error: incorrect playback sampling rate\n");
58998 +               return -EINVAL;
58999 +       } else if (!opts->c_srate) {
59000 +               dev_err(dev, "Error: incorrect capture sampling rate\n");
59001 +               return -EINVAL;
59002 +       }
59004 +       return 0;
59007  static int
59008  afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
59010 @@ -612,11 +645,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
59011         struct usb_composite_dev *cdev = cfg->cdev;
59012         struct usb_gadget *gadget = cdev->gadget;
59013         struct device *dev = &gadget->dev;
59014 -       struct f_uac2_opts *uac2_opts;
59015 +       struct f_uac2_opts *uac2_opts = g_audio_to_uac2_opts(agdev);
59016         struct usb_string *us;
59017         int ret;
59019 -       uac2_opts = container_of(fn->fi, struct f_uac2_opts, func_inst);
59020 +       ret = afunc_validate_opts(agdev, dev);
59021 +       if (ret)
59022 +               return ret;
59024         us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn));
59025         if (IS_ERR(us))
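
Both f_uac1.c and f_uac2.c above gain an options validator: the channel masks must fit the spec's cluster format (0x0FFF for UAC1, 0x07FFFFFF for UAC2), sample sizes must be 1-4 bytes, and sample rates must be nonzero. A self-contained sketch of the UAC1 variant; the struct and function names are illustrative, only the limits come from the patch.

#include <stdio.h>

#define UAC1_CHANNEL_MASK 0x0FFF	/* UAC1 spec 3.7.2.3 */

struct uac_opts {
	unsigned p_chmask, c_chmask;	/* playback/capture channel masks */
	int p_ssize, c_ssize;		/* sample sizes in bytes */
	int p_srate, c_srate;		/* sample rates in Hz */
};

static int validate_opts(const struct uac_opts *o)
{
	if (!o->p_chmask && !o->c_chmask)
		return -1;	/* neither direction has channels */
	if ((o->p_chmask | o->c_chmask) & ~UAC1_CHANNEL_MASK)
		return -1;	/* bits outside the cluster format */
	if (o->p_ssize < 1 || o->p_ssize > 4 || o->c_ssize < 1 || o->c_ssize > 4)
		return -1;
	if (o->p_srate <= 0 || o->c_srate <= 0)
		return -1;
	return 0;
}

int main(void)
{
	struct uac_opts ok  = { 0x3, 0x3, 2, 2, 48000, 48000 };
	struct uac_opts bad = { 0x3, 0x3, 5, 2, 48000, 48000 };

	printf("ok=%d bad=%d\n", validate_opts(&ok), validate_opts(&bad));
	return 0;
}
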
59026 diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
59027 index 44b4352a2676..f48a00e49794 100644
59028 --- a/drivers/usb/gadget/function/f_uvc.c
59029 +++ b/drivers/usb/gadget/function/f_uvc.c
59030 @@ -633,7 +633,12 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
59032         uvc_hs_streaming_ep.wMaxPacketSize =
59033                 cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11));
59034 -       uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
59036 +       /* A high-bandwidth endpoint must specify a bInterval value of 1 */
59037 +       if (max_packet_mult > 1)
59038 +               uvc_hs_streaming_ep.bInterval = 1;
59039 +       else
59040 +               uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
59042         uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size);
59043         uvc_ss_streaming_ep.bInterval = opts->streaming_interval;
59044 @@ -817,6 +822,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
59045         pd->bmControls[0]               = 1;
59046         pd->bmControls[1]               = 0;
59047         pd->iProcessing                 = 0;
59048 +       pd->bmVideoStandards            = 0;
59050         od = &opts->uvc_output_terminal;
59051         od->bLength                     = UVC_DT_OUTPUT_TERMINAL_SIZE;
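
The f_uvc.c hunk encodes the USB 2.0 rule that a high-bandwidth endpoint (more than one transaction per microframe, advertised in bits 12:11 of wMaxPacketSize) must report bInterval = 1. A standalone sketch of the encoding and the bInterval choice; variable names are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Pack packet size and (mult - 1) into a high-speed wMaxPacketSize. */
static uint16_t hs_wmaxpacket(unsigned size, unsigned mult)
{
	return (uint16_t)(size | ((mult - 1) << 11));
}

int main(void)
{
	unsigned mult = 3, size = 1024, streaming_interval = 4;
	unsigned binterval = (mult > 1) ? 1 : streaming_interval;

	printf("wMaxPacketSize=%#x bInterval=%u\n",
	       hs_wmaxpacket(size, mult), binterval);
	return 0;
}
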
59052 diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
59053 index a9f8eb8e1c76..2c9eab2b863d 100644
59054 --- a/drivers/usb/gadget/legacy/webcam.c
59055 +++ b/drivers/usb/gadget/legacy/webcam.c
59056 @@ -125,6 +125,7 @@ static const struct uvc_processing_unit_descriptor uvc_processing = {
59057         .bmControls[0]          = 1,
59058         .bmControls[1]          = 0,
59059         .iProcessing            = 0,
59060 +       .bmVideoStandards       = 0,
59061  };
59063  static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
59064 diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
59065 index be7bb64e3594..d11d3d14313f 100644
59066 --- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
59067 +++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
59068 @@ -36,6 +36,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
59069                    int status)
59071         bool internal = req->internal;
59072 +       struct ast_vhub *vhub = ep->vhub;
59074         EPVDBG(ep, "completing request @%p, status %d\n", req, status);
59076 @@ -46,7 +47,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
59078         if (req->req.dma) {
59079                 if (!WARN_ON(!ep->dev))
59080 -                       usb_gadget_unmap_request(&ep->dev->gadget,
59081 +                       usb_gadget_unmap_request_by_dev(&vhub->pdev->dev,
59082                                                  &req->req, ep->epn.is_in);
59083                 req->req.dma = 0;
59084         }
59085 diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
59086 index 02d8bfae58fb..cb164c615e6f 100644
59087 --- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
59088 +++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
59089 @@ -376,7 +376,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
59090         if (ep->epn.desc_mode ||
59091             ((((unsigned long)u_req->buf & 7) == 0) &&
59092              (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
59093 -               rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
59094 +               rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
59095                                             ep->epn.is_in);
59096                 if (rc) {
59097                         dev_warn(&vhub->pdev->dev,
59098 diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
59099 index 57067763b100..5f474ffe2be1 100644
59100 --- a/drivers/usb/gadget/udc/dummy_hcd.c
59101 +++ b/drivers/usb/gadget/udc/dummy_hcd.c
59102 @@ -903,6 +903,21 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value)
59103         spin_lock_irqsave(&dum->lock, flags);
59104         dum->pullup = (value != 0);
59105         set_link_state(dum_hcd);
59106 +       if (value == 0) {
59107 +               /*
59108 +                * Emulate synchronize_irq(): wait for callbacks to finish.
59109 +                * This seems to be the best place to emulate the call to
59110 +                * synchronize_irq() that's in usb_gadget_remove_driver().
59111 +                * Doing it in dummy_udc_stop() would be too late since it
59112 +                * is called after the unbind callback and unbind shouldn't
59113 +                * be invoked until all the other callbacks are finished.
59114 +                */
59115 +               while (dum->callback_usage > 0) {
59116 +                       spin_unlock_irqrestore(&dum->lock, flags);
59117 +                       usleep_range(1000, 2000);
59118 +                       spin_lock_irqsave(&dum->lock, flags);
59119 +               }
59120 +       }
59121         spin_unlock_irqrestore(&dum->lock, flags);
59123         usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
59124 @@ -1004,14 +1019,6 @@ static int dummy_udc_stop(struct usb_gadget *g)
59125         spin_lock_irq(&dum->lock);
59126         dum->ints_enabled = 0;
59127         stop_activity(dum);
59129 -       /* emulate synchronize_irq(): wait for callbacks to finish */
59130 -       while (dum->callback_usage > 0) {
59131 -               spin_unlock_irq(&dum->lock);
59132 -               usleep_range(1000, 2000);
59133 -               spin_lock_irq(&dum->lock);
59134 -       }
59136         dum->driver = NULL;
59137         spin_unlock_irq(&dum->lock);
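
The dummy_hcd change moves the "wait for callbacks" loop into dummy_pullup(), dropping the lock while sleeping so the callbacks can make progress. A pthread sketch of that drop-lock-and-poll pattern (compile with -lpthread); the counter name mirrors the driver's callback_usage, everything else is illustrative.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int callback_usage = 2;		/* callbacks still running */

static void *callback(void *arg)
{
	usleep(5000);			/* pretend to do work */
	pthread_mutex_lock(&lock);
	callback_usage--;
	pthread_mutex_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_t t[2];

	for (int i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, callback, NULL);

	/* Wait with the lock dropped, as dummy_pullup() now does. */
	pthread_mutex_lock(&lock);
	while (callback_usage > 0) {
		pthread_mutex_unlock(&lock);
		usleep(1000);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);

	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	puts("all callbacks finished");
	return 0;
}
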
59139 diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
59140 index d6ca50f01985..75bf446f4a66 100644
59141 --- a/drivers/usb/gadget/udc/fotg210-udc.c
59142 +++ b/drivers/usb/gadget/udc/fotg210-udc.c
59143 @@ -338,15 +338,16 @@ static void fotg210_start_dma(struct fotg210_ep *ep,
59144                 } else {
59145                         buffer = req->req.buf + req->req.actual;
59146                         length = ioread32(ep->fotg210->reg +
59147 -                                       FOTG210_FIBCR(ep->epnum - 1));
59148 -                       length &= FIBCR_BCFX;
59149 +                                       FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
59150 +                       if (length > req->req.length - req->req.actual)
59151 +                               length = req->req.length - req->req.actual;
59152                 }
59153         } else {
59154                 buffer = req->req.buf + req->req.actual;
59155                 if (req->req.length - req->req.actual > ep->ep.maxpacket)
59156                         length = ep->ep.maxpacket;
59157                 else
59158 -                       length = req->req.length;
59159 +                       length = req->req.length - req->req.actual;
59160         }
59162         d = dma_map_single(dev, buffer, length,
59163 @@ -379,8 +380,7 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep,
59164         }
59165         if (ep->dir_in) { /* if IN */
59166                 fotg210_start_dma(ep, req);
59167 -               if ((req->req.length == req->req.actual) ||
59168 -                   (req->req.actual < ep->ep.maxpacket))
59169 +               if (req->req.length == req->req.actual)
59170                         fotg210_done(ep, req, 0);
59171         } else { /* OUT */
59172                 u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
59173 @@ -820,7 +820,7 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210)
59174                 if (req->req.length)
59175                         fotg210_start_dma(ep, req);
59177 -               if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
59178 +               if (req->req.actual == req->req.length)
59179                         fotg210_done(ep, req, 0);
59180         } else {
59181                 fotg210_set_cxdone(fotg210);
59182 @@ -849,12 +849,16 @@ static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
59184         struct fotg210_request *req = list_entry(ep->queue.next,
59185                                                  struct fotg210_request, queue);
59186 +       int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);
59188         fotg210_start_dma(ep, req);
59190 -       /* finish out transfer */
59191 +       /* Complete the request when it's full or a short packet arrived.
59192 +        * Like other drivers, short_not_ok isn't handled.
59193 +        */
59195         if (req->req.length == req->req.actual ||
59196 -           req->req.actual < ep->ep.maxpacket)
59197 +           (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
59198                 fotg210_done(ep, req, 0);
59201 @@ -1027,6 +1031,12 @@ static void fotg210_init(struct fotg210_udc *fotg210)
59202         value &= ~DMCR_GLINT_EN;
59203         iowrite32(value, fotg210->reg + FOTG210_DMCR);
59205 +       /* enable only grp2 irqs we handle */
59206 +       iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
59207 +                   | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
59208 +                   | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
59209 +                 fotg210->reg + FOTG210_DMISGR2);
59211         /* disable all fifo interrupt */
59212         iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
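
The fotg210 OUT-path fix completes a request either when the buffer is full or when the controller latched a short-packet interrupt, instead of guessing from actual < maxpacket. The decision reduces to a predicate like this sketch (names illustrative):

#include <stdio.h>

/* Done when the buffer filled up or a short packet ended the transfer;
 * like the driver, short_not_ok is not handled. */
static int out_request_done(unsigned actual, unsigned length, int short_pkt_irq)
{
	return actual == length || short_pkt_irq;
}

int main(void)
{
	printf("%d\n", out_request_done(512, 512, 0));	/* full: complete */
	printf("%d\n", out_request_done(100, 512, 1));	/* short pkt: complete */
	printf("%d\n", out_request_done(100, 512, 0));	/* partial: keep going */
	return 0;
}
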
59214 diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
59215 index a3c1fc924268..fd3656d0f760 100644
59216 --- a/drivers/usb/gadget/udc/pch_udc.c
59217 +++ b/drivers/usb/gadget/udc/pch_udc.c
59218 @@ -7,12 +7,14 @@
59219  #include <linux/module.h>
59220  #include <linux/pci.h>
59221  #include <linux/delay.h>
59222 +#include <linux/dmi.h>
59223  #include <linux/errno.h>
59224 +#include <linux/gpio/consumer.h>
59225 +#include <linux/gpio/machine.h>
59226  #include <linux/list.h>
59227  #include <linux/interrupt.h>
59228  #include <linux/usb/ch9.h>
59229  #include <linux/usb/gadget.h>
59230 -#include <linux/gpio/consumer.h>
59231  #include <linux/irq.h>
59233  #define PCH_VBUS_PERIOD                3000    /* VBUS polling period (msec) */
59234 @@ -596,18 +598,22 @@ static void pch_udc_reconnect(struct pch_udc_dev *dev)
59235  static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
59236                                           int is_active)
59238 +       unsigned long           iflags;
59240 +       spin_lock_irqsave(&dev->lock, iflags);
59241         if (is_active) {
59242                 pch_udc_reconnect(dev);
59243                 dev->vbus_session = 1;
59244         } else {
59245                 if (dev->driver && dev->driver->disconnect) {
59246 -                       spin_lock(&dev->lock);
59247 +                       spin_unlock_irqrestore(&dev->lock, iflags);
59248                         dev->driver->disconnect(&dev->gadget);
59249 -                       spin_unlock(&dev->lock);
59250 +                       spin_lock_irqsave(&dev->lock, iflags);
59251                 }
59252                 pch_udc_set_disconnect(dev);
59253                 dev->vbus_session = 0;
59254         }
59255 +       spin_unlock_irqrestore(&dev->lock, iflags);
59258  /**
59259 @@ -1166,20 +1172,25 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
59260  static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
59262         struct pch_udc_dev      *dev;
59263 +       unsigned long           iflags;
59265         if (!gadget)
59266                 return -EINVAL;
59268         dev = container_of(gadget, struct pch_udc_dev, gadget);
59270 +       spin_lock_irqsave(&dev->lock, iflags);
59271         if (is_on) {
59272                 pch_udc_reconnect(dev);
59273         } else {
59274                 if (dev->driver && dev->driver->disconnect) {
59275 -                       spin_lock(&dev->lock);
59276 +                       spin_unlock_irqrestore(&dev->lock, iflags);
59277                         dev->driver->disconnect(&dev->gadget);
59278 -                       spin_unlock(&dev->lock);
59279 +                       spin_lock_irqsave(&dev->lock, iflags);
59280                 }
59281                 pch_udc_set_disconnect(dev);
59282         }
59283 +       spin_unlock_irqrestore(&dev->lock, iflags);
59285         return 0;
59287 @@ -1350,6 +1361,43 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
59288         return IRQ_HANDLED;
59291 +static struct gpiod_lookup_table minnowboard_udc_gpios = {
59292 +       .dev_id         = "0000:02:02.4",
59293 +       .table          = {
59294 +               GPIO_LOOKUP("sch_gpio.33158", 12, NULL, GPIO_ACTIVE_HIGH),
59295 +               {}
59296 +       },
59299 +static const struct dmi_system_id pch_udc_gpio_dmi_table[] = {
59300 +       {
59301 +               .ident = "MinnowBoard",
59302 +               .matches = {
59303 +                       DMI_MATCH(DMI_BOARD_NAME, "MinnowBoard"),
59304 +               },
59305 +               .driver_data = &minnowboard_udc_gpios,
59306 +       },
59307 +       { }
59310 +static void pch_vbus_gpio_remove_table(void *table)
59312 +       gpiod_remove_lookup_table(table);
59315 +static int pch_vbus_gpio_add_table(struct pch_udc_dev *dev)
59317 +       struct device *d = &dev->pdev->dev;
59318 +       const struct dmi_system_id *dmi;
59320 +       dmi = dmi_first_match(pch_udc_gpio_dmi_table);
59321 +       if (!dmi)
59322 +               return 0;
59324 +       gpiod_add_lookup_table(dmi->driver_data);
59325 +       return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, dmi->driver_data);
59328  /**
59329   * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
59330   * @dev:               Reference to the driver structure
59331 @@ -1360,6 +1408,7 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
59332   */
59333  static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
59335 +       struct device *d = &dev->pdev->dev;
59336         int err;
59337         int irq_num = 0;
59338         struct gpio_desc *gpiod;
59339 @@ -1367,8 +1416,12 @@ static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
59340         dev->vbus_gpio.port = NULL;
59341         dev->vbus_gpio.intr = 0;
59343 +       err = pch_vbus_gpio_add_table(dev);
59344 +       if (err)
59345 +               return err;
59347         /* Retrieve the GPIO line from the USB gadget device */
59348 -       gpiod = devm_gpiod_get(dev->gadget.dev.parent, NULL, GPIOD_IN);
59349 +       gpiod = devm_gpiod_get_optional(d, NULL, GPIOD_IN);
59350         if (IS_ERR(gpiod))
59351                 return PTR_ERR(gpiod);
59352         gpiod_set_consumer_name(gpiod, "pch_vbus");
59353 @@ -1756,7 +1809,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
59354         }
59355         /* prevent from using desc. - set HOST BUSY */
59356         dma_desc->status |= PCH_UDC_BS_HST_BSY;
59357 -       dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
59358 +       dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
59359         req->td_data = dma_desc;
59360         req->td_data_last = dma_desc;
59361         req->chain_len = 1;
59362 @@ -2298,6 +2351,21 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
59363                 pch_udc_set_dma(dev, DMA_DIR_RX);
59366 +static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
59367 +       __must_hold(&dev->lock)
59369 +       int rc;
59371 +       /* In some cases we can get an interrupt before the driver is set up */
59372 +       if (!dev->driver)
59373 +               return -ESHUTDOWN;
59375 +       spin_unlock(&dev->lock);
59376 +       rc = dev->driver->setup(&dev->gadget, &dev->setup_data);
59377 +       spin_lock(&dev->lock);
59378 +       return rc;
59381  /**
59382   * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
59383   * @dev:       Reference to the device structure
59384 @@ -2369,15 +2437,12 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
59385                         dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
59386                 else /* OUT */
59387                         dev->gadget.ep0 = &ep->ep;
59388 -               spin_lock(&dev->lock);
59389                 /* If Mass storage Reset */
59390                 if ((dev->setup_data.bRequestType == 0x21) &&
59391                     (dev->setup_data.bRequest == 0xFF))
59392                         dev->prot_stall = 0;
59393                 /* call gadget with setup data received */
59394 -               setup_supported = dev->driver->setup(&dev->gadget,
59395 -                                                    &dev->setup_data);
59396 -               spin_unlock(&dev->lock);
59397 +               setup_supported = pch_udc_gadget_setup(dev);
59399                 if (dev->setup_data.bRequestType & USB_DIR_IN) {
59400                         ep->td_data->status = (ep->td_data->status &
59401 @@ -2625,9 +2690,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
59402                 dev->ep[i].halted = 0;
59403         }
59404         dev->stall = 0;
59405 -       spin_unlock(&dev->lock);
59406 -       dev->driver->setup(&dev->gadget, &dev->setup_data);
59407 -       spin_lock(&dev->lock);
59408 +       pch_udc_gadget_setup(dev);
59411  /**
59412 @@ -2662,9 +2725,7 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
59413         dev->stall = 0;
59415         /* call gadget zero with setup data received */
59416 -       spin_unlock(&dev->lock);
59417 -       dev->driver->setup(&dev->gadget, &dev->setup_data);
59418 -       spin_lock(&dev->lock);
59419 +       pch_udc_gadget_setup(dev);
59422  /**
59423 @@ -2870,14 +2931,20 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
59424   * @dev:       Reference to the driver structure
59425   *
59426   * Return codes:
59427 - *     0: Success
59428 + *     0:              Success
59429 + *     -%ERRNO:        All kinds of errors when retrieving VBUS GPIO
59430   */
59431  static int pch_udc_pcd_init(struct pch_udc_dev *dev)
59433 +       int ret;
59435         pch_udc_init(dev);
59436         pch_udc_pcd_reinit(dev);
59437 -       pch_vbus_gpio_init(dev);
59438 -       return 0;
59440 +       ret = pch_vbus_gpio_init(dev);
59441 +       if (ret)
59442 +               pch_udc_exit(dev);
59443 +       return ret;
59446  /**
59447 @@ -2938,7 +3005,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
59448         dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
59449                                        UDC_EP0OUT_BUFF_SIZE * 4,
59450                                        DMA_FROM_DEVICE);
59451 -       return 0;
59452 +       return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
59455  static int pch_udc_start(struct usb_gadget *g,
59456 @@ -3063,6 +3130,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
59457         if (retval)
59458                 return retval;
59460 +       dev->pdev = pdev;
59461         pci_set_drvdata(pdev, dev);
59463         /* Determine BAR based on PCI ID */
59464 @@ -3078,16 +3146,10 @@ static int pch_udc_probe(struct pci_dev *pdev,
59466         dev->base_addr = pcim_iomap_table(pdev)[bar];
59468 -       /*
59469 -        * FIXME: add a GPIO descriptor table to pdev.dev using
59470 -        * gpiod_add_descriptor_table() from <linux/gpio/machine.h> based on
59471 -        * the PCI subsystem ID. The system-dependent GPIO is necessary for
59472 -        * VBUS operation.
59473 -        */
59475         /* initialize the hardware */
59476 -       if (pch_udc_pcd_init(dev))
59477 -               return -ENODEV;
59478 +       retval = pch_udc_pcd_init(dev);
59479 +       if (retval)
59480 +               return retval;
59482         pci_enable_msi(pdev);
59484 @@ -3104,7 +3166,6 @@ static int pch_udc_probe(struct pci_dev *pdev,
59486         /* device struct setup */
59487         spin_lock_init(&dev->lock);
59488 -       dev->pdev = pdev;
59489         dev->gadget.ops = &pch_udc_ops;
59491         retval = init_dma_pools(dev);
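
pch_udc_gadget_setup() above centralizes two things: bail out if an interrupt races ahead of driver binding, and drop the device lock around the gadget setup callback so the callback can take it again. A minimal pthread sketch of that unlock-around-callback helper (compile with -lpthread); the names here are illustrative, not the driver's API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int (*setup_cb)(void);

static int demo_setup(void) { return 0; }

/* Called with the lock held; invoke the callback with it dropped. */
static int gadget_setup_locked(void)
{
	int rc;

	if (!setup_cb)
		return -1;	/* interrupt arrived before the driver bound */

	pthread_mutex_unlock(&lock);
	rc = setup_cb();
	pthread_mutex_lock(&lock);
	return rc;
}

int main(void)
{
	setup_cb = demo_setup;
	pthread_mutex_lock(&lock);
	printf("setup -> %d\n", gadget_setup_locked());
	pthread_mutex_unlock(&lock);
	return 0;
}
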
59492 diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
59493 index 896c1a016d55..65cae4883454 100644
59494 --- a/drivers/usb/gadget/udc/r8a66597-udc.c
59495 +++ b/drivers/usb/gadget/udc/r8a66597-udc.c
59496 @@ -1849,6 +1849,8 @@ static int r8a66597_probe(struct platform_device *pdev)
59497                 return PTR_ERR(reg);
59499         ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
59500 +       if (!ires)
59501 +               return -EINVAL;
59502         irq = ires->start;
59503         irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
59505 diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
59506 index 1d3ebb07ccd4..b154b62abefa 100644
59507 --- a/drivers/usb/gadget/udc/s3c2410_udc.c
59508 +++ b/drivers/usb/gadget/udc/s3c2410_udc.c
59509 @@ -54,8 +54,6 @@ static struct clk             *udc_clock;
59510  static struct clk              *usb_bus_clock;
59511  static void __iomem            *base_addr;
59512  static int                     irq_usbd;
59513 -static u64                     rsrc_start;
59514 -static u64                     rsrc_len;
59515  static struct dentry           *s3c2410_udc_debugfs_root;
59517  static inline u32 udc_read(u32 reg)
59518 @@ -1752,7 +1750,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
59519         udc_clock = clk_get(NULL, "usb-device");
59520         if (IS_ERR(udc_clock)) {
59521                 dev_err(dev, "failed to get udc clock source\n");
59522 -               return PTR_ERR(udc_clock);
59523 +               retval = PTR_ERR(udc_clock);
59524 +               goto err_usb_bus_clk;
59525         }
59527         clk_prepare_enable(udc_clock);
59528 @@ -1775,7 +1774,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
59529         base_addr = devm_platform_ioremap_resource(pdev, 0);
59530         if (IS_ERR(base_addr)) {
59531                 retval = PTR_ERR(base_addr);
59532 -               goto err_mem;
59533 +               goto err_udc_clk;
59534         }
59536         the_controller = udc;
59537 @@ -1793,7 +1792,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
59538         if (retval != 0) {
59539                 dev_err(dev, "cannot get irq %i, err %d\n", irq_usbd, retval);
59540                 retval = -EBUSY;
59541 -               goto err_map;
59542 +               goto err_udc_clk;
59543         }
59545         dev_dbg(dev, "got irq %i\n", irq_usbd);
59546 @@ -1864,10 +1863,14 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
59547                 gpio_free(udc_info->vbus_pin);
59548  err_int:
59549         free_irq(irq_usbd, udc);
59550 -err_map:
59551 -       iounmap(base_addr);
59552 -err_mem:
59553 -       release_mem_region(rsrc_start, rsrc_len);
59554 +err_udc_clk:
59555 +       clk_disable_unprepare(udc_clock);
59556 +       clk_put(udc_clock);
59557 +       udc_clock = NULL;
59558 +err_usb_bus_clk:
59559 +       clk_disable_unprepare(usb_bus_clock);
59560 +       clk_put(usb_bus_clock);
59561 +       usb_bus_clock = NULL;
59563         return retval;
59565 @@ -1899,9 +1902,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
59567         free_irq(irq_usbd, udc);
59569 -       iounmap(base_addr);
59570 -       release_mem_region(rsrc_start, rsrc_len);
59572         if (!IS_ERR(udc_clock) && udc_clock != NULL) {
59573                 clk_disable_unprepare(udc_clock);
59574                 clk_put(udc_clock);
59575 diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
59576 index 32f1d3e90c26..99805d60a7ab 100644
59577 --- a/drivers/usb/gadget/udc/snps_udc_plat.c
59578 +++ b/drivers/usb/gadget/udc/snps_udc_plat.c
59579 @@ -114,8 +114,8 @@ static int udc_plat_probe(struct platform_device *pdev)
59581         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
59582         udc->virt_addr = devm_ioremap_resource(dev, res);
59583 -       if (IS_ERR(udc->regs))
59584 -               return PTR_ERR(udc->regs);
59585 +       if (IS_ERR(udc->virt_addr))
59586 +               return PTR_ERR(udc->virt_addr);
59588         /* udc csr registers base */
59589         udc->csr = udc->virt_addr + UDC_CSR_ADDR;
59590 diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
59591 index 580bef8eb4cb..2319c9737c2b 100644
59592 --- a/drivers/usb/gadget/udc/tegra-xudc.c
59593 +++ b/drivers/usb/gadget/udc/tegra-xudc.c
59594 @@ -3883,7 +3883,7 @@ static int tegra_xudc_remove(struct platform_device *pdev)
59596         pm_runtime_get_sync(xudc->dev);
59598 -       cancel_delayed_work(&xudc->plc_reset_work);
59599 +       cancel_delayed_work_sync(&xudc->plc_reset_work);
59600         cancel_work_sync(&xudc->usb_role_sw_work);
59602         usb_del_gadget_udc(&xudc->gadget);
59603 diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
59604 index b94f2a070c05..df9428f1dc5e 100644
59605 --- a/drivers/usb/host/Kconfig
59606 +++ b/drivers/usb/host/Kconfig
59607 @@ -272,6 +272,7 @@ config USB_EHCI_TEGRA
59608         select USB_CHIPIDEA
59609         select USB_CHIPIDEA_HOST
59610         select USB_CHIPIDEA_TEGRA
59611 +       select USB_GADGET
59612         help
59613           This option is deprecated now and the driver was removed, use
59614           USB_CHIPIDEA_TEGRA instead.
59615 diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
59616 index 5617ef30530a..f0e4a315cc81 100644
59617 --- a/drivers/usb/host/fotg210-hcd.c
59618 +++ b/drivers/usb/host/fotg210-hcd.c
59619 @@ -5568,7 +5568,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
59620         struct usb_hcd *hcd;
59621         struct resource *res;
59622         int irq;
59623 -       int retval = -ENODEV;
59624 +       int retval;
59625         struct fotg210_hcd *fotg210;
59627         if (usb_disabled())
59628 @@ -5588,7 +5588,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
59629         hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
59630                         dev_name(dev));
59631         if (!hcd) {
59632 -               dev_err(dev, "failed to create hcd with err %d\n", retval);
59633 +               dev_err(dev, "failed to create hcd\n");
59634                 retval = -ENOMEM;
59635                 goto fail_create_hcd;
59636         }
59637 diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
59638 index 115ced0d93e1..1be692d3cf90 100644
59639 --- a/drivers/usb/host/sl811-hcd.c
59640 +++ b/drivers/usb/host/sl811-hcd.c
59641 @@ -1287,11 +1287,10 @@ sl811h_hub_control(
59642                         goto error;
59643                 put_unaligned_le32(sl811->port1, buf);
59645 -#ifndef        VERBOSE
59646 -       if (*(u16*)(buf+2))     /* only if wPortChange is interesting */
59647 -#endif
59648 -               dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
59649 -                       sl811->port1);
59650 +               if (__is_defined(VERBOSE) ||
59651 +                   *(u16*)(buf+2)) /* only if wPortChange is interesting */
59652 +                       dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
59653 +                               sl811->port1);
59654                 break;
59655         case SetPortFeature:
59656                 if (wIndex != 1 || wLength != 0)
59657 diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
59658 index fa59b242cd51..e8af0a125f84 100644
59659 --- a/drivers/usb/host/xhci-ext-caps.h
59660 +++ b/drivers/usb/host/xhci-ext-caps.h
59661 @@ -7,8 +7,9 @@
59662   * Author: Sarah Sharp
59663   * Some code borrowed from the Linux EHCI driver.
59664   */
59665 -/* Up to 16 ms to halt an HC */
59666 -#define XHCI_MAX_HALT_USEC     (16*1000)
59668 +/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
59669 +#define XHCI_MAX_HALT_USEC     (32 * 1000)
59670  /* HC not running - set to 1 when run/stop bit is cleared. */
59671  #define XHCI_STS_HALT          (1<<0)
59673 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
59674 index f2c4ee7c4786..717c122f9449 100644
59675 --- a/drivers/usb/host/xhci-mem.c
59676 +++ b/drivers/usb/host/xhci-mem.c
59677 @@ -2129,6 +2129,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
59679         if (major_revision == 0x03) {
59680                 rhub = &xhci->usb3_rhub;
59681 +               /*
59682 +                * Some hosts incorrectly use sub-minor version for minor
59683 +                * version (e.g. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
59684 +                * for bcdUSB 0x310). Since there are no USB releases 3.01 to
59685 +                * 3.09 (bcdUSB 0x301 to 0x309), we can assume the values are
59686 +                * incorrect and fix it here.
59687 +                */
59688 +               if (minor_revision > 0x00 && minor_revision < 0x10)
59689 +                       minor_revision <<= 4;
59690         } else if (major_revision <= 0x02) {
59691                 rhub = &xhci->usb2_rhub;
59692         } else {
59693 @@ -2240,6 +2249,9 @@ static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
59694                 return;
59695         rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
59696                         flags, dev_to_node(dev));
59697 +       if (!rhub->ports)
59698 +               return;
59700         for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
59701                 if (xhci->hw_ports[i].rhub != rhub ||
59702                     xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
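
The xhci-mem.c hunk above normalizes root-hub minor revisions where firmware reports the sub-minor digit in the minor field (0x01 instead of 0x10 for USB 3.1). The fixup is a one-line BCD shift, sketched standalone:

#include <stdio.h>

/* Normalize a one-digit BCD minor (0x01..0x09) to its intended value. */
static unsigned fix_minor(unsigned minor)
{
	if (minor > 0x00 && minor < 0x10)
		minor <<= 4;	/* 0x01 -> 0x10 (3.1), 0x02 -> 0x20 (3.2) */
	return minor;
}

int main(void)
{
	printf("%#x %#x %#x\n", fix_minor(0x01), fix_minor(0x02), fix_minor(0x10));
	return 0;
}
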
59703 diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
59704 index b45e5bf08997..8950d1f10a7f 100644
59705 --- a/drivers/usb/host/xhci-mtk-sch.c
59706 +++ b/drivers/usb/host/xhci-mtk-sch.c
59707 @@ -378,6 +378,31 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
59708         sch_ep->allocated = used;
59711 +static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
59713 +       struct mu3h_sch_tt *tt = sch_ep->sch_tt;
59714 +       u32 num_esit, tmp;
59715 +       int base;
59716 +       int i, j;
59718 +       num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
59719 +       for (i = 0; i < num_esit; i++) {
59720 +               base = offset + i * sch_ep->esit;
59722 +               /*
59723 +                * Compared with the HS bus, regardless of endpoint type,
59724 +                * the hub always delays one uframe before sending data.
59725 +                */
59726 +               for (j = 0; j < sch_ep->cs_count; j++) {
59727 +                       tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe;
59728 +                       if (tmp > FS_PAYLOAD_MAX)
59729 +                               return -ERANGE;
59730 +               }
59731 +       }
59733 +       return 0;
59736  static int check_sch_tt(struct usb_device *udev,
59737         struct mu3h_sch_ep_info *sch_ep, u32 offset)
59739 @@ -402,7 +427,7 @@ static int check_sch_tt(struct usb_device *udev,
59740                         return -ERANGE;
59742                 for (i = 0; i < sch_ep->cs_count; i++)
59743 -                       if (test_bit(offset + i, tt->split_bit_map))
59744 +                       if (test_bit(offset + i, tt->ss_bit_map))
59745                                 return -ERANGE;
59747         } else {
59748 @@ -432,7 +457,7 @@ static int check_sch_tt(struct usb_device *udev,
59749                         cs_count = 7; /* HW limit */
59751                 for (i = 0; i < cs_count + 2; i++) {
59752 -                       if (test_bit(offset + i, tt->split_bit_map))
59753 +                       if (test_bit(offset + i, tt->ss_bit_map))
59754                                 return -ERANGE;
59755                 }
59757 @@ -448,24 +473,44 @@ static int check_sch_tt(struct usb_device *udev,
59758                         sch_ep->num_budget_microframes = sch_ep->esit;
59759         }
59761 -       return 0;
59762 +       return check_fs_bus_bw(sch_ep, offset);
59765  static void update_sch_tt(struct usb_device *udev,
59766 -       struct mu3h_sch_ep_info *sch_ep)
59767 +       struct mu3h_sch_ep_info *sch_ep, bool used)
59769         struct mu3h_sch_tt *tt = sch_ep->sch_tt;
59770         u32 base, num_esit;
59771 +       int bw_updated;
59772 +       int bits;
59773         int i, j;
59775         num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
59776 +       bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
59778 +       if (used)
59779 +               bw_updated = sch_ep->bw_cost_per_microframe;
59780 +       else
59781 +               bw_updated = -sch_ep->bw_cost_per_microframe;
59783         for (i = 0; i < num_esit; i++) {
59784                 base = sch_ep->offset + i * sch_ep->esit;
59785 -               for (j = 0; j < sch_ep->num_budget_microframes; j++)
59786 -                       set_bit(base + j, tt->split_bit_map);
59788 +               for (j = 0; j < bits; j++) {
59789 +                       if (used)
59790 +                               set_bit(base + j, tt->ss_bit_map);
59791 +                       else
59792 +                               clear_bit(base + j, tt->ss_bit_map);
59793 +               }
59795 +               for (j = 0; j < sch_ep->cs_count; j++)
59796 +                       tt->fs_bus_bw[base + j] += bw_updated;
59797         }
59799 -       list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
59800 +       if (used)
59801 +               list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
59802 +       else
59803 +               list_del(&sch_ep->tt_endpoint);
59806  static int check_sch_bw(struct usb_device *udev,
59807 @@ -535,7 +580,7 @@ static int check_sch_bw(struct usb_device *udev,
59808                 if (!tt_offset_ok)
59809                         return -ERANGE;
59811 -               update_sch_tt(udev, sch_ep);
59812 +               update_sch_tt(udev, sch_ep, 1);
59813         }
59815         /* update bus bandwidth info */
59816 @@ -548,15 +593,16 @@ static void destroy_sch_ep(struct usb_device *udev,
59817         struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
59819         /* only release ep bw check passed by check_sch_bw() */
59820 -       if (sch_ep->allocated)
59821 +       if (sch_ep->allocated) {
59822                 update_bus_bw(sch_bw, sch_ep, 0);
59823 +               if (sch_ep->sch_tt)
59824 +                       update_sch_tt(udev, sch_ep, 0);
59825 +       }
59827 -       list_del(&sch_ep->endpoint);
59829 -       if (sch_ep->sch_tt) {
59830 -               list_del(&sch_ep->tt_endpoint);
59831 +       if (sch_ep->sch_tt)
59832                 drop_tt(udev);
59833 -       }
59835 +       list_del(&sch_ep->endpoint);
59836         kfree(sch_ep);
59839 @@ -643,7 +689,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
59840                  */
59841                 if (usb_endpoint_xfer_int(&ep->desc)
59842                         || usb_endpoint_xfer_isoc(&ep->desc))
59843 -                       ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
59844 +                       ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1));
59846                 return 0;
59847         }
59848 @@ -730,10 +776,10 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
59849                 list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
59851                 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
59852 -               ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
59853 +               ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts)
59854                         | EP_BCSCOUNT(sch_ep->cs_count)
59855                         | EP_BBM(sch_ep->burst_mode));
59856 -               ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
59857 +               ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(sch_ep->offset)
59858                         | EP_BREPEAT(sch_ep->repeat));
59860                 xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
59861 diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
59862 index 2f27dc0d9c6b..1c331577fca9 100644
59863 --- a/drivers/usb/host/xhci-mtk.c
59864 +++ b/drivers/usb/host/xhci-mtk.c
59865 @@ -397,6 +397,8 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
59866         xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
59867         if (mtk->lpm_support)
59868                 xhci->quirks |= XHCI_LPM_SUPPORT;
59869 +       if (mtk->u2_lpm_disable)
59870 +               xhci->quirks |= XHCI_HW_LPM_DISABLE;
59872         /*
59873          * MTK xHCI 0.96: PSA is 1 by default even if doesn't support stream,
59874 @@ -469,6 +471,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
59875                 return ret;
59877         mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
59878 +       mtk->u2_lpm_disable = of_property_read_bool(node, "usb2-lpm-disable");
59879         /* optional property, ignore the error if it does not exist */
59880         of_property_read_u32(node, "mediatek,u3p-dis-msk",
59881                              &mtk->u3p_dis_msk);
59882 diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
59883 index cbb09dfea62e..2fc0568ba054 100644
59884 --- a/drivers/usb/host/xhci-mtk.h
59885 +++ b/drivers/usb/host/xhci-mtk.h
59886 @@ -20,13 +20,15 @@
59887  #define XHCI_MTK_MAX_ESIT      64
59889  /**
59890 - * @split_bit_map: used to avoid split microframes overlay
59891 + * @ss_bit_map: used to avoid start split microframes overlap
59892 + * @fs_bus_bw: array to keep track of bandwidth already used for FS
59893   * @ep_list: Endpoints using this TT
59894   * @usb_tt: usb TT related
59895   * @tt_port: TT port number
59896   */
59897  struct mu3h_sch_tt {
59898 -       DECLARE_BITMAP(split_bit_map, XHCI_MTK_MAX_ESIT);
59899 +       DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
59900 +       u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
59901         struct list_head ep_list;
59902         struct usb_tt *usb_tt;
59903         int tt_port;
59904 @@ -150,6 +152,7 @@ struct xhci_hcd_mtk {
59905         struct phy **phys;
59906         int num_phys;
59907         bool lpm_support;
59908 +       bool u2_lpm_disable;
59909         /* usb remote wakeup */
59910         bool uwk_en;
59911         struct regmap *uwk;
59912 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
59913 index 5bbccc9a0179..7bc18cf8042c 100644
59914 --- a/drivers/usb/host/xhci-pci.c
59915 +++ b/drivers/usb/host/xhci-pci.c
59916 @@ -57,6 +57,7 @@
59917  #define PCI_DEVICE_ID_INTEL_CML_XHCI                   0xa3af
59918  #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI            0x9a13
59919  #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI           0x1138
59920 +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI            0x461e
59922  #define PCI_DEVICE_ID_AMD_PROMONTORYA_4                        0x43b9
59923  #define PCI_DEVICE_ID_AMD_PROMONTORYA_3                        0x43ba
59924 @@ -166,8 +167,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
59925             (pdev->device == 0x15e0 || pdev->device == 0x15e1))
59926                 xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
59928 -       if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
59929 +       if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
59930                 xhci->quirks |= XHCI_DISABLE_SPARSE;
59931 +               xhci->quirks |= XHCI_RESET_ON_RESUME;
59932 +       }
59934         if (pdev->vendor == PCI_VENDOR_ID_AMD)
59935                 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
59936 @@ -243,7 +246,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
59937              pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
59938              pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
59939              pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
59940 -            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
59941 +            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
59942 +            pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
59943                 xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
59945         if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
59946 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
59947 index ce38076901e2..6cdea0d00d19 100644
59948 --- a/drivers/usb/host/xhci-ring.c
59949 +++ b/drivers/usb/host/xhci-ring.c
59950 @@ -863,7 +863,7 @@ static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
59951         return ret;
59954 -static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
59955 +static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
59956                                 struct xhci_virt_ep *ep, unsigned int stream_id,
59957                                 struct xhci_td *td,
59958                                 enum xhci_ep_reset_type reset_type)
59959 @@ -876,7 +876,7 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
59960          * Device will be reset soon to recover the link so don't do anything
59961          */
59962         if (ep->vdev->flags & VDEV_PORT_ERROR)
59963 -               return;
59964 +               return -ENODEV;
59966         /* add td to cancelled list and let reset ep handler take care of it */
59967         if (reset_type == EP_HARD_RESET) {
59968 @@ -889,16 +889,18 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
59970         if (ep->ep_state & EP_HALTED) {
59971                 xhci_dbg(xhci, "Reset ep command already pending\n");
59972 -               return;
59973 +               return 0;
59974         }
59976         err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
59977         if (err)
59978 -               return;
59979 +               return err;
59981         ep->ep_state |= EP_HALTED;
59983         xhci_ring_cmd_db(xhci);
59985 +       return 0;
59988  /*
59989 @@ -1015,6 +1017,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
59990         struct xhci_td *td = NULL;
59991         enum xhci_ep_reset_type reset_type;
59992         struct xhci_command *command;
59993 +       int err;
59995         if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
59996                 if (!xhci->devs[slot_id])
59997 @@ -1059,7 +1062,10 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
59998                                         td->status = -EPROTO;
59999                         }
60000                         /* reset ep, reset handler cleans up cancelled tds */
60001 -                       xhci_handle_halted_endpoint(xhci, ep, 0, td, reset_type);
60002 +                       err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
60003 +                                                         reset_type);
60004 +                       if (err)
60005 +                               break;
60006                         xhci_stop_watchdog_timer_in_irq(xhci, ep);
60007                         return;
60008                 case EP_STATE_RUNNING:
60009 @@ -2129,16 +2135,13 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
60010         return 0;
60013 -static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
60014 -       struct xhci_transfer_event *event, struct xhci_virt_ep *ep)
60015 +static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
60016 +                    struct xhci_ring *ep_ring, struct xhci_td *td,
60017 +                    u32 trb_comp_code)
60019         struct xhci_ep_ctx *ep_ctx;
60020 -       struct xhci_ring *ep_ring;
60021 -       u32 trb_comp_code;
60023 -       ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
60024         ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
60025 -       trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
60027         switch (trb_comp_code) {
60028         case COMP_STOPPED_LENGTH_INVALID:
60029 @@ -2234,9 +2237,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
60030  /*
60031   * Process control tds, update urb status and actual_length.
60032   */
60033 -static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
60034 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
60035 -       struct xhci_virt_ep *ep)
60036 +static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
60037 +               struct xhci_ring *ep_ring,  struct xhci_td *td,
60038 +                          union xhci_trb *ep_trb, struct xhci_transfer_event *event)
60040         struct xhci_ep_ctx *ep_ctx;
60041         u32 trb_comp_code;
60042 @@ -2324,15 +2327,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
60043                 td->urb->actual_length = requested;
60045  finish_td:
60046 -       return finish_td(xhci, td, event, ep);
60047 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
60050  /*
60051   * Process isochronous tds, update urb packet status and actual_length.
60052   */
60053 -static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
60054 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
60055 -       struct xhci_virt_ep *ep)
60056 +static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
60057 +               struct xhci_ring *ep_ring, struct xhci_td *td,
60058 +               union xhci_trb *ep_trb, struct xhci_transfer_event *event)
60060         struct urb_priv *urb_priv;
60061         int idx;
60062 @@ -2409,7 +2412,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
60064         td->urb->actual_length += frame->actual_length;
60066 -       return finish_td(xhci, td, event, ep);
60067 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
60070  static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
60071 @@ -2441,17 +2444,15 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
60072  /*
60073   * Process bulk and interrupt tds, update urb status and actual_length.
60074   */
60075 -static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
60076 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
60077 -       struct xhci_virt_ep *ep)
60078 +static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
60079 +               struct xhci_ring *ep_ring, struct xhci_td *td,
60080 +               union xhci_trb *ep_trb, struct xhci_transfer_event *event)
60082         struct xhci_slot_ctx *slot_ctx;
60083 -       struct xhci_ring *ep_ring;
60084         u32 trb_comp_code;
60085         u32 remaining, requested, ep_trb_len;
60087         slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
60088 -       ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
60089         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
60090         remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
60091         ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
60092 @@ -2511,7 +2512,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
60093                           remaining);
60094                 td->urb->actual_length = 0;
60095         }
60096 -       return finish_td(xhci, td, event, ep);
60098 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
60101  /*
60102 @@ -2854,11 +2856,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
60104                 /* update the urb's actual_length and give back to the core */
60105                 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
60106 -                       process_ctrl_td(xhci, td, ep_trb, event, ep);
60107 +                       process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
60108                 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
60109 -                       process_isoc_td(xhci, td, ep_trb, event, ep);
60110 +                       process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
60111                 else
60112 -                       process_bulk_intr_td(xhci, td, ep_trb, event, ep);
60113 +                       process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
60114  cleanup:
60115                 handling_skipped_tds = ep->skip &&
60116                         trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
60117 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
60118 index 1975016f46bf..0d2f1c37ab74 100644
60119 --- a/drivers/usb/host/xhci.c
60120 +++ b/drivers/usb/host/xhci.c
60121 @@ -228,6 +228,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
60122         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
60123         int err, i;
60124         u64 val;
60125 +       u32 intrs;
60127         /*
60128          * Some Renesas controllers get into a weird state if they are
60129 @@ -266,7 +267,10 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
60130         if (upper_32_bits(val))
60131                 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
60133 -       for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
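+       /* the controller may report more interrupters than ir_set[] holds */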
60134 +       intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
60135 +                     ARRAY_SIZE(xhci->run_regs->ir_set));
60137 +       for (i = 0; i < intrs; i++) {
60138                 struct xhci_intr_reg __iomem *ir;
60140                 ir = &xhci->run_regs->ir_set[i];
60141 @@ -1510,7 +1514,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
60142   * we need to issue an evaluate context command and wait on it.
60143   */
60144  static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
60145 -               unsigned int ep_index, struct urb *urb)
60146 +               unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
60148         struct xhci_container_ctx *out_ctx;
60149         struct xhci_input_control_ctx *ctrl_ctx;
60150 @@ -1541,7 +1545,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
60151                  * changes max packet sizes.
60152                  */
60154 -               command = xhci_alloc_command(xhci, true, GFP_KERNEL);
60155 +               command = xhci_alloc_command(xhci, true, mem_flags);
60156                 if (!command)
60157                         return -ENOMEM;
60159 @@ -1635,7 +1639,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
60160                  */
60161                 if (urb->dev->speed == USB_SPEED_FULL) {
60162                         ret = xhci_check_maxpacket(xhci, slot_id,
60163 -                                       ep_index, urb);
60164 +                                       ep_index, urb, mem_flags);
60165                         if (ret < 0) {
60166                                 xhci_urb_free_priv(urb_priv);
60167                                 urb->hcpriv = NULL;
60168 @@ -3269,6 +3273,14 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
60170         /* config ep command clears toggle if add and drop ep flags are set */
60171         ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
60172 +       if (!ctrl_ctx) {
60173 +               spin_unlock_irqrestore(&xhci->lock, flags);
60174 +               xhci_free_command(xhci, cfg_cmd);
60175 +               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
60176 +                               __func__);
60177 +               goto cleanup;
60178 +       }
60180         xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
60181                                            ctrl_ctx, ep_flag, ep_flag);
60182         xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
60183 diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
60184 index eebeadd26946..6b92d037d8fc 100644
60185 --- a/drivers/usb/musb/mediatek.c
60186 +++ b/drivers/usb/musb/mediatek.c
60187 @@ -518,8 +518,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
60189         glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
60190         if (IS_ERR(glue->xceiv)) {
60191 -               dev_err(dev, "fail to getting usb-phy %d\n", ret);
60192                 ret = PTR_ERR(glue->xceiv);
60193 +               dev_err(dev, "failed to get usb-phy %d\n", ret);
60194                 goto err_unregister_usb_phy;
60195         }
60197 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
60198 index fc0457db62e1..8f09a387b773 100644
60199 --- a/drivers/usb/musb/musb_core.c
60200 +++ b/drivers/usb/musb/musb_core.c
60201 @@ -2070,7 +2070,7 @@ static void musb_irq_work(struct work_struct *data)
60202         struct musb *musb = container_of(data, struct musb, irq_work.work);
60203         int error;
60205 -       error = pm_runtime_get_sync(musb->controller);
60206 +       error = pm_runtime_resume_and_get(musb->controller);
60207         if (error < 0) {
60208                 dev_err(musb->controller, "Could not enable: %i\n", error);
60210 diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
60211 index 97f37077b7f9..33b637d0d8d9 100644
60212 --- a/drivers/usb/roles/class.c
60213 +++ b/drivers/usb/roles/class.c
60214 @@ -189,6 +189,8 @@ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
60215                 return NULL;
60217         dev = class_find_device_by_fwnode(role_class, fwnode);
60218 +       if (dev)
60219 +               WARN_ON(!try_module_get(dev->parent->driver->owner));
60221         return dev ? to_role_switch(dev) : NULL;
60223 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
60224 index 7252b0ce75a6..fe1c13a8849c 100644
60225 --- a/drivers/usb/serial/ti_usb_3410_5052.c
60226 +++ b/drivers/usb/serial/ti_usb_3410_5052.c
60227 @@ -1418,14 +1418,19 @@ static int ti_set_serial_info(struct tty_struct *tty,
60228         struct serial_struct *ss)
60230         struct usb_serial_port *port = tty->driver_data;
60231 -       struct ti_port *tport = usb_get_serial_port_data(port);
60232 +       struct tty_port *tport = &port->port;
60233         unsigned cwait;
60235         cwait = ss->closing_wait;
60236         if (cwait != ASYNC_CLOSING_WAIT_NONE)
60237                 cwait = msecs_to_jiffies(10 * ss->closing_wait);
60239 -       tport->tp_port->port.closing_wait = cwait;
60240 +       if (!capable(CAP_SYS_ADMIN)) {
60241 +               if (cwait != tport->closing_wait)
60242 +                       return -EPERM;
60243 +       }
60245 +       tport->closing_wait = cwait;
60247         return 0;
60249 diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
60250 index 46d46a4f99c9..4e9c994a972a 100644
60251 --- a/drivers/usb/serial/usb_wwan.c
60252 +++ b/drivers/usb/serial/usb_wwan.c
60253 @@ -140,10 +140,10 @@ int usb_wwan_get_serial_info(struct tty_struct *tty,
60254         ss->line            = port->minor;
60255         ss->port            = port->port_number;
60256         ss->baud_base       = tty_get_baud_rate(port->port.tty);
60257 -       ss->close_delay     = port->port.close_delay / 10;
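+       /* close_delay is kept in jiffies; serial_struct reports centiseconds */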
60258 +       ss->close_delay     = jiffies_to_msecs(port->port.close_delay) / 10;
60259         ss->closing_wait    = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
60260                                  ASYNC_CLOSING_WAIT_NONE :
60261 -                                port->port.closing_wait / 10;
60262 +                                jiffies_to_msecs(port->port.closing_wait) / 10;
60263         return 0;
60265  EXPORT_SYMBOL(usb_wwan_get_serial_info);
60266 @@ -155,9 +155,10 @@ int usb_wwan_set_serial_info(struct tty_struct *tty,
60267         unsigned int closing_wait, close_delay;
60268         int retval = 0;
60270 -       close_delay = ss->close_delay * 10;
60271 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
60272         closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
60273 -                       ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
60274 +                       ASYNC_CLOSING_WAIT_NONE :
60275 +                       msecs_to_jiffies(ss->closing_wait * 10);
60277         mutex_lock(&port->port.mutex);
60279 diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
60280 index 0ca04906da4b..c59c8b47a120 100644
60281 --- a/drivers/usb/serial/xr_serial.c
60282 +++ b/drivers/usb/serial/xr_serial.c
60283 @@ -467,6 +467,11 @@ static void xr_set_termios(struct tty_struct *tty,
60284                 termios->c_cflag &= ~CSIZE;
60285                 if (old_termios)
60286                         termios->c_cflag |= old_termios->c_cflag & CSIZE;
60287 +               else
60288 +                       termios->c_cflag |= CS8;
60290 +               if (C_CSIZE(tty) == CS7)
60291 +                       bits |= XR21V141X_UART_DATA_7;
60292                 else
60293                         bits |= XR21V141X_UART_DATA_8;
60294                 break;
60295 diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
60296 index d21750bbbb44..6eaeba9b096e 100644
60297 --- a/drivers/usb/typec/stusb160x.c
60298 +++ b/drivers/usb/typec/stusb160x.c
60299 @@ -682,8 +682,8 @@ static int stusb160x_probe(struct i2c_client *client)
60300         }
60302         fwnode = device_get_named_child_node(chip->dev, "connector");
60303 -       if (IS_ERR(fwnode))
60304 -               return PTR_ERR(fwnode);
60305 +       if (!fwnode)
60306 +               return -ENODEV;
60308         /*
60309          * When both VDD and VSYS power supplies are present, the low power
60310 diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
60311 index a27deb0b5f03..027afd7dfdce 100644
60312 --- a/drivers/usb/typec/tcpm/tcpci.c
60313 +++ b/drivers/usb/typec/tcpm/tcpci.c
60314 @@ -24,6 +24,15 @@
60315  #define        AUTO_DISCHARGE_PD_HEADROOM_MV           850
60316  #define        AUTO_DISCHARGE_PPS_HEADROOM_MV          1250
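+/* true when ROLE_CONTROL is not DRP-toggling and statically presents Rd on CCx */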
60318 +#define tcpc_presenting_cc1_rd(reg) \
60319 +       (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
60320 +        (((reg) & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) == \
60321 +         (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT)))
60322 +#define tcpc_presenting_cc2_rd(reg) \
60323 +       (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
60324 +        (((reg) & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) == \
60325 +         (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT)))
60327  struct tcpci {
60328         struct device *dev;
60330 @@ -178,19 +187,25 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc,
60331                         enum typec_cc_status *cc1, enum typec_cc_status *cc2)
60333         struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
60334 -       unsigned int reg;
60335 +       unsigned int reg, role_control;
60336         int ret;
60338 +       ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
60339 +       if (ret < 0)
60340 +               return ret;
60342         ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
60343         if (ret < 0)
60344                 return ret;
60346         *cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
60347                                  TCPC_CC_STATUS_CC1_MASK,
60348 -                                reg & TCPC_CC_STATUS_TERM);
60349 +                                reg & TCPC_CC_STATUS_TERM ||
60350 +                                tcpc_presenting_cc1_rd(role_control));
60351         *cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
60352                                  TCPC_CC_STATUS_CC2_MASK,
60353 -                                reg & TCPC_CC_STATUS_TERM);
60354 +                                reg & TCPC_CC_STATUS_TERM ||
60355 +                                tcpc_presenting_cc2_rd(role_control));
60357         return 0;
60359 diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
60360 index ce7af398c7c1..52acc884a61f 100644
60361 --- a/drivers/usb/typec/tcpm/tcpm.c
60362 +++ b/drivers/usb/typec/tcpm/tcpm.c
60363 @@ -268,12 +268,27 @@ struct pd_mode_data {
60364         struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
60365  };
60367 +/**
60368 + * @min_volt: Actual min voltage at the local port
60369 + * @req_min_volt: Requested min voltage to the port partner
60370 + * @max_volt: Actual max voltage at the local port
60371 + * @req_max_volt: Requested max voltage to the port partner
60372 + * @max_curr: Actual max current at the local port
60373 + * @req_max_curr: Requested max current of the port partner
60374 + * @req_out_volt: Requested output voltage to the port partner
60375 + * @req_op_curr: Requested operating current to the port partner
60376 + * @supported: Partner has at least one APDO, hence supports PPS
60377 + * @active: PPS mode is active
60378 + */
60379  struct pd_pps_data {
60380         u32 min_volt;
60381 +       u32 req_min_volt;
60382         u32 max_volt;
60383 +       u32 req_max_volt;
60384         u32 max_curr;
60385 -       u32 out_volt;
60386 -       u32 op_curr;
60387 +       u32 req_max_curr;
60388 +       u32 req_out_volt;
60389 +       u32 req_op_curr;
60390         bool supported;
60391         bool active;
60392  };
60393 @@ -389,7 +404,10 @@ struct tcpm_port {
60394         unsigned int operating_snk_mw;
60395         bool update_sink_caps;
60397 -       /* Requested current / voltage */
60398 +       /* Requested current / voltage to the port partner */
60399 +       u32 req_current_limit;
60400 +       u32 req_supply_voltage;
60401 +       /* Actual current / voltage limit of the local port */
60402         u32 current_limit;
60403         u32 supply_voltage;
60405 @@ -438,6 +456,9 @@ struct tcpm_port {
60406         enum tcpm_ams next_ams;
60407         bool in_ams;
60409 +       /* Auto vbus discharge status */
60410 +       bool auto_vbus_discharge_enabled;
60412  #ifdef CONFIG_DEBUG_FS
60413         struct dentry *dentry;
60414         struct mutex logbuffer_lock;    /* log buffer access lock */
60415 @@ -507,6 +528,9 @@ static const char * const pd_rev[] = {
60416         (tcpm_port_is_sink(port) && \
60417         ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
60419 +#define tcpm_wait_for_discharge(port) \
60420 +       (((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
60422  static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
60424         if (port->port_type == TYPEC_PORT_DRP) {
60425 @@ -1853,7 +1877,6 @@ static void vdm_run_state_machine(struct tcpm_port *port)
60426                         }
60428                         if (res < 0) {
60429 -                               port->vdm_sm_running = false;
60430                                 return;
60431                         }
60432                 }
60433 @@ -1869,6 +1892,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
60434                 port->vdo_data[0] = port->vdo_retry;
60435                 port->vdo_count = 1;
60436                 port->vdm_state = VDM_STATE_READY;
60437 +               tcpm_ams_finish(port);
60438                 break;
60439         case VDM_STATE_BUSY:
60440                 port->vdm_state = VDM_STATE_ERR_TMOUT;
60441 @@ -1934,7 +1958,7 @@ static void vdm_state_machine_work(struct kthread_work *work)
60442                  port->vdm_state != VDM_STATE_BUSY &&
60443                  port->vdm_state != VDM_STATE_SEND_MESSAGE);
60445 -       if (port->vdm_state == VDM_STATE_ERR_TMOUT)
60446 +       if (port->vdm_state < VDM_STATE_READY)
60447                 port->vdm_sm_running = false;
60449         mutex_unlock(&port->lock);
60450 @@ -2363,7 +2387,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
60451                 port->nr_sink_caps = cnt;
60452                 port->sink_cap_done = true;
60453                 if (port->ams == GET_SINK_CAPABILITIES)
60454 -                       tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
60455 +                       tcpm_set_state(port, ready_state(port), 0);
60456                 /* Unexpected Sink Capabilities */
60457                 else
60458                         tcpm_pd_handle_msg(port,
60459 @@ -2432,8 +2456,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
60460                 case SNK_TRANSITION_SINK:
60461                         if (port->vbus_present) {
60462                                 tcpm_set_current_limit(port,
60463 -                                                      port->current_limit,
60464 -                                                      port->supply_voltage);
60465 +                                                      port->req_current_limit,
60466 +                                                      port->req_supply_voltage);
60467                                 port->explicit_contract = true;
60468                                 tcpm_set_auto_vbus_discharge_threshold(port,
60469                                                                        TYPEC_PWR_MODE_PD,
60470 @@ -2492,8 +2516,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
60471                         break;
60472                 case SNK_NEGOTIATE_PPS_CAPABILITIES:
60473                         /* Revert data back from any requested PPS updates */
60474 -                       port->pps_data.out_volt = port->supply_voltage;
60475 -                       port->pps_data.op_curr = port->current_limit;
60476 +                       port->pps_data.req_out_volt = port->supply_voltage;
60477 +                       port->pps_data.req_op_curr = port->current_limit;
60478                         port->pps_status = (type == PD_CTRL_WAIT ?
60479                                             -EAGAIN : -EOPNOTSUPP);
60481 @@ -2525,6 +2549,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
60482                         port->sink_cap_done = true;
60483                         tcpm_set_state(port, ready_state(port), 0);
60484                         break;
60485 +               case SRC_READY:
60486 +               case SNK_READY:
60487 +                       if (port->vdm_state > VDM_STATE_READY) {
60488 +                               port->vdm_state = VDM_STATE_DONE;
60489 +                               if (tcpm_vdm_ams(port))
60490 +                                       tcpm_ams_finish(port);
60491 +                               mod_vdm_delayed_work(port, 0);
60492 +                               break;
60493 +                       }
60494 +                       fallthrough;
60495                 default:
60496                         tcpm_pd_handle_state(port,
60497                                              port->pwr_role == TYPEC_SOURCE ?
60498 @@ -2542,8 +2576,12 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
60499                         break;
60500                 case SNK_NEGOTIATE_PPS_CAPABILITIES:
60501                         port->pps_data.active = true;
60502 -                       port->supply_voltage = port->pps_data.out_volt;
60503 -                       port->current_limit = port->pps_data.op_curr;
60504 +                       port->pps_data.min_volt = port->pps_data.req_min_volt;
60505 +                       port->pps_data.max_volt = port->pps_data.req_max_volt;
60506 +                       port->pps_data.max_curr = port->pps_data.req_max_curr;
60507 +                       port->req_supply_voltage = port->pps_data.req_out_volt;
60508 +                       port->req_current_limit = port->pps_data.req_op_curr;
60509 +                       power_supply_changed(port->psy);
60510                         tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
60511                         break;
60512                 case SOFT_RESET_SEND:
60513 @@ -3102,17 +3140,16 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
60514                 src = port->source_caps[src_pdo];
60515                 snk = port->snk_pdo[snk_pdo];
60517 -               port->pps_data.min_volt = max(pdo_pps_apdo_min_voltage(src),
60518 -                                             pdo_pps_apdo_min_voltage(snk));
60519 -               port->pps_data.max_volt = min(pdo_pps_apdo_max_voltage(src),
60520 -                                             pdo_pps_apdo_max_voltage(snk));
60521 -               port->pps_data.max_curr = min_pps_apdo_current(src, snk);
60522 -               port->pps_data.out_volt = min(port->pps_data.max_volt,
60523 -                                             max(port->pps_data.min_volt,
60524 -                                                 port->pps_data.out_volt));
60525 -               port->pps_data.op_curr = min(port->pps_data.max_curr,
60526 -                                            port->pps_data.op_curr);
60527 -               power_supply_changed(port->psy);
60528 +               port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src),
60529 +                                                 pdo_pps_apdo_min_voltage(snk));
60530 +               port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
60531 +                                                 pdo_pps_apdo_max_voltage(snk));
60532 +               port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
60533 +               port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
60534 +                                                 max(port->pps_data.req_min_volt,
60535 +                                                     port->pps_data.req_out_volt));
60536 +               port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
60537 +                                                port->pps_data.req_op_curr);
60538         }
60540         return src_pdo;
60541 @@ -3192,8 +3229,8 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
60542                          flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
60543         }
60545 -       port->current_limit = ma;
60546 -       port->supply_voltage = mv;
60547 +       port->req_current_limit = ma;
60548 +       port->req_supply_voltage = mv;
60550         return 0;
60552 @@ -3239,10 +3276,10 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
60553                         tcpm_log(port, "Invalid APDO selected!");
60554                         return -EINVAL;
60555                 }
60556 -               max_mv = port->pps_data.max_volt;
60557 -               max_ma = port->pps_data.max_curr;
60558 -               out_mv = port->pps_data.out_volt;
60559 -               op_ma = port->pps_data.op_curr;
60560 +               max_mv = port->pps_data.req_max_volt;
60561 +               max_ma = port->pps_data.req_max_curr;
60562 +               out_mv = port->pps_data.req_out_volt;
60563 +               op_ma = port->pps_data.req_op_curr;
60564                 break;
60565         default:
60566                 tcpm_log(port, "Invalid PDO selected!");
60567 @@ -3289,8 +3326,8 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
60568         tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
60569                  src_pdo_index, out_mv, op_ma);
60571 -       port->pps_data.op_curr = op_ma;
60572 -       port->pps_data.out_volt = out_mv;
60573 +       port->pps_data.req_op_curr = op_ma;
60574 +       port->pps_data.req_out_volt = out_mv;
60576         return 0;
60578 @@ -3418,6 +3455,8 @@ static int tcpm_src_attach(struct tcpm_port *port)
60579         if (port->tcpc->enable_auto_vbus_discharge) {
60580                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
60581                 tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
60582 +               if (!ret)
60583 +                       port->auto_vbus_discharge_enabled = true;
60584         }
60586         ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
60587 @@ -3500,6 +3539,8 @@ static void tcpm_reset_port(struct tcpm_port *port)
60588         if (port->tcpc->enable_auto_vbus_discharge) {
60589                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, false);
60590                 tcpm_log_force(port, "Disable vbus discharge ret:%d", ret);
60591 +               if (!ret)
60592 +                       port->auto_vbus_discharge_enabled = false;
60593         }
60594         port->in_ams = false;
60595         port->ams = NONE_AMS;
60596 @@ -3533,8 +3574,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
60597         port->sink_cap_done = false;
60598         if (port->tcpc->enable_frs)
60599                 port->tcpc->enable_frs(port->tcpc, false);
60601 -       power_supply_changed(port->psy);
60604  static void tcpm_detach(struct tcpm_port *port)
60605 @@ -3574,6 +3613,8 @@ static int tcpm_snk_attach(struct tcpm_port *port)
60606                 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
60607                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
60608                 tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
60609 +               if (!ret)
60610 +                       port->auto_vbus_discharge_enabled = true;
60611         }
60613         ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
60614 @@ -4103,6 +4144,23 @@ static void run_state_machine(struct tcpm_port *port)
60615                 }
60616                 break;
60617         case SNK_TRANSITION_SINK:
60618 +               /* From the USB PD spec:
60619 +                * "The Sink Shall transition to Sink Standby before a positive or
60620 +                * negative voltage transition of VBUS. During Sink Standby
60621 +                * the Sink Shall reduce its power draw to pSnkStdby."
60622 +                *
60623 +                * This is not applicable to PPS though as the port can continue
60624 +                * to draw negotiated power without switching to standby.
60625 +                */
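+               /*
+                * Illustrative numbers: with PD_P_SNK_STDBY_MW == 2500 (2.5 W
+                * pSnkStdby) and a 9000 mV contract, stdby_ma below works out
+                * to 2500 * 1000 / 9000 = 277 mA.
+                */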
60626 +               if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
60627 +                   port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
60628 +                       u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
60630 +                       tcpm_log(port, "Setting standby current %u mV @ %u mA",
60631 +                                port->supply_voltage, stdby_ma);
60632 +                       tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
60633 +               }
60634 +               fallthrough;
60635         case SNK_TRANSITION_SINK_VBUS:
60636                 tcpm_set_state(port, hard_reset_state(port),
60637                                PD_T_PS_TRANSITION);
60638 @@ -4676,9 +4734,9 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
60639                 if (tcpm_port_is_disconnected(port) ||
60640                     !tcpm_port_is_source(port)) {
60641                         if (port->port_type == TYPEC_PORT_SRC)
60642 -                               tcpm_set_state(port, SRC_UNATTACHED, 0);
60643 +                               tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
60644                         else
60645 -                               tcpm_set_state(port, SNK_UNATTACHED, 0);
60646 +                               tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
60647                 }
60648                 break;
60649         case SNK_UNATTACHED:
60650 @@ -4709,7 +4767,23 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
60651                         tcpm_set_state(port, SNK_DEBOUNCED, 0);
60652                 break;
60653         case SNK_READY:
60654 -               if (tcpm_port_is_disconnected(port))
60655 +               /*
60656 +                * EXIT condition is based primarily on vbus disconnect and CC is secondary.
60657 +                * "A port that has entered into USB PD communications with the Source and
60658 +                * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
60659 +                * cable disconnect in addition to monitoring VBUS.
60660 +                *
60661 +                * A port that is monitoring the CC voltage for disconnect (but is not in
60662 +                * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
60663 +                * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
60664 +                * vRd-USB for tPDDebounce."
60665 +                *
60666 +                * When set_auto_vbus_discharge_threshold is enabled, CC pins go
60667 +                * away before vbus decays to disconnect threshold. Allow
60668 +                * disconnect to be driven by vbus disconnect when auto vbus
60669 +                * discharge is enabled.
60670 +                */
60671 +               if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
60672                         tcpm_set_state(port, unattached_state(port), 0);
60673                 else if (!port->pd_capable &&
60674                          (cc1 != old_cc1 || cc2 != old_cc2))
60675 @@ -4808,9 +4882,13 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
60676                  * Ignore CC changes here.
60677                  */
60678                 break;
60680         default:
60681 -               if (tcpm_port_is_disconnected(port))
60682 +               /*
60683 +                * While acting as sink with auto vbus discharge enabled, allow
60684 +                * disconnect to be driven by vbus disconnect.
60685 +                */
60686 +               if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
60687 +                                                        port->auto_vbus_discharge_enabled))
60688                         tcpm_set_state(port, unattached_state(port), 0);
60689                 break;
60690         }
60691 @@ -4974,8 +5052,16 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
60692         case SRC_TRANSITION_SUPPLY:
60693         case SRC_READY:
60694         case SRC_WAIT_NEW_CAPABILITIES:
60695 -               /* Force to unattached state to re-initiate connection */
60696 -               tcpm_set_state(port, SRC_UNATTACHED, 0);
60697 +               /*
60698 +                * Force to unattached state to re-initiate connection.
60699 +                * DRP port should move to Unattached.SNK instead of Unattached.SRC if
60700 +                * sink removed. Although sink removal here is due to source's vbus collapse,
60701 +                * treat it the same way for consistency.
60702 +                */
60703 +               if (port->port_type == TYPEC_PORT_SRC)
60704 +                       tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
60705 +               else
60706 +                       tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
60707                 break;
60709         case PORT_RESET:
60710 @@ -4994,9 +5080,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
60711                 break;
60713         default:
60714 -               if (port->pwr_role == TYPEC_SINK &&
60715 -                   port->attached)
60716 -                       tcpm_set_state(port, SNK_UNATTACHED, 0);
60717 +               if (port->pwr_role == TYPEC_SINK && port->attached)
60718 +                       tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
60719                 break;
60720         }
60722 @@ -5018,7 +5103,23 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
60723                         tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
60724                                        PD_T_CC_DEBOUNCE);
60725                 break;
60726 +       case SRC_STARTUP:
60727 +       case SRC_SEND_CAPABILITIES:
60728 +       case SRC_SEND_CAPABILITIES_TIMEOUT:
60729 +       case SRC_NEGOTIATE_CAPABILITIES:
60730 +       case SRC_TRANSITION_SUPPLY:
60731 +       case SRC_READY:
60732 +       case SRC_WAIT_NEW_CAPABILITIES:
60733 +               if (port->auto_vbus_discharge_enabled) {
60734 +                       if (port->port_type == TYPEC_PORT_SRC)
60735 +                               tcpm_set_state(port, SRC_UNATTACHED, 0);
60736 +                       else
60737 +                               tcpm_set_state(port, SNK_UNATTACHED, 0);
60738 +               }
60739 +               break;
60740         default:
60741 +               if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
60742 +                       tcpm_set_state(port, SNK_UNATTACHED, 0);
60743                 break;
60744         }
60746 @@ -5374,7 +5475,7 @@ static int tcpm_try_role(struct typec_port *p, int role)
60747         return ret;
60750 -static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
60751 +static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
60753         unsigned int target_mw;
60754         int ret;
60755 @@ -5392,12 +5493,12 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
60756                 goto port_unlock;
60757         }
60759 -       if (op_curr > port->pps_data.max_curr) {
60760 +       if (req_op_curr > port->pps_data.max_curr) {
60761                 ret = -EINVAL;
60762                 goto port_unlock;
60763         }
60765 -       target_mw = (op_curr * port->pps_data.out_volt) / 1000;
60766 +       target_mw = (req_op_curr * port->supply_voltage) / 1000;
60767         if (target_mw < port->operating_snk_mw) {
60768                 ret = -EINVAL;
60769                 goto port_unlock;
60770 @@ -5411,10 +5512,10 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
60771         }
60773         /* Round down operating current to align with PPS valid steps */
60774 -       op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
60775 +       req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
60777         reinit_completion(&port->pps_complete);
60778 -       port->pps_data.op_curr = op_curr;
60779 +       port->pps_data.req_op_curr = req_op_curr;
60780         port->pps_status = 0;
60781         port->pps_pending = true;
60782         mutex_unlock(&port->lock);
60783 @@ -5435,7 +5536,7 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
60784         return ret;
60787 -static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
60788 +static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
60790         unsigned int target_mw;
60791         int ret;
60792 @@ -5453,13 +5554,13 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
60793                 goto port_unlock;
60794         }
60796 -       if (out_volt < port->pps_data.min_volt ||
60797 -           out_volt > port->pps_data.max_volt) {
60798 +       if (req_out_volt < port->pps_data.min_volt ||
60799 +           req_out_volt > port->pps_data.max_volt) {
60800                 ret = -EINVAL;
60801                 goto port_unlock;
60802         }
60804 -       target_mw = (port->pps_data.op_curr * out_volt) / 1000;
60805 +       target_mw = (port->current_limit * req_out_volt) / 1000;
60806         if (target_mw < port->operating_snk_mw) {
60807                 ret = -EINVAL;
60808                 goto port_unlock;
60809 @@ -5473,10 +5574,10 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
60810         }
60812         /* Round down output voltage to align with PPS valid steps */
60813 -       out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
60814 +       req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
60816         reinit_completion(&port->pps_complete);
60817 -       port->pps_data.out_volt = out_volt;
60818 +       port->pps_data.req_out_volt = req_out_volt;
60819         port->pps_status = 0;
60820         port->pps_pending = true;
60821         mutex_unlock(&port->lock);
60822 @@ -5534,8 +5635,8 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
60824         /* Trigger PPS request or move back to standard PDO contract */
60825         if (activate) {
60826 -               port->pps_data.out_volt = port->supply_voltage;
60827 -               port->pps_data.op_curr = port->current_limit;
60828 +               port->pps_data.req_out_volt = port->supply_voltage;
60829 +               port->pps_data.req_op_curr = port->current_limit;
60830         }
60831         mutex_unlock(&port->lock);
60833 diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
60834 index 29bd1c5a283c..4038104568f5 100644
60835 --- a/drivers/usb/typec/tps6598x.c
60836 +++ b/drivers/usb/typec/tps6598x.c
60837 @@ -614,8 +614,8 @@ static int tps6598x_probe(struct i2c_client *client)
60838                 return ret;
60840         fwnode = device_get_named_child_node(&client->dev, "connector");
60841 -       if (IS_ERR(fwnode))
60842 -               return PTR_ERR(fwnode);
60843 +       if (!fwnode)
60844 +               return -ENODEV;
60846         tps->role_sw = fwnode_usb_role_switch_get(fwnode);
60847         if (IS_ERR(tps->role_sw)) {
60848 diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
60849 index 244270755ae6..1e266f083bf8 100644
60850 --- a/drivers/usb/typec/ucsi/ucsi.c
60851 +++ b/drivers/usb/typec/ucsi/ucsi.c
60852 @@ -495,7 +495,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
60853         }
60856 -static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
60857 +static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
60858 +                        u32 *pdos, int offset, int num_pdos)
60860         struct ucsi *ucsi = con->ucsi;
60861         u64 command;
60862 @@ -503,17 +504,39 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
60864         command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
60865         command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
60866 -       command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
60867 +       command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
60868 +       command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
60869         command |= UCSI_GET_PDOS_SRC_PDOS;
60870 -       ret = ucsi_send_command(ucsi, command, con->src_pdos,
60871 -                              sizeof(con->src_pdos));
60872 -       if (ret < 0) {
60873 +       ret = ucsi_send_command(ucsi, command, pdos + offset,
60874 +                               num_pdos * sizeof(u32));
60875 +       if (ret < 0)
60876                 dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
60877 +       if (ret == 0 && offset == 0)
60878 +               dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
60880 +       return ret;
60883 +static void ucsi_get_src_pdos(struct ucsi_connector *con, int is_partner)
60885 +       int ret;
60887 +       /* the UCSI max payload allows fetching at most 4 PDOs at a time */
60888 +       ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
60889 +       if (ret < 0)
60890                 return;
60891 -       }
60893         con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
60894 -       if (ret == 0)
60895 -               dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
60896 +       if (con->num_pdos < UCSI_MAX_PDOS)
60897 +               return;
60899 +       /* get the remaining PDOs, if any */
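+       /* e.g. with PDO_MAX_OBJECTS == 7: up to 3 more PDOs, starting at offset 4 */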
60900 +       ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
60901 +                           PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
60902 +       if (ret < 0)
60903 +               return;
60905 +       con->num_pdos += ret / sizeof(u32);
60908  static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
60909 @@ -522,7 +545,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
60910         case UCSI_CONSTAT_PWR_OPMODE_PD:
60911                 con->rdo = con->status.request_data_obj;
60912                 typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
60913 -               ucsi_get_pdos(con, 1);
60914 +               ucsi_get_src_pdos(con, 1);
60915                 break;
60916         case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
60917                 con->rdo = 0;
60918 @@ -999,6 +1022,7 @@ static const struct typec_operations ucsi_ops = {
60919         .pr_set = ucsi_pr_swap
60920  };
60922 +/* Caller must call fwnode_handle_put() after use */
60923  static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
60925         struct fwnode_handle *fwnode;
60926 @@ -1033,7 +1057,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
60927         command |= UCSI_CONNECTOR_NUMBER(con->num);
60928         ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
60929         if (ret < 0)
60930 -               goto out;
60931 +               goto out_unlock;
60933         if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
60934                 cap->data = TYPEC_PORT_DRD;
60935 @@ -1151,6 +1175,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
60936         trace_ucsi_register_port(con->num, &con->status);
60938  out:
60939 +       fwnode_handle_put(cap->fwnode);
60940 +out_unlock:
60941         mutex_unlock(&con->lock);
60942         return ret;
60944 diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
60945 index 3920e20a9e9e..cee666790907 100644
60946 --- a/drivers/usb/typec/ucsi/ucsi.h
60947 +++ b/drivers/usb/typec/ucsi/ucsi.h
60948 @@ -8,6 +8,7 @@
60949  #include <linux/power_supply.h>
60950  #include <linux/types.h>
60951  #include <linux/usb/typec.h>
60952 +#include <linux/usb/pd.h>
60953  #include <linux/usb/role.h>
60955  /* -------------------------------------------------------------------------- */
60956 @@ -134,7 +135,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
60958  /* GET_PDOS command bits */
60959  #define UCSI_GET_PDOS_PARTNER_PDO(_r_)         ((u64)(_r_) << 23)
60960 +#define UCSI_GET_PDOS_PDO_OFFSET(_r_)          ((u64)(_r_) << 24)
60961  #define UCSI_GET_PDOS_NUM_PDOS(_r_)            ((u64)(_r_) << 32)
60962 +#define UCSI_MAX_PDOS                          (4)
60963  #define UCSI_GET_PDOS_SRC_PDOS                 ((u64)1 << 34)
60965  /* -------------------------------------------------------------------------- */
60966 @@ -302,7 +305,6 @@ struct ucsi {
60968  #define UCSI_MAX_SVID          5
60969  #define UCSI_MAX_ALTMODES      (UCSI_MAX_SVID * 6)
60970 -#define UCSI_MAX_PDOS          (4)
60972  #define UCSI_TYPEC_VSAFE5V     5000
60973  #define UCSI_TYPEC_1_5_CURRENT 1500
60974 @@ -330,7 +332,7 @@ struct ucsi_connector {
60975         struct power_supply *psy;
60976         struct power_supply_desc psy_desc;
60977         u32 rdo;
60978 -       u32 src_pdos[UCSI_MAX_PDOS];
60979 +       u32 src_pdos[PDO_MAX_OBJECTS];
60980         int num_pdos;
60982         struct usb_role_switch *usb_role_sw;
60983 diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
60984 index f7633ee655a1..d1cf6b51bf85 100644
60985 --- a/drivers/usb/usbip/vudc_sysfs.c
60986 +++ b/drivers/usb/usbip/vudc_sysfs.c
60987 @@ -156,12 +156,14 @@ static ssize_t usbip_sockfd_store(struct device *dev,
60988                 tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
60989                 if (IS_ERR(tcp_rx)) {
60990                         sockfd_put(socket);
60991 +                       mutex_unlock(&udc->ud.sysfs_lock);
60992                         return -EINVAL;
60993                 }
60994                 tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx");
60995                 if (IS_ERR(tcp_tx)) {
60996                         kthread_stop(tcp_rx);
60997                         sockfd_put(socket);
60998 +                       mutex_unlock(&udc->ud.sysfs_lock);
60999                         return -EINVAL;
61000                 }
61002 diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
61003 index f27e25112c40..8722f5effacd 100644
61004 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
61005 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
61006 @@ -568,23 +568,39 @@ static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
61007                 dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
61008                 goto out_nc_unreg;
61009         }
61010 +       return 0;
61012 +out_nc_unreg:
61013 +       bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
61014 +       return ret;
61017 +static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
61019 +       int ret;
61021 +       /* non dprc devices do not scan for other devices */
61022 +       if (!is_fsl_mc_bus_dprc(mc_dev))
61023 +               return 0;
61024         ret = dprc_scan_container(mc_dev, false);
61025         if (ret) {
61026 -               dev_err(&mc_dev->dev, "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
61027 -               goto out_dprc_cleanup;
61028 +               dev_err(&mc_dev->dev,
61029 +                       "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
61030 +               dprc_remove_devices(mc_dev, NULL, 0);
61031 +               return ret;
61032         }
61034         return 0;
61037 +static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
61039 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
61041 +       if (!is_fsl_mc_bus_dprc(mc_dev))
61042 +               return;
61044 -out_dprc_cleanup:
61045 -       dprc_remove_devices(mc_dev, NULL, 0);
61046         dprc_cleanup(mc_dev);
61047 -out_nc_unreg:
61048         bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
61049 -       vdev->nb.notifier_call = NULL;
61051 -       return ret;
61054  static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
61055 @@ -607,29 +623,39 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
61056         }
61058         vdev->mc_dev = mc_dev;
61060 -       ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
61061 -       if (ret) {
61062 -               dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
61063 -               goto out_group_put;
61064 -       }
61065 +       mutex_init(&vdev->igate);
61067         ret = vfio_fsl_mc_reflck_attach(vdev);
61068         if (ret)
61069 -               goto out_group_dev;
61070 +               goto out_group_put;
61072         ret = vfio_fsl_mc_init_device(vdev);
61073         if (ret)
61074                 goto out_reflck;
61076 -       mutex_init(&vdev->igate);
61077 +       ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
61078 +       if (ret) {
61079 +               dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
61080 +               goto out_device;
61081 +       }
61083 +       /*
61084 +        * This triggers recursion into vfio_fsl_mc_probe() on another device
61085 +        * and the vfio_fsl_mc_reflck_attach() must succeed, which relies on the
61086 +        * vfio_add_group_dev() above. It has no impact on this vdev, so it is
61087 +        * safe to be after the vfio device is made live.
61088 +        */
61089 +       ret = vfio_fsl_mc_scan_container(mc_dev);
61090 +       if (ret)
61091 +               goto out_group_dev;
61092         return 0;
61094 -out_reflck:
61095 -       vfio_fsl_mc_reflck_put(vdev->reflck);
61096  out_group_dev:
61097         vfio_del_group_dev(dev);
61098 +out_device:
61099 +       vfio_fsl_uninit_device(vdev);
61100 +out_reflck:
61101 +       vfio_fsl_mc_reflck_put(vdev->reflck);
61102  out_group_put:
61103         vfio_iommu_group_put(group, dev);
61104         return ret;
61105 @@ -646,16 +672,10 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
61107         mutex_destroy(&vdev->igate);
61109 +       dprc_remove_devices(mc_dev, NULL, 0);
61110 +       vfio_fsl_uninit_device(vdev);
61111         vfio_fsl_mc_reflck_put(vdev->reflck);
61113 -       if (is_fsl_mc_bus_dprc(mc_dev)) {
61114 -               dprc_remove_devices(mc_dev, NULL, 0);
61115 -               dprc_cleanup(mc_dev);
61116 -       }
61118 -       if (vdev->nb.notifier_call)
61119 -               bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
61121         vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
61123         return 0;
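
Taken together, the vfio_fsl_mc_probe()/remove() rework is an exercise in symmetric unwinding: each init step gains a named uninit helper, the error labels run in exactly the reverse order of acquisition, and remove() replays the same teardown. A compilable sketch of that ladder; the step names only echo the hunk, the bodies are trivial stand-ins:

#include <stdio.h>

static int  reflck_attach(void)  { return 0; }
static int  init_device(void)    { return 0; }
static int  add_group_dev(void)  { return 0; }
static int  scan_container(void) { return -1; }   /* pretend this fails */
static void del_group_dev(void)  { puts("del_group_dev"); }
static void uninit_device(void)  { puts("uninit_device"); }
static void reflck_put(void)     { puts("reflck_put"); }

int probe(void)
{
    int ret;

    if ((ret = reflck_attach()))
        return ret;
    if ((ret = init_device()))
        goto out_reflck;
    if ((ret = add_group_dev()))
        goto out_device;
    if ((ret = scan_container()))
        goto out_group_dev;
    return 0;

out_group_dev:                  /* unwind in reverse order */
    del_group_dev();
out_device:
    uninit_device();
out_reflck:
    reflck_put();
    return ret;
}

int main(void) { return probe() ? 1 : 0; }
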
61124 diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
61125 index 917fd84c1c6f..367ff5412a38 100644
61126 --- a/drivers/vfio/mdev/mdev_sysfs.c
61127 +++ b/drivers/vfio/mdev/mdev_sysfs.c
61128 @@ -105,6 +105,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
61129                 return ERR_PTR(-ENOMEM);
61131         type->kobj.kset = parent->mdev_types_kset;
61132 +       type->parent = parent;
61134         ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
61135                                    "%s-%s", dev_driver_string(parent->dev),
61136 @@ -132,7 +133,6 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
61137         }
61139         type->group = group;
61140 -       type->parent = parent;
61141         return type;
61143  attrs_failed:
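
The mdev_sysfs.c hunk moves a single assignment, but the reasoning generalizes: once an object is handed to kobject_init_and_add(), a failure can invoke its ->release() callback immediately, so every field that callback dereferences must already be valid. A toy model of the hazard; publish() and mtype_release() are illustrative stand-ins, not the mdev API:

#include <stdlib.h>

struct parent { int users; };
struct mtype  { struct parent *parent; };

static void mtype_release(struct mtype *t)
{
    t->parent->users--;          /* crashes if parent was never set */
    free(t);
}

static int publish(struct mtype *t, int fail)
{
    if (fail) {                  /* init-and-add failed: release runs now */
        mtype_release(t);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct parent p = { 1 };
    struct mtype *t = calloc(1, sizeof(*t));

    if (!t)
        return 1;
    t->parent = &p;              /* the fix: valid before publish() */
    if (publish(t, 1))
        return 0;                /* release already ran, safely */
    mtype_release(t);
    return 0;
}
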
61144 diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
61145 index 5023e23db3bc..cb7f2dc09e9d 100644
61146 --- a/drivers/vfio/pci/vfio_pci.c
61147 +++ b/drivers/vfio/pci/vfio_pci.c
61148 @@ -1924,6 +1924,68 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
61149         return 0;
61152 +static int vfio_pci_vf_init(struct vfio_pci_device *vdev)
61154 +       struct pci_dev *pdev = vdev->pdev;
61155 +       int ret;
61157 +       if (!pdev->is_physfn)
61158 +               return 0;
61160 +       vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
61161 +       if (!vdev->vf_token)
61162 +               return -ENOMEM;
61164 +       mutex_init(&vdev->vf_token->lock);
61165 +       uuid_gen(&vdev->vf_token->uuid);
61167 +       vdev->nb.notifier_call = vfio_pci_bus_notifier;
61168 +       ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
61169 +       if (ret) {
61170 +               kfree(vdev->vf_token);
61171 +               return ret;
61172 +       }
61173 +       return 0;
61176 +static void vfio_pci_vf_uninit(struct vfio_pci_device *vdev)
61178 +       if (!vdev->vf_token)
61179 +               return;
61181 +       bus_unregister_notifier(&pci_bus_type, &vdev->nb);
61182 +       WARN_ON(vdev->vf_token->users);
61183 +       mutex_destroy(&vdev->vf_token->lock);
61184 +       kfree(vdev->vf_token);
61187 +static int vfio_pci_vga_init(struct vfio_pci_device *vdev)
61189 +       struct pci_dev *pdev = vdev->pdev;
61190 +       int ret;
61192 +       if (!vfio_pci_is_vga(pdev))
61193 +               return 0;
61195 +       ret = vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
61196 +       if (ret)
61197 +               return ret;
61198 +       vga_set_legacy_decoding(pdev, vfio_pci_set_vga_decode(vdev, false));
61199 +       return 0;
61202 +static void vfio_pci_vga_uninit(struct vfio_pci_device *vdev)
61204 +       struct pci_dev *pdev = vdev->pdev;
61206 +       if (!vfio_pci_is_vga(pdev))
61207 +               return;
61208 +       vga_client_register(pdev, NULL, NULL, NULL);
61209 +       vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
61210 +                                             VGA_RSRC_LEGACY_IO |
61211 +                                             VGA_RSRC_LEGACY_MEM);
61214  static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
61216         struct vfio_pci_device *vdev;
61217 @@ -1970,35 +2032,15 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
61218         INIT_LIST_HEAD(&vdev->vma_list);
61219         init_rwsem(&vdev->memory_lock);
61221 -       ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
61222 +       ret = vfio_pci_reflck_attach(vdev);
61223         if (ret)
61224                 goto out_free;
61226 -       ret = vfio_pci_reflck_attach(vdev);
61227 +       ret = vfio_pci_vf_init(vdev);
61228         if (ret)
61229 -               goto out_del_group_dev;
61231 -       if (pdev->is_physfn) {
61232 -               vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
61233 -               if (!vdev->vf_token) {
61234 -                       ret = -ENOMEM;
61235 -                       goto out_reflck;
61236 -               }
61238 -               mutex_init(&vdev->vf_token->lock);
61239 -               uuid_gen(&vdev->vf_token->uuid);
61241 -               vdev->nb.notifier_call = vfio_pci_bus_notifier;
61242 -               ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
61243 -               if (ret)
61244 -                       goto out_vf_token;
61245 -       }
61247 -       if (vfio_pci_is_vga(pdev)) {
61248 -               vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
61249 -               vga_set_legacy_decoding(pdev,
61250 -                                       vfio_pci_set_vga_decode(vdev, false));
61251 -       }
61252 +               goto out_reflck;
61253 +       ret = vfio_pci_vga_init(vdev);
61254 +       if (ret)
61255 +               goto out_vf;
61257         vfio_pci_probe_power_state(vdev);
61259 @@ -2016,15 +2058,20 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
61260                 vfio_pci_set_power_state(vdev, PCI_D3hot);
61261         }
61263 -       return ret;
61264 +       ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
61265 +       if (ret)
61266 +               goto out_power;
61267 +       return 0;
61269 -out_vf_token:
61270 -       kfree(vdev->vf_token);
61271 +out_power:
61272 +       if (!disable_idle_d3)
61273 +               vfio_pci_set_power_state(vdev, PCI_D0);
61274 +out_vf:
61275 +       vfio_pci_vf_uninit(vdev);
61276  out_reflck:
61277         vfio_pci_reflck_put(vdev->reflck);
61278 -out_del_group_dev:
61279 -       vfio_del_group_dev(&pdev->dev);
61280  out_free:
61281 +       kfree(vdev->pm_save);
61282         kfree(vdev);
61283  out_group_put:
61284         vfio_iommu_group_put(group, &pdev->dev);
61285 @@ -2041,33 +2088,19 @@ static void vfio_pci_remove(struct pci_dev *pdev)
61286         if (!vdev)
61287                 return;
61289 -       if (vdev->vf_token) {
61290 -               WARN_ON(vdev->vf_token->users);
61291 -               mutex_destroy(&vdev->vf_token->lock);
61292 -               kfree(vdev->vf_token);
61293 -       }
61295 -       if (vdev->nb.notifier_call)
61296 -               bus_unregister_notifier(&pci_bus_type, &vdev->nb);
61298 +       vfio_pci_vf_uninit(vdev);
61299         vfio_pci_reflck_put(vdev->reflck);
61300 +       vfio_pci_vga_uninit(vdev);
61302         vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
61303 -       kfree(vdev->region);
61304 -       mutex_destroy(&vdev->ioeventfds_lock);
61306         if (!disable_idle_d3)
61307                 vfio_pci_set_power_state(vdev, PCI_D0);
61309 +       mutex_destroy(&vdev->ioeventfds_lock);
61310 +       kfree(vdev->region);
61311         kfree(vdev->pm_save);
61312         kfree(vdev);
61314 -       if (vfio_pci_is_vga(pdev)) {
61315 -               vga_client_register(pdev, NULL, NULL, NULL);
61316 -               vga_set_legacy_decoding(pdev,
61317 -                               VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
61318 -                               VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
61319 -       }
61322  static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
61323 diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
61324 index bfa4c6ef554e..c79d2f2387aa 100644
61325 --- a/drivers/vhost/vdpa.c
61326 +++ b/drivers/vhost/vdpa.c
61327 @@ -993,6 +993,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
61328         if (vma->vm_end - vma->vm_start != notify.size)
61329                 return -ENOTSUPP;
61331 +       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
61332         vma->vm_ops = &vhost_vdpa_vm_ops;
61333         return 0;
61335 diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
61336 index 091f07e7c145..e9fbe2483844 100644
61337 --- a/drivers/video/backlight/qcom-wled.c
61338 +++ b/drivers/video/backlight/qcom-wled.c
61339 @@ -336,19 +336,19 @@ static int wled3_sync_toggle(struct wled *wled)
61340         unsigned int mask = GENMASK(wled->max_string_count - 1, 0);
61342         rc = regmap_update_bits(wled->regmap,
61343 -                               wled->ctrl_addr + WLED3_SINK_REG_SYNC,
61344 +                               wled->sink_addr + WLED3_SINK_REG_SYNC,
61345                                 mask, mask);
61346         if (rc < 0)
61347                 return rc;
61349         rc = regmap_update_bits(wled->regmap,
61350 -                               wled->ctrl_addr + WLED3_SINK_REG_SYNC,
61351 +                               wled->sink_addr + WLED3_SINK_REG_SYNC,
61352                                 mask, WLED3_SINK_REG_SYNC_CLEAR);
61354         return rc;
61357 -static int wled5_sync_toggle(struct wled *wled)
61358 +static int wled5_mod_sync_toggle(struct wled *wled)
61360         int rc;
61361         u8 val;
61362 @@ -445,10 +445,23 @@ static int wled_update_status(struct backlight_device *bl)
61363                         goto unlock_mutex;
61364                 }
61366 -               rc = wled->wled_sync_toggle(wled);
61367 -               if (rc < 0) {
61368 -                       dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
61369 -                       goto unlock_mutex;
61370 +               if (wled->version < 5) {
61371 +                       rc = wled->wled_sync_toggle(wled);
61372 +                       if (rc < 0) {
61373 +                               dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
61374 +                               goto unlock_mutex;
61375 +                       }
61376 +               } else {
61377 +                       /*
61378 +                        * For WLED5 toggling the MOD_SYNC_BIT updates the
61379 +                        * brightness
61380 +                        */
61381 +                       rc = wled5_mod_sync_toggle(wled);
61382 +                       if (rc < 0) {
61383 +                               dev_err(wled->dev, "wled mod sync failed rc:%d\n",
61384 +                                       rc);
61385 +                               goto unlock_mutex;
61386 +                       }
61387                 }
61388         }
61390 @@ -1459,7 +1472,7 @@ static int wled_configure(struct wled *wled)
61391                 size = ARRAY_SIZE(wled5_opts);
61392                 *cfg = wled5_config_defaults;
61393                 wled->wled_set_brightness = wled5_set_brightness;
61394 -               wled->wled_sync_toggle = wled5_sync_toggle;
61395 +               wled->wled_sync_toggle = wled3_sync_toggle;
61396                 wled->wled_cabc_config = wled5_cabc_config;
61397                 wled->wled_ovp_delay = wled5_ovp_delay;
61398                 wled->wled_auto_detection_required =
61399 diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
61400 index 962c12be9774..631eb918f8e1 100644
61401 --- a/drivers/video/console/vgacon.c
61402 +++ b/drivers/video/console/vgacon.c
61403 @@ -383,7 +383,7 @@ static void vgacon_init(struct vc_data *c, int init)
61404                 vc_resize(c, vga_video_num_columns, vga_video_num_lines);
61406         c->vc_scan_lines = vga_scan_lines;
61407 -       c->vc_font.height = vga_video_font_height;
61408 +       c->vc_font.height = c->vc_cell_height = vga_video_font_height;
61409         c->vc_complement_mask = 0x7700;
61410         if (vga_512_chars)
61411                 c->vc_hi_font_mask = 0x0800;
61412 @@ -518,32 +518,32 @@ static void vgacon_cursor(struct vc_data *c, int mode)
61413                 switch (CUR_SIZE(c->vc_cursor_type)) {
61414                 case CUR_UNDERLINE:
61415                         vgacon_set_cursor_size(c->state.x,
61416 -                                              c->vc_font.height -
61417 -                                              (c->vc_font.height <
61418 +                                              c->vc_cell_height -
61419 +                                              (c->vc_cell_height <
61420                                                 10 ? 2 : 3),
61421 -                                              c->vc_font.height -
61422 -                                              (c->vc_font.height <
61423 +                                              c->vc_cell_height -
61424 +                                              (c->vc_cell_height <
61425                                                 10 ? 1 : 2));
61426                         break;
61427                 case CUR_TWO_THIRDS:
61428                         vgacon_set_cursor_size(c->state.x,
61429 -                                              c->vc_font.height / 3,
61430 -                                              c->vc_font.height -
61431 -                                              (c->vc_font.height <
61432 +                                              c->vc_cell_height / 3,
61433 +                                              c->vc_cell_height -
61434 +                                              (c->vc_cell_height <
61435                                                 10 ? 1 : 2));
61436                         break;
61437                 case CUR_LOWER_THIRD:
61438                         vgacon_set_cursor_size(c->state.x,
61439 -                                              (c->vc_font.height * 2) / 3,
61440 -                                              c->vc_font.height -
61441 -                                              (c->vc_font.height <
61442 +                                              (c->vc_cell_height * 2) / 3,
61443 +                                              c->vc_cell_height -
61444 +                                              (c->vc_cell_height <
61445                                                 10 ? 1 : 2));
61446                         break;
61447                 case CUR_LOWER_HALF:
61448                         vgacon_set_cursor_size(c->state.x,
61449 -                                              c->vc_font.height / 2,
61450 -                                              c->vc_font.height -
61451 -                                              (c->vc_font.height <
61452 +                                              c->vc_cell_height / 2,
61453 +                                              c->vc_cell_height -
61454 +                                              (c->vc_cell_height <
61455                                                 10 ? 1 : 2));
61456                         break;
61457                 case CUR_NONE:
61458 @@ -554,7 +554,7 @@ static void vgacon_cursor(struct vc_data *c, int mode)
61459                         break;
61460                 default:
61461                         vgacon_set_cursor_size(c->state.x, 1,
61462 -                                              c->vc_font.height);
61463 +                                              c->vc_cell_height);
61464                         break;
61465                 }
61466                 break;
61467 @@ -565,13 +565,13 @@ static int vgacon_doresize(struct vc_data *c,
61468                 unsigned int width, unsigned int height)
61470         unsigned long flags;
61471 -       unsigned int scanlines = height * c->vc_font.height;
61472 +       unsigned int scanlines = height * c->vc_cell_height;
61473         u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
61475         raw_spin_lock_irqsave(&vga_lock, flags);
61477         vgacon_xres = width * VGA_FONTWIDTH;
61478 -       vgacon_yres = height * c->vc_font.height;
61479 +       vgacon_yres = height * c->vc_cell_height;
61480         if (vga_video_type >= VIDEO_TYPE_VGAC) {
61481                 outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
61482                 max_scan = inb_p(vga_video_port_val);
61483 @@ -626,9 +626,9 @@ static int vgacon_doresize(struct vc_data *c,
61484  static int vgacon_switch(struct vc_data *c)
61486         int x = c->vc_cols * VGA_FONTWIDTH;
61487 -       int y = c->vc_rows * c->vc_font.height;
61488 +       int y = c->vc_rows * c->vc_cell_height;
61489         int rows = screen_info.orig_video_lines * vga_default_font_height/
61490 -               c->vc_font.height;
61491 +               c->vc_cell_height;
61492         /*
61493          * We need to save screen size here as it's the only way
61494          * we can spot the screen has been resized and we need to
61495 @@ -1041,7 +1041,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
61496                                 cursor_size_lastto = 0;
61497                                 c->vc_sw->con_cursor(c, CM_DRAW);
61498                         }
61499 -                       c->vc_font.height = fontheight;
61500 +                       c->vc_font.height = c->vc_cell_height = fontheight;
61501                         vc_resize(c, 0, rows);  /* Adjust console size */
61502                 }
61503         }
61504 @@ -1089,12 +1089,20 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
61505         if ((width << 1) * height > vga_vram_size)
61506                 return -EINVAL;
61508 +       if (user) {
61509 +               /*
61510 +                * Ho ho!  Someone (svgatextmode, eh?) may have reprogrammed
61511 +                * the video mode!  Set the new defaults then and go away.
61512 +                */
61513 +               screen_info.orig_video_cols = width;
61514 +               screen_info.orig_video_lines = height;
61515 +               vga_default_font_height = c->vc_cell_height;
61516 +               return 0;
61517 +       }
61518         if (width % 2 || width > screen_info.orig_video_cols ||
61519             height > (screen_info.orig_video_lines * vga_default_font_height)/
61520 -           c->vc_font.height)
61521 -               /* let svgatextmode tinker with video timings and
61522 -                  return success */
61523 -               return (user) ? 0 : -EINVAL;
61524 +           c->vc_cell_height)
61525 +               return -EINVAL;
61527         if (con_is_visible(c) && !vga_is_gfx) /* who knows */
61528                 vgacon_doresize(c, width, height);
61529 diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
61530 index 757d5c3f620b..ff09e57f3c38 100644
61531 --- a/drivers/video/fbdev/core/fbcmap.c
61532 +++ b/drivers/video/fbdev/core/fbcmap.c
61533 @@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
61534                 if (!len)
61535                         return 0;
61537 -               cmap->red = kmalloc(size, flags);
61538 +               cmap->red = kzalloc(size, flags);
61539                 if (!cmap->red)
61540                         goto fail;
61541 -               cmap->green = kmalloc(size, flags);
61542 +               cmap->green = kzalloc(size, flags);
61543                 if (!cmap->green)
61544                         goto fail;
61545 -               cmap->blue = kmalloc(size, flags);
61546 +               cmap->blue = kzalloc(size, flags);
61547                 if (!cmap->blue)
61548                         goto fail;
61549                 if (transp) {
61550 -                       cmap->transp = kmalloc(size, flags);
61551 +                       cmap->transp = kzalloc(size, flags);
61552                         if (!cmap->transp)
61553                                 goto fail;
61554                 } else {
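
The fbcmap.c change swaps kmalloc() for kzalloc() so a colormap whose entries are only partially filled in later can never hand stale heap bytes back to userspace. In userspace C the same guarantee is what calloc() provides over malloc(); a small sketch:

#include <stdlib.h>

/* kzalloc()-style helper: zeroed memory, so a table that is read
 * before (or instead of) being fully populated yields zeros, not
 * whatever the allocator last stored there */
static void *zalloc(size_t n)
{
    return calloc(1, n);
}

int main(void)
{
    unsigned short *red = zalloc(256 * sizeof(*red));

    if (!red)
        return 1;
    /* safe to copy out immediately: every entry reads as 0 */
    free(red);
    return 0;
}
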
61555 diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
61556 index 3406067985b1..22bb3892f6bd 100644
61557 --- a/drivers/video/fbdev/core/fbcon.c
61558 +++ b/drivers/video/fbdev/core/fbcon.c
61559 @@ -2019,7 +2019,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
61560                         return -EINVAL;
61562                 pr_debug("resize now %ix%i\n", var.xres, var.yres);
61563 -               if (con_is_visible(vc)) {
61564 +               if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
61565                         var.activate = FB_ACTIVATE_NOW |
61566                                 FB_ACTIVATE_FORCE;
61567                         fb_set_var(info, &var);
61568 diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
61569 index 8bbac7182ad3..bd3d07aa4f0e 100644
61570 --- a/drivers/video/fbdev/hgafb.c
61571 +++ b/drivers/video/fbdev/hgafb.c
61572 @@ -286,7 +286,7 @@ static int hga_card_detect(void)
61574         hga_vram = ioremap(0xb0000, hga_vram_len);
61575         if (!hga_vram)
61576 -               goto error;
61577 +               return -ENOMEM;
61579         if (request_region(0x3b0, 12, "hgafb"))
61580                 release_io_ports = 1;
61581 @@ -346,13 +346,18 @@ static int hga_card_detect(void)
61582                         hga_type_name = "Hercules";
61583                         break;
61584         }
61585 -       return 1;
61586 +       return 0;
61587  error:
61588         if (release_io_ports)
61589                 release_region(0x3b0, 12);
61590         if (release_io_port)
61591                 release_region(0x3bf, 1);
61592 -       return 0;
61594 +       iounmap(hga_vram);
61596 +       pr_err("hgafb: HGA card not detected.\n");
61598 +       return -EINVAL;
61601  /**
61602 @@ -550,13 +555,11 @@ static const struct fb_ops hgafb_ops = {
61603  static int hgafb_probe(struct platform_device *pdev)
61605         struct fb_info *info;
61606 +       int ret;
61608 -       if (! hga_card_detect()) {
61609 -               printk(KERN_INFO "hgafb: HGA card not detected.\n");
61610 -               if (hga_vram)
61611 -                       iounmap(hga_vram);
61612 -               return -EINVAL;
61613 -       }
61614 +       ret = hga_card_detect();
61615 +       if (ret)
61616 +               return ret;
61618         printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
61619                 hga_type_name, hga_vram_len/1024);
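
Beyond the iounmap() fix, the hgafb hunks convert hga_card_detect() from a boolean convention (1 = found, 0 = not) to the kernel's errno convention (0 = success, negative error), letting hgafb_probe() propagate the exact cause instead of flattening everything to -EINVAL. The two conventions side by side, as a sketch (error values here are illustrative):

#include <errno.h>
#include <stdio.h>

/* old convention: truthy on success, so callers must invert the
 * result and invent their own error code */
static int detect_bool(int present)  { return present ? 1 : 0; }

/* errno convention: 0 on success, negative error otherwise */
static int detect_errno(int present) { return present ? 0 : -ENODEV; }

int probe(int present)
{
    int ret = detect_errno(present);

    if (ret)
        return ret;             /* the real cause travels upward */
    return 0;
}

int main(void)
{
    (void)detect_bool(1);
    printf("probe(0) = %d\n", probe(0));
    return 0;
}
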
61620 diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
61621 index 3ac053b88495..e04411701ec8 100644
61622 --- a/drivers/video/fbdev/imsttfb.c
61623 +++ b/drivers/video/fbdev/imsttfb.c
61624 @@ -1512,11 +1512,6 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
61625         info->fix.smem_start = addr;
61626         info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
61627                                             0x400000 : 0x800000);
61628 -       if (!info->screen_base) {
61629 -               release_mem_region(addr, size);
61630 -               framebuffer_release(info);
61631 -               return -ENOMEM;
61632 -       }
61633         info->fix.mmio_start = addr + 0x800000;
61634         par->dc_regs = ioremap(addr + 0x800000, 0x1000);
61635         par->cmap_regs_phys = addr + 0x840000;
61636 diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
61637 index cfe63932f825..71c00ef772a3 100644
61638 --- a/drivers/video/fbdev/omap/hwa742.c
61639 +++ b/drivers/video/fbdev/omap/hwa742.c
61640 @@ -913,7 +913,7 @@ static void hwa742_resume(void)
61641                 if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
61642                         break;
61643                 set_current_state(TASK_UNINTERRUPTIBLE);
61644 -               schedule_timeout(msecs_to_jiffies(5));
61645 +               schedule_msec_hrtimeout((5));
61646         }
61647         hwa742_set_update_mode(hwa742.update_mode_before_suspend);
61649 diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
61650 index f1551e00eb12..f0f651e92504 100644
61651 --- a/drivers/video/fbdev/pxafb.c
61652 +++ b/drivers/video/fbdev/pxafb.c
61653 @@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg)
61654                 mutex_unlock(&fbi->ctrlr_lock);
61656                 set_current_state(TASK_INTERRUPTIBLE);
61657 -               schedule_timeout(msecs_to_jiffies(30));
61658 +               schedule_msec_hrtimeout((30));
61659         }
61661         pr_debug("%s(): task ending\n", __func__);
61662 diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
61663 index f1964ea4b826..e21e1e86ad15 100644
61664 --- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
61665 +++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
61666 @@ -1524,7 +1524,8 @@ static const struct file_operations ne_enclave_fops = {
61667   *                       enclave file descriptor to be further used for enclave
61668   *                       resources handling e.g. memory regions and CPUs.
61669   * @ne_pci_dev :       Private data associated with the PCI device.
61670 - * @slot_uid:          Generated unique slot id associated with an enclave.
61671 + * @slot_uid:          User pointer to store the generated unique slot id
61672 + *                     associated with an enclave to.
61673   *
61674   * Context: Process context. This function is called with the ne_pci_dev enclave
61675   *         mutex held.
61676 @@ -1532,7 +1533,7 @@ static const struct file_operations ne_enclave_fops = {
61677   * * Enclave fd on success.
61678   * * Negative return value on failure.
61679   */
61680 -static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
61681 +static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
61683         struct ne_pci_dev_cmd_reply cmd_reply = {};
61684         int enclave_fd = -1;
61685 @@ -1634,7 +1635,18 @@ static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
61687         list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);
61689 -       *slot_uid = ne_enclave->slot_uid;
61690 +       if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) {
61691 +               /*
61692 +                * As we're holding the only reference to 'enclave_file', fput()
61693 +                * will call ne_enclave_release() which will do a proper cleanup
61694 +                * of all so far allocated resources, leaving only the unused fd
61695 +                * for us to free.
61696 +                */
61697 +               fput(enclave_file);
61698 +               put_unused_fd(enclave_fd);
61700 +               return -EFAULT;
61701 +       }
61703         fd_install(enclave_fd, enclave_file);
61705 @@ -1671,34 +1683,13 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
61706         switch (cmd) {
61707         case NE_CREATE_VM: {
61708                 int enclave_fd = -1;
61709 -               struct file *enclave_file = NULL;
61710                 struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
61711 -               int rc = -EINVAL;
61712 -               u64 slot_uid = 0;
61713 +               u64 __user *slot_uid = (void __user *)arg;
61715                 mutex_lock(&ne_pci_dev->enclaves_list_mutex);
61717 -               enclave_fd = ne_create_vm_ioctl(ne_pci_dev, &slot_uid);
61718 -               if (enclave_fd < 0) {
61719 -                       rc = enclave_fd;
61721 -                       mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
61723 -                       return rc;
61724 -               }
61726 +               enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
61727                 mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
61729 -               if (copy_to_user((void __user *)arg, &slot_uid, sizeof(slot_uid))) {
61730 -                       enclave_file = fget(enclave_fd);
61731 -                       /* Decrement file refs to have release() called. */
61732 -                       fput(enclave_file);
61733 -                       fput(enclave_file);
61734 -                       put_unused_fd(enclave_fd);
61736 -                       return -EFAULT;
61737 -               }
61739                 return enclave_fd;
61740         }
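
The nitro_enclaves rework moves the copy_to_user() inside ne_create_vm_ioctl(), ahead of fd_install(): once a descriptor is installed it is visible to userspace and cannot be revoked, so every fallible step must complete first, and on failure a single fput() plus put_unused_fd() undoes everything. A toy model of that ordering; all helpers below are stand-ins, not the kernel API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int  next_fd = 3;
static int  get_unused_fd(void)         { return next_fd++; }
static void put_unused_fd(int fd)       { (void)fd; }
static void fput_last_ref(void **file)  { free(*file); *file = NULL; }
static void fd_install(int fd, void *f) { printf("fd %d live: %p\n", fd, f); }

int create_vm(int copy_fails)
{
    void *file = malloc(1);             /* stand-in for enclave_file */
    int fd;

    if (!file)
        return -ENOMEM;
    fd = get_unused_fd();
    /* ... all remaining setup, including the copy to userspace ... */
    if (copy_fails) {
        /* the fd was never installed: dropping the last reference
         * tears everything down; nothing leaked to userspace */
        fput_last_ref(&file);
        put_unused_fd(fd);
        return -EFAULT;
    }
    fd_install(fd, file);               /* publish only when done */
    return fd;
}

int main(void) { return create_vm(1) == -EFAULT ? 0 : 1; }
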
61742 diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
61743 index f01d58c7a042..a3e7be96527d 100644
61744 --- a/drivers/xen/gntdev.c
61745 +++ b/drivers/xen/gntdev.c
61746 @@ -1017,8 +1017,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
61747                 err = mmu_interval_notifier_insert_locked(
61748                         &map->notifier, vma->vm_mm, vma->vm_start,
61749                         vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
61750 -               if (err)
61751 +               if (err) {
61752 +                       map->vma = NULL;
61753                         goto out_unlock_put;
61754 +               }
61755         }
61756         mutex_unlock(&priv->lock);
61758 diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
61759 index e64e6befc63b..87e6b7db892f 100644
61760 --- a/drivers/xen/unpopulated-alloc.c
61761 +++ b/drivers/xen/unpopulated-alloc.c
61762 @@ -39,8 +39,10 @@ static int fill_list(unsigned int nr_pages)
61763         }
61765         pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
61766 -       if (!pgmap)
61767 +       if (!pgmap) {
61768 +               ret = -ENOMEM;
61769                 goto err_pgmap;
61770 +       }
61772         pgmap->type = MEMORY_DEVICE_GENERIC;
61773         pgmap->range = (struct range) {
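
The unpopulated-alloc.c change fixes a classic shape of bug: jumping to an error label while ret still holds 0 from an earlier success, so the caller is told everything worked. Sketched:

#include <errno.h>
#include <stdlib.h>

int fill_list(size_t n)
{
    void *res = NULL, *pgmap = NULL;
    int ret = 0;

    res = malloc(n);
    if (!res) {
        ret = -ENOMEM;
        goto err;
    }
    pgmap = malloc(64);
    if (!pgmap) {
        ret = -ENOMEM;          /* the fix: without this assignment,
                                   the err path returned 0 */
        goto err;
    }
    free(pgmap);
    free(res);
    return 0;
err:
    free(pgmap);
    free(res);
    return ret;
}

int main(void) { return fill_list(16) ? 1 : 0; }
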
61774 diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
61775 index 5447b5ab7c76..1221cfd914cb 100644
61776 --- a/drivers/xen/xen-pciback/vpci.c
61777 +++ b/drivers/xen/xen-pciback/vpci.c
61778 @@ -70,7 +70,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
61779                                    struct pci_dev *dev, int devid,
61780                                    publish_pci_dev_cb publish_cb)
61782 -       int err = 0, slot, func = -1;
61783 +       int err = 0, slot, func = PCI_FUNC(dev->devfn);
61784         struct pci_dev_entry *t, *dev_entry;
61785         struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
61787 @@ -95,22 +95,25 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
61789         /*
61790          * Keep multi-function devices together on the virtual PCI bus, except
61791 -        * virtual functions.
61792 +        * that we want to keep virtual functions at func 0 on their own. They
61793 +        * aren't multi-function devices and hence their presence at func 0
61794 +        * may cause guests to not scan the other functions.
61795          */
61796 -       if (!dev->is_virtfn) {
61797 +       if (!dev->is_virtfn || func) {
61798                 for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
61799                         if (list_empty(&vpci_dev->dev_list[slot]))
61800                                 continue;
61802                         t = list_entry(list_first(&vpci_dev->dev_list[slot]),
61803                                        struct pci_dev_entry, list);
61804 +                       if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
61805 +                               continue;
61807                         if (match_slot(dev, t->dev)) {
61808                                 dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
61809 -                                        slot, PCI_FUNC(dev->devfn));
61810 +                                        slot, func);
61811                                 list_add_tail(&dev_entry->list,
61812                                               &vpci_dev->dev_list[slot]);
61813 -                               func = PCI_FUNC(dev->devfn);
61814                                 goto unlock;
61815                         }
61816                 }
61817 @@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
61818                                  slot);
61819                         list_add_tail(&dev_entry->list,
61820                                       &vpci_dev->dev_list[slot]);
61821 -                       func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
61822                         goto unlock;
61823                 }
61824         }
61825 diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
61826 index 5188f02e75fb..c09c7ebd6968 100644
61827 --- a/drivers/xen/xen-pciback/xenbus.c
61828 +++ b/drivers/xen/xen-pciback/xenbus.c
61829 @@ -359,7 +359,8 @@ static int xen_pcibk_publish_pci_root(struct xen_pcibk_device *pdev,
61830         return err;
61833 -static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
61834 +static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev,
61835 +                                enum xenbus_state state)
61837         int err = 0;
61838         int num_devs;
61839 @@ -373,9 +374,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
61840         dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
61842         mutex_lock(&pdev->dev_lock);
61843 -       /* Make sure we only reconfigure once */
61844 -       if (xenbus_read_driver_state(pdev->xdev->nodename) !=
61845 -           XenbusStateReconfiguring)
61846 +       if (xenbus_read_driver_state(pdev->xdev->nodename) != state)
61847                 goto out;
61849         err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
61850 @@ -500,6 +499,10 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
61851                 }
61852         }
61854 +       if (state != XenbusStateReconfiguring)
61855 +               /* Make sure we only reconfigure once. */
61856 +               goto out;
61858         err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
61859         if (err) {
61860                 xenbus_dev_fatal(pdev->xdev, err,
61861 @@ -525,7 +528,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
61862                 break;
61864         case XenbusStateReconfiguring:
61865 -               xen_pcibk_reconfigure(pdev);
61866 +               xen_pcibk_reconfigure(pdev, XenbusStateReconfiguring);
61867                 break;
61869         case XenbusStateConnected:
61870 @@ -664,6 +667,15 @@ static void xen_pcibk_be_watch(struct xenbus_watch *watch,
61871                 xen_pcibk_setup_backend(pdev);
61872                 break;
61874 +       case XenbusStateInitialised:
61875 +               /*
61876 +                * We typically move to Initialised when the first device was
61877 +                * added. Hence subsequent devices getting added may need
61878 +                * reconfiguring.
61879 +                */
61880 +               xen_pcibk_reconfigure(pdev, XenbusStateInitialised);
61881 +               break;
61883         default:
61884                 break;
61885         }
61886 diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
61887 index 649f04f112dc..59c32c9b799f 100644
61888 --- a/fs/9p/vfs_file.c
61889 +++ b/fs/9p/vfs_file.c
61890 @@ -86,8 +86,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
61891                  * to work.
61892                  */
61893                 writeback_fid = v9fs_writeback_fid(file_dentry(file));
61894 -               if (IS_ERR(fid)) {
61895 -                       err = PTR_ERR(fid);
61896 +               if (IS_ERR(writeback_fid)) {
61897 +                       err = PTR_ERR(writeback_fid);
61898                         mutex_unlock(&v9inode->v_mutex);
61899                         goto out_error;
61900                 }
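
The one-character 9p fix is a copy-paste classic: the error check ran against fid, the pointer validated in the previous block, instead of the writeback_fid just assigned, so a failed v9fs_writeback_fid() sailed through as success. A self-contained model of the ERR_PTR convention and the bug; the helpers imitate, and simplify, the kernel's:

#include <errno.h>
#include <stdlib.h>

static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
    return (unsigned long)p >= (unsigned long)-4095;
}

static void *writeback_fid(int fail)
{
    return fail ? ERR_PTR(-ENOMEM) : malloc(1);
}

static int file_open(void *fid, int fail)
{
    void *wb = writeback_fid(fail);

    /* the bug checked IS_ERR(fid) here, i.e. the wrong, already
     * validated pointer, letting the error pointer escape */
    if (IS_ERR(wb))
        return (int)PTR_ERR(wb);
    free(wb);
    (void)fid;
    return 0;
}

int main(void) { return file_open(NULL, 1) == -ENOMEM ? 0 : 1; }
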
61901 diff --git a/fs/Kconfig b/fs/Kconfig
61902 index a55bda4233bb..f61330e4efc0 100644
61903 --- a/fs/Kconfig
61904 +++ b/fs/Kconfig
61905 @@ -145,6 +145,7 @@ menu "DOS/FAT/EXFAT/NT Filesystems"
61906  source "fs/fat/Kconfig"
61907  source "fs/exfat/Kconfig"
61908  source "fs/ntfs/Kconfig"
61909 +source "fs/ntfs3/Kconfig"
61911  endmenu
61912  endif # BLOCK
61913 diff --git a/fs/Makefile b/fs/Makefile
61914 index 3215fe205256..6bdfcf712cb1 100644
61915 --- a/fs/Makefile
61916 +++ b/fs/Makefile
61917 @@ -99,6 +99,7 @@ obj-$(CONFIG_SYSV_FS)         += sysv/
61918  obj-$(CONFIG_CIFS)             += cifs/
61919  obj-$(CONFIG_HPFS_FS)          += hpfs/
61920  obj-$(CONFIG_NTFS_FS)          += ntfs/
61921 +obj-$(CONFIG_NTFS3_FS)         += ntfs3/
61922  obj-$(CONFIG_UFS_FS)           += ufs/
61923  obj-$(CONFIG_EFS_FS)           += efs/
61924  obj-$(CONFIG_JFFS2_FS)         += jffs2/
61925 diff --git a/fs/afs/dir.c b/fs/afs/dir.c
61926 index 17548c1faf02..31251d11d576 100644
61927 --- a/fs/afs/dir.c
61928 +++ b/fs/afs/dir.c
61929 @@ -1342,6 +1342,7 @@ static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
61931         afs_op_set_vnode(op, 0, dvnode);
61932         op->file[0].dv_delta = 1;
61933 +       op->file[0].modification = true;
61934         op->file[0].update_ctime = true;
61935         op->dentry      = dentry;
61936         op->create.mode = S_IFDIR | mode;
61937 @@ -1423,6 +1424,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
61939         afs_op_set_vnode(op, 0, dvnode);
61940         op->file[0].dv_delta = 1;
61941 +       op->file[0].modification = true;
61942         op->file[0].update_ctime = true;
61944         op->dentry      = dentry;
61945 @@ -1559,6 +1561,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
61947         afs_op_set_vnode(op, 0, dvnode);
61948         op->file[0].dv_delta = 1;
61949 +       op->file[0].modification = true;
61950         op->file[0].update_ctime = true;
61952         /* Try to make sure we have a callback promise on the victim. */
61953 @@ -1641,6 +1644,7 @@ static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
61955         afs_op_set_vnode(op, 0, dvnode);
61956         op->file[0].dv_delta = 1;
61957 +       op->file[0].modification = true;
61958         op->file[0].update_ctime = true;
61960         op->dentry      = dentry;
61961 @@ -1715,6 +1719,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
61962         afs_op_set_vnode(op, 0, dvnode);
61963         afs_op_set_vnode(op, 1, vnode);
61964         op->file[0].dv_delta = 1;
61965 +       op->file[0].modification = true;
61966         op->file[0].update_ctime = true;
61967         op->file[1].update_ctime = true;
61969 @@ -1910,6 +1915,8 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
61970         afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
61971         op->file[0].dv_delta = 1;
61972         op->file[1].dv_delta = 1;
61973 +       op->file[0].modification = true;
61974 +       op->file[1].modification = true;
61975         op->file[0].update_ctime = true;
61976         op->file[1].update_ctime = true;
61978 diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
61979 index 04f75a44f243..dae9a57d7ec0 100644
61980 --- a/fs/afs/dir_silly.c
61981 +++ b/fs/afs/dir_silly.c
61982 @@ -73,6 +73,8 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
61983         afs_op_set_vnode(op, 1, dvnode);
61984         op->file[0].dv_delta = 1;
61985         op->file[1].dv_delta = 1;
61986 +       op->file[0].modification = true;
61987 +       op->file[1].modification = true;
61988         op->file[0].update_ctime = true;
61989         op->file[1].update_ctime = true;
61991 @@ -201,6 +203,7 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
61992         afs_op_set_vnode(op, 0, dvnode);
61993         afs_op_set_vnode(op, 1, vnode);
61994         op->file[0].dv_delta = 1;
61995 +       op->file[0].modification = true;
61996         op->file[0].update_ctime = true;
61997         op->file[1].op_unlinked = true;
61998         op->file[1].update_ctime = true;
61999 diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
62000 index 71c58723763d..a82515b47350 100644
62001 --- a/fs/afs/fs_operation.c
62002 +++ b/fs/afs/fs_operation.c
62003 @@ -118,6 +118,8 @@ static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *
62004                 vp->cb_break_before     = afs_calc_vnode_cb_break(vnode);
62005                 if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
62006                         op->flags       |= AFS_OPERATION_CUR_ONLY;
62007 +               if (vp->modification)
62008 +                       set_bit(AFS_VNODE_MODIFYING, &vnode->flags);
62009         }
62011         if (vp->fid.vnode)
62012 @@ -223,6 +225,10 @@ int afs_put_operation(struct afs_operation *op)
62014         if (op->ops && op->ops->put)
62015                 op->ops->put(op);
62016 +       if (op->file[0].modification)
62017 +               clear_bit(AFS_VNODE_MODIFYING, &op->file[0].vnode->flags);
62018 +       if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
62019 +               clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
62020         if (op->file[0].put_vnode)
62021                 iput(&op->file[0].vnode->vfs_inode);
62022         if (op->file[1].put_vnode)
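
The thread running through these afs hunks: every operation that can change a file sets the new AFS_VNODE_MODIFYING bit for its duration, and (per the inode.c hunk that follows) a speculative status fetch that observes the bit, or a data-version mismatch, is discarded rather than allowed to appear to regress the inode. The protocol in miniature, using a C11 atomic as a stand-in for the vnode flag bit:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool modifying;

void do_modify(void)
{
    atomic_store(&modifying, true);
    /* ... change file contents / metadata ... */
    atomic_store(&modifying, false);
}

bool accept_speculative(long dv_before, long dv_now)
{
    /* discard a speculative status that raced with a modification
     * or that disagrees with the data version we already hold */
    if (atomic_load(&modifying) || dv_before != dv_now)
        return false;
    return true;
}

int main(void)
{
    do_modify();
    return accept_speculative(1, 1) ? 0 : 1;
}
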
62023 diff --git a/fs/afs/inode.c b/fs/afs/inode.c
62024 index 12be88716e4c..fddf7d54e0b7 100644
62025 --- a/fs/afs/inode.c
62026 +++ b/fs/afs/inode.c
62027 @@ -102,13 +102,13 @@ static int afs_inode_init_from_status(struct afs_operation *op,
62029         switch (status->type) {
62030         case AFS_FTYPE_FILE:
62031 -               inode->i_mode   = S_IFREG | status->mode;
62032 +               inode->i_mode   = S_IFREG | (status->mode & S_IALLUGO);
62033                 inode->i_op     = &afs_file_inode_operations;
62034                 inode->i_fop    = &afs_file_operations;
62035                 inode->i_mapping->a_ops = &afs_fs_aops;
62036                 break;
62037         case AFS_FTYPE_DIR:
62038 -               inode->i_mode   = S_IFDIR | status->mode;
62039 +               inode->i_mode   = S_IFDIR |  (status->mode & S_IALLUGO);
62040                 inode->i_op     = &afs_dir_inode_operations;
62041                 inode->i_fop    = &afs_dir_file_operations;
62042                 inode->i_mapping->a_ops = &afs_dir_aops;
62043 @@ -198,7 +198,7 @@ static void afs_apply_status(struct afs_operation *op,
62044         if (status->mode != vnode->status.mode) {
62045                 mode = inode->i_mode;
62046                 mode &= ~S_IALLUGO;
62047 -               mode |= status->mode;
62048 +               mode |= status->mode & S_IALLUGO;
62049                 WRITE_ONCE(inode->i_mode, mode);
62050         }
62052 @@ -293,8 +293,9 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
62053                         op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
62054                 }
62055         } else if (vp->scb.have_status) {
62056 -               if (vp->dv_before + vp->dv_delta != vp->scb.status.data_version &&
62057 -                   vp->speculative)
62058 +               if (vp->speculative &&
62059 +                   (test_bit(AFS_VNODE_MODIFYING, &vnode->flags) ||
62060 +                    vp->dv_before != vnode->status.data_version))
62061                         /* Ignore the result of a speculative bulk status fetch
62062                          * if it splits around a modification op, thereby
62063                          * appearing to regress the data version.
62064 @@ -910,6 +911,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
62065         }
62066         op->ctime = attr->ia_ctime;
62067         op->file[0].update_ctime = 1;
62068 +       op->file[0].modification = true;
62070         op->ops = &afs_setattr_operation;
62071         ret = afs_do_sync_operation(op);
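
Both inode.c mode fixes apply the same rule: a mode word received from the AFS server may only contribute permission bits, never file-type bits, so it is masked with S_IALLUGO before being merged into i_mode. The masking arithmetic on its own:

#define S_IALLUGO 07777u   /* rwx for all + setuid/setgid/sticky */

/* merge a server-supplied mode without letting it rewrite the
 * S_IFMT file-type bits the kernel established at inode creation */
unsigned int apply_server_mode(unsigned int i_mode, unsigned int server)
{
    i_mode &= ~S_IALLUGO;
    i_mode |= server & S_IALLUGO;
    return i_mode;
}

int main(void)
{
    /* 0100644 regular file; server says 0755 */
    return apply_server_mode(0100644, 0755) == 0100755 ? 0 : 1;
}
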
62072 diff --git a/fs/afs/internal.h b/fs/afs/internal.h
62073 index 1627b1872812..be981a9a1add 100644
62074 --- a/fs/afs/internal.h
62075 +++ b/fs/afs/internal.h
62076 @@ -640,6 +640,7 @@ struct afs_vnode {
62077  #define AFS_VNODE_PSEUDODIR    7               /* set if Vnode is a pseudo directory */
62078  #define AFS_VNODE_NEW_CONTENT  8               /* Set if file has new content (create/trunc-0) */
62079  #define AFS_VNODE_SILLY_DELETED        9               /* Set if file has been silly-deleted */
62080 +#define AFS_VNODE_MODIFYING    10              /* Set if we're performing a modification op */
62082         struct list_head        wb_keys;        /* List of keys available for writeback */
62083         struct list_head        pending_locks;  /* locks waiting to be granted */
62084 @@ -756,6 +757,7 @@ struct afs_vnode_param {
62085         bool                    set_size:1;     /* Must update i_size */
62086         bool                    op_unlinked:1;  /* True if file was unlinked by op */
62087         bool                    speculative:1;  /* T if speculative status fetch (no vnode lock) */
62088 +       bool                    modification:1; /* Set if the content gets modified */
62089  };
62091  /*
62092 diff --git a/fs/afs/write.c b/fs/afs/write.c
62093 index eb737ed63afb..ebe3b6493fce 100644
62094 --- a/fs/afs/write.c
62095 +++ b/fs/afs/write.c
62096 @@ -450,6 +450,7 @@ static int afs_store_data(struct address_space *mapping,
62097         afs_op_set_vnode(op, 0, vnode);
62098         op->file[0].dv_delta = 1;
62099         op->store.mapping = mapping;
62100 +       op->file[0].modification = true;
62101         op->store.first = first;
62102         op->store.last = last;
62103         op->store.first_offset = offset;
62104 diff --git a/fs/block_dev.c b/fs/block_dev.c
62105 index 09d6f7229db9..a5a6a7930e5e 100644
62106 --- a/fs/block_dev.c
62107 +++ b/fs/block_dev.c
62108 @@ -1684,6 +1684,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
62109         struct inode *bd_inode = bdev_file_inode(file);
62110         loff_t size = i_size_read(bd_inode);
62111         struct blk_plug plug;
62112 +       size_t shorted = 0;
62113         ssize_t ret;
62115         if (bdev_read_only(I_BDEV(bd_inode)))
62116 @@ -1701,12 +1702,17 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
62117         if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
62118                 return -EOPNOTSUPP;
62120 -       iov_iter_truncate(from, size - iocb->ki_pos);
62121 +       size -= iocb->ki_pos;
62122 +       if (iov_iter_count(from) > size) {
62123 +               shorted = iov_iter_count(from) - size;
62124 +               iov_iter_truncate(from, size);
62125 +       }
62127         blk_start_plug(&plug);
62128         ret = __generic_file_write_iter(iocb, from);
62129         if (ret > 0)
62130                 ret = generic_write_sync(iocb, ret);
62131 +       iov_iter_reexpand(from, iov_iter_count(from) + shorted);
62132         blk_finish_plug(&plug);
62133         return ret;
62135 @@ -1718,13 +1724,21 @@ ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
62136         struct inode *bd_inode = bdev_file_inode(file);
62137         loff_t size = i_size_read(bd_inode);
62138         loff_t pos = iocb->ki_pos;
62139 +       size_t shorted = 0;
62140 +       ssize_t ret;
62142         if (pos >= size)
62143                 return 0;
62145         size -= pos;
62146 -       iov_iter_truncate(to, size);
62147 -       return generic_file_read_iter(iocb, to);
62148 +       if (iov_iter_count(to) > size) {
62149 +               shorted = iov_iter_count(to) - size;
62150 +               iov_iter_truncate(to, size);
62151 +       }
62153 +       ret = generic_file_read_iter(iocb, to);
62154 +       iov_iter_reexpand(to, iov_iter_count(to) + shorted);
62155 +       return ret;
62157  EXPORT_SYMBOL_GPL(blkdev_read_iter);
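
The block_dev.c pair replaces a bare iov_iter_truncate() with a truncate/reexpand bracket: the iterator belongs to the caller, so after clamping a transfer to the device size the count must be restored, or a retry or subsequent segment sees a mysteriously shortened buffer. The arithmetic, modeled on a toy iterator:

#include <stddef.h>

struct iter { size_t count; };

static size_t iter_count(const struct iter *i)        { return i->count; }
static void   iter_truncate(struct iter *i, size_t n) { if (i->count > n) i->count = n; }
static void   iter_reexpand(struct iter *i, size_t n) { i->count = n; }

/* clamp a transfer to what the device still has, do the I/O, then
 * hand the iterator back at its original size */
long read_iter(struct iter *to, size_t dev_remaining)
{
    size_t shorted = 0;
    long ret;

    if (iter_count(to) > dev_remaining) {
        shorted = iter_count(to) - dev_remaining;
        iter_truncate(to, dev_remaining);
    }
    ret = (long)iter_count(to);       /* stands in for the real I/O */
    iter_reexpand(to, iter_count(to) + shorted);
    return ret;
}

int main(void)
{
    struct iter it = { 100 };
    long n = read_iter(&it, 40);

    return (n == 40 && it.count == 100) ? 0 : 1;
}
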
62159 diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
62160 index 744b99ddc28c..a7d9e147dee6 100644
62161 --- a/fs/btrfs/block-group.c
62162 +++ b/fs/btrfs/block-group.c
62163 @@ -3269,6 +3269,7 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
62164   */
62165  void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
62167 +       struct btrfs_transaction *cur_trans = trans->transaction;
62168         struct btrfs_fs_info *fs_info = trans->fs_info;
62169         struct btrfs_space_info *info;
62170         u64 left;
62171 @@ -3283,6 +3284,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
62172         lockdep_assert_held(&fs_info->chunk_mutex);
62174         info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
62175 +again:
62176         spin_lock(&info->lock);
62177         left = info->total_bytes - btrfs_space_info_used(info, true);
62178         spin_unlock(&info->lock);
62179 @@ -3301,6 +3303,58 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
62181         if (left < thresh) {
62182                 u64 flags = btrfs_system_alloc_profile(fs_info);
62183 +               u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved);
62185 +               /*
62186 +                * If there's not available space for the chunk tree (system
62187 +                * space) and there are other tasks that reserved space for
62188 +                * creating a new system block group, wait for them to complete
62189 +                * the creation of their system block group and release excess
62190 +                * reserved space. We do this because:
62191 +                *
62192 +                * *) We can end up allocating more system chunks than necessary
62193 +                *    when there are multiple tasks that are concurrently
62194 +                *    allocating block groups, which can lead to exhaustion of
62195 +                *    the system array in the superblock;
62196 +                *
62197 +                * *) If we allocate extra and unnecessary system block groups,
62198 +                *    despite being empty for a long time, and possibly forever,
62199 +                *    they end up not being added to the list of unused block groups
62200 +                *    because that typically happens only when deallocating the
62201 +                *    last extent from a block group - which never happens since
62202 +                *    we never allocate from them in the first place. The few
62203 +                *    exceptions are when mounting a filesystem or running scrub,
62204 +                *    which add unused block groups to the list of unused block
62205 +                *    groups, to be deleted by the cleaner kthread.
62206 +                *    And even when they are added to the list of unused block
62207 +                *    groups, it can take a long time until they get deleted,
62208 +                *    since the cleaner kthread might be sleeping or busy with
62209 +                *    other work (deleting subvolumes, running delayed iputs,
62210 +                *    defrag scheduling, etc);
62211 +                *
62212 +                * This is rare in practice, but can happen when too many tasks
62213 +                * are allocating block groups in parallel (via fallocate())
62214 +                * and before the one that reserved space for a new system block
62215 +                * group finishes the block group creation and releases the space
62216 +                * reserved in excess (at btrfs_create_pending_block_groups()),
62217 +                * other tasks end up here and see free system space temporarily
62218 +                * not enough for updating the chunk tree.
62219 +                *
62220 +                * We unlock the chunk mutex before waiting for such tasks and
62221 +                * lock it again after the wait, otherwise we would deadlock.
62222 +                * It is safe to do so because allocating a system chunk is the
62223 +                * first thing done while allocating a new block group.
62224 +                */
62225 +               if (reserved > trans->chunk_bytes_reserved) {
62226 +                       const u64 min_needed = reserved - thresh;
62228 +                       mutex_unlock(&fs_info->chunk_mutex);
62229 +                       wait_event(cur_trans->chunk_reserve_wait,
62230 +                          atomic64_read(&cur_trans->chunk_bytes_reserved) <=
62231 +                          min_needed);
62232 +                       mutex_lock(&fs_info->chunk_mutex);
62233 +                       goto again;
62234 +               }
62236                 /*
62237                  * Ignore failure to create system chunk. We might end up not
62238 @@ -3315,8 +3369,10 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
62239                 ret = btrfs_block_rsv_add(fs_info->chunk_root,
62240                                           &fs_info->chunk_block_rsv,
62241                                           thresh, BTRFS_RESERVE_NO_FLUSH);
62242 -               if (!ret)
62243 +               if (!ret) {
62244 +                       atomic64_add(thresh, &cur_trans->chunk_bytes_reserved);
62245                         trans->chunk_bytes_reserved += thresh;
62246 +               }
62247         }
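
The block-group.c hunk adds a wait-and-retry loop around system chunk allocation: if other transactions hold excess reservations, drop fs_info->chunk_mutex, sleep until enough has been released, retake the lock, and re-evaluate from the top (goto again). A rough pthread sketch of that drop-wait-retake shape; a condition variable plays the part of wait_event(), and the releasing side, not shown, would broadcast wq after subtracting from reserved under wq_lock:

#include <pthread.h>

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wq_lock     = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq          = PTHREAD_COND_INITIALIZER;
static long reserved;               /* total reserved by all tasks */

void check_system_chunk(long mine, long thresh)
{
    pthread_mutex_lock(&chunk_mutex);
again:
    pthread_mutex_lock(&wq_lock);
    if (reserved > mine) {
        long min_needed = reserved - thresh;

        /* never sleep holding the outer lock: drop it, wait for
         * the reservations to shrink, retake it, revalidate */
        pthread_mutex_unlock(&chunk_mutex);
        while (reserved > min_needed)
            pthread_cond_wait(&wq, &wq_lock);
        pthread_mutex_unlock(&wq_lock);
        pthread_mutex_lock(&chunk_mutex);
        goto again;
    }
    reserved += thresh;             /* our own reservation */
    pthread_mutex_unlock(&wq_lock);
    pthread_mutex_unlock(&chunk_mutex);
}

int main(void)
{
    check_system_chunk(0, 10);      /* no contention: no wait */
    return 0;
}
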
62250 diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
62251 index 28e202e89660..418903604936 100644
62252 --- a/fs/btrfs/btrfs_inode.h
62253 +++ b/fs/btrfs/btrfs_inode.h
62254 @@ -299,6 +299,21 @@ static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
62255                                                   mod);
62256  }
62258 +/*
62259 + * Called every time after doing a buffered, direct IO or memory mapped write.
62260 + *
62261 + * This is to ensure that if we write to a file that was previously fsynced in
62262 + * the current transaction, then try to fsync it again in the same transaction,
62263 + * we will know that there were changes in the file and that it needs to be
62264 + * logged.
62265 + */
62266 +static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
62267 +{
62268 +       spin_lock(&inode->lock);
62269 +       inode->last_sub_trans = inode->root->log_transid;
62270 +       spin_unlock(&inode->lock);
62271 +}
62273  static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
62274  {
62275         int ret = 0;
62276 diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
62277 index 3f4c832abfed..81387cdf334d 100644
62278 --- a/fs/btrfs/compression.c
62279 +++ b/fs/btrfs/compression.c
62280 @@ -80,10 +80,15 @@ static int compression_compress_pages(int type, struct list_head *ws,
62281         case BTRFS_COMPRESS_NONE:
62282         default:
62283                 /*
62284 -                * This can't happen, the type is validated several times
62285 -                * before we get here. As a sane fallback, return what the
62286 -                * callers will understand as 'no compression happened'.
62287 +                * This can happen when compression races with a remount that
62288 +                * sets it to 'no compress', while the caller doesn't call
62289 +                * inode_need_compress() to check if we really need to
62290 +                * compress.
62291 +                *
62292 +                * Not a big deal, we just need to inform the caller that we
62293 +                * haven't allocated any pages yet.
62294                  */
62295 +               *out_pages = 0;
62296                 return -E2BIG;
62297         }
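
With this change -E2BIG doubles as the "no compression happened" result for the
remount race, and zeroing *out_pages guarantees the caller has no compressed
pages to clean up. A sketch of the resulting caller-side contract (the caller
shown is illustrative, not quoted from btrfs):

	ret = compression_compress_pages(type, ws, mapping, start, pages,
					 &nr_pages, &total_in, &total_out);
	if (ret == -E2BIG) {
		/* Compression raced with remount and was switched off:
		 * nr_pages is 0, so there is nothing to free; fall back
		 * to writing the range uncompressed. */
	}
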
62299 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
62300 index 34b929bd5c1a..f43ce82a6aed 100644
62301 --- a/fs/btrfs/ctree.c
62302 +++ b/fs/btrfs/ctree.c
62303 @@ -1365,10 +1365,30 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
62304                                    "failed to read tree block %llu from get_old_root",
62305                                    logical);
62306                 } else {
62307 +                       struct tree_mod_elem *tm2;
62309                         btrfs_tree_read_lock(old);
62310                         eb = btrfs_clone_extent_buffer(old);
62311 +                       /*
62312 +                        * After the lookup for the most recent tree mod operation
62313 +                        * above and before we locked and cloned the extent buffer
62314 +                        * 'old', a new tree mod log operation may have been added.
62315 +                        * So look up a more recent one to make sure the number
62316 +                        * of mod log operations we replay is consistent with the
62317 +                        * number of items we have in the cloned extent buffer,
62318 +                        * otherwise we can hit a BUG_ON when rewinding the extent
62319 +                        * buffer.
62320 +                        */
62321 +                       tm2 = tree_mod_log_search(fs_info, logical, time_seq);
62322                         btrfs_tree_read_unlock(old);
62323                         free_extent_buffer(old);
62324 +                       ASSERT(tm2);
62325 +                       ASSERT(tm2 == tm || tm2->seq > tm->seq);
62326 +                       if (!tm2 || tm2->seq < tm->seq) {
62327 +                               free_extent_buffer(eb);
62328 +                               return NULL;
62329 +                       }
62330 +                       tm = tm2;
62331                 }
62332         } else if (old_root) {
62333                 eb_root_owner = btrfs_header_owner(eb_root);
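
The fix is the classic check/lock/re-check pattern: the first tree mod log
search runs before 'old' is read-locked, so a newer mod log entry can slip in
before the clone is taken. Searching again under the lock and demanding a
non-decreasing sequence number keeps the clone consistent with the operations
that get replayed. Reduced to its skeleton (names as in the hunk above):

	tm = tree_mod_log_search(fs_info, logical, time_seq);	/* unlocked */
	btrfs_tree_read_lock(old);
	eb = btrfs_clone_extent_buffer(old);
	tm2 = tree_mod_log_search(fs_info, logical, time_seq);	/* re-check */
	btrfs_tree_read_unlock(old);
	if (!tm2 || tm2->seq < tm->seq)
		return NULL;	/* bail out instead of tripping the BUG_ON */
	tm = tm2;		/* replay up to the newest sequence seen */
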
62334 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
62335 index 9ae776ab3967..29ef969035df 100644
62336 --- a/fs/btrfs/ctree.h
62337 +++ b/fs/btrfs/ctree.h
62338 @@ -3110,7 +3110,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
62339                                struct btrfs_inode *inode, u64 new_size,
62340                                u32 min_type);
62342 -int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
62343 +int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
62344  int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
62345                                bool in_reclaim_context);
62346  int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
62347 diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
62348 index 56642ca7af10..fa1c3bc93ccf 100644
62349 --- a/fs/btrfs/delalloc-space.c
62350 +++ b/fs/btrfs/delalloc-space.c
62351 @@ -311,7 +311,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
62352                         flush = BTRFS_RESERVE_FLUSH_LIMIT;
62354                 if (btrfs_transaction_in_commit(fs_info))
62355 -                       schedule_timeout(1);
62356 +                       schedule_min_hrtimeout();
62357         }
62359         num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
62360 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
62361 index bf25401c9768..c1d2b6786129 100644
62362 --- a/fs/btrfs/delayed-inode.c
62363 +++ b/fs/btrfs/delayed-inode.c
62364 @@ -1589,8 +1589,8 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
62365          * We can only do one readdir with delayed items at a time because of
62366          * item->readdir_list.
62367          */
62368 -       inode_unlock_shared(inode);
62369 -       inode_lock(inode);
62370 +       btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
62371 +       btrfs_inode_lock(inode, 0);
62373         mutex_lock(&delayed_node->mutex);
62374         item = __btrfs_first_delayed_insertion_item(delayed_node);
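
Judging by the conversions throughout this patch, btrfs_inode_lock(inode, 0)
takes the VFS inode lock exclusively while BTRFS_ILOCK_SHARED takes it shared,
so the readdir path above upgrades from a shared to an exclusive hold (an
inference from the call sites here, not a quote of the helpers):

	/* btrfs_inode_lock(inode, 0)                  ~ inode_lock(inode)        */
	/* btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED) ~ inode_lock_shared(inode) */
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);	/* drop the shared lock  */
	btrfs_inode_lock(inode, 0);			/* retake it exclusively */
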
62375 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
62376 index 36a3c973fda1..5b82050b871a 100644
62377 --- a/fs/btrfs/extent-tree.c
62378 +++ b/fs/btrfs/extent-tree.c
62379 @@ -1340,12 +1340,16 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
62380                 stripe = bbio->stripes;
62381                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
62382                         u64 bytes;
62383 +                       struct btrfs_device *device = stripe->dev;
62385 -                       if (!stripe->dev->bdev) {
62386 +                       if (!device->bdev) {
62387                                 ASSERT(btrfs_test_opt(fs_info, DEGRADED));
62388                                 continue;
62389                         }
62391 +                       if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
62392 +                               continue;
62394                         ret = do_discard_extent(stripe, &bytes);
62395                         if (!ret) {
62396                                 discarded_bytes += bytes;
62397 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
62398 index 0e155f013839..abee4b62741d 100644
62399 --- a/fs/btrfs/file.c
62400 +++ b/fs/btrfs/file.c
62401 @@ -2014,14 +2014,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
62402         else
62403                 num_written = btrfs_buffered_write(iocb, from);
62405 -       /*
62406 -        * We also have to set last_sub_trans to the current log transid,
62407 -        * otherwise subsequent syncs to a file that's been synced in this
62408 -        * transaction will appear to have already occurred.
62409 -        */
62410 -       spin_lock(&inode->lock);
62411 -       inode->last_sub_trans = inode->root->log_transid;
62412 -       spin_unlock(&inode->lock);
62413 +       btrfs_set_inode_last_sub_trans(inode);
62415         if (num_written > 0)
62416                 num_written = generic_write_sync(iocb, num_written);
62418 @@ -2073,6 +2067,30 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
62419         return ret;
62420  }
62422 +static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
62423 +{
62424 +       struct btrfs_inode *inode = BTRFS_I(ctx->inode);
62425 +       struct btrfs_fs_info *fs_info = inode->root->fs_info;
62427 +       if (btrfs_inode_in_log(inode, fs_info->generation) &&
62428 +           list_empty(&ctx->ordered_extents))
62429 +               return true;
62431 +       /*
62432 +        * If we are doing a fast fsync we cannot bail out if the inode's
62433 +        * last_trans is <= the last committed transaction, because we only
62434 +        * update the last_trans of the inode during ordered extent completion,
62435 +        * and for a fast fsync we don't wait for that, we only wait for the
62436 +        * writeback to complete.
62437 +        */
62438 +       if (inode->last_trans <= fs_info->last_trans_committed &&
62439 +           (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
62440 +            list_empty(&ctx->ordered_extents)))
62441 +               return true;
62443 +       return false;
62444 +}
62446  /*
62447   * fsync call for both files and directories.  This logs the inode into
62448   * the tree log instead of forcing full commits whenever possible.
62449 @@ -2122,7 +2140,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
62450         if (ret)
62451                 goto out;
62453 -       inode_lock(inode);
62454 +       btrfs_inode_lock(inode, 0);
62456         atomic_inc(&root->log_batch);
62458 @@ -2154,7 +2172,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
62459          */
62460         ret = start_ordered_ops(inode, start, end);
62461         if (ret) {
62462 -               inode_unlock(inode);
62463 +               btrfs_inode_unlock(inode, 0);
62464                 goto out;
62465         }
62467 @@ -2191,17 +2209,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
62469         atomic_inc(&root->log_batch);
62471 -       /*
62472 -        * If we are doing a fast fsync we can not bail out if the inode's
62473 -        * last_trans is <= then the last committed transaction, because we only
62474 -        * update the last_trans of the inode during ordered extent completion,
62475 -        * and for a fast fsync we don't wait for that, we only wait for the
62476 -        * writeback to complete.
62477 -        */
62478         smp_mb();
62479 -       if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
62480 -           (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed &&
62481 -            (full_sync || list_empty(&ctx.ordered_extents)))) {
62482 +       if (skip_inode_logging(&ctx)) {
62483                 /*
62484                  * We've had everything committed since the last time we were
62485                  * modified so clear this flag in case it was set for whatever
62486 @@ -2255,7 +2264,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
62487          * file again, but that will end up using the synchronization
62488          * inside btrfs_sync_log to keep things safe.
62489          */
62490 -       inode_unlock(inode);
62491 +       btrfs_inode_unlock(inode, 0);
62493         if (ret != BTRFS_NO_LOG_SYNC) {
62494                 if (!ret) {
62495 @@ -2285,7 +2294,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
62497  out_release_extents:
62498         btrfs_release_log_ctx_extents(&ctx);
62499 -       inode_unlock(inode);
62500 +       btrfs_inode_unlock(inode, 0);
62501         goto out;
62504 @@ -2735,8 +2744,6 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
62505                         extent_info->file_offset += replace_len;
62506                 }
62508 -               cur_offset = drop_args.drop_end;
62510                 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
62511                 if (ret)
62512                         break;
62513 @@ -2756,7 +2763,9 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
62514                 BUG_ON(ret);    /* shouldn't happen */
62515                 trans->block_rsv = rsv;
62517 -               if (!extent_info) {
62518 +               cur_offset = drop_args.drop_end;
62519 +               len = end - cur_offset;
62520 +               if (!extent_info && len) {
62521                         ret = find_first_non_hole(BTRFS_I(inode), &cur_offset,
62522                                                   &len);
62523                         if (unlikely(ret < 0))
62524 @@ -2868,7 +2877,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
62525         if (ret)
62526                 return ret;
62528 -       inode_lock(inode);
62529 +       btrfs_inode_lock(inode, 0);
62530         ino_size = round_up(inode->i_size, fs_info->sectorsize);
62531         ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
62532         if (ret < 0)
62533 @@ -2908,7 +2917,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
62534                 truncated_block = true;
62535                 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
62536                 if (ret) {
62537 -                       inode_unlock(inode);
62538 +                       btrfs_inode_unlock(inode, 0);
62539                         return ret;
62540                 }
62541         }
62542 @@ -3009,7 +3018,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
62543                                 ret = ret2;
62544                 }
62545         }
62546 -       inode_unlock(inode);
62547 +       btrfs_inode_unlock(inode, 0);
62548         return ret;
62551 @@ -3377,7 +3386,7 @@ static long btrfs_fallocate(struct file *file, int mode,
62553         if (mode & FALLOC_FL_ZERO_RANGE) {
62554                 ret = btrfs_zero_range(inode, offset, len, mode);
62555 -               inode_unlock(inode);
62556 +               btrfs_inode_unlock(inode, 0);
62557                 return ret;
62558         }
62560 @@ -3487,7 +3496,7 @@ static long btrfs_fallocate(struct file *file, int mode,
62561         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
62562                              &cached_state);
62563  out:
62564 -       inode_unlock(inode);
62565 +       btrfs_inode_unlock(inode, 0);
62566         /* Let go of our reservation. */
62567         if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
62568                 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
62569 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
62570 index 9988decd5717..ac9c2691376d 100644
62571 --- a/fs/btrfs/free-space-cache.c
62572 +++ b/fs/btrfs/free-space-cache.c
62573 @@ -3942,7 +3942,7 @@ static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
62575         struct btrfs_block_group *block_group;
62576         struct rb_node *node;
62577 -       int ret;
62578 +       int ret = 0;
62580         btrfs_info(fs_info, "cleaning free space cache v1");
62582 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
62583 index a520775949a0..81b93c9c659b 100644
62584 --- a/fs/btrfs/inode.c
62585 +++ b/fs/btrfs/inode.c
62586 @@ -3253,6 +3253,7 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
62587                 inode = list_first_entry(&fs_info->delayed_iputs,
62588                                 struct btrfs_inode, delayed_iput);
62589                 run_delayed_iput_locked(fs_info, inode);
62590 +               cond_resched_lock(&fs_info->delayed_iput_lock);
62591         }
62592         spin_unlock(&fs_info->delayed_iput_lock);
62594 @@ -8619,9 +8620,7 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
62595         set_page_dirty(page);
62596         SetPageUptodate(page);
62598 -       BTRFS_I(inode)->last_trans = fs_info->generation;
62599 -       BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
62600 -       BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
62601 +       btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
62603         unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
62605 @@ -9674,7 +9673,7 @@ static int start_delalloc_inodes(struct btrfs_root *root,
62606         return ret;
62609 -int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
62610 +int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
62612         struct writeback_control wbc = {
62613                 .nr_to_write = LONG_MAX,
62614 @@ -9687,7 +9686,7 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
62615         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
62616                 return -EROFS;
62618 -       return start_delalloc_inodes(root, &wbc, true, false);
62619 +       return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
62622  int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
62623 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
62624 index e8d53fea4c61..f9ecb6c0bf15 100644
62625 --- a/fs/btrfs/ioctl.c
62626 +++ b/fs/btrfs/ioctl.c
62627 @@ -226,7 +226,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
62628         if (ret)
62629                 return ret;
62631 -       inode_lock(inode);
62632 +       btrfs_inode_lock(inode, 0);
62633         fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
62634         old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
62636 @@ -353,7 +353,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
62637   out_end_trans:
62638         btrfs_end_transaction(trans);
62639   out_unlock:
62640 -       inode_unlock(inode);
62641 +       btrfs_inode_unlock(inode, 0);
62642         mnt_drop_write_file(file);
62643         return ret;
62645 @@ -449,7 +449,7 @@ static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
62646         if (ret)
62647                 return ret;
62649 -       inode_lock(inode);
62650 +       btrfs_inode_lock(inode, 0);
62652         old_flags = binode->flags;
62653         old_i_flags = inode->i_flags;
62654 @@ -501,7 +501,7 @@ static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
62655                 inode->i_flags = old_i_flags;
62656         }
62658 -       inode_unlock(inode);
62659 +       btrfs_inode_unlock(inode, 0);
62660         mnt_drop_write_file(file);
62662         return ret;
62663 @@ -697,8 +697,6 @@ static noinline int create_subvol(struct inode *dir,
62664         btrfs_set_root_otransid(root_item, trans->transid);
62666         btrfs_tree_unlock(leaf);
62667 -       free_extent_buffer(leaf);
62668 -       leaf = NULL;
62670         btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);
62672 @@ -707,8 +705,22 @@ static noinline int create_subvol(struct inode *dir,
62673         key.type = BTRFS_ROOT_ITEM_KEY;
62674         ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
62675                                 root_item);
62676 -       if (ret)
62677 +       if (ret) {
62678 +               /*
62679 +                * Since we don't abort the transaction in this case, free the
62680 +                * tree block so that we don't leak space and leave the
62681 +                * filesystem in an inconsistent state (an extent item in the
62682 +                * extent tree without backreferences). Also no need to have
62683 +                * the tree block locked since it is not in any tree at this
62684 +                * point, so no other task can find it and use it.
62685 +                */
62686 +               btrfs_free_tree_block(trans, root, leaf, 0, 1);
62687 +               free_extent_buffer(leaf);
62688                 goto fail;
62689 +       }
62691 +       free_extent_buffer(leaf);
62692 +       leaf = NULL;
62694         key.offset = (u64)-1;
62695         new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
62696 @@ -1014,7 +1026,7 @@ static noinline int btrfs_mksubvol(const struct path *parent,
62697  out_dput:
62698         dput(dentry);
62699  out_unlock:
62700 -       inode_unlock(dir);
62701 +       btrfs_inode_unlock(dir, 0);
62702         return error;
62705 @@ -1034,7 +1046,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
62706          */
62707         btrfs_drew_read_lock(&root->snapshot_lock);
62709 -       ret = btrfs_start_delalloc_snapshot(root);
62710 +       ret = btrfs_start_delalloc_snapshot(root, false);
62711         if (ret)
62712                 goto out;
62714 @@ -1612,7 +1624,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
62715                         ra_index += cluster;
62716                 }
62718 -               inode_lock(inode);
62719 +               btrfs_inode_lock(inode, 0);
62720                 if (IS_SWAPFILE(inode)) {
62721                         ret = -ETXTBSY;
62722                 } else {
62723 @@ -1621,13 +1633,13 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
62724                         ret = cluster_pages_for_defrag(inode, pages, i, cluster);
62725                 }
62726                 if (ret < 0) {
62727 -                       inode_unlock(inode);
62728 +                       btrfs_inode_unlock(inode, 0);
62729                         goto out_ra;
62730                 }
62732                 defrag_count += ret;
62733                 balance_dirty_pages_ratelimited(inode->i_mapping);
62734 -               inode_unlock(inode);
62735 +               btrfs_inode_unlock(inode, 0);
62737                 if (newer_than) {
62738                         if (newer_off == (u64)-1)
62739 @@ -1675,9 +1687,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
62741  out_ra:
62742         if (do_compress) {
62743 -               inode_lock(inode);
62744 +               btrfs_inode_lock(inode, 0);
62745                 BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
62746 -               inode_unlock(inode);
62747 +               btrfs_inode_unlock(inode, 0);
62748         }
62749         if (!file)
62750                 kfree(ra);
62751 @@ -3112,9 +3124,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
62752                 goto out_dput;
62753         }
62755 -       inode_lock(inode);
62756 +       btrfs_inode_lock(inode, 0);
62757         err = btrfs_delete_subvolume(dir, dentry);
62758 -       inode_unlock(inode);
62759 +       btrfs_inode_unlock(inode, 0);
62760         if (!err) {
62761                 fsnotify_rmdir(dir, dentry);
62762                 d_delete(dentry);
62763 @@ -3123,7 +3135,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
62764  out_dput:
62765         dput(dentry);
62766  out_unlock_dir:
62767 -       inode_unlock(dir);
62768 +       btrfs_inode_unlock(dir, 0);
62769  free_subvol_name:
62770         kfree(subvol_name_ptr);
62771  free_parent:
62772 diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
62773 index 985a21558437..043e3fa961e0 100644
62774 --- a/fs/btrfs/ordered-data.c
62775 +++ b/fs/btrfs/ordered-data.c
62776 @@ -995,7 +995,7 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
62778         if (pre)
62779                 ret = clone_ordered_extent(ordered, 0, pre);
62780 -       if (post)
62781 +       if (ret == 0 && post)
62782                 ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
62783                                            post);
62785 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
62786 index f0b9ef13153a..2991287a71a8 100644
62787 --- a/fs/btrfs/qgroup.c
62788 +++ b/fs/btrfs/qgroup.c
62789 @@ -3579,7 +3579,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
62790                 return 0;
62791         }
62793 -       ret = btrfs_start_delalloc_snapshot(root);
62794 +       ret = btrfs_start_delalloc_snapshot(root, true);
62795         if (ret < 0)
62796                 goto out;
62797         btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
62798 diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
62799 index 762881b777b3..0abbf050580d 100644
62800 --- a/fs/btrfs/reflink.c
62801 +++ b/fs/btrfs/reflink.c
62802 @@ -833,7 +833,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
62803                 return -EINVAL;
62805         if (same_inode)
62806 -               inode_lock(src_inode);
62807 +               btrfs_inode_lock(src_inode, 0);
62808         else
62809                 lock_two_nondirectories(src_inode, dst_inode);
62811 @@ -849,7 +849,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
62813  out_unlock:
62814         if (same_inode)
62815 -               inode_unlock(src_inode);
62816 +               btrfs_inode_unlock(src_inode, 0);
62817         else
62818                 unlock_two_nondirectories(src_inode, dst_inode);
62820 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
62821 index 232d5da7b7be..829dc8dcc151 100644
62822 --- a/fs/btrfs/relocation.c
62823 +++ b/fs/btrfs/relocation.c
62824 @@ -733,10 +733,12 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
62825         struct extent_buffer *eb;
62826         struct btrfs_root_item *root_item;
62827         struct btrfs_key root_key;
62828 -       int ret;
62829 +       int ret = 0;
62830 +       bool must_abort = false;
62832         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
62833 -       BUG_ON(!root_item);
62834 +       if (!root_item)
62835 +               return ERR_PTR(-ENOMEM);
62837         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
62838         root_key.type = BTRFS_ROOT_ITEM_KEY;
62839 @@ -748,7 +750,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
62840                 /* called by btrfs_init_reloc_root */
62841                 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
62842                                       BTRFS_TREE_RELOC_OBJECTID);
62843 -               BUG_ON(ret);
62844 +               if (ret)
62845 +                       goto fail;
62847                 /*
62848                  * Set the last_snapshot field to the generation of the commit
62849                  * root - like this ctree.c:btrfs_block_can_be_shared() behaves
62850 @@ -769,9 +773,16 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
62851                  */
62852                 ret = btrfs_copy_root(trans, root, root->node, &eb,
62853                                       BTRFS_TREE_RELOC_OBJECTID);
62854 -               BUG_ON(ret);
62855 +               if (ret)
62856 +                       goto fail;
62857         }
62859 +       /*
62860 +        * We have changed references at this point, we must abort the
62861 +        * transaction if anything fails.
62862 +        */
62863 +       must_abort = true;
62865         memcpy(root_item, &root->root_item, sizeof(*root_item));
62866         btrfs_set_root_bytenr(root_item, eb->start);
62867         btrfs_set_root_level(root_item, btrfs_header_level(eb));
62868 @@ -789,14 +800,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
62870         ret = btrfs_insert_root(trans, fs_info->tree_root,
62871                                 &root_key, root_item);
62872 -       BUG_ON(ret);
62873 +       if (ret)
62874 +               goto fail;
62876         kfree(root_item);
62878         reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
62879 -       BUG_ON(IS_ERR(reloc_root));
62880 +       if (IS_ERR(reloc_root)) {
62881 +               ret = PTR_ERR(reloc_root);
62882 +               goto abort;
62883 +       }
62884         set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
62885         reloc_root->last_trans = trans->transid;
62886         return reloc_root;
62887 +fail:
62888 +       kfree(root_item);
62889 +abort:
62890 +       if (must_abort)
62891 +               btrfs_abort_transaction(trans, ret);
62892 +       return ERR_PTR(ret);
62893  }
62895  /*
62896 @@ -875,7 +897,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
62897         int ret;
62899         if (!have_reloc_root(root))
62900 -               goto out;
62901 +               return 0;
62903         reloc_root = root->reloc_root;
62904         root_item = &reloc_root->root_item;
62905 @@ -908,10 +930,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
62907         ret = btrfs_update_root(trans, fs_info->tree_root,
62908                                 &reloc_root->root_key, root_item);
62909 -       BUG_ON(ret);
62910         btrfs_put_root(reloc_root);
62911 -out:
62912 -       return 0;
62913 +       return ret;
62916  /*
62917 @@ -1185,8 +1205,8 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
62918         int ret;
62919         int slot;
62921 -       BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
62922 -       BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
62923 +       ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
62924 +       ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
62926         last_snapshot = btrfs_root_last_snapshot(&src->root_item);
62927  again:
62928 @@ -1217,7 +1237,7 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
62929         parent = eb;
62930         while (1) {
62931                 level = btrfs_header_level(parent);
62932 -               BUG_ON(level < lowest_level);
62933 +               ASSERT(level >= lowest_level);
62935                 ret = btrfs_bin_search(parent, &key, &slot);
62936                 if (ret < 0)
62937 @@ -2578,7 +2598,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
62938                 return btrfs_end_transaction(trans);
62939         }
62941 -       inode_lock(&inode->vfs_inode);
62942 +       btrfs_inode_lock(&inode->vfs_inode, 0);
62943         for (nr = 0; nr < cluster->nr; nr++) {
62944                 start = cluster->boundary[nr] - offset;
62945                 if (nr + 1 < cluster->nr)
62946 @@ -2596,7 +2616,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
62947                 if (ret)
62948                         break;
62949         }
62950 -       inode_unlock(&inode->vfs_inode);
62951 +       btrfs_inode_unlock(&inode->vfs_inode, 0);
62953         if (cur_offset < prealloc_end)
62954                 btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
62955 diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
62956 index 3d9088eab2fc..b9202a1f1af1 100644
62957 --- a/fs/btrfs/scrub.c
62958 +++ b/fs/btrfs/scrub.c
62959 @@ -3682,8 +3682,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
62960                         spin_lock(&cache->lock);
62961                         if (!cache->to_copy) {
62962                                 spin_unlock(&cache->lock);
62963 -                               ro_set = 0;
62964 -                               goto done;
62965 +                               btrfs_put_block_group(cache);
62966 +                               goto skip;
62967                         }
62968                         spin_unlock(&cache->lock);
62969                 }
62970 @@ -3841,7 +3841,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
62971                                                       cache, found_key.offset))
62972                         ro_set = 0;
62974 -done:
62975                 down_write(&dev_replace->rwsem);
62976                 dev_replace->cursor_left = dev_replace->cursor_right;
62977                 dev_replace->item_needs_writeback = 1;
62978 diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
62979 index 8f323859156b..8ae8f1732fd2 100644
62980 --- a/fs/btrfs/send.c
62981 +++ b/fs/btrfs/send.c
62982 @@ -7139,7 +7139,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
62983         int i;
62985         if (root) {
62986 -               ret = btrfs_start_delalloc_snapshot(root);
62987 +               ret = btrfs_start_delalloc_snapshot(root, false);
62988                 if (ret)
62989                         return ret;
62990                 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
62991 @@ -7147,7 +7147,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
62993         for (i = 0; i < sctx->clone_roots_cnt; i++) {
62994                 root = sctx->clone_roots[i].root;
62995 -               ret = btrfs_start_delalloc_snapshot(root);
62996 +               ret = btrfs_start_delalloc_snapshot(root, false);
62997                 if (ret)
62998                         return ret;
62999                 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
63000 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
63001 index acff6bb49a97..d56d3e7ca324 100644
63002 --- a/fs/btrfs/transaction.c
63003 +++ b/fs/btrfs/transaction.c
63004 @@ -260,6 +260,7 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
63005  void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
63007         struct btrfs_fs_info *fs_info = trans->fs_info;
63008 +       struct btrfs_transaction *cur_trans = trans->transaction;
63010         if (!trans->chunk_bytes_reserved)
63011                 return;
63012 @@ -268,6 +269,8 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
63014         btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
63015                                 trans->chunk_bytes_reserved, NULL);
63016 +       atomic64_sub(trans->chunk_bytes_reserved, &cur_trans->chunk_bytes_reserved);
63017 +       cond_wake_up(&cur_trans->chunk_reserve_wait);
63018         trans->chunk_bytes_reserved = 0;
63021 @@ -383,6 +386,8 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info,
63022         spin_lock_init(&cur_trans->dropped_roots_lock);
63023         INIT_LIST_HEAD(&cur_trans->releasing_ebs);
63024         spin_lock_init(&cur_trans->releasing_ebs_lock);
63025 +       atomic64_set(&cur_trans->chunk_bytes_reserved, 0);
63026 +       init_waitqueue_head(&cur_trans->chunk_reserve_wait);
63027         list_add_tail(&cur_trans->list, &fs_info->trans_list);
63028         extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
63029                         IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
63030 @@ -1961,7 +1966,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
63031          */
63032         BUG_ON(list_empty(&cur_trans->list));
63034 -       list_del_init(&cur_trans->list);
63035         if (cur_trans == fs_info->running_transaction) {
63036                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
63037                 spin_unlock(&fs_info->trans_lock);
63038 @@ -1970,6 +1974,17 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
63040                 spin_lock(&fs_info->trans_lock);
63041         }
63043 +       /*
63044 +        * Now that we know no one else is still using the transaction we can
63045 +        * remove the transaction from the list of transactions. This prevents
63046 +        * the transaction kthread from cleaning up the transaction while some
63047 +        * other task is still using it, which could result in a use-after-free
63048 +        * on things like log trees, as it forces the transaction kthread to
63049 +        * wait for this transaction to be cleaned up by us.
63050 +        */
63051 +       list_del_init(&cur_trans->list);
63053         spin_unlock(&fs_info->trans_lock);
63055         btrfs_cleanup_one_transaction(trans->transaction, fs_info);
63056 diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
63057 index 6335716e513f..364cfbb4c5c5 100644
63058 --- a/fs/btrfs/transaction.h
63059 +++ b/fs/btrfs/transaction.h
63060 @@ -96,6 +96,13 @@ struct btrfs_transaction {
63062         spinlock_t releasing_ebs_lock;
63063         struct list_head releasing_ebs;
63065 +       /*
63066 +        * The number of bytes currently reserved, by all transaction handles
63067 +        * attached to this transaction, for metadata extents of the chunk tree.
63068 +        */
63069 +       atomic64_t chunk_bytes_reserved;
63070 +       wait_queue_head_t chunk_reserve_wait;
63071  };
63073  #define __TRANS_FREEZABLE      (1U << 0)
63074 @@ -175,7 +182,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
63075         spin_lock(&inode->lock);
63076         inode->last_trans = trans->transaction->transid;
63077         inode->last_sub_trans = inode->root->log_transid;
63078 -       inode->last_log_commit = inode->root->last_log_commit;
63079 +       inode->last_log_commit = inode->last_sub_trans - 1;
63080         spin_unlock(&inode->lock);
63083 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
63084 index 92a368627791..53624fca0747 100644
63085 --- a/fs/btrfs/tree-log.c
63086 +++ b/fs/btrfs/tree-log.c
63087 @@ -3165,20 +3165,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
63088          */
63089         mutex_unlock(&root->log_mutex);
63091 -       btrfs_init_log_ctx(&root_log_ctx, NULL);
63093 -       mutex_lock(&log_root_tree->log_mutex);
63095         if (btrfs_is_zoned(fs_info)) {
63096 +               mutex_lock(&fs_info->tree_root->log_mutex);
63097                 if (!log_root_tree->node) {
63098                         ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
63099                         if (ret) {
63100 -                               mutex_unlock(&log_root_tree->log_mutex);
63101 +                               mutex_unlock(&fs_info->tree_root->log_mutex);
63102                                 goto out;
63103                         }
63104                 }
63105 +               mutex_unlock(&fs_info->tree_root->log_mutex);
63106         }
63108 +       btrfs_init_log_ctx(&root_log_ctx, NULL);
63110 +       mutex_lock(&log_root_tree->log_mutex);
63112         index2 = log_root_tree->log_transid % 2;
63113         list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
63114         root_log_ctx.log_transid = log_root_tree->log_transid;
63115 @@ -6058,7 +6060,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
63116          * (since logging them is pointless, a link count of 0 means they
63117          * will never be accessible).
63118          */
63119 -       if (btrfs_inode_in_log(inode, trans->transid) ||
63120 +       if ((btrfs_inode_in_log(inode, trans->transid) &&
63121 +            list_empty(&ctx->ordered_extents)) ||
63122             inode->vfs_inode.i_nlink == 0) {
63123                 ret = BTRFS_NO_LOG_SYNC;
63124                 goto end_no_trans;
63125 @@ -6454,6 +6457,24 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
63126             (!old_dir || old_dir->logged_trans < trans->transid))
63127                 return;
63129 +       /*
63130 +        * If we are doing a rename (old_dir is not NULL) from a directory that
63131 +        * was previously logged, make sure the next log attempt on the directory
63132 +        * is not skipped and logs the inode again. This is because the log may
63133 +        * not currently be authoritative for a range including the old
63134 +        * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
63135 +        * sure after a log replay we do not end up with both the new and old
63136 +        * dentries around (in case the inode is a directory we would have a
63137 +        * directory with two hard links and two inode references for different
63138 +        * parents). The next log attempt of old_dir will happen at
63139 +        * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
63140 +        * below, because we have previously set inode->last_unlink_trans to the
63141 +        * current transaction ID, either here or at btrfs_record_unlink_dir() in
63142 +        * case inode is a directory.
63143 +        */
63144 +       if (old_dir)
63145 +               old_dir->logged_trans = 0;
63147         btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
63148         ctx.logging_new_name = true;
63149         /*
63150 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
63151 index 1c6810bbaf8b..3912eda7905f 100644
63152 --- a/fs/btrfs/volumes.c
63153 +++ b/fs/btrfs/volumes.c
63154 @@ -4989,6 +4989,8 @@ static void init_alloc_chunk_ctl_policy_zoned(
63155                 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
63156                 ctl->devs_max = min_t(int, ctl->devs_max,
63157                                       BTRFS_MAX_DEVS_SYS_CHUNK);
63158 +       } else {
63159 +               BUG();
63160         }
63162         /* We don't want a chunk larger than 10% of writable space */
63163 diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
63164 index eeb3ebe11d7a..304ce64c70a4 100644
63165 --- a/fs/btrfs/zoned.c
63166 +++ b/fs/btrfs/zoned.c
63167 @@ -342,6 +342,13 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
63168         if (!IS_ALIGNED(nr_sectors, zone_sectors))
63169                 zone_info->nr_zones++;
63171 +       if (bdev_is_zoned(bdev) && zone_info->max_zone_append_size == 0) {
63172 +               btrfs_err(fs_info, "zoned: device %pg does not support zone append",
63173 +                         bdev);
63174 +               ret = -EINVAL;
63175 +               goto out;
63176 +       }
63178         zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
63179         if (!zone_info->seq_zones) {
63180                 ret = -ENOMEM;
63181 @@ -1119,6 +1126,11 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
63182                         goto out;
63183                 }
63185 +               if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
63186 +                       ret = -EIO;
63187 +                       goto out;
63188 +               }
63190                 switch (zone.cond) {
63191                 case BLK_ZONE_COND_OFFLINE:
63192                 case BLK_ZONE_COND_READONLY:
63193 diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
63194 index 8e9626d63976..14418b02c189 100644
63195 --- a/fs/btrfs/zstd.c
63196 +++ b/fs/btrfs/zstd.c
63197 @@ -28,10 +28,10 @@
63198  /* 307s to avoid pathologically clashing with transaction commit */
63199  #define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
63201 -static ZSTD_parameters zstd_get_btrfs_parameters(unsigned int level,
63202 +static zstd_parameters zstd_get_btrfs_parameters(unsigned int level,
63203                                                  size_t src_len)
63204  {
63205 -       ZSTD_parameters params = ZSTD_getParams(level, src_len, 0);
63206 +       zstd_parameters params = zstd_get_params(level, src_len);
63208         if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
63209                 params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
63210 @@ -48,8 +48,8 @@ struct workspace {
63211         unsigned long last_used; /* jiffies */
63212         struct list_head list;
63213         struct list_head lru_list;
63214 -       ZSTD_inBuffer in_buf;
63215 -       ZSTD_outBuffer out_buf;
63216 +       zstd_in_buffer in_buf;
63217 +       zstd_out_buffer out_buf;
63218  };
63220  /*
63221 @@ -155,12 +155,12 @@ static void zstd_calc_ws_mem_sizes(void)
63222         unsigned int level;
63224         for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
63225 -               ZSTD_parameters params =
63226 +               zstd_parameters params =
63227                         zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
63228                 size_t level_size =
63229                         max_t(size_t,
63230 -                             ZSTD_CStreamWorkspaceBound(params.cParams),
63231 -                             ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
63232 +                             zstd_cstream_workspace_bound(&params.cParams),
63233 +                             zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));
63235                 max_size = max_t(size_t, max_size, level_size);
63236                 zstd_ws_mem_sizes[level - 1] = max_size;
63237 @@ -371,7 +371,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
63238                 unsigned long *total_in, unsigned long *total_out)
63240         struct workspace *workspace = list_entry(ws, struct workspace, list);
63241 -       ZSTD_CStream *stream;
63242 +       zstd_cstream *stream;
63243         int ret = 0;
63244         int nr_pages = 0;
63245         struct page *in_page = NULL;  /* The current page to read */
63246 @@ -381,7 +381,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
63247         unsigned long len = *total_out;
63248         const unsigned long nr_dest_pages = *out_pages;
63249         unsigned long max_out = nr_dest_pages * PAGE_SIZE;
63250 -       ZSTD_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
63251 +       zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
63252                                                            len);
63254         *out_pages = 0;
63255 @@ -389,10 +389,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
63256         *total_in = 0;
63258         /* Initialize the stream */
63259 -       stream = ZSTD_initCStream(params, len, workspace->mem,
63260 +       stream = zstd_init_cstream(&params, len, workspace->mem,
63261                         workspace->size);
63262         if (!stream) {
63263 -               pr_warn("BTRFS: ZSTD_initCStream failed\n");
63264 +               pr_warn("BTRFS: zstd_init_cstream failed\n");
63265                 ret = -EIO;
63266                 goto out;
63267         }
63268 @@ -418,11 +418,11 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
63269         while (1) {
63270                 size_t ret2;
63272 -               ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
63273 +               ret2 = zstd_compress_stream(stream, &workspace->out_buf,
63274                                 &workspace->in_buf);
63275 -               if (ZSTD_isError(ret2)) {
63276 -                       pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
63277 -                                       ZSTD_getErrorCode(ret2));
63278 +               if (zstd_is_error(ret2)) {
63279 +                       pr_debug("BTRFS: zstd_compress_stream returned %d\n",
63280 +                                       zstd_get_error_code(ret2));
63281                         ret = -EIO;
63282                         goto out;
63283                 }
63284 @@ -487,10 +487,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
63285         while (1) {
63286                 size_t ret2;
63288 -               ret2 = ZSTD_endStream(stream, &workspace->out_buf);
63289 -               if (ZSTD_isError(ret2)) {
63290 -                       pr_debug("BTRFS: ZSTD_endStream returned %d\n",
63291 -                                       ZSTD_getErrorCode(ret2));
63292 +               ret2 = zstd_end_stream(stream, &workspace->out_buf);
63293 +               if (zstd_is_error(ret2)) {
63294 +                       pr_debug("BTRFS: zstd_end_stream returned %d\n",
63295 +                                       zstd_get_error_code(ret2));
63296                         ret = -EIO;
63297                         goto out;
63298                 }
63299 @@ -550,17 +550,17 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
63300         u64 disk_start = cb->start;
63301         struct bio *orig_bio = cb->orig_bio;
63302         size_t srclen = cb->compressed_len;
63303 -       ZSTD_DStream *stream;
63304 +       zstd_dstream *stream;
63305         int ret = 0;
63306         unsigned long page_in_index = 0;
63307         unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
63308         unsigned long buf_start;
63309         unsigned long total_out = 0;
63311 -       stream = ZSTD_initDStream(
63312 +       stream = zstd_init_dstream(
63313                         ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
63314         if (!stream) {
63315 -               pr_debug("BTRFS: ZSTD_initDStream failed\n");
63316 +               pr_debug("BTRFS: zstd_init_dstream failed\n");
63317                 ret = -EIO;
63318                 goto done;
63319         }
63320 @@ -576,11 +576,11 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
63321         while (1) {
63322                 size_t ret2;
63324 -               ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
63325 +               ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
63326                                 &workspace->in_buf);
63327 -               if (ZSTD_isError(ret2)) {
63328 -                       pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
63329 -                                       ZSTD_getErrorCode(ret2));
63330 +               if (zstd_is_error(ret2)) {
63331 +                       pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
63332 +                                       zstd_get_error_code(ret2));
63333                         ret = -EIO;
63334                         goto done;
63335                 }
63336 @@ -626,17 +626,17 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
63337                 size_t destlen)
63339         struct workspace *workspace = list_entry(ws, struct workspace, list);
63340 -       ZSTD_DStream *stream;
63341 +       zstd_dstream *stream;
63342         int ret = 0;
63343         size_t ret2;
63344         unsigned long total_out = 0;
63345         unsigned long pg_offset = 0;
63346         char *kaddr;
63348 -       stream = ZSTD_initDStream(
63349 +       stream = zstd_init_dstream(
63350                         ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
63351         if (!stream) {
63352 -               pr_warn("BTRFS: ZSTD_initDStream failed\n");
63353 +               pr_warn("BTRFS: zstd_init_dstream failed\n");
63354                 ret = -EIO;
63355                 goto finish;
63356         }
63357 @@ -660,15 +660,15 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
63359                 /* Check if the frame is over and we still need more input */
63360                 if (ret2 == 0) {
63361 -                       pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
63362 +                       pr_debug("BTRFS: zstd_decompress_stream ended early\n");
63363                         ret = -EIO;
63364                         goto finish;
63365                 }
63366 -               ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
63367 +               ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
63368                                 &workspace->in_buf);
63369 -               if (ZSTD_isError(ret2)) {
63370 -                       pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
63371 -                                       ZSTD_getErrorCode(ret2));
63372 +               if (zstd_is_error(ret2)) {
63373 +                       pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
63374 +                                       zstd_get_error_code(ret2));
63375                         ret = -EIO;
63376                         goto finish;
63377                 }
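
The old upper-camel ZSTD_* calls map one-to-one onto the lower-case wrappers
from linux/zstd.h. A condensed compression setup using only the wrapper calls
that appear in these hunks (a sketch of the API shape, not the btrfs code;
buffer setup and the page-copy loops are elided):

	static int zstd_sketch(struct workspace *workspace, unsigned int level,
			       size_t src_len)
	{
		zstd_parameters params = zstd_get_params(level, src_len);
		zstd_cstream *stream;
		size_t ret2;

		/* One workspace sized via zstd_cstream_workspace_bound() /
		 * zstd_dstream_workspace_bound() serves every level. */
		stream = zstd_init_cstream(&params, src_len, workspace->mem,
					   workspace->size);
		if (!stream)
			return -EIO;

		do {
			ret2 = zstd_compress_stream(stream, &workspace->out_buf,
						    &workspace->in_buf);
			if (zstd_is_error(ret2))
				return -EIO;
		} while (workspace->in_buf.pos < workspace->in_buf.size);

		ret2 = zstd_end_stream(stream, &workspace->out_buf);
		return zstd_is_error(ret2) ? -EIO : 0;
	}
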
63378 diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
63379 index 570731c4d019..d405ba801492 100644
63380 --- a/fs/ceph/caps.c
63381 +++ b/fs/ceph/caps.c
63382 @@ -1867,6 +1867,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
63383         u32 invalidating_gen = ci->i_rdcache_gen;
63385         spin_unlock(&ci->i_ceph_lock);
63386 +       ceph_fscache_invalidate(inode);
63387         invalidate_mapping_pages(&inode->i_data, 0, -1);
63388         spin_lock(&ci->i_ceph_lock);
63390 diff --git a/fs/ceph/export.c b/fs/ceph/export.c
63391 index e088843a7734..042bb4a02c0a 100644
63392 --- a/fs/ceph/export.c
63393 +++ b/fs/ceph/export.c
63394 @@ -129,6 +129,10 @@ static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
63396         vino.ino = ino;
63397         vino.snap = CEPH_NOSNAP;
63399 +       if (ceph_vino_is_reserved(vino))
63400 +               return ERR_PTR(-ESTALE);
63402         inode = ceph_find_inode(sb, vino);
63403         if (!inode) {
63404                 struct ceph_mds_request *req;
63405 @@ -178,8 +182,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
63406                 return ERR_CAST(inode);
63407         /* We need LINK caps to reliably check i_nlink */
63408         err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false);
63409 -       if (err)
63410 +       if (err) {
63411 +               iput(inode);
63412                 return ERR_PTR(err);
63413 +       }
63414         /* -ESTALE if inode as been unlinked and no file is open */
63415         if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) {
63416                 iput(inode);
63417 @@ -212,6 +218,10 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
63418                 vino.ino = sfh->ino;
63419                 vino.snap = sfh->snapid;
63420         }
63422 +       if (ceph_vino_is_reserved(vino))
63423 +               return ERR_PTR(-ESTALE);
63425         inode = ceph_find_inode(sb, vino);
63426         if (inode)
63427                 return d_obtain_alias(inode);
63428 diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
63429 index 156f849f5385..179d2ef69a24 100644
63430 --- a/fs/ceph/inode.c
63431 +++ b/fs/ceph/inode.c
63432 @@ -56,6 +56,9 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
63434         struct inode *inode;
63436 +       if (ceph_vino_is_reserved(vino))
63437 +               return ERR_PTR(-EREMOTEIO);
63439         inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
63440                              ceph_set_ino_cb, &vino);
63441         if (!inode)
63442 @@ -87,14 +90,15 @@ struct inode *ceph_get_snapdir(struct inode *parent)
63443         inode->i_mtime = parent->i_mtime;
63444         inode->i_ctime = parent->i_ctime;
63445         inode->i_atime = parent->i_atime;
63446 -       inode->i_op = &ceph_snapdir_iops;
63447 -       inode->i_fop = &ceph_snapdir_fops;
63448 -       ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
63449         ci->i_rbytes = 0;
63450         ci->i_btime = ceph_inode(parent)->i_btime;
63452 -       if (inode->i_state & I_NEW)
63453 +       if (inode->i_state & I_NEW) {
63454 +               inode->i_op = &ceph_snapdir_iops;
63455 +               inode->i_fop = &ceph_snapdir_fops;
63456 +               ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
63457                 unlock_new_inode(inode);
63458 +       }
63460         return inode;
63462 @@ -1863,6 +1867,7 @@ static void ceph_do_invalidate_pages(struct inode *inode)
63463         orig_gen = ci->i_rdcache_gen;
63464         spin_unlock(&ci->i_ceph_lock);
63466 +       ceph_fscache_invalidate(inode);
63467         if (invalidate_inode_pages2(inode->i_mapping) < 0) {
63468                 pr_err("invalidate_pages %p fails\n", inode);
63469         }
63470 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
63471 index d87bd852ed96..298cb0b3d28c 100644
63472 --- a/fs/ceph/mds_client.c
63473 +++ b/fs/ceph/mds_client.c
63474 @@ -433,6 +433,13 @@ static int ceph_parse_deleg_inos(void **p, void *end,
63476                 ceph_decode_64_safe(p, end, start, bad);
63477                 ceph_decode_64_safe(p, end, len, bad);
63479 +               /* Don't accept a delegation of system inodes */
63480 +               if (start < CEPH_INO_SYSTEM_BASE) {
63481 +                       pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
63482 +                                       start, len);
63483 +                       continue;
63484 +               }
63485                 while (len--) {
63486                         int err = xa_insert(&s->s_delegated_inos, ino = start++,
63487                                             DELEGATED_INO_AVAILABLE,
63488 diff --git a/fs/ceph/super.h b/fs/ceph/super.h
63489 index c48bb30c8d70..1d2fe70439bd 100644
63490 --- a/fs/ceph/super.h
63491 +++ b/fs/ceph/super.h
63492 @@ -529,10 +529,34 @@ static inline int ceph_ino_compare(struct inode *inode, void *data)
63493                 ci->i_vino.snap == pvino->snap;
63494  }
63496 +/*
63497 + * The MDS reserves a set of inodes for its own usage. These should never
63498 + * be accessible by clients, and so the MDS has no reason to ever hand these
63499 + * out. The range is CEPH_MDS_INO_MDSDIR_OFFSET..CEPH_INO_SYSTEM_BASE.
63500 + *
63501 + * These come from src/mds/mdstypes.h in the ceph sources.
63502 + */
63503 +#define CEPH_MAX_MDS           0x100
63504 +#define CEPH_NUM_STRAY         10
63505 +#define CEPH_MDS_INO_MDSDIR_OFFSET     (1 * CEPH_MAX_MDS)
63506 +#define CEPH_INO_SYSTEM_BASE           ((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))
63508 +static inline bool ceph_vino_is_reserved(const struct ceph_vino vino)
63510 +       if (vino.ino < CEPH_INO_SYSTEM_BASE &&
63511 +           vino.ino >= CEPH_MDS_INO_MDSDIR_OFFSET) {
63512 +               WARN_RATELIMIT(1, "Attempt to access reserved inode number 0x%llx", vino.ino);
63513 +               return true;
63514 +       }
63515 +       return false;
63518  static inline struct inode *ceph_find_inode(struct super_block *sb,
63519                                             struct ceph_vino vino)
63521 +       if (ceph_vino_is_reserved(vino))
63522 +               return NULL;
63524         /*
63525          * NB: The hashval will be run through the fs/inode.c hash function
63526          * anyway, so there is no need to squash the inode number down to
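
The ceph changes above funnel NFS export lookups, ceph_get_inode() and ceph_find_inode() through the new ceph_vino_is_reserved() helper, which refuses the MDS-private inode window [CEPH_MDS_INO_MDSDIR_OFFSET, CEPH_INO_SYSTEM_BASE). A minimal userspace sketch of the same range test, reusing the constants from the hunk (struct vino below is a stand-in, not the kernel's struct ceph_vino):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CEPH_MAX_MDS               0x100
#define CEPH_NUM_STRAY             10
#define CEPH_MDS_INO_MDSDIR_OFFSET (1 * CEPH_MAX_MDS)
#define CEPH_INO_SYSTEM_BASE       ((6 * CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))

struct vino { uint64_t ino; };  /* stand-in for struct ceph_vino */

static bool vino_is_reserved(struct vino v)
{
        /* With the constants above the reserved window is [0x100, 0x1000). */
        return v.ino >= CEPH_MDS_INO_MDSDIR_OFFSET &&
               v.ino < CEPH_INO_SYSTEM_BASE;
}

int main(void)
{
        uint64_t samples[] = { 0x1, 0x100, 0xfff, 0x1000 };

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                struct vino v = { samples[i] };
                printf("ino 0x%llx reserved: %d\n",
                       (unsigned long long)v.ino, vino_is_reserved(v));
        }
        return 0;
}

Client-visible inodes, including the root at ino 1, sit outside that window, so only a buggy or hostile MDS can trip the check.
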
63527 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
63528 index 5ddd20b62484..fa896a1c8b07 100644
63529 --- a/fs/cifs/cifsfs.c
63530 +++ b/fs/cifs/cifsfs.c
63531 @@ -834,7 +834,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
63532                 goto out;
63533         }
63535 -       rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, old_ctx->UNC);
63536 +       rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
63537         if (rc) {
63538                 root = ERR_PTR(rc);
63539                 goto out;
63540 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
63541 index 24668eb006c6..3d62d52d730b 100644
63542 --- a/fs/cifs/connect.c
63543 +++ b/fs/cifs/connect.c
63544 @@ -488,6 +488,7 @@ server_unresponsive(struct TCP_Server_Info *server)
63545          */
63546         if ((server->tcpStatus == CifsGood ||
63547             server->tcpStatus == CifsNeedNegotiate) &&
63548 +           (!server->ops->can_echo || server->ops->can_echo(server)) &&
63549             time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
63550                 cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
63551                          (3 * server->echo_interval) / HZ);
63552 @@ -3175,17 +3176,29 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
63553  int
63554  cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
63556 -       int rc = 0;
63557 +       int rc;
63559 -       smb3_parse_devname(devname, ctx);
63560 +       if (devname) {
63561 +               cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
63562 +               rc = smb3_parse_devname(devname, ctx);
63563 +               if (rc) {
63564 +                       cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
63565 +                       return rc;
63566 +               }
63567 +       }
63569         if (mntopts) {
63570                 char *ip;
63572 -               cifs_dbg(FYI, "%s: mntopts=%s\n", __func__, mntopts);
63573                 rc = smb3_parse_opt(mntopts, "ip", &ip);
63574 -               if (!rc && !cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip,
63575 -                                                strlen(ip))) {
63576 +               if (rc) {
63577 +                       cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
63578 +                       return rc;
63579 +               }
63581 +               rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
63582 +               kfree(ip);
63583 +               if (!rc) {
63584                         cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
63585                         return -EINVAL;
63586                 }
63587 @@ -3205,7 +3218,7 @@ cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const c
63588                 return -EINVAL;
63589         }
63591 -       return rc;
63592 +       return 0;
63595  static int
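
The connect.c rework makes cifs_setup_volume_info() honour the return code of smb3_parse_devname() and take ownership of the ip string that smb3_parse_opt() allocates: it is now freed once cifs_convert_address() has consumed it, on every path. A hedged sketch of that own-then-free flow; get_ip_option() and parse_address() are hypothetical stand-ins for the cifs helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in: heap copy of everything after "ip="
 * (a real parser would stop at the next comma). */
static char *get_ip_option(const char *mntopts)
{
        const char *p = strstr(mntopts, "ip=");
        return p ? strdup(p + 3) : NULL;
}

/* Hypothetical stand-in: nonzero on a usable address. */
static int parse_address(const char *ip) { return ip[0] != '\0'; }

static int setup_volume_info(const char *mntopts)
{
        char *ip = get_ip_option(mntopts);
        int ok;

        if (!ip)
                return -1;
        ok = parse_address(ip);
        free(ip);            /* consume, then release: no path leaks it */
        return ok ? 0 : -1;
}

int main(void)
{
        printf("%d\n", setup_volume_info("vers=3.0,ip=192.168.1.5"));
        return 0;
}
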
63596 diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
63597 index 78889024a7ed..a7253eb2e955 100644
63598 --- a/fs/cifs/fs_context.c
63599 +++ b/fs/cifs/fs_context.c
63600 @@ -475,6 +475,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
63602         /* move "pos" up to delimiter or NULL */
63603         pos += len;
63604 +       kfree(ctx->UNC);
63605         ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL);
63606         if (!ctx->UNC)
63607                 return -ENOMEM;
63608 @@ -485,6 +486,9 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
63609         if (*pos == '/' || *pos == '\\')
63610                 pos++;
63612 +       kfree(ctx->prepath);
63613 +       ctx->prepath = NULL;
63615         /* If pos is NULL then no prepath */
63616         if (!*pos)
63617                 return 0;
63618 @@ -995,6 +999,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
63619                         goto cifs_parse_mount_err;
63620                 }
63621                 ctx->max_channels = result.uint_32;
63622 +               /* If more than one channel requested ... they want multichan */
63623 +               if (result.uint_32 > 1)
63624 +                       ctx->multichannel = true;
63625                 break;
63626         case Opt_handletimeout:
63627                 ctx->handle_timeout = result.uint_32;
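
The kfree(ctx->UNC) and kfree(ctx->prepath) additions make smb3_parse_devname() safe to call repeatedly on one context: whatever a previous parse stored is released before the new strings are duplicated. The same idiom in plain C (set_unc() is illustrative, not the cifs API):

#include <stdlib.h>
#include <string.h>

struct ctx { char *unc; char *prepath; };

/* Safe to call any number of times: drops the previous strings first. */
static int set_unc(struct ctx *c, const char *devname, size_t len)
{
        free(c->unc);                 /* free(NULL) is a no-op */
        c->unc = strndup(devname, len);
        if (!c->unc)
                return -1;

        free(c->prepath);             /* reset the dependent field too */
        c->prepath = NULL;
        return 0;
}

int main(void)
{
        struct ctx c = { 0 };

        set_unc(&c, "//srv/share", 11);   /* first parse */
        set_unc(&c, "//srv/other", 11);   /* re-parse: nothing leaks */
        free(c.unc);
        return 0;
}
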
63628 diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
63629 index 63d517b9f2ff..a92a1fb7cb52 100644
63630 --- a/fs/cifs/sess.c
63631 +++ b/fs/cifs/sess.c
63632 @@ -97,6 +97,12 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
63633                 return 0;
63634         }
63636 +       if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
63637 +               cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
63638 +               ses->chan_max = 1;
63639 +               return 0;
63640 +       }
63642         /*
63643          * Make a copy of the iface list at the time and use that
63644          * instead so as to not hold the iface spinlock for opening
63645 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
63646 index f703204fb185..e9a530da4255 100644
63647 --- a/fs/cifs/smb2ops.c
63648 +++ b/fs/cifs/smb2ops.c
63649 @@ -1763,18 +1763,14 @@ smb2_ioctl_query_info(const unsigned int xid,
63650         }
63652   iqinf_exit:
63653 -       kfree(vars);
63654 -       kfree(buffer);
63655 -       SMB2_open_free(&rqst[0]);
63656 -       if (qi.flags & PASSTHRU_FSCTL)
63657 -               SMB2_ioctl_free(&rqst[1]);
63658 -       else
63659 -               SMB2_query_info_free(&rqst[1]);
63661 -       SMB2_close_free(&rqst[2]);
63662 +       cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
63663 +       cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
63664 +       cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
63665         free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
63666         free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
63667         free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
63668 +       kfree(vars);
63669 +       kfree(buffer);
63670         return rc;
63672  e_fault:
63673 @@ -1826,6 +1822,8 @@ smb2_copychunk_range(const unsigned int xid,
63674                         cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
63676                 /* Request server copy to target from src identified by key */
63677 +               kfree(retbuf);
63678 +               retbuf = NULL;
63679                 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
63680                         trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
63681                         true /* is_fsctl */, (char *)pcchunk,
63682 @@ -2232,7 +2230,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
63684         cifs_sb = CIFS_SB(inode->i_sb);
63686 -       utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
63687 +       utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
63688         if (utf16_path == NULL) {
63689                 rc = -ENOMEM;
63690                 goto notify_exit;
63691 @@ -4178,7 +4176,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
63692         }
63693         spin_unlock(&cifs_tcp_ses_lock);
63695 -       return 1;
63696 +       return -EAGAIN;
63698  /*
63699   * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
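
Two memory fixes sit in the smb2ops.c hunks: smb2_ioctl_query_info() now releases the three request buffers with cifs_small_buf_release() and defers kfree(vars) and kfree(buffer) until the response buffers are freed, and the copychunk loop frees the previous pass's retbuf, resetting it to NULL, before SMB2_ioctl() allocates a fresh one. A sketch of the loop pattern; do_ioctl() is a hypothetical stand-in that returns a new allocation each call:

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in: allocates and fills a reply buffer per call. */
static int do_ioctl(char **out, size_t *out_len)
{
        *out = malloc(16);
        if (!*out)
                return -1;
        memset(*out, 0, 16);
        *out_len = 16;
        return 0;
}

int main(void)
{
        char *retbuf = NULL;
        size_t len = 0;

        for (int pass = 0; pass < 4; pass++) {
                free(retbuf);    /* drop last pass's reply ... */
                retbuf = NULL;   /* ... so a failed call cannot double-free */
                if (do_ioctl(&retbuf, &len) < 0)
                        break;
                /* use retbuf[0..len) here */
        }
        free(retbuf);            /* whatever the final pass left behind */
        return 0;
}
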
63700 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
63701 index 2199a9bfae8f..29272d99102c 100644
63702 --- a/fs/cifs/smb2pdu.c
63703 +++ b/fs/cifs/smb2pdu.c
63704 @@ -841,6 +841,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
63705                 req->SecurityMode = 0;
63707         req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
63708 +       if (ses->chan_max > 1)
63709 +               req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
63711         /* ClientGUID must be zero for SMB2.02 dialect */
63712         if (server->vals->protocol_id == SMB20_PROT_ID)
63713 @@ -1032,6 +1034,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
63715         pneg_inbuf->Capabilities =
63716                         cpu_to_le32(server->vals->req_capabilities);
63717 +       if (tcon->ses->chan_max > 1)
63718 +               pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
63720         memcpy(pneg_inbuf->Guid, server->client_guid,
63721                                         SMB2_CLIENT_GUID_SIZE);
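
Both negotiate paths now advertise SMB2_GLOBAL_CAP_MULTI_CHANNEL only when the mount actually configured more than one channel, keeping the two capability words consistent with each other and with ses->chan_max. A sketch of deriving the mask from one predicate (the flag values below are illustrative, not the on-wire constants):

#include <stdint.h>
#include <stdio.h>

#define CAP_BASE          0x00000001u  /* illustrative baseline caps */
#define CAP_MULTI_CHANNEL 0x00000008u  /* illustrative flag value    */

static uint32_t negotiate_caps(unsigned int chan_max)
{
        uint32_t caps = CAP_BASE;

        if (chan_max > 1)             /* claim it only when requested */
                caps |= CAP_MULTI_CHANNEL;
        return caps;
}

int main(void)
{
        printf("caps(1)=%#x caps(4)=%#x\n",
               negotiate_caps(1), negotiate_caps(4));
        return 0;
}
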
63723 diff --git a/fs/dax.c b/fs/dax.c
63724 index b3d27fdc6775..df5485b4bddf 100644
63725 --- a/fs/dax.c
63726 +++ b/fs/dax.c
63727 @@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue {
63728         struct exceptional_entry_key key;
63729  };
63731 +/**
63732 + * enum dax_wake_mode: waitqueue wakeup behaviour
63733 + * @WAKE_ALL: wake all waiters in the waitqueue
63734 + * @WAKE_NEXT: wake only the first waiter in the waitqueue
63735 + */
63736 +enum dax_wake_mode {
63737 +       WAKE_ALL,
63738 +       WAKE_NEXT,
63739 +};
63740 +
63741  static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
63742                 void *entry, struct exceptional_entry_key *key)
63744 @@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
63745   * The important information it's conveying is whether the entry at
63746   * this index used to be a PMD entry.
63747   */
63748 -static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
63749 +static void dax_wake_entry(struct xa_state *xas, void *entry,
63750 +                          enum dax_wake_mode mode)
63752         struct exceptional_entry_key key;
63753         wait_queue_head_t *wq;
63754 @@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
63755          * must be in the waitqueue and the following check will see them.
63756          */
63757         if (waitqueue_active(wq))
63758 -               __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
63759 +               __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
63762  /*
63763 @@ -264,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
63764         finish_wait(wq, &ewait.wait);
63767 -static void put_unlocked_entry(struct xa_state *xas, void *entry)
63768 +static void put_unlocked_entry(struct xa_state *xas, void *entry,
63769 +                              enum dax_wake_mode mode)
63771 -       /* If we were the only waiter woken, wake the next one */
63772         if (entry && !dax_is_conflict(entry))
63773 -               dax_wake_entry(xas, entry, false);
63774 +               dax_wake_entry(xas, entry, mode);
63777  /*
63778 @@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
63779         old = xas_store(xas, entry);
63780         xas_unlock_irq(xas);
63781         BUG_ON(!dax_is_locked(old));
63782 -       dax_wake_entry(xas, entry, false);
63783 +       dax_wake_entry(xas, entry, WAKE_NEXT);
63786  /*
63787 @@ -524,7 +535,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
63789                 dax_disassociate_entry(entry, mapping, false);
63790                 xas_store(xas, NULL);   /* undo the PMD join */
63791 -               dax_wake_entry(xas, entry, true);
63792 +               dax_wake_entry(xas, entry, WAKE_ALL);
63793                 mapping->nrexceptional--;
63794                 entry = NULL;
63795                 xas_set(xas, index);
63796 @@ -622,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
63797                         entry = get_unlocked_entry(&xas, 0);
63798                 if (entry)
63799                         page = dax_busy_page(entry);
63800 -               put_unlocked_entry(&xas, entry);
63801 +               put_unlocked_entry(&xas, entry, WAKE_NEXT);
63802                 if (page)
63803                         break;
63804                 if (++scanned % XA_CHECK_SCHED)
63805 @@ -664,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
63806         mapping->nrexceptional--;
63807         ret = 1;
63808  out:
63809 -       put_unlocked_entry(&xas, entry);
63810 +       put_unlocked_entry(&xas, entry, WAKE_ALL);
63811         xas_unlock_irq(&xas);
63812         return ret;
63814 @@ -937,13 +948,13 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
63815         xas_lock_irq(xas);
63816         xas_store(xas, entry);
63817         xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
63818 -       dax_wake_entry(xas, entry, false);
63819 +       dax_wake_entry(xas, entry, WAKE_NEXT);
63821         trace_dax_writeback_one(mapping->host, index, count);
63822         return ret;
63824   put_unlocked:
63825 -       put_unlocked_entry(xas, entry);
63826 +       put_unlocked_entry(xas, entry, WAKE_NEXT);
63827         return ret;
63830 @@ -1684,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
63831         /* Did we race with someone splitting entry or so? */
63832         if (!entry || dax_is_conflict(entry) ||
63833             (order == 0 && !dax_is_pte_entry(entry))) {
63834 -               put_unlocked_entry(&xas, entry);
63835 +               put_unlocked_entry(&xas, entry, WAKE_NEXT);
63836                 xas_unlock_irq(&xas);
63837                 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
63838                                                       VM_FAULT_NOPAGE);
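
The dax.c series is a readability refactor plus one semantic fix: the bool wake_all flag becomes enum dax_wake_mode, so put_unlocked_entry(&xas, entry, WAKE_NEXT) documents itself where the old trailing false did not, and __dax_invalidate_entry() switches to WAKE_ALL. A sketch of the same bool-to-enum refactor on a toy waitqueue:

#include <stdio.h>

enum wake_mode { WAKE_ALL, WAKE_NEXT };

struct waitq { int sleepers; };  /* toy waitqueue: just a sleeper count */

static void wake(struct waitq *wq, enum wake_mode mode)
{
        int n = (mode == WAKE_ALL) ? wq->sleepers : 1;

        if (n > wq->sleepers)
                n = wq->sleepers;
        wq->sleepers -= n;
        printf("woke %d, %d left\n", n, wq->sleepers);
}

int main(void)
{
        struct waitq wq = { .sleepers = 5 };

        wake(&wq, WAKE_NEXT);   /* reads better than wake(&wq, false) */
        wake(&wq, WAKE_ALL);
        return 0;
}
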
63839 diff --git a/fs/dcache.c b/fs/dcache.c
63840 index 7d24ff7eb206..9deb97404201 100644
63841 --- a/fs/dcache.c
63842 +++ b/fs/dcache.c
63843 @@ -71,7 +71,7 @@
63844   * If no ancestor relationship:
63845   * arbitrary, since it's serialized on rename_lock
63846   */
63847 -int sysctl_vfs_cache_pressure __read_mostly = 100;
63848 +int sysctl_vfs_cache_pressure __read_mostly = 50;
63849  EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
63851  __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
63852 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
63853 index 22e86ae4dd5a..1d252164d97b 100644
63854 --- a/fs/debugfs/inode.c
63855 +++ b/fs/debugfs/inode.c
63856 @@ -35,7 +35,7 @@
63857  static struct vfsmount *debugfs_mount;
63858  static int debugfs_mount_count;
63859  static bool debugfs_registered;
63860 -static unsigned int debugfs_allow = DEFAULT_DEBUGFS_ALLOW_BITS;
63861 +static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
63863  /*
63864   * Don't allow access attributes to be changed whilst the kernel is locked down
63865 diff --git a/fs/dlm/config.c b/fs/dlm/config.c
63866 index 49c5f9407098..88d95d96e36c 100644
63867 --- a/fs/dlm/config.c
63868 +++ b/fs/dlm/config.c
63869 @@ -125,7 +125,7 @@ static ssize_t cluster_cluster_name_store(struct config_item *item,
63870  CONFIGFS_ATTR(cluster_, cluster_name);
63872  static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
63873 -                          int *info_field, bool (*check_cb)(unsigned int x),
63874 +                          int *info_field, int (*check_cb)(unsigned int x),
63875                            const char *buf, size_t len)
63877         unsigned int x;
63878 @@ -137,8 +137,11 @@ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
63879         if (rc)
63880                 return rc;
63882 -       if (check_cb && check_cb(x))
63883 -               return -EINVAL;
63884 +       if (check_cb) {
63885 +               rc = check_cb(x);
63886 +               if (rc)
63887 +                       return rc;
63888 +       }
63890         *cl_field = x;
63891         *info_field = x;
63892 @@ -161,17 +164,53 @@ static ssize_t cluster_##name##_show(struct config_item *item, char *buf)     \
63893  }                                                                             \
63894  CONFIGFS_ATTR(cluster_, name);
63895  
63896 -static bool dlm_check_zero(unsigned int x)
63897 +static int dlm_check_protocol_and_dlm_running(unsigned int x)
63898 +{
63899 +       switch (x) {
63900 +       case 0:
63901 +               /* TCP */
63902 +               break;
63903 +       case 1:
63904 +               /* SCTP */
63905 +               break;
63906 +       default:
63907 +               return -EINVAL;
63908 +       }
63909 +
63910 +       if (dlm_allow_conn)
63911 +               return -EBUSY;
63912 +
63913 +       return 0;
63914 +}
63915 +
63916 +static int dlm_check_zero_and_dlm_running(unsigned int x)
63917 +{
63918 +       if (!x)
63919 +               return -EINVAL;
63920 +
63921 +       if (dlm_allow_conn)
63922 +               return -EBUSY;
63923 +
63924 +       return 0;
63925 +}
63926 +
63927 +static int dlm_check_zero(unsigned int x)
63928  {
63929 -       return !x;
63930 +       if (!x)
63931 +               return -EINVAL;
63932 +
63933 +       return 0;
63934  }
63935  
63936 -static bool dlm_check_buffer_size(unsigned int x)
63937 +static int dlm_check_buffer_size(unsigned int x)
63938  {
63939 -       return (x < DEFAULT_BUFFER_SIZE);
63940 +       if (x < DEFAULT_BUFFER_SIZE)
63941 +               return -EINVAL;
63942 +
63943 +       return 0;
63944  }
63945  
63946 -CLUSTER_ATTR(tcp_port, dlm_check_zero);
63947 +CLUSTER_ATTR(tcp_port, dlm_check_zero_and_dlm_running);
63948  CLUSTER_ATTR(buffer_size, dlm_check_buffer_size);
63949  CLUSTER_ATTR(rsbtbl_size, dlm_check_zero);
63950  CLUSTER_ATTR(recover_timer, dlm_check_zero);
63951 @@ -179,7 +218,7 @@ CLUSTER_ATTR(toss_secs, dlm_check_zero);
63952  CLUSTER_ATTR(scan_secs, dlm_check_zero);
63953  CLUSTER_ATTR(log_debug, NULL);
63954  CLUSTER_ATTR(log_info, NULL);
63955 -CLUSTER_ATTR(protocol, NULL);
63956 +CLUSTER_ATTR(protocol, dlm_check_protocol_and_dlm_running);
63957  CLUSTER_ATTR(mark, NULL);
63958  CLUSTER_ATTR(timewarn_cs, dlm_check_zero);
63959  CLUSTER_ATTR(waitwarn_us, NULL);
63960 @@ -688,6 +727,7 @@ static ssize_t comm_mark_show(struct config_item *item, char *buf)
63961  static ssize_t comm_mark_store(struct config_item *item, const char *buf,
63962                                size_t len)
63964 +       struct dlm_comm *comm;
63965         unsigned int mark;
63966         int rc;
63968 @@ -695,7 +735,15 @@ static ssize_t comm_mark_store(struct config_item *item, const char *buf,
63969         if (rc)
63970                 return rc;
63972 -       config_item_to_comm(item)->mark = mark;
63973 +       if (mark == 0)
63974 +               mark = dlm_config.ci_mark;
63976 +       comm = config_item_to_comm(item);
63977 +       rc = dlm_lowcomms_nodes_set_mark(comm->nodeid, mark);
63978 +       if (rc)
63979 +               return rc;
63981 +       comm->mark = mark;
63982         return len;
63985 @@ -870,24 +918,6 @@ int dlm_comm_seq(int nodeid, uint32_t *seq)
63986         return 0;
63989 -void dlm_comm_mark(int nodeid, unsigned int *mark)
63990 -{
63991 -       struct dlm_comm *cm;
63992 -
63993 -       cm = get_comm(nodeid);
63994 -       if (!cm) {
63995 -               *mark = dlm_config.ci_mark;
63996 -               return;
63997 -       }
63998 -
63999 -       if (cm->mark)
64000 -               *mark = cm->mark;
64001 -       else
64002 -               *mark = dlm_config.ci_mark;
64003 -
64004 -       put_comm(cm);
64005 -}
64006 -
64007  int dlm_our_nodeid(void)
64009         return local_comm ? local_comm->nodeid : 0;
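
Changing the check callbacks from bool to int lets cluster_set() forward the validator's own errno, so a writer to configfs can distinguish an invalid value (-EINVAL) from a change rejected because dlm is already running (-EBUSY). A condensed sketch of that contract:

#include <errno.h>
#include <stdio.h>

static int dlm_running = 1;   /* stand-in for dlm_allow_conn */

static int check_nonzero(unsigned int x)
{
        return x ? 0 : -EINVAL;
}

static int check_nonzero_and_stopped(unsigned int x)
{
        if (!x)
                return -EINVAL;
        if (dlm_running)
                return -EBUSY;   /* distinct cause, distinct errno */
        return 0;
}

static int cluster_set(unsigned int *field, unsigned int x,
                       int (*check)(unsigned int))
{
        int rc = check ? check(x) : 0;

        if (rc)
                return rc;       /* propagate the validator's error */
        *field = x;
        return 0;
}

int main(void)
{
        unsigned int tcp_port = 0;

        printf("%d\n", cluster_set(&tcp_port, 0, check_nonzero));
        printf("%d\n", cluster_set(&tcp_port, 21064, check_nonzero_and_stopped));
        return 0;
}
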
64010 diff --git a/fs/dlm/config.h b/fs/dlm/config.h
64011 index c210250a2581..d2cd4bd20313 100644
64012 --- a/fs/dlm/config.h
64013 +++ b/fs/dlm/config.h
64014 @@ -48,7 +48,6 @@ void dlm_config_exit(void);
64015  int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
64016                      int *count_out);
64017  int dlm_comm_seq(int nodeid, uint32_t *seq);
64018 -void dlm_comm_mark(int nodeid, unsigned int *mark);
64019  int dlm_our_nodeid(void);
64020  int dlm_our_addr(struct sockaddr_storage *addr, int num);
64022 diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
64023 index d6bbccb0ed15..d5bd990bcab8 100644
64024 --- a/fs/dlm/debug_fs.c
64025 +++ b/fs/dlm/debug_fs.c
64026 @@ -542,6 +542,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
64028                 if (bucket >= ls->ls_rsbtbl_size) {
64029                         kfree(ri);
64030 +                       ++*pos;
64031                         return NULL;
64032                 }
64033                 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
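
The one-line ++*pos in table_seq_next() matters because the seq_file contract requires .next to advance the position even when it returns NULL; without it, the core keeps calling .next for the same position and a read of the debugfs file can spin on the last hash bucket. A compliant iterator in miniature, with a fixed array standing in for the rsb tables:

#include <stddef.h>
#include <stdio.h>

static int items[] = { 10, 20, 30 };
#define NITEMS (sizeof(items) / sizeof(items[0]))

/* seq_file-style next(): bump *pos even at end-of-sequence. */
static int *seq_next(long long *pos)
{
        ++*pos;                 /* advance unconditionally */
        if ((size_t)*pos >= NITEMS)
                return NULL;    /* done, and pos has still moved on */
        return &items[*pos];
}

int main(void)
{
        long long pos = -1;
        int *it;

        while ((it = seq_next(&pos)))
                printf("pos=%lld val=%d\n", pos, *it);
        return 0;
}
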
64034 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
64035 index 561dcad08ad6..c14cf2b7faab 100644
64036 --- a/fs/dlm/lockspace.c
64037 +++ b/fs/dlm/lockspace.c
64038 @@ -404,12 +404,6 @@ static int threads_start(void)
64039         return error;
64042 -static void threads_stop(void)
64043 -{
64044 -       dlm_scand_stop();
64045 -       dlm_lowcomms_stop();
64046 -}
64047 -
64048  static int new_lockspace(const char *name, const char *cluster,
64049                          uint32_t flags, int lvblen,
64050                          const struct dlm_lockspace_ops *ops, void *ops_arg,
64051 @@ -702,8 +696,11 @@ int dlm_new_lockspace(const char *name, const char *cluster,
64052                 ls_count++;
64053         if (error > 0)
64054                 error = 0;
64055 -       if (!ls_count)
64056 -               threads_stop();
64057 +       if (!ls_count) {
64058 +               dlm_scand_stop();
64059 +               dlm_lowcomms_shutdown();
64060 +               dlm_lowcomms_stop();
64061 +       }
64062   out:
64063         mutex_unlock(&ls_lock);
64064         return error;
64065 @@ -788,6 +785,11 @@ static int release_lockspace(struct dlm_ls *ls, int force)
64067         dlm_recoverd_stop(ls);
64069 +       if (ls_count == 1) {
64070 +               dlm_scand_stop();
64071 +               dlm_lowcomms_shutdown();
64072 +       }
64074         dlm_callback_stop(ls);
64076         remove_lockspace(ls);
64077 @@ -880,7 +882,7 @@ int dlm_release_lockspace(void *lockspace, int force)
64078         if (!error)
64079                 ls_count--;
64080         if (!ls_count)
64081 -               threads_stop();
64082 +               dlm_lowcomms_stop();
64083         mutex_unlock(&ls_lock);
64085         return error;
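
Teardown becomes two-phase here: when the last lockspace goes away, dlm_scand_stop() plus the new dlm_lowcomms_shutdown() quiesce all traffic first, and dlm_lowcomms_stop() only frees the connection state later, from dlm_release_lockspace(). A sketch of the ordering; quiesce() and teardown() are hypothetical labels for those two phases:

#include <stdio.h>

static int ls_count = 1;

/* Phase 1 (dlm_lowcomms_shutdown): stop traffic, flush, close listener. */
static void quiesce(void)  { printf("quiesce: no new I/O from here on\n"); }

/* Phase 2 (dlm_lowcomms_stop): free connections and workqueues. */
static void teardown(void) { printf("teardown: release resources\n"); }

static void release_lockspace(void)
{
        if (ls_count == 1)
                quiesce();      /* network is quiet before cleanup runs */
        /* ... per-lockspace cleanup happens here ... */
        if (--ls_count == 0)
                teardown();     /* safe: nothing references the conns now */
}

int main(void)
{
        release_lockspace();
        return 0;
}
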
64086 diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
64087 index 372c34ff8594..45c2fdaf34c4 100644
64088 --- a/fs/dlm/lowcomms.c
64089 +++ b/fs/dlm/lowcomms.c
64090 @@ -116,6 +116,7 @@ struct writequeue_entry {
64091  struct dlm_node_addr {
64092         struct list_head list;
64093         int nodeid;
64094 +       int mark;
64095         int addr_count;
64096         int curr_addr_index;
64097         struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
64098 @@ -134,7 +135,7 @@ static DEFINE_SPINLOCK(dlm_node_addrs_spin);
64099  static struct listen_connection listen_con;
64100  static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
64101  static int dlm_local_count;
64102 -static int dlm_allow_conn;
64103 +int dlm_allow_conn;
64105  /* Work queues */
64106  static struct workqueue_struct *recv_workqueue;
64107 @@ -303,7 +304,8 @@ static int addr_compare(const struct sockaddr_storage *x,
64110  static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
64111 -                         struct sockaddr *sa_out, bool try_new_addr)
64112 +                         struct sockaddr *sa_out, bool try_new_addr,
64113 +                         unsigned int *mark)
64115         struct sockaddr_storage sas;
64116         struct dlm_node_addr *na;
64117 @@ -331,6 +333,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
64118         if (!na->addr_count)
64119                 return -ENOENT;
64121 +       *mark = na->mark;
64123         if (sas_out)
64124                 memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
64126 @@ -350,7 +354,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
64127         return 0;
64130 -static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
64131 +static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
64132 +                         unsigned int *mark)
64134         struct dlm_node_addr *na;
64135         int rv = -EEXIST;
64136 @@ -364,6 +369,7 @@ static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
64137                 for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
64138                         if (addr_compare(na->addr[addr_i], addr)) {
64139                                 *nodeid = na->nodeid;
64140 +                               *mark = na->mark;
64141                                 rv = 0;
64142                                 goto unlock;
64143                         }
64144 @@ -412,6 +418,7 @@ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
64145                 new_node->nodeid = nodeid;
64146                 new_node->addr[0] = new_addr;
64147                 new_node->addr_count = 1;
64148 +               new_node->mark = dlm_config.ci_mark;
64149                 list_add(&new_node->list, &dlm_node_addrs);
64150                 spin_unlock(&dlm_node_addrs_spin);
64151                 return 0;
64152 @@ -519,6 +526,23 @@ int dlm_lowcomms_connect_node(int nodeid)
64153         return 0;
64154  }
64155  
64156 +int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
64157 +{
64158 +       struct dlm_node_addr *na;
64159 +
64160 +       spin_lock(&dlm_node_addrs_spin);
64161 +       na = find_node_addr(nodeid);
64162 +       if (!na) {
64163 +               spin_unlock(&dlm_node_addrs_spin);
64164 +               return -ENOENT;
64165 +       }
64166 +
64167 +       na->mark = mark;
64168 +       spin_unlock(&dlm_node_addrs_spin);
64169 +
64170 +       return 0;
64171 +}
64172 +
64173  static void lowcomms_error_report(struct sock *sk)
64174  {
64175         struct connection *con;
64176 @@ -685,10 +709,7 @@ static void shutdown_connection(struct connection *con)
64178         int ret;
64180 -       if (cancel_work_sync(&con->swork)) {
64181 -               log_print("canceled swork for node %d", con->nodeid);
64182 -               clear_bit(CF_WRITE_PENDING, &con->flags);
64183 -       }
64184 +       flush_work(&con->swork);
64186         mutex_lock(&con->sock_mutex);
64187         /* nothing to shutdown */
64188 @@ -867,7 +888,7 @@ static int accept_from_sock(struct listen_connection *con)
64190         /* Get the new node's NODEID */
64191         make_sockaddr(&peeraddr, 0, &len);
64192 -       if (addr_to_nodeid(&peeraddr, &nodeid)) {
64193 +       if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
64194                 unsigned char *b=(unsigned char *)&peeraddr;
64195                 log_print("connect from non cluster node");
64196                 print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, 
64197 @@ -876,9 +897,6 @@ static int accept_from_sock(struct listen_connection *con)
64198                 return -1;
64199         }
64201 -       dlm_comm_mark(nodeid, &mark);
64202 -       sock_set_mark(newsock->sk, mark);
64204         log_print("got connection from %d", nodeid);
64206         /*  Check to see if we already have a connection to this node. This
64207 @@ -892,6 +910,8 @@ static int accept_from_sock(struct listen_connection *con)
64208                 goto accept_err;
64209         }
64211 +       sock_set_mark(newsock->sk, mark);
64213         mutex_lock(&newcon->sock_mutex);
64214         if (newcon->sock) {
64215                 struct connection *othercon = newcon->othercon;
64216 @@ -908,6 +928,7 @@ static int accept_from_sock(struct listen_connection *con)
64217                         result = dlm_con_init(othercon, nodeid);
64218                         if (result < 0) {
64219                                 kfree(othercon);
64220 +                               mutex_unlock(&newcon->sock_mutex);
64221                                 goto accept_err;
64222                         }
64224 @@ -1015,8 +1036,6 @@ static void sctp_connect_to_sock(struct connection *con)
64225         struct socket *sock;
64226         unsigned int mark;
64228 -       dlm_comm_mark(con->nodeid, &mark);
64230         mutex_lock(&con->sock_mutex);
64232         /* Some odd races can cause double-connects, ignore them */
64233 @@ -1029,7 +1048,7 @@ static void sctp_connect_to_sock(struct connection *con)
64234         }
64236         memset(&daddr, 0, sizeof(daddr));
64237 -       result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
64238 +       result = nodeid_to_addr(con->nodeid, &daddr, NULL, true, &mark);
64239         if (result < 0) {
64240                 log_print("no address for nodeid %d", con->nodeid);
64241                 goto out;
64242 @@ -1104,13 +1123,11 @@ static void sctp_connect_to_sock(struct connection *con)
64243  static void tcp_connect_to_sock(struct connection *con)
64245         struct sockaddr_storage saddr, src_addr;
64246 +       unsigned int mark;
64247         int addr_len;
64248         struct socket *sock = NULL;
64249 -       unsigned int mark;
64250         int result;
64252 -       dlm_comm_mark(con->nodeid, &mark);
64254         mutex_lock(&con->sock_mutex);
64255         if (con->retries++ > MAX_CONNECT_RETRIES)
64256                 goto out;
64257 @@ -1125,15 +1142,15 @@ static void tcp_connect_to_sock(struct connection *con)
64258         if (result < 0)
64259                 goto out_err;
64261 -       sock_set_mark(sock->sk, mark);
64263         memset(&saddr, 0, sizeof(saddr));
64264 -       result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
64265 +       result = nodeid_to_addr(con->nodeid, &saddr, NULL, false, &mark);
64266         if (result < 0) {
64267                 log_print("no address for nodeid %d", con->nodeid);
64268                 goto out_err;
64269         }
64271 +       sock_set_mark(sock->sk, mark);
64273         add_sock(sock, con);
64275         /* Bind to our cluster-known address connecting to avoid
64276 @@ -1355,9 +1372,11 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
64277         struct writequeue_entry *e;
64278         int offset = 0;
64280 -       if (len > LOWCOMMS_MAX_TX_BUFFER_LEN) {
64281 -               BUILD_BUG_ON(PAGE_SIZE < LOWCOMMS_MAX_TX_BUFFER_LEN);
64282 +       if (len > DEFAULT_BUFFER_SIZE ||
64283 +           len < sizeof(struct dlm_header)) {
64284 +               BUILD_BUG_ON(PAGE_SIZE < DEFAULT_BUFFER_SIZE);
64285                 log_print("failed to allocate a buffer of size %d", len);
64286 +               WARN_ON(1);
64287                 return NULL;
64288         }
64290 @@ -1589,6 +1608,29 @@ static int work_start(void)
64291         return 0;
64292  }
64293  
64294 +static void shutdown_conn(struct connection *con)
64295 +{
64296 +       if (con->shutdown_action)
64297 +               con->shutdown_action(con);
64298 +}
64299 +
64300 +void dlm_lowcomms_shutdown(void)
64301 +{
64302 +       /* Set all the flags to prevent any
64303 +        * socket activity.
64304 +        */
64305 +       dlm_allow_conn = 0;
64306 +
64307 +       if (recv_workqueue)
64308 +               flush_workqueue(recv_workqueue);
64309 +       if (send_workqueue)
64310 +               flush_workqueue(send_workqueue);
64311 +
64312 +       dlm_close_sock(&listen_con.sock);
64313 +
64314 +       foreach_conn(shutdown_conn);
64315 +}
64316 +
64317  static void _stop_conn(struct connection *con, bool and_other)
64318  {
64319         mutex_lock(&con->sock_mutex);
64320 @@ -1610,12 +1652,6 @@ static void stop_conn(struct connection *con)
64321         _stop_conn(con, true);
64324 -static void shutdown_conn(struct connection *con)
64325 -{
64326 -       if (con->shutdown_action)
64327 -               con->shutdown_action(con);
64328 -}
64329 -
64330  static void connection_release(struct rcu_head *rcu)
64331  {
64332         struct connection *con = container_of(rcu, struct connection, rcu);
64333 @@ -1672,19 +1708,6 @@ static void work_flush(void)
64334  
64335  void dlm_lowcomms_stop(void)
64336  {
64337 -       /* Set all the flags to prevent any
64338 -          socket activity.
64339 -       */
64340 -       dlm_allow_conn = 0;
64341 -
64342 -       if (recv_workqueue)
64343 -               flush_workqueue(recv_workqueue);
64344 -       if (send_workqueue)
64345 -               flush_workqueue(send_workqueue);
64346 -
64347 -       dlm_close_sock(&listen_con.sock);
64348 -
64349 -       foreach_conn(shutdown_conn);
64350         work_flush();
64351         foreach_conn(free_conn);
64352         work_stop();
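
The lowcomms.c thread of these hunks moves the per-node skb mark into struct dlm_node_addr, so nodeid_to_addr() and addr_to_nodeid() return address and mark from a single locked lookup; that replaces the standalone dlm_comm_mark() call, which could race with connection setup, and lets sock_set_mark() run only after the peer is validated. A sketch of the combined lookup, with a pthread mutex standing in for the spinlock:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct node_addr {
        int nodeid;
        unsigned int mark;   /* per-node SO_MARK value */
        const char *addr;
};

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node_addr nodes[] = { { 1, 0, "10.0.0.1" }, { 2, 7, "10.0.0.2" } };

/* One critical section yields both values, so they can never skew. */
static int nodeid_to_addr(int nodeid, const char **addr, unsigned int *mark)
{
        int rv = -ENOENT;

        pthread_mutex_lock(&node_lock);
        for (size_t i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
                if (nodes[i].nodeid == nodeid) {
                        *addr = nodes[i].addr;
                        *mark = nodes[i].mark;
                        rv = 0;
                        break;
                }
        }
        pthread_mutex_unlock(&node_lock);
        return rv;
}

int main(void)
{
        const char *addr;
        unsigned int mark;

        if (!nodeid_to_addr(2, &addr, &mark))
                printf("node 2: %s mark=%u\n", addr, mark);
        return 0;
}
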
64353 diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
64354 index 0918f9376489..48bbc4e18761 100644
64355 --- a/fs/dlm/lowcomms.h
64356 +++ b/fs/dlm/lowcomms.h
64357 @@ -14,13 +14,18 @@
64359  #define LOWCOMMS_MAX_TX_BUFFER_LEN     4096
64361 +/* switch to check if dlm is running */
64362 +extern int dlm_allow_conn;
64364  int dlm_lowcomms_start(void);
64365 +void dlm_lowcomms_shutdown(void);
64366  void dlm_lowcomms_stop(void);
64367  void dlm_lowcomms_exit(void);
64368  int dlm_lowcomms_close(int nodeid);
64369  void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc);
64370  void dlm_lowcomms_commit_buffer(void *mh);
64371  int dlm_lowcomms_connect_node(int nodeid);
64372 +int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark);
64373  int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
64375  #endif                         /* __LOWCOMMS_DOT_H__ */
64376 diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
64377 index fde3a6afe4be..0bedfa8606a2 100644
64378 --- a/fs/dlm/midcomms.c
64379 +++ b/fs/dlm/midcomms.c
64380 @@ -49,9 +49,10 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
64381                  * cannot deliver this message to upper layers
64382                  */
64383                 msglen = get_unaligned_le16(&hd->h_length);
64384 -               if (msglen > DEFAULT_BUFFER_SIZE) {
64385 -                       log_print("received invalid length header: %u, will abort message parsing",
64386 -                                 msglen);
64387 +               if (msglen > DEFAULT_BUFFER_SIZE ||
64388 +                   msglen < sizeof(struct dlm_header)) {
64389 +                       log_print("received invalid length header: %u from node %d, will abort message parsing",
64390 +                                 msglen, nodeid);
64391                         return -EBADMSG;
64392                 }
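
dlm_process_incoming_buffer() now bounds msglen from below as well as above: a header shorter than struct dlm_header would otherwise send the parser past the buffer, and the warning now names the offending node. The two-sided check in isolation, with a stand-in header type:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DEFAULT_BUFFER_SIZE 4096

struct msg_header { uint32_t len; uint16_t type; uint16_t pad; };

static int validate_len(uint32_t msglen)
{
        /* runt: not even a full header; oversize: exceeds the rx buffer */
        if (msglen < sizeof(struct msg_header) ||
            msglen > DEFAULT_BUFFER_SIZE)
                return -EBADMSG;
        return 0;
}

int main(void)
{
        printf("%d %d %d\n", validate_len(4),
                             validate_len(64),
                             validate_len(65536));
        return 0;
}
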
64394 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
64395 index 943e523f4c9d..3d8623139538 100644
64396 --- a/fs/ecryptfs/crypto.c
64397 +++ b/fs/ecryptfs/crypto.c
64398 @@ -296,10 +296,8 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
64399         struct extent_crypt_result ecr;
64400         int rc = 0;
64402 -       if (!crypt_stat || !crypt_stat->tfm
64403 -              || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
64404 -               return -EINVAL;
64406 +       BUG_ON(!crypt_stat || !crypt_stat->tfm
64407 +              || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
64408         if (unlikely(ecryptfs_verbosity > 0)) {
64409                 ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
64410                                 crypt_stat->key_size);
64411 diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
64412 index cdf40a54a35d..cf772c72ab2b 100644
64413 --- a/fs/ecryptfs/main.c
64414 +++ b/fs/ecryptfs/main.c
64415 @@ -492,6 +492,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
64416                 goto out;
64417         }
64419 +       if (!dev_name) {
64420 +               rc = -EINVAL;
64421 +               err = "Device name cannot be null";
64422 +               goto out;
64423 +       }
64425         rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid);
64426         if (rc) {
64427                 err = "Error parsing options";
64428 diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
64429 index 9ad1615f4474..e8d04d808fa6 100644
64430 --- a/fs/erofs/erofs_fs.h
64431 +++ b/fs/erofs/erofs_fs.h
64432 @@ -75,6 +75,9 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
64433  #define EROFS_I_VERSION_BIT             0
64434  #define EROFS_I_DATALAYOUT_BIT          1
64436 +#define EROFS_I_ALL    \
64437 +       ((1 << (EROFS_I_DATALAYOUT_BIT + EROFS_I_DATALAYOUT_BITS)) - 1)
64439  /* 32-byte reduced form of an ondisk inode */
64440  struct erofs_inode_compact {
64441         __le16 i_format;        /* inode format hints */
64442 diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
64443 index 119fdce1b520..7ed2d7391692 100644
64444 --- a/fs/erofs/inode.c
64445 +++ b/fs/erofs/inode.c
64446 @@ -44,6 +44,13 @@ static struct page *erofs_read_inode(struct inode *inode,
64447         dic = page_address(page) + *ofs;
64448         ifmt = le16_to_cpu(dic->i_format);
64450 +       if (ifmt & ~EROFS_I_ALL) {
64451 +               erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
64452 +                         ifmt, vi->nid);
64453 +               err = -EOPNOTSUPP;
64454 +               goto err_out;
64455 +       }
64457         vi->datalayout = erofs_inode_datalayout(ifmt);
64458         if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
64459                 erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
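
EROFS_I_ALL collects every i_format bit the driver understands, so erofs_read_inode() can reject an image carrying unknown feature bits before it even decodes the datalayout. The mask idiom on its own (the 3-bit datalayout width is an assumption for this sketch, not taken from the hunk):

#include <stdint.h>
#include <stdio.h>

#define I_VERSION_BIT     0
#define I_DATALAYOUT_BIT  1
#define I_DATALAYOUT_BITS 3   /* assumed field width */

/* Everything the parser knows about: version bit + datalayout field. */
#define I_ALL ((1u << (I_DATALAYOUT_BIT + I_DATALAYOUT_BITS)) - 1)

static int check_format(uint16_t ifmt)
{
        if (ifmt & ~I_ALL) {
                fprintf(stderr, "unsupported i_format %u\n", ifmt);
                return -1;    /* unknown bits set: refuse the inode */
        }
        return 0;
}

int main(void)
{
        printf("%d %d\n", check_format(0x0005), check_format(0x0100));
        return 0;
}
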
64460 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
64461 index 3196474cbe24..e42477fcbfa0 100644
64462 --- a/fs/eventpoll.c
64463 +++ b/fs/eventpoll.c
64464 @@ -657,6 +657,12 @@ static void ep_done_scan(struct eventpoll *ep,
64465          */
64466         list_splice(txlist, &ep->rdllist);
64467         __pm_relax(ep->ws);
64469 +       if (!list_empty(&ep->rdllist)) {
64470 +               if (waitqueue_active(&ep->wq))
64471 +                       wake_up(&ep->wq);
64472 +       }
64474         write_unlock_irq(&ep->lock);
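
ep_done_scan() splices leftover events back onto rdllist, so it has to wake sleepers itself; otherwise a thread that went to sleep during the scan stays blocked on a non-empty ready list. The signal-if-non-empty shape of that fix, as a condvar sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int ready_events;

/* Put unconsumed events back, then wake waiters if any are pending. */
static void done_scan(int returned_events)
{
        pthread_mutex_lock(&lock);
        ready_events += returned_events;
        if (ready_events > 0)
                pthread_cond_broadcast(&cond);   /* no missed wakeup */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        done_scan(2);
        printf("ready=%d\n", ready_events);
        return 0;
}
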
64477 diff --git a/fs/exec.c b/fs/exec.c
64478 index 18594f11c31f..c691d4d7720c 100644
64479 --- a/fs/exec.c
64480 +++ b/fs/exec.c
64481 @@ -1008,6 +1008,7 @@ static int exec_mmap(struct mm_struct *mm)
64482         active_mm = tsk->active_mm;
64483         tsk->active_mm = mm;
64484         tsk->mm = mm;
64485 +       lru_gen_add_mm(mm);
64486         /*
64487          * This prevents preemption while active_mm is being loaded and
64488          * it and mm are being updated, which could cause problems for
64489 @@ -1018,6 +1019,7 @@ static int exec_mmap(struct mm_struct *mm)
64490         if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
64491                 local_irq_enable();
64492         activate_mm(active_mm, mm);
64493 +       lru_gen_switch_mm(active_mm, mm);
64494         if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
64495                 local_irq_enable();
64496         tsk->mm->vmacache_seqnum = 0;
64497 diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
64498 index 761c79c3a4ba..411fb0a8da10 100644
64499 --- a/fs/exfat/balloc.c
64500 +++ b/fs/exfat/balloc.c
64501 @@ -141,10 +141,6 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
64502         kfree(sbi->vol_amap);
64506 - * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
64507 - * the cluster heap.
64508 - */
64509  int exfat_set_bitmap(struct inode *inode, unsigned int clu)
64511         int i, b;
64512 @@ -162,10 +158,6 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
64513         return 0;
64517 - * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
64518 - * the cluster heap.
64519 - */
64520  void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
64522         int i, b;
64523 @@ -186,8 +178,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
64524                 int ret_discard;
64526                 ret_discard = sb_issue_discard(sb,
64527 -                       exfat_cluster_to_sector(sbi, clu +
64528 -                                               EXFAT_RESERVED_CLUSTERS),
64529 +                       exfat_cluster_to_sector(sbi, clu),
64530                         (1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
64532                 if (ret_discard == -EOPNOTSUPP) {
64533 diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
64534 index 7541d0b5d706..eda14f630def 100644
64535 --- a/fs/ext4/fast_commit.c
64536 +++ b/fs/ext4/fast_commit.c
64537 @@ -1088,8 +1088,10 @@ static int ext4_fc_perform_commit(journal_t *journal)
64538                 head.fc_tid = cpu_to_le32(
64539                         sbi->s_journal->j_running_transaction->t_tid);
64540                 if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head),
64541 -                       (u8 *)&head, &crc))
64542 +                       (u8 *)&head, &crc)) {
64543 +                       ret = -ENOSPC;
64544                         goto out;
64545 +               }
64546         }
64548         spin_lock(&sbi->s_fc_lock);
64549 @@ -1734,7 +1736,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
64550                 }
64552                 /* Range is mapped and needs a state change */
64553 -               jbd_debug(1, "Converting from %d to %d %lld",
64554 +               jbd_debug(1, "Converting from %ld to %d %lld",
64555                                 map.m_flags & EXT4_MAP_UNWRITTEN,
64556                         ext4_ext_is_unwritten(ex), map.m_pblk);
64557                 ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
64558 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
64559 index 194f5d00fa32..7924634ab0bf 100644
64560 --- a/fs/ext4/file.c
64561 +++ b/fs/ext4/file.c
64562 @@ -371,15 +371,32 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
64563  static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
64564                                  int error, unsigned int flags)
64566 -       loff_t offset = iocb->ki_pos;
64567 +       loff_t pos = iocb->ki_pos;
64568         struct inode *inode = file_inode(iocb->ki_filp);
64570         if (error)
64571                 return error;
64573 -       if (size && flags & IOMAP_DIO_UNWRITTEN)
64574 -               return ext4_convert_unwritten_extents(NULL, inode,
64575 -                                                     offset, size);
64576 +       if (size && flags & IOMAP_DIO_UNWRITTEN) {
64577 +               error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
64578 +               if (error < 0)
64579 +                       return error;
64580 +       }
64581 +       /*
64582 +        * If we are extending the file, we have to update i_size here before
64583 +        * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
64584 +        * buffered reads could zero out too much from page cache pages. Update
64585 +        * of on-disk size will happen later in ext4_dio_write_iter() where
64586 +        * we have enough information to also perform orphan list handling etc.
64587 +        * Note that we perform all extending writes synchronously under
64588 +        * i_rwsem held exclusively so i_size update is safe here in that case.
64589 +        * If the write was not extending, we cannot see pos > i_size here
64590 +        * because operations reducing i_size like truncate wait for all
64591 +        * outstanding DIO before updating i_size.
64592 +        */
64593 +       pos += size;
64594 +       if (pos > i_size_read(inode))
64595 +               i_size_write(inode, pos);
64597         return 0;
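
The comment block in the hunk explains the race: for an extending direct write, i_size must be raised before iomap_dio_rw() invalidates the page cache, or a concurrent buffered read still sees the short size and zero-fills. Reduced to its arithmetic (the single-writer assumption below stands in for i_rwsem held exclusively):

#include <stdint.h>
#include <stdio.h>

static int64_t i_size;   /* in-memory file size */

/* DIO completion hook, called before page-cache invalidation. */
static void dio_end_io(int64_t pos, int64_t size)
{
        pos += size;
        if (pos > i_size)      /* extending write: publish the size now */
                i_size = pos;  /* safe under the exclusive writer lock  */
}

int main(void)
{
        dio_end_io(0, 4096);
        dio_end_io(1024, 1024);   /* non-extending: i_size unchanged */
        printf("i_size=%lld\n", (long long)i_size);
        return 0;
}
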
64599 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
64600 index 633ae7becd61..71d321b3b984 100644
64601 --- a/fs/ext4/ialloc.c
64602 +++ b/fs/ext4/ialloc.c
64603 @@ -1292,7 +1292,8 @@ struct inode *__ext4_new_inode(struct user_namespace *mnt_userns,
64605         ei->i_extra_isize = sbi->s_want_extra_isize;
64606         ei->i_inline_off = 0;
64607 -       if (ext4_has_feature_inline_data(sb))
64608 +       if (ext4_has_feature_inline_data(sb) &&
64609 +           (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
64610                 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
64611         ret = inode;
64612         err = dquot_alloc_inode(inode);
64613 @@ -1513,6 +1514,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
64614         handle_t *handle;
64615         ext4_fsblk_t blk;
64616         int num, ret = 0, used_blks = 0;
64617 +       unsigned long used_inos = 0;
64619         /* This should not happen, but just to be sure check this */
64620         if (sb_rdonly(sb)) {
64621 @@ -1543,22 +1545,37 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
64622          * used inodes so we need to skip blocks with used inodes in
64623          * inode table.
64624          */
64625 -       if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
64626 -               used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
64627 -                           ext4_itable_unused_count(sb, gdp)),
64628 -                           sbi->s_inodes_per_block);
64630 -       if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
64631 -           ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
64632 -                              ext4_itable_unused_count(sb, gdp)) <
64633 -                             EXT4_FIRST_INO(sb)))) {
64634 -               ext4_error(sb, "Something is wrong with group %u: "
64635 -                          "used itable blocks: %d; "
64636 -                          "itable unused count: %u",
64637 -                          group, used_blks,
64638 -                          ext4_itable_unused_count(sb, gdp));
64639 -               ret = 1;
64640 -               goto err_out;
64641 +       if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
64642 +               used_inos = EXT4_INODES_PER_GROUP(sb) -
64643 +                           ext4_itable_unused_count(sb, gdp);
64644 +               used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
64646 +               /* Bogus inode unused count? */
64647 +               if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
64648 +                       ext4_error(sb, "Something is wrong with group %u: "
64649 +                                  "used itable blocks: %d; "
64650 +                                  "itable unused count: %u",
64651 +                                  group, used_blks,
64652 +                                  ext4_itable_unused_count(sb, gdp));
64653 +                       ret = 1;
64654 +                       goto err_out;
64655 +               }
64657 +               used_inos += group * EXT4_INODES_PER_GROUP(sb);
64658 +               /*
64659 +                * Are there some uninitialized inodes in the inode table
64660 +                * before the first normal inode?
64661 +                */
64662 +               if ((used_blks != sbi->s_itb_per_group) &&
64663 +                    (used_inos < EXT4_FIRST_INO(sb))) {
64664 +                       ext4_error(sb, "Something is wrong with group %u: "
64665 +                                  "itable unused count: %u; "
64666 +                                  "itables initialized count: %ld",
64667 +                                  group, ext4_itable_unused_count(sb, gdp),
64668 +                                  used_inos);
64669 +                       ret = 1;
64670 +                       goto err_out;
64671 +               }
64672         }
64674         blk = ext4_inode_table(sb, gdp) + used_blks;
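
The rewritten ialloc check derives used_blks by rounding the in-use inode count up to whole itable blocks, and only flags inodes below EXT4_FIRST_INO as corruption when the table is not yet fully initialized. The rounding step by itself:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long inodes_per_group = 8192, itable_unused = 8000;
        unsigned long inodes_per_block = 16;
        unsigned long used_inos = inodes_per_group - itable_unused; /* 192 */

        /* 192 inodes at 16 per block occupy ceil(192/16) = 12 blocks. */
        printf("used_blks=%lu\n", DIV_ROUND_UP(used_inos, inodes_per_block));
        return 0;
}
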
64675 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
64676 index a2cf35066f46..0796bfa72829 100644
64677 --- a/fs/ext4/ioctl.c
64678 +++ b/fs/ext4/ioctl.c
64679 @@ -315,6 +315,12 @@ static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
64680  static bool dax_compatible(struct inode *inode, unsigned int oldflags,
64681                            unsigned int flags)
64683 +       /* Allow the DAX flag to be changed on inline directories */
64684 +       if (S_ISDIR(inode->i_mode)) {
64685 +               flags &= ~EXT4_INLINE_DATA_FL;
64686 +               oldflags &= ~EXT4_INLINE_DATA_FL;
64687 +       }
64689         if (flags & EXT4_DAX_FL) {
64690                 if ((oldflags & EXT4_DAX_MUT_EXCL) ||
64691                      ext4_test_inode_state(inode,
64692 diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
64693 index 795c3ff2907c..68fbeedd627b 100644
64694 --- a/fs/ext4/mmp.c
64695 +++ b/fs/ext4/mmp.c
64696 @@ -56,7 +56,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
64697         wait_on_buffer(bh);
64698         sb_end_write(sb);
64699         if (unlikely(!buffer_uptodate(bh)))
64700 -               return 1;
64701 +               return -EIO;
64703         return 0;
64705 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
64706 index b9693680463a..77c1cb258262 100644
64707 --- a/fs/ext4/super.c
64708 +++ b/fs/ext4/super.c
64709 @@ -667,9 +667,6 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
64710                         ext4_commit_super(sb);
64711         }
64713 -       if (sb_rdonly(sb) || continue_fs)
64714 -               return;
64716         /*
64717          * We force ERRORS_RO behavior when system is rebooting. Otherwise we
64718          * could panic during 'reboot -f' as the underlying device got already
64719 @@ -679,6 +676,10 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
64720                 panic("EXT4-fs (device %s): panic forced after error\n",
64721                         sb->s_id);
64722         }
64724 +       if (sb_rdonly(sb) || continue_fs)
64725 +               return;
64727         ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
64728         /*
64729          * Make sure updated value of ->s_mount_flags will be visible before
64730 @@ -3023,9 +3024,6 @@ static void ext4_orphan_cleanup(struct super_block *sb,
64731                 sb->s_flags &= ~SB_RDONLY;
64732         }
64733  #ifdef CONFIG_QUOTA
64734 -       /* Needed for iput() to work correctly and not trash data */
64735 -       sb->s_flags |= SB_ACTIVE;
64737         /*
64738          * Turn on quotas which were not enabled for read-only mounts if
64739          * filesystem has quota feature, so that they are updated correctly.
64740 @@ -5561,8 +5559,10 @@ static int ext4_commit_super(struct super_block *sb)
64741         struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
64742         int error = 0;
64744 -       if (!sbh || block_device_ejected(sb))
64745 -               return error;
64746 +       if (!sbh)
64747 +               return -EINVAL;
64748 +       if (block_device_ejected(sb))
64749 +               return -ENODEV;
64751         ext4_update_super(sb);
64753 diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
64754 index 77fa342de38f..582b11afb0d5 100644
64755 --- a/fs/f2fs/compress.c
64756 +++ b/fs/f2fs/compress.c
64757 @@ -123,19 +123,6 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
64758         f2fs_drop_rpages(cc, len, true);
64761 -static void f2fs_put_rpages_mapping(struct address_space *mapping,
64762 -                               pgoff_t start, int len)
64763 -{
64764 -       int i;
64765 -
64766 -       for (i = 0; i < len; i++) {
64767 -               struct page *page = find_get_page(mapping, start + i);
64768 -
64769 -               put_page(page);
64770 -               put_page(page);
64771 -       }
64772 -}
64773 -
64774  static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
64775                 struct writeback_control *wbc, bool redirty, int unlock)
64777 @@ -164,13 +151,14 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
64778         return cc->rpages ? 0 : -ENOMEM;
64781 -void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
64782 +void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
64784         page_array_free(cc->inode, cc->rpages, cc->cluster_size);
64785         cc->rpages = NULL;
64786         cc->nr_rpages = 0;
64787         cc->nr_cpages = 0;
64788 -       cc->cluster_idx = NULL_CLUSTER;
64789 +       if (!reuse)
64790 +               cc->cluster_idx = NULL_CLUSTER;
64793  void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
64794 @@ -351,8 +339,8 @@ static const struct f2fs_compress_ops f2fs_lz4_ops = {
64796  static int zstd_init_compress_ctx(struct compress_ctx *cc)
64798 -       ZSTD_parameters params;
64799 -       ZSTD_CStream *stream;
64800 +       zstd_parameters params;
64801 +       zstd_cstream *stream;
64802         void *workspace;
64803         unsigned int workspace_size;
64804         unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
64805 @@ -361,17 +349,17 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
64806         if (!level)
64807                 level = F2FS_ZSTD_DEFAULT_CLEVEL;
64809 -       params = ZSTD_getParams(level, cc->rlen, 0);
64810 -       workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
64811 +       params = zstd_get_params(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen);
64812 +       workspace_size = zstd_cstream_workspace_bound(&params.cParams);
64814         workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
64815                                         workspace_size, GFP_NOFS);
64816         if (!workspace)
64817                 return -ENOMEM;
64819 -       stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
64820 +       stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
64821         if (!stream) {
64822 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
64823 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
64824                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
64825                                 __func__);
64826                 kvfree(workspace);
64827 @@ -394,9 +382,9 @@ static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
64829  static int zstd_compress_pages(struct compress_ctx *cc)
64831 -       ZSTD_CStream *stream = cc->private2;
64832 -       ZSTD_inBuffer inbuf;
64833 -       ZSTD_outBuffer outbuf;
64834 +       zstd_cstream *stream = cc->private2;
64835 +       zstd_in_buffer inbuf;
64836 +       zstd_out_buffer outbuf;
64837         int src_size = cc->rlen;
64838         int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
64839         int ret;
64840 @@ -409,19 +397,19 @@ static int zstd_compress_pages(struct compress_ctx *cc)
64841         outbuf.dst = cc->cbuf->cdata;
64842         outbuf.size = dst_size;
64844 -       ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
64845 -       if (ZSTD_isError(ret)) {
64846 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
64847 +       ret = zstd_compress_stream(stream, &outbuf, &inbuf);
64848 +       if (zstd_is_error(ret)) {
64849 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
64850                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
64851 -                               __func__, ZSTD_getErrorCode(ret));
64852 +                               __func__, zstd_get_error_code(ret));
64853                 return -EIO;
64854         }
64856 -       ret = ZSTD_endStream(stream, &outbuf);
64857 -       if (ZSTD_isError(ret)) {
64858 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
64859 +       ret = zstd_end_stream(stream, &outbuf);
64860 +       if (zstd_is_error(ret)) {
64861 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
64862                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
64863 -                               __func__, ZSTD_getErrorCode(ret));
64864 +                               __func__, zstd_get_error_code(ret));
64865                 return -EIO;
64866         }
64868 @@ -438,22 +426,22 @@ static int zstd_compress_pages(struct compress_ctx *cc)
64870  static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
64872 -       ZSTD_DStream *stream;
64873 +       zstd_dstream *stream;
64874         void *workspace;
64875         unsigned int workspace_size;
64876         unsigned int max_window_size =
64877                         MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
64879 -       workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
64880 +       workspace_size = zstd_dstream_workspace_bound(max_window_size);
64882         workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
64883                                         workspace_size, GFP_NOFS);
64884         if (!workspace)
64885                 return -ENOMEM;
64887 -       stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
64888 +       stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
64889         if (!stream) {
64890 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
64891 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
64892                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
64893                                 __func__);
64894                 kvfree(workspace);
64895 @@ -475,9 +463,9 @@ static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
64897  static int zstd_decompress_pages(struct decompress_io_ctx *dic)
64899 -       ZSTD_DStream *stream = dic->private2;
64900 -       ZSTD_inBuffer inbuf;
64901 -       ZSTD_outBuffer outbuf;
64902 +       zstd_dstream *stream = dic->private2;
64903 +       zstd_in_buffer inbuf;
64904 +       zstd_out_buffer outbuf;
64905         int ret;
64907         inbuf.pos = 0;
64908 @@ -488,11 +476,11 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
64909         outbuf.dst = dic->rbuf;
64910         outbuf.size = dic->rlen;
64912 -       ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
64913 -       if (ZSTD_isError(ret)) {
64914 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
64915 +       ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
64916 +       if (zstd_is_error(ret)) {
64917 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
64918                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
64919 -                               __func__, ZSTD_getErrorCode(ret));
64920 +                               __func__, zstd_get_error_code(ret));
64921                 return -EIO;
64922         }
64924 @@ -1048,7 +1036,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
64925                 }
64927                 if (PageUptodate(page))
64928 -                       unlock_page(page);
64929 +                       f2fs_put_page(page, 1);
64930                 else
64931                         f2fs_compress_ctx_add_page(cc, page);
64932         }
64933 @@ -1058,33 +1046,35 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
64935                 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
64936                                         &last_block_in_bio, false, true);
64937 -               f2fs_destroy_compress_ctx(cc);
64938 +               f2fs_put_rpages(cc);
64939 +               f2fs_destroy_compress_ctx(cc, true);
64940                 if (ret)
64941 -                       goto release_pages;
64942 +                       goto out;
64943                 if (bio)
64944                         f2fs_submit_bio(sbi, bio, DATA);
64946                 ret = f2fs_init_compress_ctx(cc);
64947                 if (ret)
64948 -                       goto release_pages;
64949 +                       goto out;
64950         }
64952         for (i = 0; i < cc->cluster_size; i++) {
64953                 f2fs_bug_on(sbi, cc->rpages[i]);
64955                 page = find_lock_page(mapping, start_idx + i);
64956 -               f2fs_bug_on(sbi, !page);
64957 +               if (!page) {
64958 +                       /* page can be truncated */
64959 +                       goto release_and_retry;
64960 +               }
64962                 f2fs_wait_on_page_writeback(page, DATA, true, true);
64964                 f2fs_compress_ctx_add_page(cc, page);
64965 -               f2fs_put_page(page, 0);
64967                 if (!PageUptodate(page)) {
64968 +release_and_retry:
64969 +                       f2fs_put_rpages(cc);
64970                         f2fs_unlock_rpages(cc, i + 1);
64971 -                       f2fs_put_rpages_mapping(mapping, start_idx,
64972 -                                       cc->cluster_size);
64973 -                       f2fs_destroy_compress_ctx(cc);
64974 +                       f2fs_destroy_compress_ctx(cc, true);
64975                         goto retry;
64976                 }
64977         }
64978 @@ -1115,10 +1105,10 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
64979         }
64981  unlock_pages:
64982 +       f2fs_put_rpages(cc);
64983         f2fs_unlock_rpages(cc, i);
64984 -release_pages:
64985 -       f2fs_put_rpages_mapping(mapping, start_idx, i);
64986 -       f2fs_destroy_compress_ctx(cc);
64987 +       f2fs_destroy_compress_ctx(cc, true);
64988 +out:
64989         return ret;
64992 @@ -1153,7 +1143,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
64993                 set_cluster_dirty(&cc);
64995         f2fs_put_rpages_wbc(&cc, NULL, false, 1);
64996 -       f2fs_destroy_compress_ctx(&cc);
64997 +       f2fs_destroy_compress_ctx(&cc, false);
64999         return first_index;
65001 @@ -1372,7 +1362,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
65002         f2fs_put_rpages(cc);
65003         page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
65004         cc->cpages = NULL;
65005 -       f2fs_destroy_compress_ctx(cc);
65006 +       f2fs_destroy_compress_ctx(cc, false);
65007         return 0;
65009  out_destroy_crypt:
65010 @@ -1383,7 +1373,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
65011         for (i = 0; i < cc->nr_cpages; i++) {
65012                 if (!cc->cpages[i])
65013                         continue;
65014 -               f2fs_put_page(cc->cpages[i], 1);
65015 +               f2fs_compress_free_page(cc->cpages[i]);
65016 +               cc->cpages[i] = NULL;
65017         }
65018  out_put_cic:
65019         kmem_cache_free(cic_entry_slab, cic);
65020 @@ -1533,7 +1524,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
65021         err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
65022         f2fs_put_rpages_wbc(cc, wbc, false, 0);
65023  destroy_out:
65024 -       f2fs_destroy_compress_ctx(cc);
65025 +       f2fs_destroy_compress_ctx(cc, false);
65026         return err;
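For reference, the compress.c hunks above are a mechanical rename from the upstream-style ZSTD_* API to the kernel's zstd_* wrappers; the workspace-based call sequence itself is unchanged. A minimal sketch of that sequence in isolation (not part of the patch; assumes <linux/zstd.h> and <linux/mm.h>, with level 3 chosen arbitrarily):

/* Sketch: one-shot streaming compression via the kernel zstd_* wrappers. */
static int zstd_oneshot_sketch(const void *src, size_t src_len,
			       void *dst, size_t dst_len)
{
	zstd_parameters params = zstd_get_params(3, src_len);
	size_t wksp_size = zstd_cstream_workspace_bound(&params.cParams);
	zstd_in_buffer in = { .src = src, .size = src_len, .pos = 0 };
	zstd_out_buffer out = { .dst = dst, .size = dst_len, .pos = 0 };
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_cstream *cs;
	int err = 0;

	if (!wksp)
		return -ENOMEM;
	cs = zstd_init_cstream(&params, 0, wksp, wksp_size);
	if (!cs || zstd_is_error(zstd_compress_stream(cs, &out, &in)) ||
	    zstd_is_error(zstd_end_stream(cs, &out)))
		err = -EIO;	/* on success, out.pos holds the compressed size */
	kvfree(wksp);
	return err;
}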
65029 diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
65030 index 4e5257c763d0..8804a5d51380 100644
65031 --- a/fs/f2fs/data.c
65032 +++ b/fs/f2fs/data.c
65033 @@ -2276,7 +2276,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
65034                                                         max_nr_pages,
65035                                                         &last_block_in_bio,
65036                                                         rac != NULL, false);
65037 -                               f2fs_destroy_compress_ctx(&cc);
65038 +                               f2fs_destroy_compress_ctx(&cc, false);
65039                                 if (ret)
65040                                         goto set_error_page;
65041                         }
65042 @@ -2321,7 +2321,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
65043                                                         max_nr_pages,
65044                                                         &last_block_in_bio,
65045                                                         rac != NULL, false);
65046 -                               f2fs_destroy_compress_ctx(&cc);
65047 +                               f2fs_destroy_compress_ctx(&cc, false);
65048                         }
65049                 }
65050  #endif
65051 @@ -3022,7 +3022,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
65052                 }
65053         }
65054         if (f2fs_compressed_file(inode))
65055 -               f2fs_destroy_compress_ctx(&cc);
65056 +               f2fs_destroy_compress_ctx(&cc, false);
65057  #endif
65058         if (retry) {
65059                 index = 0;
65060 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
65061 index e2d302ae3a46..f3fabb1edfe9 100644
65062 --- a/fs/f2fs/f2fs.h
65063 +++ b/fs/f2fs/f2fs.h
65064 @@ -3376,6 +3376,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
65065  int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
65066  void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
65067  int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
65068 +bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
65069  void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
65070  void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
65071  void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
65072 @@ -3383,7 +3384,7 @@ void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
65073                         unsigned int *newseg, bool new_sec, int dir);
65074  void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
65075                                         unsigned int start, unsigned int end);
65076 -void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type);
65077 +void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type);
65078  void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
65079  int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
65080  bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
65081 @@ -3547,7 +3548,7 @@ void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
65082  int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
65083  void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
65084  block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
65085 -int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
65086 +int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
65087                         unsigned int segno);
65088  void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
65089  int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
65090 @@ -3949,7 +3950,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
65091  void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
65092  void f2fs_put_page_dic(struct page *page);
65093  int f2fs_init_compress_ctx(struct compress_ctx *cc);
65094 -void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
65095 +void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
65096  void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
65097  int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
65098  void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
65099 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
65100 index d26ff2ae3f5e..dc79694e512c 100644
65101 --- a/fs/f2fs/file.c
65102 +++ b/fs/f2fs/file.c
65103 @@ -1619,9 +1619,10 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
65104         struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
65105                         .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
65106                         .m_may_create = true };
65107 -       pgoff_t pg_end;
65108 +       pgoff_t pg_start, pg_end;
65109         loff_t new_size = i_size_read(inode);
65110         loff_t off_end;
65111 +       block_t expanded = 0;
65112         int err;
65114         err = inode_newsize_ok(inode, (len + offset));
65115 @@ -1634,11 +1635,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
65117         f2fs_balance_fs(sbi, true);
65119 +       pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
65120         pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
65121         off_end = (offset + len) & (PAGE_SIZE - 1);
65123 -       map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
65124 -       map.m_len = pg_end - map.m_lblk;
65125 +       map.m_lblk = pg_start;
65126 +       map.m_len = pg_end - pg_start;
65127         if (off_end)
65128                 map.m_len++;
65130 @@ -1646,19 +1648,15 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
65131                 return 0;
65133         if (f2fs_is_pinned_file(inode)) {
65134 -               block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
65135 -                                       sbi->log_blocks_per_seg;
65136 -               block_t done = 0;
65138 -               if (map.m_len % sbi->blocks_per_seg)
65139 -                       len += sbi->blocks_per_seg;
65140 +               block_t sec_blks = BLKS_PER_SEC(sbi);
65141 +               block_t sec_len = roundup(map.m_len, sec_blks);
65143 -               map.m_len = sbi->blocks_per_seg;
65144 +               map.m_len = sec_blks;
65145  next_alloc:
65146                 if (has_not_enough_free_secs(sbi, 0,
65147                         GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
65148                         down_write(&sbi->gc_lock);
65149 -                       err = f2fs_gc(sbi, true, false, NULL_SEGNO);
65150 +                       err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
65151                         if (err && err != -ENODATA && err != -EAGAIN)
65152                                 goto out_err;
65153                 }
65154 @@ -1666,7 +1664,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
65155                 down_write(&sbi->pin_sem);
65157                 f2fs_lock_op(sbi);
65158 -               f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
65159 +               f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
65160                 f2fs_unlock_op(sbi);
65162                 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
65163 @@ -1674,24 +1672,25 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
65165                 up_write(&sbi->pin_sem);
65167 -               done += map.m_len;
65168 -               len -= map.m_len;
65169 +               expanded += map.m_len;
65170 +               sec_len -= map.m_len;
65171                 map.m_lblk += map.m_len;
65172 -               if (!err && len)
65173 +               if (!err && sec_len)
65174                         goto next_alloc;
65176 -               map.m_len = done;
65177 +               map.m_len = expanded;
65178         } else {
65179                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
65180 +               expanded = map.m_len;
65181         }
65182  out_err:
65183         if (err) {
65184                 pgoff_t last_off;
65186 -               if (!map.m_len)
65187 +               if (!expanded)
65188                         return err;
65190 -               last_off = map.m_lblk + map.m_len - 1;
65191 +               last_off = pg_start + expanded - 1;
65193                 /* update new size to the failed position */
65194                 new_size = (last_off == pg_end) ? offset + len :
65195 @@ -2489,7 +2488,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
65196                 down_write(&sbi->gc_lock);
65197         }
65199 -       ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
65200 +       ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
65201  out:
65202         mnt_drop_write_file(filp);
65203         return ret;
65204 @@ -2525,7 +2524,8 @@ static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
65205                 down_write(&sbi->gc_lock);
65206         }
65208 -       ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start));
65209 +       ret = f2fs_gc(sbi, range->sync, true, false,
65210 +                               GET_SEGNO(sbi, range->start));
65211         if (ret) {
65212                 if (ret == -EBUSY)
65213                         ret = -EAGAIN;
65214 @@ -2978,7 +2978,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
65215                 sm->last_victim[GC_CB] = end_segno + 1;
65216                 sm->last_victim[GC_GREEDY] = end_segno + 1;
65217                 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
65218 -               ret = f2fs_gc(sbi, true, true, start_segno);
65219 +               ret = f2fs_gc(sbi, true, true, true, start_segno);
65220                 if (ret == -EAGAIN)
65221                         ret = 0;
65222                 else if (ret < 0)
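The expand_inode_data() hunks above replace hand-rolled per-segment rounding with section-granular rounding for pinned files and track progress in `expanded` instead of reusing map.m_len. Worked through once (illustrative numbers only; the real divisor comes from BLKS_PER_SEC(sbi)):

	/* Sketch: 512 blocks per section assumed for illustration. */
	block_t sec_blks = 512;				/* BLKS_PER_SEC(sbi) */
	block_t sec_len  = roundup(700, sec_blks);	/* = 1024, two sections */
	/* each next_alloc pass: expanded += map.m_len; sec_len -= map.m_len; */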
65223 diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
65224 index 39330ad3c44e..a8567cb47621 100644
65225 --- a/fs/f2fs/gc.c
65226 +++ b/fs/f2fs/gc.c
65227 @@ -112,7 +112,7 @@ static int gc_thread_func(void *data)
65228                 sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
65230                 /* if return value is not zero, no victim was selected */
65231 -               if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO))
65232 +               if (f2fs_gc(sbi, sync_mode, true, false, NULL_SEGNO))
65233                         wait_ms = gc_th->no_gc_sleep_time;
65235                 trace_f2fs_background_gc(sbi->sb, wait_ms,
65236 @@ -392,10 +392,6 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
65237                 if (p->gc_mode == GC_AT &&
65238                         get_valid_blocks(sbi, segno, true) == 0)
65239                         return;
65241 -               if (p->alloc_mode == AT_SSR &&
65242 -                       get_seg_entry(sbi, segno)->ckpt_valid_blocks == 0)
65243 -                       return;
65244         }
65246         for (i = 0; i < sbi->segs_per_sec; i++)
65247 @@ -728,11 +724,27 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
65249                 if (sec_usage_check(sbi, secno))
65250                         goto next;
65252                 /* Don't touch checkpointed data */
65253 -               if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
65254 -                                       get_ckpt_valid_blocks(sbi, segno) &&
65255 -                                       p.alloc_mode == LFS))
65256 -                       goto next;
65257 +               if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
65258 +                       if (p.alloc_mode == LFS) {
65259 +                               /*
65260 +                                * LFS is set to find source section during GC.
65261 +                                * The victim should have no checkpointed data.
65262 +                                */
65263 +                               if (get_ckpt_valid_blocks(sbi, segno, true))
65264 +                                       goto next;
65265 +                       } else {
65266 +                               /*
65267 +                                * SSR | AT_SSR are set to find target segment
65268 +                                * for writes which can be full by checkpointed
65269 +                                * and newly written blocks.
65270 +                                */
65271 +                               if (!f2fs_segment_has_free_slot(sbi, segno))
65272 +                                       goto next;
65273 +                       }
65274 +               }
65276                 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
65277                         goto next;
65279 @@ -1354,7 +1366,8 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
65280   * the victim data block is ignored.
65281   */
65282  static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
65283 -               struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
65284 +               struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
65285 +               bool force_migrate)
65287         struct super_block *sb = sbi->sb;
65288         struct f2fs_summary *entry;
65289 @@ -1383,8 +1396,8 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
65290                  * race condition along with SSR block allocation.
65291                  */
65292                 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
65293 -                               get_valid_blocks(sbi, segno, true) ==
65294 -                                                       BLKS_PER_SEC(sbi))
65295 +                       (!force_migrate && get_valid_blocks(sbi, segno, true) ==
65296 +                                                       BLKS_PER_SEC(sbi)))
65297                         return submitted;
65299                 if (check_valid_map(sbi, segno, off) == 0)
65300 @@ -1519,7 +1532,8 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
65302  static int do_garbage_collect(struct f2fs_sb_info *sbi,
65303                                 unsigned int start_segno,
65304 -                               struct gc_inode_list *gc_list, int gc_type)
65305 +                               struct gc_inode_list *gc_list, int gc_type,
65306 +                               bool force_migrate)
65308         struct page *sum_page;
65309         struct f2fs_summary_block *sum;
65310 @@ -1606,7 +1620,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
65311                                                                 gc_type);
65312                 else
65313                         submitted += gc_data_segment(sbi, sum->entries, gc_list,
65314 -                                                       segno, gc_type);
65315 +                                                       segno, gc_type,
65316 +                                                       force_migrate);
65318                 stat_inc_seg_count(sbi, type, gc_type);
65319                 migrated++;
65320 @@ -1634,7 +1649,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
65323  int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
65324 -                       bool background, unsigned int segno)
65325 +                       bool background, bool force, unsigned int segno)
65327         int gc_type = sync ? FG_GC : BG_GC;
65328         int sec_freed = 0, seg_freed = 0, total_freed = 0;
65329 @@ -1696,7 +1711,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
65330         if (ret)
65331                 goto stop;
65333 -       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
65334 +       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
65335         if (gc_type == FG_GC &&
65336                 seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
65337                 sec_freed++;
65338 @@ -1835,7 +1850,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
65339                         .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
65340                 };
65342 -               do_garbage_collect(sbi, segno, &gc_list, FG_GC);
65343 +               do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
65344                 put_gc_inode(&gc_list);
65346                 if (!gc_only && get_valid_blocks(sbi, segno, true)) {
65347 @@ -1974,7 +1989,20 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
65349         /* stop CP to protect MAIN_SEC in free_segment_range */
65350         f2fs_lock_op(sbi);
65352 +       spin_lock(&sbi->stat_lock);
65353 +       if (shrunk_blocks + valid_user_blocks(sbi) +
65354 +               sbi->current_reserved_blocks + sbi->unusable_block_count +
65355 +               F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
65356 +               err = -ENOSPC;
65357 +       spin_unlock(&sbi->stat_lock);
65359 +       if (err)
65360 +               goto out_unlock;
65362         err = free_segment_range(sbi, secs, true);
65364 +out_unlock:
65365         f2fs_unlock_op(sbi);
65366         up_write(&sbi->gc_lock);
65367         if (err)
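Two threads run through the gc.c hunks above: a force flag is passed from f2fs_gc() down to gc_data_segment() so that resize and the flush-device ioctl can migrate even fully valid sections, and the checkpoint-disabled victim policy is split by allocation mode. The latter reduces to a predicate like the following (a sketch assembled from identifiers used above, not code taken from the patch):

/* Sketch: victim admissibility when SBI_CP_DISABLED is set.  LFS picks a
 * source section, so it must carry no checkpointed data; SSR/AT_SSR pick
 * a target segment, which only needs one writable slot left. */
static bool victim_ok_cp_disabled(struct f2fs_sb_info *sbi,
				  unsigned int segno, int alloc_mode)
{
	if (alloc_mode == LFS)
		return !get_ckpt_valid_blocks(sbi, segno, true);
	return f2fs_segment_has_free_slot(sbi, segno);
}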
65368 diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
65369 index 993caefcd2bb..92652ca7a7c8 100644
65370 --- a/fs/f2fs/inline.c
65371 +++ b/fs/f2fs/inline.c
65372 @@ -219,7 +219,8 @@ int f2fs_convert_inline_inode(struct inode *inode)
65374         f2fs_put_page(page, 1);
65376 -       f2fs_balance_fs(sbi, dn.node_changed);
65377 +       if (!err)
65378 +               f2fs_balance_fs(sbi, dn.node_changed);
65380         return err;
65382 diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
65383 index 4b0e2e3c2c88..45c8cf1afe66 100644
65384 --- a/fs/f2fs/node.c
65385 +++ b/fs/f2fs/node.c
65386 @@ -2785,6 +2785,9 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
65387                 struct f2fs_nat_entry raw_ne;
65388                 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
65390 +               if (f2fs_check_nid_range(sbi, nid))
65391 +                       continue;
65393                 raw_ne = nat_in_journal(journal, i);
65395                 ne = __lookup_nat_cache(nm_i, nid);
65396 diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
65397 index c2866561263e..bb6d86255741 100644
65398 --- a/fs/f2fs/segment.c
65399 +++ b/fs/f2fs/segment.c
65400 @@ -186,7 +186,10 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
65402         struct inmem_pages *new;
65404 -       f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
65405 +       if (PagePrivate(page))
65406 +               set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
65407 +       else
65408 +               f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
65410         new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
65412 @@ -324,23 +327,27 @@ void f2fs_drop_inmem_pages(struct inode *inode)
65413         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
65414         struct f2fs_inode_info *fi = F2FS_I(inode);
65416 -       while (!list_empty(&fi->inmem_pages)) {
65417 +       do {
65418                 mutex_lock(&fi->inmem_lock);
65419 +               if (list_empty(&fi->inmem_pages)) {
65420 +                       fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
65422 +                       spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
65423 +                       if (!list_empty(&fi->inmem_ilist))
65424 +                               list_del_init(&fi->inmem_ilist);
65425 +                       if (f2fs_is_atomic_file(inode)) {
65426 +                               clear_inode_flag(inode, FI_ATOMIC_FILE);
65427 +                               sbi->atomic_files--;
65428 +                       }
65429 +                       spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
65431 +                       mutex_unlock(&fi->inmem_lock);
65432 +                       break;
65433 +               }
65434                 __revoke_inmem_pages(inode, &fi->inmem_pages,
65435                                                 true, false, true);
65436                 mutex_unlock(&fi->inmem_lock);
65437 -       }
65439 -       fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
65441 -       spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
65442 -       if (!list_empty(&fi->inmem_ilist))
65443 -               list_del_init(&fi->inmem_ilist);
65444 -       if (f2fs_is_atomic_file(inode)) {
65445 -               clear_inode_flag(inode, FI_ATOMIC_FILE);
65446 -               sbi->atomic_files--;
65447 -       }
65448 -       spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
65449 +       } while (1);
65452  void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
65453 @@ -504,7 +511,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
65454          */
65455         if (has_not_enough_free_secs(sbi, 0, 0)) {
65456                 down_write(&sbi->gc_lock);
65457 -               f2fs_gc(sbi, false, false, NULL_SEGNO);
65458 +               f2fs_gc(sbi, false, false, false, NULL_SEGNO);
65459         }
65462 @@ -861,7 +868,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
65463         mutex_lock(&dirty_i->seglist_lock);
65465         valid_blocks = get_valid_blocks(sbi, segno, false);
65466 -       ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);
65467 +       ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
65469         if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
65470                 ckpt_valid_blocks == usable_blocks)) {
65471 @@ -946,7 +953,7 @@ static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
65472         for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
65473                 if (get_valid_blocks(sbi, segno, false))
65474                         continue;
65475 -               if (get_ckpt_valid_blocks(sbi, segno))
65476 +               if (get_ckpt_valid_blocks(sbi, segno, false))
65477                         continue;
65478                 mutex_unlock(&dirty_i->seglist_lock);
65479                 return segno;
65480 @@ -2636,6 +2643,23 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
65481                 seg->next_blkoff++;
65484 +bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
65486 +       struct seg_entry *se = get_seg_entry(sbi, segno);
65487 +       int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
65488 +       unsigned long *target_map = SIT_I(sbi)->tmp_map;
65489 +       unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
65490 +       unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
65491 +       int i, pos;
65493 +       for (i = 0; i < entries; i++)
65494 +               target_map[i] = ckpt_map[i] | cur_map[i];
65496 +       pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, 0);
65498 +       return pos < sbi->blocks_per_seg;
65501  /*
65502   * This function always allocates a used segment(from dirty seglist) by SSR
65503   * manner, so it should recover the existing segment information of valid blocks
65504 @@ -2893,7 +2917,8 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
65505         up_read(&SM_I(sbi)->curseg_lock);
65508 -static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
65509 +static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
65510 +                                                               bool new_sec)
65512         struct curseg_info *curseg = CURSEG_I(sbi, type);
65513         unsigned int old_segno;
65514 @@ -2901,32 +2926,42 @@ static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
65515         if (!curseg->inited)
65516                 goto alloc;
65518 -       if (!curseg->next_blkoff &&
65519 -               !get_valid_blocks(sbi, curseg->segno, false) &&
65520 -               !get_ckpt_valid_blocks(sbi, curseg->segno))
65521 -               return;
65522 +       if (curseg->next_blkoff ||
65523 +               get_valid_blocks(sbi, curseg->segno, new_sec))
65524 +               goto alloc;
65526 +       if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
65527 +               return;
65528  alloc:
65529         old_segno = curseg->segno;
65530         SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
65531         locate_dirty_segment(sbi, old_segno);
65534 -void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type)
65535 +static void __allocate_new_section(struct f2fs_sb_info *sbi, int type)
65537 +       __allocate_new_segment(sbi, type, true);
65540 +void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type)
65542 +       down_read(&SM_I(sbi)->curseg_lock);
65543         down_write(&SIT_I(sbi)->sentry_lock);
65544 -       __allocate_new_segment(sbi, type);
65545 +       __allocate_new_section(sbi, type);
65546         up_write(&SIT_I(sbi)->sentry_lock);
65547 +       up_read(&SM_I(sbi)->curseg_lock);
65550  void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
65552         int i;
65554 +       down_read(&SM_I(sbi)->curseg_lock);
65555         down_write(&SIT_I(sbi)->sentry_lock);
65556         for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
65557 -               __allocate_new_segment(sbi, i);
65558 +               __allocate_new_segment(sbi, i, false);
65559         up_write(&SIT_I(sbi)->sentry_lock);
65560 +       up_read(&SM_I(sbi)->curseg_lock);
65563  static const struct segment_allocation default_salloc_ops = {
65564 @@ -3365,12 +3400,12 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
65565                 f2fs_inode_chksum_set(sbi, page);
65566         }
65568 -       if (F2FS_IO_ALIGNED(sbi))
65569 -               fio->retry = false;
65571         if (fio) {
65572                 struct f2fs_bio_info *io;
65574 +               if (F2FS_IO_ALIGNED(sbi))
65575 +                       fio->retry = false;
65577                 INIT_LIST_HEAD(&fio->list);
65578                 fio->in_list = true;
65579                 io = sbi->write_io[fio->type] + fio->temp;
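f2fs_segment_has_free_slot(), added above, ORs the checkpointed validity bitmap with the in-memory one and then looks for any zero bit. The same pattern with the generic bitmap helpers (a sketch; the patch itself uses f2fs's __find_rev_next_zero_bit and a preallocated tmp_map rather than <linux/bitops.h> directly):

/* Sketch: a block offset is free only if it is clear in BOTH maps. */
static bool has_free_slot(const unsigned long *ckpt_map,
			  const unsigned long *cur_map,
			  unsigned long *tmp_map, unsigned int nbits)
{
	unsigned int i, words = BITS_TO_LONGS(nbits);

	for (i = 0; i < words; i++)
		tmp_map[i] = ckpt_map[i] | cur_map[i];

	/* find_next_zero_bit() returns nbits when no zero bit exists */
	return find_next_zero_bit(tmp_map, nbits, 0) < nbits;
}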
65580 diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
65581 index e9a7a637d688..afb175739de5 100644
65582 --- a/fs/f2fs/segment.h
65583 +++ b/fs/f2fs/segment.h
65584 @@ -361,8 +361,20 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
65587  static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
65588 -                               unsigned int segno)
65589 +                               unsigned int segno, bool use_section)
65591 +       if (use_section && __is_large_section(sbi)) {
65592 +               unsigned int start_segno = START_SEGNO(segno);
65593 +               unsigned int blocks = 0;
65594 +               int i;
65596 +               for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
65597 +                       struct seg_entry *se = get_seg_entry(sbi, start_segno);
65599 +                       blocks += se->ckpt_valid_blocks;
65600 +               }
65601 +               return blocks;
65602 +       }
65603         return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
65606 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
65607 index 82592b19b4e0..3c8426709f34 100644
65608 --- a/fs/f2fs/super.c
65609 +++ b/fs/f2fs/super.c
65610 @@ -525,7 +525,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
65611         if (kstrtouint(str + 1, 10, &level))
65612                 return -EINVAL;
65614 -       if (!level || level > ZSTD_maxCLevel()) {
65615 +       if (!level || level > zstd_max_clevel()) {
65616                 f2fs_info(sbi, "invalid zstd compress level: %d", level);
65617                 return -EINVAL;
65618         }
65619 @@ -1865,7 +1865,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
65621         while (!f2fs_time_over(sbi, DISABLE_TIME)) {
65622                 down_write(&sbi->gc_lock);
65623 -               err = f2fs_gc(sbi, true, false, NULL_SEGNO);
65624 +               err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
65625                 if (err == -ENODATA) {
65626                         err = 0;
65627                         break;
65628 @@ -3929,10 +3929,18 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
65629                  * previous checkpoint was not done by clean system shutdown.
65630                  */
65631                 if (f2fs_hw_is_readonly(sbi)) {
65632 -                       if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))
65633 -                               f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
65634 -                       else
65635 -                               f2fs_info(sbi, "write access unavailable, skipping recovery");
65636 +                       if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
65637 +                               err = f2fs_recover_fsync_data(sbi, true);
65638 +                               if (err > 0) {
65639 +                                       err = -EROFS;
65640 +                                       f2fs_err(sbi, "Need to recover fsync data, but "
65641 +                                               "write access unavailable, please try "
65642 +                                               "mount w/ disable_roll_forward or norecovery");
65643 +                               }
65644 +                               if (err < 0)
65645 +                                       goto free_meta;
65646 +                       }
65647 +                       f2fs_info(sbi, "write access unavailable, skipping recovery");
65648                         goto reset_checkpoint;
65649                 }
65651 diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
65652 index 054ec852b5ea..15ba36926fad 100644
65653 --- a/fs/f2fs/verity.c
65654 +++ b/fs/f2fs/verity.c
65655 @@ -152,40 +152,73 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
65656                                   size_t desc_size, u64 merkle_tree_size)
65658         struct inode *inode = file_inode(filp);
65659 +       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
65660         u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size;
65661         struct fsverity_descriptor_location dloc = {
65662                 .version = cpu_to_le32(F2FS_VERIFY_VER),
65663                 .size = cpu_to_le32(desc_size),
65664                 .pos = cpu_to_le64(desc_pos),
65665         };
65666 -       int err = 0;
65667 +       int err = 0, err2 = 0;
65669 -       if (desc != NULL) {
65670 -               /* Succeeded; write the verity descriptor. */
65671 -               err = pagecache_write(inode, desc, desc_size, desc_pos);
65672 +       /*
65673 +        * If an error already occurred (which fs/verity/ signals by passing
65674 +        * desc == NULL), then only clean-up is needed.
65675 +        */
65676 +       if (desc == NULL)
65677 +               goto cleanup;
65679 -               /* Write all pages before clearing FI_VERITY_IN_PROGRESS. */
65680 -               if (!err)
65681 -                       err = filemap_write_and_wait(inode->i_mapping);
65682 -       }
65683 +       /* Append the verity descriptor. */
65684 +       err = pagecache_write(inode, desc, desc_size, desc_pos);
65685 +       if (err)
65686 +               goto cleanup;
65688 +       /*
65689 +        * Write all pages (both data and verity metadata).  Note that this must
65690 +        * happen before clearing FI_VERITY_IN_PROGRESS; otherwise pages beyond
65691 +        * i_size won't be written properly.  For crash consistency, this also
65692 +        * must happen before the verity inode flag gets persisted.
65693 +        */
65694 +       err = filemap_write_and_wait(inode->i_mapping);
65695 +       if (err)
65696 +               goto cleanup;
65698 +       /* Set the verity xattr. */
65699 +       err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
65700 +                           F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
65701 +                           NULL, XATTR_CREATE);
65702 +       if (err)
65703 +               goto cleanup;
65705 -       /* If we failed, truncate anything we wrote past i_size. */
65706 -       if (desc == NULL || err)
65707 -               f2fs_truncate(inode);
65708 +       /* Finally, set the verity inode flag. */
65709 +       file_set_verity(inode);
65710 +       f2fs_set_inode_flags(inode);
65711 +       f2fs_mark_inode_dirty_sync(inode, true);
65713         clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
65714 +       return 0;
65716 -       if (desc != NULL && !err) {
65717 -               err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
65718 -                                   F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
65719 -                                   NULL, XATTR_CREATE);
65720 -               if (!err) {
65721 -                       file_set_verity(inode);
65722 -                       f2fs_set_inode_flags(inode);
65723 -                       f2fs_mark_inode_dirty_sync(inode, true);
65724 -               }
65725 +cleanup:
65726 +       /*
65727 +        * Verity failed to be enabled, so clean up by truncating any verity
65728 +        * metadata that was written beyond i_size (both from cache and from
65729 +        * disk) and clearing FI_VERITY_IN_PROGRESS.
65730 +        *
65731 +        * Taking i_gc_rwsem[WRITE] is needed to stop f2fs garbage collection
65732 +        * from re-instantiating cached pages we are truncating (since unlike
65733 +        * normal file accesses, garbage collection isn't limited by i_size).
65734 +        */
65735 +       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
65736 +       truncate_inode_pages(inode->i_mapping, inode->i_size);
65737 +       err2 = f2fs_truncate(inode);
65738 +       if (err2) {
65739 +               f2fs_err(sbi, "Truncating verity metadata failed (errno=%d)",
65740 +                        err2);
65741 +               set_sbi_flag(sbi, SBI_NEED_FSCK);
65742         }
65743 -       return err;
65744 +       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
65745 +       clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
65746 +       return err ?: err2;
65749  static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
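The verity.c rewrite above linearizes the enable path (append the descriptor, flush all pages, set the xattr, then set the inode flag) and funnels every failure through one cleanup that truncates under i_gc_rwsem[WRITE], so GC cannot re-instantiate the pages being dropped. From userspace this path runs at the end of FS_IOC_ENABLE_VERITY; a minimal caller (a sketch assuming a kernel built with CONFIG_FS_VERITY and an f2fs image formatted with the verity feature):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsverity.h>

int enable_verity(const char *path)
{
	struct fsverity_enable_arg arg = {
		.version = 1,
		.hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
		.block_size = 4096,
	};
	int fd = open(path, O_RDONLY);	/* no writable fd may remain open */
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
	close(fd);
	return ret;
}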
65750 diff --git a/fs/file.c b/fs/file.c
65751 index f633348029a5..b56c4dd78a19 100644
65752 --- a/fs/file.c
65753 +++ b/fs/file.c
65754 @@ -771,6 +771,7 @@ int __close_fd_get_file(unsigned int fd, struct file **res)
65755         *res = NULL;
65756         return -ENOENT;
65758 +EXPORT_SYMBOL(close_fd_get_file);
65760  /*
65761   * variant of close_fd that gets a ref on the file for later fput.
65762 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
65763 index 45082269e698..a37528b51798 100644
65764 --- a/fs/fuse/cuse.c
65765 +++ b/fs/fuse/cuse.c
65766 @@ -627,6 +627,8 @@ static int __init cuse_init(void)
65767         cuse_channel_fops.owner         = THIS_MODULE;
65768         cuse_channel_fops.open          = cuse_channel_open;
65769         cuse_channel_fops.release       = cuse_channel_release;
65770 +       /* CUSE is not prepared for FUSE_DEV_IOC_CLONE */
65771 +       cuse_channel_fops.unlocked_ioctl        = NULL;
65773         cuse_class = class_create(THIS_MODULE, "cuse");
65774         if (IS_ERR(cuse_class))
65775 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
65776 index c0fee830a34e..f784c118f00f 100644
65777 --- a/fs/fuse/dev.c
65778 +++ b/fs/fuse/dev.c
65779 @@ -784,7 +784,8 @@ static int fuse_check_page(struct page *page)
65780                1 << PG_lru |
65781                1 << PG_active |
65782                1 << PG_reclaim |
65783 -              1 << PG_waiters))) {
65784 +              1 << PG_waiters |
65785 +              LRU_GEN_MASK | LRU_USAGE_MASK))) {
65786                 dump_page(page, "fuse: trying to steal weird page");
65787                 return 1;
65788         }
65789 @@ -2233,11 +2234,8 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
65790         int oldfd;
65791         struct fuse_dev *fud = NULL;
65793 -       if (_IOC_TYPE(cmd) != FUSE_DEV_IOC_MAGIC)
65794 -               return -ENOTTY;
65796 -       switch (_IOC_NR(cmd)) {
65797 -       case _IOC_NR(FUSE_DEV_IOC_CLONE):
65798 +       switch (cmd) {
65799 +       case FUSE_DEV_IOC_CLONE:
65800                 res = -EFAULT;
65801                 if (!get_user(oldfd, (__u32 __user *)arg)) {
65802                         struct file *old = fget(oldfd);
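The dev.c hunk above matches the full ioctl command value instead of only its _IOC_NR(), and the cuse.c hunk earlier clears unlocked_ioctl so a CUSE channel can no longer be cloned through the shared fops. Matching the whole value checks direction, size, type and number in one comparison; a sketch of the encoding and the stricter dispatch (constants as in include/uapi/linux/fuse.h):

#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#define FUSE_DEV_IOC_MAGIC	229
#define FUSE_DEV_IOC_CLONE	_IOR(FUSE_DEV_IOC_MAGIC, 0, __u32)

static long dispatch_sketch(unsigned int cmd)
{
	switch (cmd) {
	case FUSE_DEV_IOC_CLONE:	/* full match: dir, size, type and nr */
		return 0;
	default:			/* same nr but wrong type/size: rejected */
		return -ENOTTY;
	}
}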
65803 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
65804 index 8cccecb55fb8..6e6d1e599869 100644
65805 --- a/fs/fuse/file.c
65806 +++ b/fs/fuse/file.c
65807 @@ -1099,6 +1099,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
65808         struct fuse_file *ff = file->private_data;
65809         struct fuse_mount *fm = ff->fm;
65810         unsigned int offset, i;
65811 +       bool short_write;
65812         int err;
65814         for (i = 0; i < ap->num_pages; i++)
65815 @@ -1113,32 +1114,38 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
65816         if (!err && ia->write.out.size > count)
65817                 err = -EIO;
65819 +       short_write = ia->write.out.size < count;
65820         offset = ap->descs[0].offset;
65821         count = ia->write.out.size;
65822         for (i = 0; i < ap->num_pages; i++) {
65823                 struct page *page = ap->pages[i];
65825 -               if (!err && !offset && count >= PAGE_SIZE)
65826 -                       SetPageUptodate(page);
65828 -               if (count > PAGE_SIZE - offset)
65829 -                       count -= PAGE_SIZE - offset;
65830 -               else
65831 -                       count = 0;
65832 -               offset = 0;
65834 -               unlock_page(page);
65835 +               if (err) {
65836 +                       ClearPageUptodate(page);
65837 +               } else {
65838 +                       if (count >= PAGE_SIZE - offset)
65839 +                               count -= PAGE_SIZE - offset;
65840 +                       else {
65841 +                               if (short_write)
65842 +                                       ClearPageUptodate(page);
65843 +                               count = 0;
65844 +                       }
65845 +                       offset = 0;
65846 +               }
65847 +               if (ia->write.page_locked && (i == ap->num_pages - 1))
65848 +                       unlock_page(page);
65849                 put_page(page);
65850         }
65852         return err;
65855 -static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
65856 +static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
65857                                      struct address_space *mapping,
65858                                      struct iov_iter *ii, loff_t pos,
65859                                      unsigned int max_pages)
65861 +       struct fuse_args_pages *ap = &ia->ap;
65862         struct fuse_conn *fc = get_fuse_conn(mapping->host);
65863         unsigned offset = pos & (PAGE_SIZE - 1);
65864         size_t count = 0;
65865 @@ -1191,6 +1198,16 @@ static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
65866                 if (offset == PAGE_SIZE)
65867                         offset = 0;
65869 +               /* If we copied full page, mark it uptodate */
65870 +               if (tmp == PAGE_SIZE)
65871 +                       SetPageUptodate(page);
65873 +               if (PageUptodate(page)) {
65874 +                       unlock_page(page);
65875 +               } else {
65876 +                       ia->write.page_locked = true;
65877 +                       break;
65878 +               }
65879                 if (!fc->big_writes)
65880                         break;
65881         } while (iov_iter_count(ii) && count < fc->max_write &&
65882 @@ -1234,7 +1251,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
65883                         break;
65884                 }
65886 -               count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages);
65887 +               count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
65888                 if (count <= 0) {
65889                         err = count;
65890                 } else {
65891 @@ -1759,8 +1776,17 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
65892                 container_of(args, typeof(*wpa), ia.ap.args);
65893         struct inode *inode = wpa->inode;
65894         struct fuse_inode *fi = get_fuse_inode(inode);
65895 +       struct fuse_conn *fc = get_fuse_conn(inode);
65897         mapping_set_error(inode->i_mapping, error);
65898 +       /*
65899 +        * A writeback finished and this might have updated mtime/ctime on
65900 +        * server making local mtime/ctime stale.  Hence invalidate attrs.
65901 +        * Do this only if writeback_cache is not enabled.  If writeback_cache
65902 +        * is enabled, we trust local ctime/mtime.
65903 +        */
65904 +       if (!fc->writeback_cache)
65905 +               fuse_invalidate_attr(inode);
65906         spin_lock(&fi->lock);
65907         rb_erase(&wpa->writepages_entry, &fi->writepages);
65908         while (wpa->next) {
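The fuse/file.c hunks above tighten buffered-write semantics: a page becomes uptodate only once a full page of data has been copied in, a partially filled page stays locked (tracked by the new ia->write.page_locked) until the server's reply is known, and a short reply clears the tail page's uptodate bit again. The invariant, as a sketch (mark_last_page_locked() is a hypothetical stand-in for the bookkeeping above):

	/* After copying user data into a page-cache page: */
	if (copied == PAGE_SIZE)
		SetPageUptodate(page);	/* whole page now holds valid data */
	if (PageUptodate(page))
		unlock_page(page);	/* readers may use it immediately */
	else
		mark_last_page_locked(ia);	/* hypothetical: keep it locked
						 * until the FUSE server says
						 * how many bytes it accepted */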
65909 diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
65910 index 63d97a15ffde..74d888c78fa4 100644
65911 --- a/fs/fuse/fuse_i.h
65912 +++ b/fs/fuse/fuse_i.h
65913 @@ -912,6 +912,7 @@ struct fuse_io_args {
65914                 struct {
65915                         struct fuse_write_in in;
65916                         struct fuse_write_out out;
65917 +                       bool page_locked;
65918                 } write;
65919         };
65920         struct fuse_args_pages ap;
65921 diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
65922 index 4ee6f734ba83..005209b1cd50 100644
65923 --- a/fs/fuse/virtio_fs.c
65924 +++ b/fs/fuse/virtio_fs.c
65925 @@ -896,6 +896,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
65926  out_vqs:
65927         vdev->config->reset(vdev);
65928         virtio_fs_cleanup_vqs(vdev, fs);
65929 +       kfree(fs->vqs);
65931  out:
65932         vdev->priv = NULL;
65933 @@ -1436,8 +1437,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
65934         if (!fm)
65935                 goto out_err;
65937 -       fuse_conn_init(fc, fm, get_user_ns(current_user_ns()),
65938 -                      &virtio_fs_fiq_ops, fs);
65939 +       fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
65940         fc->release = fuse_free_conn;
65941         fc->delete_stale = true;
65942         fc->auto_submounts = true;
65943 diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
65944 index a930ddd15681..7054a542689f 100644
65945 --- a/fs/hfsplus/extents.c
65946 +++ b/fs/hfsplus/extents.c
65947 @@ -598,13 +598,15 @@ void hfsplus_file_truncate(struct inode *inode)
65948                 res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
65949                 if (res)
65950                         break;
65951 -               hfs_brec_remove(&fd);
65953 -               mutex_unlock(&fd.tree->tree_lock);
65954                 start = hip->cached_start;
65955 +               if (blk_cnt <= start)
65956 +                       hfs_brec_remove(&fd);
65957 +               mutex_unlock(&fd.tree->tree_lock);
65958                 hfsplus_free_extents(sb, hip->cached_extents,
65959                                      alloc_cnt - start, alloc_cnt - blk_cnt);
65960                 hfsplus_dump_extent(hip->cached_extents);
65961 +               mutex_lock(&fd.tree->tree_lock);
65962                 if (blk_cnt > start) {
65963                         hip->extent_state |= HFSPLUS_EXT_DIRTY;
65964                         break;
65965 @@ -612,7 +614,6 @@ void hfsplus_file_truncate(struct inode *inode)
65966                 alloc_cnt = start;
65967                 hip->cached_start = hip->cached_blocks = 0;
65968                 hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
65969 -               mutex_lock(&fd.tree->tree_lock);
65970         }
65971         hfs_find_exit(&fd);
65973 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
65974 index 701c82c36138..c63d0a7f7ba4 100644
65975 --- a/fs/hugetlbfs/inode.c
65976 +++ b/fs/hugetlbfs/inode.c
65977 @@ -131,6 +131,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
65978  static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
65980         struct inode *inode = file_inode(file);
65981 +       struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
65982         loff_t len, vma_len;
65983         int ret;
65984         struct hstate *h = hstate_file(file);
65985 @@ -146,6 +147,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
65986         vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
65987         vma->vm_ops = &hugetlb_vm_ops;
65989 +       ret = seal_check_future_write(info->seals, vma);
65990 +       if (ret)
65991 +               return ret;
65993         /*
65994          * page based offset in vm_pgoff could be sufficiently large to
65995          * overflow a loff_t when converted to byte offset.  This can
65996 @@ -527,7 +532,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
65997                          * the subpool and global reserve usage count can need
65998                          * to be adjusted.
65999                          */
66000 -                       VM_BUG_ON(PagePrivate(page));
66001 +                       VM_BUG_ON(HPageRestoreReserve(page));
66002                         remove_huge_page(page);
66003                         freed++;
66004                         if (!truncate_op) {
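The hugetlbfs mmap hook above starts honouring memfd seals through seal_check_future_write(). Its contract, sketched from the semantics the call site relies on (not a verbatim copy of the helper, which lives outside this hunk):

/* Sketch: enforce F_SEAL_FUTURE_WRITE at mmap time. */
static inline int seal_check_future_write_sketch(int seals,
						 struct vm_area_struct *vma)
{
	if (seals & F_SEAL_FUTURE_WRITE) {
		/* a new shared, writable mapping is no longer allowed */
		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
			return -EPERM;
		/* a shared read-only mapping must not be upgradable
		 * to writable later via mprotect() */
		if (vma->vm_flags & VM_SHARED)
			vma->vm_flags &= ~VM_MAYWRITE;
	}
	return 0;
}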
66005 diff --git a/fs/inode.c b/fs/inode.c
66006 index a047ab306f9a..c5e1dd13fd40 100644
66007 --- a/fs/inode.c
66008 +++ b/fs/inode.c
66009 @@ -139,6 +139,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
66010         inode->i_blkbits = sb->s_blocksize_bits;
66011         inode->i_flags = 0;
66012         atomic64_set(&inode->i_sequence, 0);
66013 +       atomic64_set(&inode->i_sequence2, 0);
66014         atomic_set(&inode->i_count, 1);
66015         inode->i_op = &empty_iops;
66016         inode->i_fop = &no_open_fops;
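The io_uring hunks that follow widen io_buffer.len from __s32 to __u32, clamp each provided buffer to MAX_RW_COUNT, and reject IORING_OP_PROVIDE_BUFFERS requests whose len * nbufs or addr + size arithmetic would wrap. The overflow-check idiom in isolation (a sketch using the <linux/overflow.h> helpers, mirroring the casts in the hunk below):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

static int validate_buf_range(u64 addr, u32 len, u16 nbufs)
{
	unsigned long size, end;

	if (check_mul_overflow((unsigned long)len, (unsigned long)nbufs, &size))
		return -EOVERFLOW;	/* len * nbufs wrapped */
	if (check_add_overflow((unsigned long)addr, size, &end))
		return -EOVERFLOW;	/* addr + size wrapped */
	return 0;			/* range is representable */
}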
66017 diff --git a/fs/io_uring.c b/fs/io_uring.c
66018 index dff34975d86b..144056b0cac9 100644
66019 --- a/fs/io_uring.c
66020 +++ b/fs/io_uring.c
66021 @@ -238,7 +238,7 @@ struct fixed_rsrc_data {
66022  struct io_buffer {
66023         struct list_head list;
66024         __u64 addr;
66025 -       __s32 len;
66026 +       __u32 len;
66027         __u16 bid;
66028  };
66030 @@ -614,7 +614,7 @@ struct io_splice {
66031  struct io_provide_buf {
66032         struct file                     *file;
66033         __u64                           addr;
66034 -       __s32                           len;
66035 +       __u32                           len;
66036         __u32                           bgid;
66037         __u16                           nbufs;
66038         __u16                           bid;
66039 @@ -1008,7 +1008,7 @@ static void io_uring_del_task_file(unsigned long index);
66040  static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
66041                                          struct task_struct *task,
66042                                          struct files_struct *files);
66043 -static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
66044 +static void io_uring_cancel_sqpoll(struct io_sq_data *sqd);
66045  static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
66046  static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
66047                         struct io_ring_ctx *ctx);
66048 @@ -3979,7 +3979,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
66049  static int io_provide_buffers_prep(struct io_kiocb *req,
66050                                    const struct io_uring_sqe *sqe)
66052 -       unsigned long size;
66053 +       unsigned long size, tmp_check;
66054         struct io_provide_buf *p = &req->pbuf;
66055         u64 tmp;
66057 @@ -3993,6 +3993,12 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
66058         p->addr = READ_ONCE(sqe->addr);
66059         p->len = READ_ONCE(sqe->len);
66061 +       if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
66062 +                               &size))
66063 +               return -EOVERFLOW;
66064 +       if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
66065 +               return -EOVERFLOW;
66067         size = (unsigned long)p->len * p->nbufs;
66068         if (!access_ok(u64_to_user_ptr(p->addr), size))
66069                 return -EFAULT;
66070 @@ -4017,7 +4023,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
66071                         break;
66073                 buf->addr = addr;
66074 -               buf->len = pbuf->len;
66075 +               buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
66076                 buf->bid = bid;
66077                 addr += pbuf->len;
66078                 bid++;
66079 @@ -6710,6 +6716,10 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
66080                 if (!list_empty(&ctx->iopoll_list))
66081                         io_do_iopoll(ctx, &nr_events, 0);
66083 +               /*
66084 +                * Don't submit if refs are dying, good for io_uring_register(),
66085 +                * but also it is relied upon by io_ring_exit_work()
66086 +                */
66087                 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
66088                     !(ctx->flags & IORING_SETUP_R_DISABLED))
66089                         ret = io_submit_sqes(ctx, to_submit);
66090 @@ -6832,15 +6842,14 @@ static int io_sq_thread(void *data)
66091                 timeout = jiffies + sqd->sq_thread_idle;
66092         }
66094 -       list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
66095 -               io_uring_cancel_sqpoll(ctx);
66096 +       io_uring_cancel_sqpoll(sqd);
66097         sqd->thread = NULL;
66098         list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
66099                 io_ring_set_wakeup_flag(ctx);
66100 -       mutex_unlock(&sqd->lock);
66102         io_run_task_work();
66103         io_run_task_work_head(&sqd->park_task_work);
66104 +       mutex_unlock(&sqd->lock);
66106         complete(&sqd->exited);
66107         do_exit(0);
66109 @@ -7200,8 +7209,6 @@ static void io_sq_thread_finish(struct io_ring_ctx *ctx)
66111                 io_put_sq_data(sqd);
66112                 ctx->sq_data = NULL;
66113 -               if (ctx->sq_creds)
66114 -                       put_cred(ctx->sq_creds);
66115         }
66118 @@ -8469,6 +8476,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
66119         mutex_unlock(&ctx->uring_lock);
66120         io_eventfd_unregister(ctx);
66121         io_destroy_buffers(ctx);
66122 +       if (ctx->sq_creds)
66123 +               put_cred(ctx->sq_creds);
66125  #if defined(CONFIG_UNIX)
66126         if (ctx->ring_sock) {
66127 @@ -8568,6 +8577,13 @@ static void io_tctx_exit_cb(struct callback_head *cb)
66128         complete(&work->completion);
66131 +static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
66133 +       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
66135 +       return req->ctx == data;
66138  static void io_ring_exit_work(struct work_struct *work)
66140         struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
66141 @@ -8576,14 +8592,6 @@ static void io_ring_exit_work(struct work_struct *work)
66142         struct io_tctx_node *node;
66143         int ret;
66145 -       /* prevent SQPOLL from submitting new requests */
66146 -       if (ctx->sq_data) {
66147 -               io_sq_thread_park(ctx->sq_data);
66148 -               list_del_init(&ctx->sqd_list);
66149 -               io_sqd_update_thread_idle(ctx->sq_data);
66150 -               io_sq_thread_unpark(ctx->sq_data);
66151 -       }
66153         /*
66154          * If we're doing polled IO and end up having requests being
66155          * submitted async (out-of-line), then completions can come in while
66156 @@ -8592,6 +8600,17 @@ static void io_ring_exit_work(struct work_struct *work)
66157          */
66158         do {
66159                 io_uring_try_cancel_requests(ctx, NULL, NULL);
66160 +               if (ctx->sq_data) {
66161 +                       struct io_sq_data *sqd = ctx->sq_data;
66162 +                       struct task_struct *tsk;
66164 +                       io_sq_thread_park(sqd);
66165 +                       tsk = sqd->thread;
66166 +                       if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
66167 +                               io_wq_cancel_cb(tsk->io_uring->io_wq,
66168 +                                               io_cancel_ctx_cb, ctx, true);
66169 +                       io_sq_thread_unpark(sqd);
66170 +               }
66172                 WARN_ON_ONCE(time_after(jiffies, timeout));
66173         } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
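
As the comment above notes, completions can keep arriving while exit work waits, so cancellation is retried until the reference count actually drains. A hedged, userspace-shaped sketch of that drain loop (the kernel version waits on a completion with a HZ/20 timeout rather than sleeping):

#include <stdatomic.h>
#include <time.h>

struct ring { atomic_int refs; };

static void cancel_requests(struct ring *r)
{
	/* ... cancel whatever is still in flight; this may race with
	 * completions being submitted out-of-line ... */
}

/* Sketch of io_ring_exit_work()'s shape: one cancellation pass is
 * not enough, so cancel, wait briefly, and re-check until idle. */
static void ring_exit_work(struct ring *r)
{
	const struct timespec tick = { .tv_nsec = 50 * 1000 * 1000 };

	do {
		cancel_requests(r);
		nanosleep(&tick, NULL);
	} while (atomic_load(&r->refs) != 0);
}
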
66174 @@ -8736,13 +8755,6 @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
66175         return true;
66178 -static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
66180 -       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
66182 -       return req->ctx == data;
66185  static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
66187         struct io_tctx_node *node;
66188 @@ -8935,11 +8947,11 @@ static s64 tctx_inflight(struct io_uring_task *tctx)
66189  static void io_sqpoll_cancel_cb(struct callback_head *cb)
66191         struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
66192 -       struct io_ring_ctx *ctx = work->ctx;
66193 -       struct io_sq_data *sqd = ctx->sq_data;
66194 +       struct io_sq_data *sqd = work->ctx->sq_data;
66196         if (sqd->thread)
66197 -               io_uring_cancel_sqpoll(ctx);
66198 +               io_uring_cancel_sqpoll(sqd);
66199 +       list_del_init(&work->ctx->sqd_list);
66200         complete(&work->completion);
66203 @@ -8950,7 +8962,6 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
66204         struct task_struct *task;
66206         io_sq_thread_park(sqd);
66207 -       list_del_init(&ctx->sqd_list);
66208         io_sqd_update_thread_idle(sqd);
66209         task = sqd->thread;
66210         if (task) {
66211 @@ -8958,6 +8969,8 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
66212                 init_task_work(&work.task_work, io_sqpoll_cancel_cb);
66213                 io_task_work_add_head(&sqd->park_task_work, &work.task_work);
66214                 wake_up_process(task);
66215 +       } else {
66216 +               list_del_init(&ctx->sqd_list);
66217         }
66218         io_sq_thread_unpark(sqd);
66220 @@ -8991,14 +9004,16 @@ void __io_uring_files_cancel(struct files_struct *files)
66223  /* should only be called by SQPOLL task */
66224 -static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
66225 +static void io_uring_cancel_sqpoll(struct io_sq_data *sqd)
66227 -       struct io_sq_data *sqd = ctx->sq_data;
66228         struct io_uring_task *tctx = current->io_uring;
66229 +       struct io_ring_ctx *ctx;
66230         s64 inflight;
66231         DEFINE_WAIT(wait);
66233 -       WARN_ON_ONCE(!sqd || ctx->sq_data->thread != current);
66234 +       if (!current->io_uring)
66235 +               return;
66236 +       WARN_ON_ONCE(!sqd || sqd->thread != current);
66238         atomic_inc(&tctx->in_idle);
66239         do {
66240 @@ -9006,7 +9021,8 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
66241                 inflight = tctx_inflight(tctx);
66242                 if (!inflight)
66243                         break;
66244 -               io_uring_try_cancel_requests(ctx, current, NULL);
66245 +               list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
66246 +                       io_uring_try_cancel_requests(ctx, current, NULL);
66248                 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
66249                 /*
66250 diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
66251 index 69f18fe20923..d47a0d96bf30 100644
66252 --- a/fs/jbd2/recovery.c
66253 +++ b/fs/jbd2/recovery.c
66254 @@ -245,15 +245,14 @@ static int fc_do_one_pass(journal_t *journal,
66255                 return 0;
66257         while (next_fc_block <= journal->j_fc_last) {
66258 -               jbd_debug(3, "Fast commit replay: next block %ld",
66259 +               jbd_debug(3, "Fast commit replay: next block %ld\n",
66260                           next_fc_block);
66261                 err = jread(&bh, journal, next_fc_block);
66262                 if (err) {
66263 -                       jbd_debug(3, "Fast commit replay: read error");
66264 +                       jbd_debug(3, "Fast commit replay: read error\n");
66265                         break;
66266                 }
66268 -               jbd_debug(3, "Processing fast commit blk with seq %d");
66269                 err = journal->j_fc_replay_callback(journal, bh, pass,
66270                                         next_fc_block - journal->j_fc_first,
66271                                         expected_commit_id);
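
The line removed above passed a "%d" conversion with no matching argument, so the logger would have read an arbitrary value; the other two changes add the trailing newlines that printk-style loggers expect. A hedged sketch of how printf-format annotations (which the kernel's printf-style helpers typically carry) catch this class of bug at compile time; debug_log() is a stand-in, not a kernel API:

#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 1, 2)))
static void debug_log(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	long next_block = 42;

	debug_log("Fast commit replay: next block %ld\n", next_block);
	/* debug_log("Processing blk with seq %d");
	 *   ^ -Wformat: '%d' expects an argument that was never passed */
	return 0;
}
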
66272 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
66273 index 9396666b7314..e8fc45fd751f 100644
66274 --- a/fs/jbd2/transaction.c
66275 +++ b/fs/jbd2/transaction.c
66276 @@ -349,7 +349,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
66277         }
66279  alloc_transaction:
66280 -       if (!journal->j_running_transaction) {
66281 +       /*
66282 +        * This check is racy, but it is just an optimization: allocate a new
66283 +        * transaction early if there is a high chance we'll need it. If we
66284 +        * guess wrong, we'll retry or free the unused transaction.
66285 +        */
66286 +       if (!data_race(journal->j_running_transaction)) {
66287                 /*
66288                  * If __GFP_FS is not present, then we may be being called from
66289                  * inside the fs writeback layer, so we MUST NOT fail.
66290 @@ -1474,8 +1479,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
66291          * crucial to catch bugs so let's do a reliable check until the
66292          * lockless handling is fully proven.
66293          */
66294 -       if (jh->b_transaction != transaction &&
66295 -           jh->b_next_transaction != transaction) {
66296 +       if (data_race(jh->b_transaction != transaction &&
66297 +           jh->b_next_transaction != transaction)) {
66298                 spin_lock(&jh->b_state_lock);
66299                 J_ASSERT_JH(jh, jh->b_transaction == transaction ||
66300                                 jh->b_next_transaction == transaction);
66301 @@ -1483,8 +1488,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
66302         }
66303         if (jh->b_modified == 1) {
66304                 /* If it's in our transaction it must be in BJ_Metadata list. */
66305 -               if (jh->b_transaction == transaction &&
66306 -                   jh->b_jlist != BJ_Metadata) {
66307 +               if (data_race(jh->b_transaction == transaction &&
66308 +                   jh->b_jlist != BJ_Metadata)) {
66309                         spin_lock(&jh->b_state_lock);
66310                         if (jh->b_transaction == transaction &&
66311                             jh->b_jlist != BJ_Metadata)
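
Both hunks above wrap intentionally lockless fast-path checks in data_race(), which tells KCSAN the race is deliberate; the result is still re-verified under b_state_lock before acting. A hedged sketch of the check-then-recheck pattern in plain pthreads (no kernel macros):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct jstate {
	pthread_mutex_t lock;
	bool needs_work;	/* written under lock, peeked without */
};

static void maybe_do_work(struct jstate *s)
{
	/* Racy peek, cheap and possibly stale; the kernel spells this
	 * if (data_race(...)) so KCSAN knows it is intentional. */
	if (!s->needs_work)
		return;

	pthread_mutex_lock(&s->lock);
	if (s->needs_work) {	/* authoritative re-check */
		s->needs_work = false;
		puts("did the work");
	}
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct jstate s = { PTHREAD_MUTEX_INITIALIZER, true };

	maybe_do_work(&s);
	return 0;
}
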
66312 diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
66313 index f8fb89b10227..4fc8cd698d1a 100644
66314 --- a/fs/jffs2/file.c
66315 +++ b/fs/jffs2/file.c
66316 @@ -57,6 +57,7 @@ const struct file_operations jffs2_file_operations =
66317         .mmap =         generic_file_readonly_mmap,
66318         .fsync =        jffs2_fsync,
66319         .splice_read =  generic_file_splice_read,
66320 +       .splice_write = iter_file_splice_write,
66321  };
66323  /* jffs2_file_inode_operations */
66324 diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
66325 index db72a9d2d0af..b676056826be 100644
66326 --- a/fs/jffs2/scan.c
66327 +++ b/fs/jffs2/scan.c
66328 @@ -1079,7 +1079,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
66329         memcpy(&fd->name, rd->name, checkedlen);
66330         fd->name[checkedlen] = 0;
66332 -       crc = crc32(0, fd->name, rd->nsize);
66333 +       crc = crc32(0, fd->name, checkedlen);
66334         if (crc != je32_to_cpu(rd->name_crc)) {
66335                 pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
66336                           __func__, ofs, je32_to_cpu(rd->name_crc), crc);
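
The one-line fix computes the name CRC over checkedlen, the clamped length that was actually copied, instead of the raw on-media rd->nsize, so a corrupt length field can no longer make crc32() read past the copied name. A hedged sketch of the rule, using zlib's crc32() (same calling shape as the kernel helper; link with -lz):

#include <stdint.h>
#include <zlib.h>

/* Sketch: checksum only bytes you validated and copied; never trust
 * an unvalidated on-media length. A clamped name simply fails the
 * CRC compare and the node is rejected, which is the safe outcome. */
static int name_crc_ok(const uint8_t *name, uint32_t nsize_on_media,
		       uint32_t copied_max, uint32_t expected)
{
	uint32_t checkedlen = nsize_on_media < copied_max
				? nsize_on_media : copied_max;

	return crc32(0L, name, checkedlen) == expected;
}

int main(void)
{
	const uint8_t name[] = "dirent";

	return !name_crc_ok(name, 6, sizeof(name), crc32(0L, name, 6));
}
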
66337 diff --git a/fs/namespace.c b/fs/namespace.c
66338 index 56bb5a5fdc0d..4d2e827ddb59 100644
66339 --- a/fs/namespace.c
66340 +++ b/fs/namespace.c
66341 @@ -3853,8 +3853,12 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
66342         if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
66343                 return -EINVAL;
66345 +       /* Don't yet support filesystems mountable in user namespaces. */
66346 +       if (m->mnt_sb->s_user_ns != &init_user_ns)
66347 +               return -EINVAL;
66349         /* We're not controlling the superblock. */
66350 -       if (!ns_capable(m->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
66351 +       if (!capable(CAP_SYS_ADMIN))
66352                 return -EPERM;
66354         /* Mount has already been visible in the filesystem hierarchy. */
66355 diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
66356 index f7786e00a6a7..ed9d580826f5 100644
66357 --- a/fs/nfs/callback_proc.c
66358 +++ b/fs/nfs/callback_proc.c
66359 @@ -137,12 +137,12 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
66360                 list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
66361                         if (!pnfs_layout_is_valid(lo))
66362                                 continue;
66363 -                       if (stateid != NULL &&
66364 -                           !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
66365 +                       if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
66366                                 continue;
66367 -                       if (!nfs_sb_active(server->super))
66368 -                               continue;
66369 -                       inode = igrab(lo->plh_inode);
66370 +                       if (nfs_sb_active(server->super))
66371 +                               inode = igrab(lo->plh_inode);
66372 +                       else
66373 +                               inode = ERR_PTR(-EAGAIN);
66374                         rcu_read_unlock();
66375                         if (inode)
66376                                 return inode;
66377 @@ -176,9 +176,10 @@ static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
66378                                 continue;
66379                         if (nfsi->layout != lo)
66380                                 continue;
66381 -                       if (!nfs_sb_active(server->super))
66382 -                               continue;
66383 -                       inode = igrab(lo->plh_inode);
66384 +                       if (nfs_sb_active(server->super))
66385 +                               inode = igrab(lo->plh_inode);
66386 +                       else
66387 +                               inode = ERR_PTR(-EAGAIN);
66388                         rcu_read_unlock();
66389                         if (inode)
66390                                 return inode;
66391 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
66392 index fc4f490f2d78..0cd7c59a6601 100644
66393 --- a/fs/nfs/dir.c
66394 +++ b/fs/nfs/dir.c
66395 @@ -866,6 +866,8 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
66396                         break;
66397                 }
66399 +               verf_arg = verf_res;
66401                 status = nfs_readdir_page_filler(desc, entry, pages, pglen,
66402                                                  arrays, narrays);
66403         } while (!status && nfs_readdir_page_needs_filling(page));
66404 @@ -927,7 +929,12 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
66405                         }
66406                         return res;
66407                 }
66408 -               memcpy(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf));
66409 +               /*
66410 +                * Set the cookie verifier if the page cache was empty
66411 +                */
66412 +               if (desc->page_index == 0)
66413 +                       memcpy(nfsi->cookieverf, verf,
66414 +                              sizeof(nfsi->cookieverf));
66415         }
66416         res = nfs_readdir_search_array(desc);
66417         if (res == 0) {
66418 @@ -974,10 +981,10 @@ static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc)
66419  /*
66420   * Once we've found the start of the dirent within a page: fill 'er up...
66421   */
66422 -static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
66423 +static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
66424 +                          const __be32 *verf)
66426         struct file     *file = desc->file;
66427 -       struct nfs_inode *nfsi = NFS_I(file_inode(file));
66428         struct nfs_cache_array *array;
66429         unsigned int i = 0;
66431 @@ -991,7 +998,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
66432                         desc->eof = true;
66433                         break;
66434                 }
66435 -               memcpy(desc->verf, nfsi->cookieverf, sizeof(desc->verf));
66436 +               memcpy(desc->verf, verf, sizeof(desc->verf));
66437                 if (i < (array->size-1))
66438                         desc->dir_cookie = array->array[i+1].cookie;
66439                 else
66440 @@ -1048,7 +1055,7 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
66442         for (i = 0; !desc->eof && i < sz && arrays[i]; i++) {
66443                 desc->page = arrays[i];
66444 -               nfs_do_filldir(desc);
66445 +               nfs_do_filldir(desc, verf);
66446         }
66447         desc->page = NULL;
66449 @@ -1069,6 +1076,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
66451         struct dentry   *dentry = file_dentry(file);
66452         struct inode    *inode = d_inode(dentry);
66453 +       struct nfs_inode *nfsi = NFS_I(inode);
66454         struct nfs_open_dir_context *dir_ctx = file->private_data;
66455         struct nfs_readdir_descriptor *desc;
66456         int res;
66457 @@ -1122,7 +1130,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
66458                         break;
66459                 }
66460                 if (res == -ETOOSMALL && desc->plus) {
66461 -                       clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
66462 +                       clear_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
66463                         nfs_zap_caches(inode);
66464                         desc->page_index = 0;
66465                         desc->plus = false;
66466 @@ -1132,7 +1140,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
66467                 if (res < 0)
66468                         break;
66470 -               nfs_do_filldir(desc);
66471 +               nfs_do_filldir(desc, nfsi->cookieverf);
66472                 nfs_readdir_page_unlock_and_put_cached(desc);
66473         } while (!desc->eof);
66475 diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
66476 index 872112bffcab..d383de00d486 100644
66477 --- a/fs/nfs/flexfilelayout/flexfilelayout.c
66478 +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
66479 @@ -106,7 +106,7 @@ static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
66480         if (unlikely(!p))
66481                 return -ENOBUFS;
66482         fh->size = be32_to_cpup(p++);
66483 -       if (fh->size > sizeof(struct nfs_fh)) {
66484 +       if (fh->size > NFS_MAXFHSIZE) {
66485                 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
66486                        fh->size);
66487                 return -EOVERFLOW;
66488 diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
66489 index 971a9251c1d9..902db1262d2b 100644
66490 --- a/fs/nfs/fs_context.c
66491 +++ b/fs/nfs/fs_context.c
66492 @@ -973,6 +973,15 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
66493                         memset(mntfh->data + mntfh->size, 0,
66494                                sizeof(mntfh->data) - mntfh->size);
66496 +               /*
66497 +                * For proto == XPRT_TRANSPORT_UDP, the only transport that
66498 +                * uses to_exponential (i.e. a shift), limit the shift value
66499 +                * to BITS_PER_LONG (majortimeo is an unsigned long).
66500 +                */
66501 +               if (!(data->flags & NFS_MOUNT_TCP)) /* this will be UDP */
66502 +                       if (data->retrans >= 64) /* shift value is too large */
66503 +                               goto out_invalid_data;
66505                 /*
66506                  * Translate to nfs_fs_context, which nfs_fill_super
66507                  * can deal with.
66508 @@ -1073,6 +1082,9 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
66510  out_invalid_fh:
66511         return nfs_invalf(fc, "NFS: invalid root filehandle");
66513 +out_invalid_data:
66514 +       return nfs_invalf(fc, "NFS: invalid binary mount data");
66517  #if IS_ENABLED(CONFIG_NFS_V4)
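
The retrans >= 64 guard exists because the UDP major timeout is derived by left-shifting an unsigned long by up to retrans bits, and a shift count of BITS_PER_LONG or more is undefined behaviour in C. A small sketch of the failure mode, with illustrative values:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long timeo = 7;	/* illustrative base timeout */
	unsigned int retrans = 64;	/* garbage from binary mount data */

	/* Shifting an unsigned long by >= its width is undefined
	 * behaviour, hence the retrans >= 64 rejection above. */
	if (retrans >= sizeof(unsigned long) * CHAR_BIT) {
		puts("invalid binary mount data");
		return 1;
	}
	printf("majortimeo = %lu\n", timeo << retrans);
	return 0;
}
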
66518 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
66519 index a7fb076a5f44..ae8bc84e39fb 100644
66520 --- a/fs/nfs/inode.c
66521 +++ b/fs/nfs/inode.c
66522 @@ -219,15 +219,16 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
66523                                 | NFS_INO_INVALID_SIZE
66524                                 | NFS_INO_REVAL_PAGECACHE
66525                                 | NFS_INO_INVALID_XATTR);
66526 -       }
66527 +       } else if (flags & NFS_INO_REVAL_PAGECACHE)
66528 +               flags |= NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE;
66530         if (!nfs_has_xattr_cache(nfsi))
66531                 flags &= ~NFS_INO_INVALID_XATTR;
66532 +       if (flags & NFS_INO_INVALID_DATA)
66533 +               nfs_fscache_invalidate(inode);
66534         if (inode->i_mapping->nrpages == 0)
66535                 flags &= ~(NFS_INO_INVALID_DATA|NFS_INO_DATA_INVAL_DEFER);
66536         nfsi->cache_validity |= flags;
66537 -       if (flags & NFS_INO_INVALID_DATA)
66538 -               nfs_fscache_invalidate(inode);
66540  EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
66542 @@ -1662,10 +1663,10 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
66543   */
66544  static int nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
66546 -       const struct nfs_inode *nfsi = NFS_I(inode);
66547 +       unsigned long attr_gencount = NFS_I(inode)->attr_gencount;
66549 -       return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
66550 -               ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
66551 +       return (long)(fattr->gencount - attr_gencount) > 0 ||
66552 +              (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0;
66555  static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
66556 @@ -2094,7 +2095,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
66557                         nfsi->attrtimeo_timestamp = now;
66558                 }
66559                 /* Set the barrier to be more recent than this fattr */
66560 -               if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
66561 +               if ((long)(fattr->gencount - nfsi->attr_gencount) > 0)
66562                         nfsi->attr_gencount = fattr->gencount;
66563         }
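
The gencount hunks switch from subtracting two casted operands to casting the unsigned difference, the standard wraparound-safe "serial number" comparison also used by the kernel's time_after(). A worked sketch:

#include <stdio.h>

/* Sketch: the unsigned subtraction wraps with defined semantics;
 * casting each operand to long and subtracting, as the old code did,
 * can overflow signed arithmetic, which is undefined behaviour. */
static int gen_after(unsigned long a, unsigned long b)
{
	return (long)(a - b) > 0;
}

int main(void)
{
	unsigned long old = (unsigned long)-2;	/* counter near wrap */
	unsigned long new = old + 3;		/* == 1 after wrapping */

	printf("%d\n", gen_after(new, old));	/* prints 1: new is newer */
	return 0;
}
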
66565 diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
66566 index 094024b0aca1..3875120ef3ef 100644
66567 --- a/fs/nfs/nfs42proc.c
66568 +++ b/fs/nfs/nfs42proc.c
66569 @@ -46,11 +46,12 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66571         struct inode *inode = file_inode(filep);
66572         struct nfs_server *server = NFS_SERVER(inode);
66573 +       u32 bitmask[3];
66574         struct nfs42_falloc_args args = {
66575                 .falloc_fh      = NFS_FH(inode),
66576                 .falloc_offset  = offset,
66577                 .falloc_length  = len,
66578 -               .falloc_bitmask = nfs4_fattr_bitmap,
66579 +               .falloc_bitmask = bitmask,
66580         };
66581         struct nfs42_falloc_res res = {
66582                 .falloc_server  = server,
66583 @@ -68,6 +69,10 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66584                 return status;
66585         }
66587 +       memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
66588 +       if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
66589 +               bitmask[1] |= FATTR4_WORD1_SPACE_USED;
66591         res.falloc_fattr = nfs_alloc_fattr();
66592         if (!res.falloc_fattr)
66593                 return -ENOMEM;
66594 @@ -75,7 +80,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66595         status = nfs4_call_sync(server->client, server, msg,
66596                                 &args.seq_args, &res.seq_res, 0);
66597         if (status == 0)
66598 -               status = nfs_post_op_update_inode(inode, res.falloc_fattr);
66599 +               status = nfs_post_op_update_inode_force_wcc(inode,
66600 +                                                           res.falloc_fattr);
66602         kfree(res.falloc_fattr);
66603         return status;
66604 @@ -84,7 +90,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66605  static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66606                                 loff_t offset, loff_t len)
66608 -       struct nfs_server *server = NFS_SERVER(file_inode(filep));
66609 +       struct inode *inode = file_inode(filep);
66610 +       struct nfs_server *server = NFS_SERVER(inode);
66611         struct nfs4_exception exception = { };
66612         struct nfs_lock_context *lock;
66613         int err;
66614 @@ -93,9 +100,13 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66615         if (IS_ERR(lock))
66616                 return PTR_ERR(lock);
66618 -       exception.inode = file_inode(filep);
66619 +       exception.inode = inode;
66620         exception.state = lock->open_context->state;
66622 +       err = nfs_sync_inode(inode);
66623 +       if (err)
66624 +               goto out;
66626         do {
66627                 err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
66628                 if (err == -ENOTSUPP) {
66629 @@ -104,7 +115,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
66630                 }
66631                 err = nfs4_handle_exception(server, err, &exception);
66632         } while (exception.retry);
66634 +out:
66635         nfs_put_lock_context(lock);
66636         return err;
66638 @@ -142,16 +153,13 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
66639                 return -EOPNOTSUPP;
66641         inode_lock(inode);
66642 -       err = nfs_sync_inode(inode);
66643 -       if (err)
66644 -               goto out_unlock;
66646         err = nfs42_proc_fallocate(&msg, filep, offset, len);
66647         if (err == 0)
66648                 truncate_pagecache_range(inode, offset, (offset + len) -1);
66649         if (err == -EOPNOTSUPP)
66650                 NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
66651 -out_unlock:
66653         inode_unlock(inode);
66654         return err;
66656 @@ -261,6 +269,33 @@ static int process_copy_commit(struct file *dst, loff_t pos_dst,
66657         return status;
66660 +/**
66661 + * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
66662 + * @inode: pointer to destination inode
66663 + * @pos: destination offset
66664 + * @len: copy length
66665 + *
66666 + * Punch a hole in the inode page cache, so that the NFS client will
66667 + * know to retrieve new data.
66668 + * Update the file size if necessary, and then mark the inode as having
66669 + * invalid cached values for change attribute, ctime, mtime and space used.
66670 + */
66671 +static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
66673 +       loff_t newsize = pos + len;
66674 +       loff_t end = newsize - 1;
66676 +       truncate_pagecache_range(inode, pos, end);
66677 +       spin_lock(&inode->i_lock);
66678 +       if (newsize > i_size_read(inode))
66679 +               i_size_write(inode, newsize);
66680 +       nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
66681 +                                            NFS_INO_INVALID_CTIME |
66682 +                                            NFS_INO_INVALID_MTIME |
66683 +                                            NFS_INO_INVALID_BLOCKS);
66684 +       spin_unlock(&inode->i_lock);
66687  static ssize_t _nfs42_proc_copy(struct file *src,
66688                                 struct nfs_lock_context *src_lock,
66689                                 struct file *dst,
66690 @@ -354,14 +389,8 @@ static ssize_t _nfs42_proc_copy(struct file *src,
66691                         goto out;
66692         }
66694 -       truncate_pagecache_range(dst_inode, pos_dst,
66695 -                                pos_dst + res->write_res.count);
66696 -       spin_lock(&dst_inode->i_lock);
66697 -       nfs_set_cache_invalid(
66698 -               dst_inode, NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED |
66699 -                                  NFS_INO_INVALID_SIZE | NFS_INO_INVALID_ATTR |
66700 -                                  NFS_INO_INVALID_DATA);
66701 -       spin_unlock(&dst_inode->i_lock);
66702 +       nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
66704         spin_lock(&src_inode->i_lock);
66705         nfs_set_cache_invalid(src_inode, NFS_INO_REVAL_PAGECACHE |
66706                                                  NFS_INO_REVAL_FORCED |
66707 @@ -659,7 +688,10 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
66708         if (status)
66709                 return status;
66711 -       return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
66712 +       if (whence == SEEK_DATA && res.sr_eof)
66713 +               return -NFS4ERR_NXIO;
66714 +       else
66715 +               return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
66718  loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
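
Returning -NFS4ERR_NXIO when SEEK_DATA runs into sr_eof makes NFS match lseek(2) semantics: seeking for data at or beyond end of file must fail with ENXIO instead of succeeding past the last byte. A userspace sketch of the behaviour the hunk enforces (path is a placeholder):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "sparse.bin", O_RDONLY);
	off_t pos;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	pos = lseek(fd, 0, SEEK_DATA);
	if (pos < 0)
		perror("lseek(SEEK_DATA)"); /* ENXIO on hole-only/empty file */
	else
		printf("first data at %lld\n", (long long)pos);
	close(fd);
	return 0;
}
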
66719 @@ -1044,8 +1076,10 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
66721         status = nfs4_call_sync(server->client, server, msg,
66722                                 &args.seq_args, &res.seq_res, 0);
66723 -       if (status == 0)
66724 +       if (status == 0) {
66725 +               nfs42_copy_dest_done(dst_inode, dst_offset, count);
66726                 status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
66727 +       }
66729         kfree(res.dst_fattr);
66730         return status;
66731 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
66732 index c65c4b41e2c1..820abae88cf0 100644
66733 --- a/fs/nfs/nfs4proc.c
66734 +++ b/fs/nfs/nfs4proc.c
66735 @@ -108,9 +108,10 @@ static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
66736  static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
66737                 const struct cred *, bool);
66738  #endif
66739 -static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
66740 -               struct nfs_server *server,
66741 -               struct nfs4_label *label);
66742 +static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ],
66743 +                            const __u32 *src, struct inode *inode,
66744 +                            struct nfs_server *server,
66745 +                            struct nfs4_label *label);
66747  #ifdef CONFIG_NFS_V4_SECURITY_LABEL
66748  static inline struct nfs4_label *
66749 @@ -3591,6 +3592,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
66750         struct nfs4_closedata *calldata = data;
66751         struct nfs4_state *state = calldata->state;
66752         struct inode *inode = calldata->inode;
66753 +       struct nfs_server *server = NFS_SERVER(inode);
66754         struct pnfs_layout_hdr *lo;
66755         bool is_rdonly, is_wronly, is_rdwr;
66756         int call_close = 0;
66757 @@ -3647,8 +3649,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
66758         if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
66759                 /* Close-to-open cache consistency revalidation */
66760                 if (!nfs4_have_delegation(inode, FMODE_READ)) {
66761 -                       calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
66762 -                       nfs4_bitmask_adjust(calldata->arg.bitmask, inode, NFS_SERVER(inode), NULL);
66763 +                       nfs4_bitmask_set(calldata->arg.bitmask_store,
66764 +                                        server->cache_consistency_bitmask,
66765 +                                        inode, server, NULL);
66766 +                       calldata->arg.bitmask = calldata->arg.bitmask_store;
66767                 } else
66768                         calldata->arg.bitmask = NULL;
66769         }
66770 @@ -5416,19 +5420,17 @@ bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
66771         return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
66774 -static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
66775 -                               struct nfs_server *server,
66776 -                               struct nfs4_label *label)
66777 +static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
66778 +                            struct inode *inode, struct nfs_server *server,
66779 +                            struct nfs4_label *label)
66782         unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
66783 +       unsigned int i;
66785 -       if ((cache_validity & NFS_INO_INVALID_DATA) ||
66786 -               (cache_validity & NFS_INO_REVAL_PAGECACHE) ||
66787 -               (cache_validity & NFS_INO_REVAL_FORCED) ||
66788 -               (cache_validity & NFS_INO_INVALID_OTHER))
66789 -               nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
66790 +       memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
66792 +       if (cache_validity & (NFS_INO_INVALID_CHANGE | NFS_INO_REVAL_PAGECACHE))
66793 +               bitmask[0] |= FATTR4_WORD0_CHANGE;
66794         if (cache_validity & NFS_INO_INVALID_ATIME)
66795                 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
66796         if (cache_validity & NFS_INO_INVALID_OTHER)
66797 @@ -5437,16 +5439,22 @@ static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
66798                                 FATTR4_WORD1_NUMLINKS;
66799         if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
66800                 bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
66801 -       if (cache_validity & NFS_INO_INVALID_CHANGE)
66802 -               bitmask[0] |= FATTR4_WORD0_CHANGE;
66803         if (cache_validity & NFS_INO_INVALID_CTIME)
66804                 bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
66805         if (cache_validity & NFS_INO_INVALID_MTIME)
66806                 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
66807 -       if (cache_validity & NFS_INO_INVALID_SIZE)
66808 -               bitmask[0] |= FATTR4_WORD0_SIZE;
66809         if (cache_validity & NFS_INO_INVALID_BLOCKS)
66810                 bitmask[1] |= FATTR4_WORD1_SPACE_USED;
66812 +       if (nfs4_have_delegation(inode, FMODE_READ) &&
66813 +           !(cache_validity & NFS_INO_REVAL_FORCED))
66814 +               bitmask[0] &= ~FATTR4_WORD0_SIZE;
66815 +       else if (cache_validity &
66816 +                (NFS_INO_INVALID_SIZE | NFS_INO_REVAL_PAGECACHE))
66817 +               bitmask[0] |= FATTR4_WORD0_SIZE;
66819 +       for (i = 0; i < NFS4_BITMASK_SZ; i++)
66820 +               bitmask[i] &= server->attr_bitmask[i];
66823  static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
66824 @@ -5459,8 +5467,10 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
66825                 hdr->args.bitmask = NULL;
66826                 hdr->res.fattr = NULL;
66827         } else {
66828 -               hdr->args.bitmask = server->cache_consistency_bitmask;
66829 -               nfs4_bitmask_adjust(hdr->args.bitmask, hdr->inode, server, NULL);
66830 +               nfs4_bitmask_set(hdr->args.bitmask_store,
66831 +                                server->cache_consistency_bitmask,
66832 +                                hdr->inode, server, NULL);
66833 +               hdr->args.bitmask = hdr->args.bitmask_store;
66834         }
66836         if (!hdr->pgio_done_cb)
66837 @@ -6502,8 +6512,10 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
66839         data->args.fhandle = &data->fh;
66840         data->args.stateid = &data->stateid;
66841 -       data->args.bitmask = server->cache_consistency_bitmask;
66842 -       nfs4_bitmask_adjust(data->args.bitmask, inode, server, NULL);
66843 +       nfs4_bitmask_set(data->args.bitmask_store,
66844 +                        server->cache_consistency_bitmask, inode, server,
66845 +                        NULL);
66846 +       data->args.bitmask = data->args.bitmask_store;
66847         nfs_copy_fh(&data->fh, NFS_FH(inode));
66848         nfs4_stateid_copy(&data->stateid, stateid);
66849         data->res.fattr = &data->fattr;
66850 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
66851 index 102b66e0bdef..f726f8b12b7e 100644
66852 --- a/fs/nfs/pnfs.c
66853 +++ b/fs/nfs/pnfs.c
66854 @@ -1344,7 +1344,7 @@ _pnfs_return_layout(struct inode *ino)
66855         }
66856         valid_layout = pnfs_layout_is_valid(lo);
66857         pnfs_clear_layoutcommit(ino, &tmp_list);
66858 -       pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
66859 +       pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
66861         if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
66862                 struct pnfs_layout_range range = {
66863 @@ -2468,6 +2468,9 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
66865         assert_spin_locked(&lo->plh_inode->i_lock);
66867 +       if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
66868 +               tmp_list = &lo->plh_return_segs;
66870         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
66871                 if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
66872                         dprintk("%s: marking lseg %p iomode %d "
66873 @@ -2475,6 +2478,8 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
66874                                 lseg, lseg->pls_range.iomode,
66875                                 lseg->pls_range.offset,
66876                                 lseg->pls_range.length);
66877 +                       if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
66878 +                               tmp_list = &lo->plh_return_segs;
66879                         if (mark_lseg_invalid(lseg, tmp_list))
66880                                 continue;
66881                         remaining++;
66882 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
66883 index dd9f38d072dd..e13c4c81fb89 100644
66884 --- a/fs/nfsd/nfs4proc.c
66885 +++ b/fs/nfsd/nfs4proc.c
66886 @@ -1538,8 +1538,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
66887                 if (!nfs4_init_copy_state(nn, copy))
66888                         goto out_err;
66889                 refcount_set(&async_copy->refcount, 1);
66890 -               memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
66891 -                       sizeof(copy->cp_stateid));
66892 +               memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.stid,
66893 +                       sizeof(copy->cp_res.cb_stateid));
66894                 dup_copy_fields(copy, async_copy);
66895                 async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
66896                                 async_copy, "%s", "copy thread");
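
The memcpy fix above sizes the copy by the destination (cb_stateid) and copies the matching stid member, so a source structure that grew larger than the destination can no longer overflow it. A hedged sketch with simplified stand-in types (not the real nfsd definitions):

#include <stdio.h>
#include <string.h>

struct stateid { unsigned char other[12]; };
struct copy_stateid { struct stateid stid; int async_state; };

int main(void)
{
	struct copy_stateid src = { .stid = { { 'a', 'b', 'c' } } };
	struct stateid dst;

	memcpy(&dst, &src.stid, sizeof(dst));	/* dest-sized: safe */
	/* memcpy(&dst, &src, sizeof(src));	   source-sized: overflow */
	printf("%c%c%c\n", dst.other[0], dst.other[1], dst.other[2]);
	return 0;
}
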
66897 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
66898 index 97447a64bad0..886e50ed07c2 100644
66899 --- a/fs/nfsd/nfs4state.c
66900 +++ b/fs/nfsd/nfs4state.c
66901 @@ -4869,6 +4869,11 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
66902         if (nf)
66903                 nfsd_file_put(nf);
66905 +       status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
66906 +                                                               access));
66907 +       if (status)
66908 +               goto out_put_access;
66910         status = nfsd4_truncate(rqstp, cur_fh, open);
66911         if (status)
66912                 goto out_put_access;
66913 @@ -6849,11 +6854,20 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
66914  static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
66916         struct nfsd_file *nf;
66917 -       __be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
66918 -       if (!err) {
66919 -               err = nfserrno(vfs_test_lock(nf->nf_file, lock));
66920 -               nfsd_file_put(nf);
66921 -       }
66922 +       __be32 err;
66924 +       err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
66925 +       if (err)
66926 +               return err;
66927 +       fh_lock(fhp); /* to block new leases till after test_lock: */
66928 +       err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
66929 +                                                       NFSD_MAY_READ));
66930 +       if (err)
66931 +               goto out;
66932 +       err = nfserrno(vfs_test_lock(nf->nf_file, lock));
66933 +out:
66934 +       fh_unlock(fhp);
66935 +       nfsd_file_put(nf);
66936         return err;
66939 diff --git a/fs/ntfs3/Kconfig b/fs/ntfs3/Kconfig
66940 new file mode 100644
66941 index 000000000000..6e4cbc48ab8e
66942 --- /dev/null
66943 +++ b/fs/ntfs3/Kconfig
66944 @@ -0,0 +1,46 @@
66945 +# SPDX-License-Identifier: GPL-2.0-only
66946 +config NTFS3_FS
66947 +       tristate "NTFS Read-Write file system support"
66948 +       select NLS
66949 +       help
66950 +         Support for the Windows OS native file system (NTFS), up to NTFS version 3.1.
66952 +         Y or M enables the NTFS3 driver with full features enabled (read,
66953 +         write, journal replaying, sparse/compressed files support).
66954 +         The file system type to use on mount is "ntfs3". The module name (M
66955 +         option) is also "ntfs3".
66957 +         Documentation: <file:Documentation/filesystems/ntfs3.rst>
66959 +config NTFS3_64BIT_CLUSTER
66960 +       bool "64 bits per NTFS clusters"
66961 +       depends on NTFS3_FS && 64BIT
66962 +       help
66963 +         The Windows implementation of ntfs.sys uses 32 bits per cluster.
66964 +         If 64 bits per cluster is activated, you will be able to use 4k
66965 +         clusters on 16T+ volumes. Windows will not be able to mount such volumes.
66967 +         It is recommended to say N here.
66969 +config NTFS3_LZX_XPRESS
66970 +       bool "activate support for external lzx/xpress compression"
66971 +       depends on NTFS3_FS
66972 +       help
66973 +         In Windows 10 one can use the "compact" command to compress any file.
66974 +         The 4 possible compression variants are: xpress4k, xpress8k, xpress16k and lzx.
66975 +         If activated, you will be able to read such files correctly.
66977 +         It is recommended to say Y here.
66979 +config NTFS3_FS_POSIX_ACL
66980 +       bool "NTFS POSIX Access Control Lists"
66981 +       depends on NTFS3_FS
66982 +       select FS_POSIX_ACL
66983 +       help
66984 +         POSIX Access Control Lists (ACLs) support additional access rights
66985 +         for users and groups beyond the standard owner/group/world scheme,
66986 +         and this option selects support for ACLs specifically for ntfs
66987 +         filesystems.
66988 +         NOTE: this is a Linux-only feature. Windows will ignore these ACLs.
66990 +         If you don't know what Access Control Lists are, say N.
66991 diff --git a/fs/ntfs3/Makefile b/fs/ntfs3/Makefile
66992 new file mode 100644
66993 index 000000000000..5adc54ebac5a
66994 --- /dev/null
66995 +++ b/fs/ntfs3/Makefile
66996 @@ -0,0 +1,38 @@
66997 +# SPDX-License-Identifier: GPL-2.0
66999 +# Makefile for the ntfs3 filesystem support.
67002 +# enable extra warnings to match the kbuild test robot
67003 +ccflags-y += -Wint-to-pointer-cast
67004 +condflags := \
67005 +       $(call cc-option, -Wunused-but-set-variable) \
67006 +       $(call cc-option, -Wold-style-declaration)
67007 +ccflags-y += $(condflags)
67009 +obj-$(CONFIG_NTFS3_FS) += ntfs3.o
67011 +ntfs3-y :=     attrib.o \
67012 +               attrlist.o \
67013 +               bitfunc.o \
67014 +               bitmap.o \
67015 +               dir.o \
67016 +               fsntfs.o \
67017 +               frecord.o \
67018 +               file.o \
67019 +               fslog.o \
67020 +               inode.o \
67021 +               index.o \
67022 +               lznt.o \
67023 +               namei.o \
67024 +               record.o \
67025 +               run.o \
67026 +               super.o \
67027 +               upcase.o \
67028 +               xattr.o
67030 +ntfs3-$(CONFIG_NTFS3_LZX_XPRESS) += $(addprefix lib/,\
67031 +               decompress_common.o \
67032 +               lzx_decompress.o \
67033 +               xpress_decompress.o \
67034 +               )
67035 diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
67036 new file mode 100644
67037 index 000000000000..bca85e7b6eaf
67038 --- /dev/null
67039 +++ b/fs/ntfs3/attrib.c
67040 @@ -0,0 +1,2082 @@
67041 +// SPDX-License-Identifier: GPL-2.0
67043 + *
67044 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
67045 + *
67046 + * TODO: merge attr_set_size/attr_data_get_block/attr_allocate_frame?
67047 + */
67049 +#include <linux/blkdev.h>
67050 +#include <linux/buffer_head.h>
67051 +#include <linux/fs.h>
67052 +#include <linux/hash.h>
67053 +#include <linux/nls.h>
67054 +#include <linux/ratelimit.h>
67055 +#include <linux/slab.h>
67057 +#include "debug.h"
67058 +#include "ntfs.h"
67059 +#include "ntfs_fs.h"
67062 + * You can define NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP externally
67063 + * to tune the preallocation algorithm.
67064 + */
67065 +#ifndef NTFS_MIN_LOG2_OF_CLUMP
67066 +#define NTFS_MIN_LOG2_OF_CLUMP 16
67067 +#endif
67069 +#ifndef NTFS_MAX_LOG2_OF_CLUMP
67070 +#define NTFS_MAX_LOG2_OF_CLUMP 26
67071 +#endif
67073 +// 16M
67074 +#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
67075 +// 16G
67076 +#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
67079 + * get_pre_allocated
67080 + * returns 'size' rounded up to the preallocation clump granularity
67081 + */
67082 +static inline u64 get_pre_allocated(u64 size)
67084 +       u32 clump;
67085 +       u8 align_shift;
67086 +       u64 ret;
67088 +       if (size <= NTFS_CLUMP_MIN) {
67089 +               clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
67090 +               align_shift = NTFS_MIN_LOG2_OF_CLUMP;
67091 +       } else if (size >= NTFS_CLUMP_MAX) {
67092 +               clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
67093 +               align_shift = NTFS_MAX_LOG2_OF_CLUMP;
67094 +       } else {
67095 +               align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
67096 +                             __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
67097 +               clump = 1u << align_shift;
67098 +       }
67100 +       ret = (((size + clump - 1) >> align_shift)) << align_shift;
67102 +       return ret;
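
get_pre_allocated() rounds a requested size up to a power-of-two "clump" whose log2 scales with the size, clamped between NTFS_MIN_LOG2_OF_CLUMP and NTFS_MAX_LOG2_OF_CLUMP. A hedged sketch of just the small-size branch, with a worked check of the rounding:

#include <stdint.h>
#include <stdio.h>

#define MIN_LOG2_OF_CLUMP 16	/* mirrors NTFS_MIN_LOG2_OF_CLUMP */

/* Small-size branch only: anything up to the minimum clump threshold
 * is rounded up to a 64K (1 << 16) clump. */
static uint64_t round_to_min_clump(uint64_t size)
{
	uint64_t clump = 1ull << MIN_LOG2_OF_CLUMP;

	return ((size + clump - 1) >> MIN_LOG2_OF_CLUMP) << MIN_LOG2_OF_CLUMP;
}

int main(void)
{
	/* 100 -> 65536, 70000 -> 131072 */
	printf("%llu %llu\n",
	       (unsigned long long)round_to_min_clump(100),
	       (unsigned long long)round_to_min_clump(70000));
	return 0;
}
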
67106 + * attr_must_be_resident
67107 + *
67108 + * returns true if attribute must be resident
67109 + */
67110 +static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
67111 +                                        enum ATTR_TYPE type)
67113 +       const struct ATTR_DEF_ENTRY *de;
67115 +       switch (type) {
67116 +       case ATTR_STD:
67117 +       case ATTR_NAME:
67118 +       case ATTR_ID:
67119 +       case ATTR_LABEL:
67120 +       case ATTR_VOL_INFO:
67121 +       case ATTR_ROOT:
67122 +       case ATTR_EA_INFO:
67123 +               return true;
67124 +       default:
67125 +               de = ntfs_query_def(sbi, type);
67126 +               if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
67127 +                       return true;
67128 +               return false;
67129 +       }
67133 + * attr_load_runs
67134 + *
67135 + * load all runs stored in 'attr'
67136 + */
67137 +int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
67138 +                  struct runs_tree *run, const CLST *vcn)
67140 +       int err;
67141 +       CLST svcn = le64_to_cpu(attr->nres.svcn);
67142 +       CLST evcn = le64_to_cpu(attr->nres.evcn);
67143 +       u32 asize;
67144 +       u16 run_off;
67146 +       if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
67147 +               return 0;
67149 +       if (vcn && (evcn < *vcn || *vcn < svcn))
67150 +               return -EINVAL;
67152 +       asize = le32_to_cpu(attr->size);
67153 +       run_off = le16_to_cpu(attr->nres.run_off);
67154 +       err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
67155 +                           vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
67156 +                           asize - run_off);
67157 +       if (err < 0)
67158 +               return err;
67160 +       return 0;
67164 + * run_deallocate_ex
67165 + *
67166 + * Deallocate clusters
67167 + */
67168 +static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
67169 +                            CLST vcn, CLST len, CLST *done, bool trim)
67171 +       int err = 0;
67172 +       CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
67173 +       size_t idx;
67175 +       if (!len)
67176 +               goto out;
67178 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
67179 +failed:
67180 +               run_truncate(run, vcn0);
67181 +               err = -EINVAL;
67182 +               goto out;
67183 +       }
67185 +       for (;;) {
67186 +               if (clen > len)
67187 +                       clen = len;
67189 +               if (!clen) {
67190 +                       err = -EINVAL;
67191 +                       goto out;
67192 +               }
67194 +               if (lcn != SPARSE_LCN) {
67195 +                       mark_as_free_ex(sbi, lcn, clen, trim);
67196 +                       dn += clen;
67197 +               }
67199 +               len -= clen;
67200 +               if (!len)
67201 +                       break;
67203 +               vcn_next = vcn + clen;
67204 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
67205 +                   vcn != vcn_next) {
67206 +                       // save memory - don't load entire run
67207 +                       goto failed;
67208 +               }
67209 +       }
67211 +out:
67212 +       if (done)
67213 +               *done += dn;
67215 +       return err;
67219 + * attr_allocate_clusters
67220 + *
67221 + * find free space, mark it as used and store in 'run'
67222 + */
67223 +int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
67224 +                          CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
67225 +                          enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
67226 +                          CLST *new_lcn)
67228 +       int err;
67229 +       CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
67230 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
67231 +       size_t cnt = run->count;
67233 +       for (;;) {
67234 +               err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
67235 +                                              opt);
67237 +               if (err == -ENOSPC && pre) {
67238 +                       pre = 0;
67239 +                       if (*pre_alloc)
67240 +                               *pre_alloc = 0;
67241 +                       continue;
67242 +               }
67244 +               if (err)
67245 +                       goto out;
67247 +               if (new_lcn && vcn == vcn0)
67248 +                       *new_lcn = lcn;
67250 +               /* Add new fragment into run storage */
67251 +               if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
67252 +                       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
67253 +                       wnd_set_free(wnd, lcn, flen);
67254 +                       up_write(&wnd->rw_lock);
67255 +                       err = -ENOMEM;
67256 +                       goto out;
67257 +               }
67259 +               vcn += flen;
67261 +               if (flen >= len || opt == ALLOCATE_MFT ||
67262 +                   (fr && run->count - cnt >= fr)) {
67263 +                       *alen = vcn - vcn0;
67264 +                       return 0;
67265 +               }
67267 +               len -= flen;
67268 +       }
67270 +out:
67271 +       /* undo */
67272 +       run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
67273 +       run_truncate(run, vcn0);
67275 +       return err;
67279 + * if page is not NULL - it is already contains resident data
67280 + * and locked (called from ni_write_frame)
67281 + */
67282 +int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
67283 +                         struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
67284 +                         u64 new_size, struct runs_tree *run,
67285 +                         struct ATTRIB **ins_attr, struct page *page)
67287 +       struct ntfs_sb_info *sbi;
67288 +       struct ATTRIB *attr_s;
67289 +       struct MFT_REC *rec;
67290 +       u32 used, asize, rsize, aoff, align;
67291 +       bool is_data;
67292 +       CLST len, alen;
67293 +       char *next;
67294 +       int err;
67296 +       if (attr->non_res) {
67297 +               *ins_attr = attr;
67298 +               return 0;
67299 +       }
67301 +       sbi = mi->sbi;
67302 +       rec = mi->mrec;
67303 +       attr_s = NULL;
67304 +       used = le32_to_cpu(rec->used);
67305 +       asize = le32_to_cpu(attr->size);
67306 +       next = Add2Ptr(attr, asize);
67307 +       aoff = PtrOffset(rec, attr);
67308 +       rsize = le32_to_cpu(attr->res.data_size);
67309 +       is_data = attr->type == ATTR_DATA && !attr->name_len;
67311 +       align = sbi->cluster_size;
67312 +       if (is_attr_compressed(attr))
67313 +               align <<= COMPRESSION_UNIT;
67314 +       len = (rsize + align - 1) >> sbi->cluster_bits;
67316 +       run_init(run);
67318 +       /* make a copy of original attribute */
67319 +       attr_s = ntfs_memdup(attr, asize);
67320 +       if (!attr_s) {
67321 +               err = -ENOMEM;
67322 +               goto out;
67323 +       }
67325 +       if (!len) {
67326 +               /* empty resident -> empty nonresident */
67327 +               alen = 0;
67328 +       } else {
67329 +               const char *data = resident_data(attr);
67331 +               err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
67332 +                                            ALLOCATE_DEF, &alen, 0, NULL);
67333 +               if (err)
67334 +                       goto out1;
67336 +               if (!rsize) {
67337 +                       /* empty resident -> non empty nonresident */
67338 +               } else if (!is_data) {
67339 +                       err = ntfs_sb_write_run(sbi, run, 0, data, rsize);
67340 +                       if (err)
67341 +                               goto out2;
67342 +               } else if (!page) {
67343 +                       char *kaddr;
67345 +                       page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
67346 +                       if (!page) {
67347 +                               err = -ENOMEM;
67348 +                               goto out2;
67349 +                       }
67350 +                       kaddr = kmap_atomic(page);
67351 +                       memcpy(kaddr, data, rsize);
67352 +                       memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
67353 +                       kunmap_atomic(kaddr);
67354 +                       flush_dcache_page(page);
67355 +                       SetPageUptodate(page);
67356 +                       set_page_dirty(page);
67357 +                       unlock_page(page);
67358 +                       put_page(page);
67359 +               }
67360 +       }
67362 +       /* remove original attribute */
67363 +       used -= asize;
67364 +       memmove(attr, Add2Ptr(attr, asize), used - aoff);
67365 +       rec->used = cpu_to_le32(used);
67366 +       mi->dirty = true;
67367 +       if (le)
67368 +               al_remove_le(ni, le);
67370 +       err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
67371 +                                   attr_s->name_len, run, 0, alen,
67372 +                                   attr_s->flags, &attr, NULL);
67373 +       if (err)
67374 +               goto out3;
67376 +       ntfs_free(attr_s);
67377 +       attr->nres.data_size = cpu_to_le64(rsize);
67378 +       attr->nres.valid_size = attr->nres.data_size;
67380 +       *ins_attr = attr;
67382 +       if (is_data)
67383 +               ni->ni_flags &= ~NI_FLAG_RESIDENT;
67385 +       /* Resident attribute becomes non resident */
67386 +       return 0;
67388 +out3:
67389 +       attr = Add2Ptr(rec, aoff);
67390 +       memmove(next, attr, used - aoff);
67391 +       memcpy(attr, attr_s, asize);
67392 +       rec->used = cpu_to_le32(used + asize);
67393 +       mi->dirty = true;
67394 +out2:
67395 +       /* undo: do not trim new allocated clusters */
67396 +       run_deallocate(sbi, run, false);
67397 +       run_close(run);
67398 +out1:
67399 +       ntfs_free(attr_s);
67400 +       /*reinsert le*/
67401 +out:
67402 +       return err;
67406 + * attr_set_size_res
67407 + *
67408 + * helper for attr_set_size
67409 + */
67410 +static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
67411 +                            struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
67412 +                            u64 new_size, struct runs_tree *run,
67413 +                            struct ATTRIB **ins_attr)
67414 +{
67415 +       struct ntfs_sb_info *sbi = mi->sbi;
67416 +       struct MFT_REC *rec = mi->mrec;
67417 +       u32 used = le32_to_cpu(rec->used);
67418 +       u32 asize = le32_to_cpu(attr->size);
67419 +       u32 aoff = PtrOffset(rec, attr);
67420 +       u32 rsize = le32_to_cpu(attr->res.data_size);
67421 +       u32 tail = used - aoff - asize;
67422 +       char *next = Add2Ptr(attr, asize);
67423 +       s64 dsize = QuadAlign(new_size) - QuadAlign(rsize);
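+       /*
+        * QuadAlign() rounds up to an 8-byte boundary, e.g. new_size = 10
+        * and rsize = 3 give dsize = 16 - 8 = 8: the attribute grows by
+        * 8 bytes and the new bytes are zeroed below.
+        */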
67425 +       if (dsize < 0) {
67426 +               memmove(next + dsize, next, tail);
67427 +       } else if (dsize > 0) {
67428 +               if (used + dsize > sbi->max_bytes_per_attr)
67429 +                       return attr_make_nonresident(ni, attr, le, mi, new_size,
67430 +                                                    run, ins_attr, NULL);
67432 +               memmove(next + dsize, next, tail);
67433 +               memset(next, 0, dsize);
67434 +       }
67436 +       if (new_size > rsize)
67437 +               memset(Add2Ptr(resident_data(attr), rsize), 0,
67438 +                      new_size - rsize);
67440 +       rec->used = cpu_to_le32(used + dsize);
67441 +       attr->size = cpu_to_le32(asize + dsize);
67442 +       attr->res.data_size = cpu_to_le32(new_size);
67443 +       mi->dirty = true;
67444 +       *ins_attr = attr;
67446 +       return 0;
67447 +}
67449 +/**
67450 + * attr_set_size
67451 + *
67452 + * change the size of attribute
67453 + * Extend:
67454 + *   - sparse/compressed: no allocated clusters
67455 + *   - normal: append allocated and preallocated new clusters
67456 + * Shrink:
67457 + *   - no deallocate if keep_prealloc is set
67458 + */
67459 +int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
67460 +                 const __le16 *name, u8 name_len, struct runs_tree *run,
67461 +                 u64 new_size, const u64 *new_valid, bool keep_prealloc,
67462 +                 struct ATTRIB **ret)
67463 +{
67464 +       int err = 0;
67465 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
67466 +       u8 cluster_bits = sbi->cluster_bits;
67467 +       bool is_mft =
67468 +               ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
67469 +       u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
67470 +       struct ATTRIB *attr = NULL, *attr_b;
67471 +       struct ATTR_LIST_ENTRY *le, *le_b;
67472 +       struct mft_inode *mi, *mi_b;
67473 +       CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
67474 +       CLST next_svcn, pre_alloc = -1, done = 0;
67475 +       bool is_ext;
67476 +       u32 align;
67477 +       struct MFT_REC *rec;
67479 +again:
67480 +       le_b = NULL;
67481 +       attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
67482 +                             &mi_b);
67483 +       if (!attr_b) {
67484 +               err = -ENOENT;
67485 +               goto out;
67486 +       }
67488 +       if (!attr_b->non_res) {
67489 +               err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
67490 +                                       &attr_b);
67491 +               if (err || !attr_b->non_res)
67492 +                       goto out;
67494 +               /* layout of records may be changed, so do a full search */
67495 +               goto again;
67496 +       }
67498 +       is_ext = is_attr_ext(attr_b);
67500 +again_1:
67501 +       align = sbi->cluster_size;
67503 +       if (is_ext) {
67504 +               align <<= attr_b->nres.c_unit;
67505 +               if (is_attr_sparsed(attr_b))
67506 +                       keep_prealloc = false;
67507 +       }
67509 +       old_valid = le64_to_cpu(attr_b->nres.valid_size);
67510 +       old_size = le64_to_cpu(attr_b->nres.data_size);
67511 +       old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
67512 +       old_alen = old_alloc >> cluster_bits;
67514 +       new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
67515 +       new_alen = new_alloc >> cluster_bits;
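+       /*
+        * e.g. plain 4K clusters (align = 4096): new_size = 5000 rounds up
+        * to new_alloc = 8192 and new_alen = 2 clusters.
+        */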
67517 +       if (keep_prealloc && is_ext)
67518 +               keep_prealloc = false;
67520 +       if (keep_prealloc && new_size < old_size) {
67521 +               attr_b->nres.data_size = cpu_to_le64(new_size);
67522 +               mi_b->dirty = true;
67523 +               goto ok;
67524 +       }
67526 +       vcn = old_alen - 1;
67528 +       svcn = le64_to_cpu(attr_b->nres.svcn);
67529 +       evcn = le64_to_cpu(attr_b->nres.evcn);
67531 +       if (svcn <= vcn && vcn <= evcn) {
67532 +               attr = attr_b;
67533 +               le = le_b;
67534 +               mi = mi_b;
67535 +       } else if (!le_b) {
67536 +               err = -EINVAL;
67537 +               goto out;
67538 +       } else {
67539 +               le = le_b;
67540 +               attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
67541 +                                   &mi);
67542 +               if (!attr) {
67543 +                       err = -EINVAL;
67544 +                       goto out;
67545 +               }
67547 +next_le_1:
67548 +               svcn = le64_to_cpu(attr->nres.svcn);
67549 +               evcn = le64_to_cpu(attr->nres.evcn);
67550 +       }
67552 +next_le:
67553 +       rec = mi->mrec;
67555 +       err = attr_load_runs(attr, ni, run, NULL);
67556 +       if (err)
67557 +               goto out;
67559 +       if (new_size > old_size) {
67560 +               CLST to_allocate;
67561 +               size_t free;
67563 +               if (new_alloc <= old_alloc) {
67564 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
67565 +                       mi_b->dirty = true;
67566 +                       goto ok;
67567 +               }
67569 +               to_allocate = new_alen - old_alen;
67570 +add_alloc_in_same_attr_seg:
67571 +               lcn = 0;
67572 +               if (is_mft) {
67573 +                       /* mft allocates clusters from mftzone */
67574 +                       pre_alloc = 0;
67575 +               } else if (is_ext) {
67576 +                       /* no preallocation for sparse/compressed */
67577 +                       pre_alloc = 0;
67578 +               } else if (pre_alloc == -1) {
67579 +                       pre_alloc = 0;
67580 +                       if (type == ATTR_DATA && !name_len &&
67581 +                           sbi->options.prealloc) {
67582 +                               CLST new_alen2 = bytes_to_cluster(
67583 +                                       sbi, get_pre_allocated(new_size));
67584 +                               pre_alloc = new_alen2 - new_alen;
67585 +                       }
67587 +                       /* Get the last lcn to allocate from */
67588 +                       if (old_alen &&
67589 +                           !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
67590 +                               lcn = SPARSE_LCN;
67591 +                       }
67593 +                       if (lcn == SPARSE_LCN)
67594 +                               lcn = 0;
67595 +                       else if (lcn)
67596 +                               lcn += 1;
67598 +                       free = wnd_zeroes(&sbi->used.bitmap);
67599 +                       if (to_allocate > free) {
67600 +                               err = -ENOSPC;
67601 +                               goto out;
67602 +                       }
67604 +                       if (pre_alloc && to_allocate + pre_alloc > free)
67605 +                               pre_alloc = 0;
67606 +               }
67608 +               vcn = old_alen;
67610 +               if (is_ext) {
67611 +                       if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
67612 +                                          false)) {
67613 +                               err = -ENOMEM;
67614 +                               goto out;
67615 +                       }
67616 +                       alen = to_allocate;
67617 +               } else {
67618 +                       /* ~3 bytes per fragment */
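+                       /*
+                        * A packed run entry takes roughly 3 bytes, so the
+                        * free space left in this MFT record bounds how many
+                        * fragments the allocator may return.
+                        */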
67619 +                       err = attr_allocate_clusters(
67620 +                               sbi, run, vcn, lcn, to_allocate, &pre_alloc,
67621 +                               is_mft ? ALLOCATE_MFT : 0, &alen,
67622 +                               is_mft ? 0
67623 +                                      : (sbi->record_size -
67624 +                                         le32_to_cpu(rec->used) + 8) /
67625 +                                                        3 +
67626 +                                                1,
67627 +                               NULL);
67628 +                       if (err)
67629 +                               goto out;
67630 +               }
67632 +               done += alen;
67633 +               vcn += alen;
67634 +               if (to_allocate > alen)
67635 +                       to_allocate -= alen;
67636 +               else
67637 +                       to_allocate = 0;
67639 +pack_runs:
67640 +               err = mi_pack_runs(mi, attr, run, vcn - svcn);
67641 +               if (err)
67642 +                       goto out;
67644 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
67645 +               new_alloc_tmp = (u64)next_svcn << cluster_bits;
67646 +               attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
67647 +               mi_b->dirty = true;
67649 +               if (next_svcn >= vcn && !to_allocate) {
67650 +                       /* Normal way. Update attribute and exit. */
67651 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
67652 +                       goto ok;
67653 +               }
67655 +               /* at least two MFT records to avoid a recursive loop */
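+               /*
+                * Extending the MFT $DATA may itself require new MFT
+                * records, which again extend the MFT; stopping once two
+                * records' worth of clusters are allocated breaks that cycle.
+                */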
67656 +               if (is_mft && next_svcn == vcn &&
67657 +                   ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
67658 +                       new_size = new_alloc_tmp;
67659 +                       attr_b->nres.data_size = attr_b->nres.alloc_size;
67660 +                       goto ok;
67661 +               }
67663 +               if (le32_to_cpu(rec->used) < sbi->record_size) {
67664 +                       old_alen = next_svcn;
67665 +                       evcn = old_alen - 1;
67666 +                       goto add_alloc_in_same_attr_seg;
67667 +               }
67669 +               attr_b->nres.data_size = attr_b->nres.alloc_size;
67670 +               if (new_alloc_tmp < old_valid)
67671 +                       attr_b->nres.valid_size = attr_b->nres.data_size;
67673 +               if (type == ATTR_LIST) {
67674 +                       err = ni_expand_list(ni);
67675 +                       if (err)
67676 +                               goto out;
67677 +                       if (next_svcn < vcn)
67678 +                               goto pack_runs;
67680 +                       /* layout of records is changed */
67681 +                       goto again;
67682 +               }
67684 +               if (!ni->attr_list.size) {
67685 +                       err = ni_create_attr_list(ni);
67686 +                       if (err)
67687 +                               goto out;
67688 +                       /* layout of records is changed */
67689 +               }
67691 +               if (next_svcn >= vcn) {
67692 +                       /* this is mft data, repeat */
67693 +                       goto again;
67694 +               }
67696 +               /* insert new attribute segment */
67697 +               err = ni_insert_nonresident(ni, type, name, name_len, run,
67698 +                                           next_svcn, vcn - next_svcn,
67699 +                                           attr_b->flags, &attr, &mi);
67700 +               if (err)
67701 +                       goto out;
67703 +               if (!is_mft)
67704 +                       run_truncate_head(run, evcn + 1);
67706 +               svcn = le64_to_cpu(attr->nres.svcn);
67707 +               evcn = le64_to_cpu(attr->nres.evcn);
67709 +               le_b = NULL;
67710 +               /* layout of records may be changed */
67711 +               /* find base attribute to update */
67712 +               attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
67713 +                                     NULL, &mi_b);
67714 +               if (!attr_b) {
67715 +                       err = -ENOENT;
67716 +                       goto out;
67717 +               }
67719 +               attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
67720 +               attr_b->nres.data_size = attr_b->nres.alloc_size;
67721 +               attr_b->nres.valid_size = attr_b->nres.alloc_size;
67722 +               mi_b->dirty = true;
67723 +               goto again_1;
67724 +       }
67726 +       if (new_size != old_size ||
67727 +           (new_alloc != old_alloc && !keep_prealloc)) {
67728 +               vcn = max(svcn, new_alen);
67729 +               new_alloc_tmp = (u64)vcn << cluster_bits;
67731 +               alen = 0;
67732 +               err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
67733 +                                       true);
67734 +               if (err)
67735 +                       goto out;
67737 +               run_truncate(run, vcn);
67739 +               if (vcn > svcn) {
67740 +                       err = mi_pack_runs(mi, attr, run, vcn - svcn);
67741 +                       if (err)
67742 +                               goto out;
67743 +               } else if (le && le->vcn) {
67744 +                       u16 le_sz = le16_to_cpu(le->size);
67746 +                       /*
67747 +                        * NOTE: list entries for one attribute are always
67748 +                        * the same size. We deal with the last entry
67749 +                        * (vcn==0), and it is not the first in the entries
67750 +                        * array (the list entry for the std attribute is
67751 +                        * always first), so it is safe to step back.
67752 +                        */
67753 +                       mi_remove_attr(mi, attr);
67755 +                       if (!al_remove_le(ni, le)) {
67756 +                               err = -EINVAL;
67757 +                               goto out;
67758 +                       }
67760 +                       le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
67761 +               } else {
67762 +                       attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
67763 +                       mi->dirty = true;
67764 +               }
67766 +               attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
67768 +               if (vcn == new_alen) {
67769 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
67770 +                       if (new_size < old_valid)
67771 +                               attr_b->nres.valid_size =
67772 +                                       attr_b->nres.data_size;
67773 +               } else {
67774 +                       if (new_alloc_tmp <=
67775 +                           le64_to_cpu(attr_b->nres.data_size))
67776 +                               attr_b->nres.data_size =
67777 +                                       attr_b->nres.alloc_size;
67778 +                       if (new_alloc_tmp <
67779 +                           le64_to_cpu(attr_b->nres.valid_size))
67780 +                               attr_b->nres.valid_size =
67781 +                                       attr_b->nres.alloc_size;
67782 +               }
67784 +               if (is_ext)
67785 +                       le64_sub_cpu(&attr_b->nres.total_size,
67786 +                                    ((u64)alen << cluster_bits));
67788 +               mi_b->dirty = true;
67790 +               if (new_alloc_tmp <= new_alloc)
67791 +                       goto ok;
67793 +               old_size = new_alloc_tmp;
67794 +               vcn = svcn - 1;
67796 +               if (le == le_b) {
67797 +                       attr = attr_b;
67798 +                       mi = mi_b;
67799 +                       evcn = svcn - 1;
67800 +                       svcn = 0;
67801 +                       goto next_le;
67802 +               }
67804 +               if (le->type != type || le->name_len != name_len ||
67805 +                   memcmp(le_name(le), name, name_len * sizeof(short))) {
67806 +                       err = -EINVAL;
67807 +                       goto out;
67808 +               }
67810 +               err = ni_load_mi(ni, le, &mi);
67811 +               if (err)
67812 +                       goto out;
67814 +               attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
67815 +               if (!attr) {
67816 +                       err = -EINVAL;
67817 +                       goto out;
67818 +               }
67819 +               goto next_le_1;
67820 +       }
67822 +ok:
67823 +       if (new_valid) {
67824 +               __le64 valid = cpu_to_le64(min(*new_valid, new_size));
67826 +               if (attr_b->nres.valid_size != valid) {
67827 +                       attr_b->nres.valid_size = valid;
67828 +                       mi_b->dirty = true;
67829 +               }
67830 +       }
67832 +out:
67833 +       if (!err && attr_b && ret)
67834 +               *ret = attr_b;
67836 +       /* update inode_set_bytes */
67837 +       if (!err && ((type == ATTR_DATA && !name_len) ||
67838 +                    (type == ATTR_ALLOC && name == I30_NAME))) {
67839 +               bool dirty = false;
67841 +               if (ni->vfs_inode.i_size != new_size) {
67842 +                       ni->vfs_inode.i_size = new_size;
67843 +                       dirty = true;
67844 +               }
67846 +               if (attr_b && attr_b->non_res) {
67847 +                       new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
67848 +                       if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
67849 +                               inode_set_bytes(&ni->vfs_inode, new_alloc);
67850 +                               dirty = true;
67851 +                       }
67852 +               }
67854 +               if (dirty) {
67855 +                       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
67856 +                       mark_inode_dirty(&ni->vfs_inode);
67857 +               }
67858 +       }
67860 +       return err;
67861 +}
67863 +int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
67864 +                       CLST *len, bool *new)
67865 +{
67866 +       int err = 0;
67867 +       struct runs_tree *run = &ni->file.run;
67868 +       struct ntfs_sb_info *sbi;
67869 +       u8 cluster_bits;
67870 +       struct ATTRIB *attr = NULL, *attr_b;
67871 +       struct ATTR_LIST_ENTRY *le, *le_b;
67872 +       struct mft_inode *mi, *mi_b;
67873 +       CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
67874 +       u64 total_size;
67875 +       u32 clst_per_frame;
67876 +       bool ok;
67878 +       if (new)
67879 +               *new = false;
67881 +       down_read(&ni->file.run_lock);
67882 +       ok = run_lookup_entry(run, vcn, lcn, len, NULL);
67883 +       up_read(&ni->file.run_lock);
67885 +       if (ok && (*lcn != SPARSE_LCN || !new)) {
67886 +               /* normal way */
67887 +               return 0;
67888 +       }
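+       /*
+        * Either vcn is not cached in the run, or it maps to a sparse
+        * cluster and the caller wants it allocated (new != NULL):
+        * retry below under the inode lock and an exclusive run_lock.
+        */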
67890 +       if (!clen)
67891 +               clen = 1;
67893 +       if (ok && clen > *len)
67894 +               clen = *len;
67896 +       sbi = ni->mi.sbi;
67897 +       cluster_bits = sbi->cluster_bits;
67899 +       ni_lock(ni);
67900 +       down_write(&ni->file.run_lock);
67902 +       le_b = NULL;
67903 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
67904 +       if (!attr_b) {
67905 +               err = -ENOENT;
67906 +               goto out;
67907 +       }
67909 +       if (!attr_b->non_res) {
67910 +               *lcn = RESIDENT_LCN;
67911 +               *len = 1;
67912 +               goto out;
67913 +       }
67915 +       asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
67916 +       if (vcn >= asize) {
67917 +               err = -EINVAL;
67918 +               goto out;
67919 +       }
67921 +       clst_per_frame = 1u << attr_b->nres.c_unit;
67922 +       to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
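+       /*
+        * e.g. c_unit = 4 means 16 clusters per compression frame, so a
+        * request of clen = 5 is rounded up to to_alloc = 16.
+        */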
67924 +       if (vcn + to_alloc > asize)
67925 +               to_alloc = asize - vcn;
67927 +       svcn = le64_to_cpu(attr_b->nres.svcn);
67928 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
67930 +       attr = attr_b;
67931 +       le = le_b;
67932 +       mi = mi_b;
67934 +       if (le_b && (vcn < svcn || evcn1 <= vcn)) {
67935 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
67936 +                                   &mi);
67937 +               if (!attr) {
67938 +                       err = -EINVAL;
67939 +                       goto out;
67940 +               }
67941 +               svcn = le64_to_cpu(attr->nres.svcn);
67942 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
67943 +       }
67945 +       err = attr_load_runs(attr, ni, run, NULL);
67946 +       if (err)
67947 +               goto out;
67949 +       if (!ok) {
67950 +               ok = run_lookup_entry(run, vcn, lcn, len, NULL);
67951 +               if (ok && (*lcn != SPARSE_LCN || !new)) {
67952 +                       /* normal way */
67953 +                       err = 0;
67954 +                       goto ok;
67955 +               }
67957 +               if (!ok && !new) {
67958 +                       *len = 0;
67959 +                       err = 0;
67960 +                       goto ok;
67961 +               }
67963 +               if (ok && clen > *len) {
67964 +                       clen = *len;
67965 +                       to_alloc = (clen + clst_per_frame - 1) &
67966 +                                  ~(clst_per_frame - 1);
67967 +               }
67968 +       }
67970 +       if (!is_attr_ext(attr_b)) {
67971 +               err = -EINVAL;
67972 +               goto out;
67973 +       }
67975 +       /* Get the last lcn to allocate from */
67976 +       hint = 0;
67978 +       if (vcn > evcn1) {
67979 +               if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
67980 +                                  false)) {
67981 +                       err = -ENOMEM;
67982 +                       goto out;
67983 +               }
67984 +       } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
67985 +               hint = -1;
67986 +       }
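+       /*
+        * hint + 1 starts the allocation right after the last mapped lcn
+        * (or at 0 when hint is -1); the last argument caps the number of
+        * fragments, at ~3 bytes per packed run entry, to what still fits
+        * in this MFT record.
+        */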
67988 +       err = attr_allocate_clusters(
67989 +               sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
67990 +               (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
67991 +               lcn);
67992 +       if (err)
67993 +               goto out;
67994 +       *new = true;
67996 +       end = vcn + *len;
67998 +       total_size = le64_to_cpu(attr_b->nres.total_size) +
67999 +                    ((u64)*len << cluster_bits);
68001 +repack:
68002 +       err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
68003 +       if (err)
68004 +               goto out;
68006 +       attr_b->nres.total_size = cpu_to_le64(total_size);
68007 +       inode_set_bytes(&ni->vfs_inode, total_size);
68008 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
68010 +       mi_b->dirty = true;
68011 +       mark_inode_dirty(&ni->vfs_inode);
68013 +       /* stored [vcn : next_svcn) from [vcn : end) */
68014 +       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
68016 +       if (end <= evcn1) {
68017 +               if (next_svcn == evcn1) {
68018 +                       /* Normal way. Update attribute and exit. */
68019 +                       goto ok;
68020 +               }
68021 +               /* add new segment [next_svcn : evcn1 - next_svcn) */
68022 +               if (!ni->attr_list.size) {
68023 +                       err = ni_create_attr_list(ni);
68024 +                       if (err)
68025 +                               goto out;
68026 +                       /* layout of records is changed */
68027 +                       le_b = NULL;
68028 +                       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
68029 +                                             0, NULL, &mi_b);
68030 +                       if (!attr_b) {
68031 +                               err = -ENOENT;
68032 +                               goto out;
68033 +                       }
68035 +                       attr = attr_b;
68036 +                       le = le_b;
68037 +                       mi = mi_b;
68038 +                       goto repack;
68039 +               }
68040 +       }
68042 +       svcn = evcn1;
68044 +       /* Estimate next attribute */
68045 +       attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
68047 +       if (attr) {
68048 +               CLST alloc = bytes_to_cluster(
68049 +                       sbi, le64_to_cpu(attr_b->nres.alloc_size));
68050 +               CLST evcn = le64_to_cpu(attr->nres.evcn);
68052 +               if (end < next_svcn)
68053 +                       end = next_svcn;
68054 +               while (end > evcn) {
68055 +                       /* remove segment [svcn : evcn) */
68056 +                       mi_remove_attr(mi, attr);
68058 +                       if (!al_remove_le(ni, le)) {
68059 +                               err = -EINVAL;
68060 +                               goto out;
68061 +                       }
68063 +                       if (evcn + 1 >= alloc) {
68064 +                               /* last attribute segment */
68065 +                               evcn1 = evcn + 1;
68066 +                               goto ins_ext;
68067 +                       }
68069 +                       if (ni_load_mi(ni, le, &mi)) {
68070 +                               attr = NULL;
68071 +                               goto out;
68072 +                       }
68074 +                       attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
68075 +                                           &le->id);
68076 +                       if (!attr) {
68077 +                               err = -EINVAL;
68078 +                               goto out;
68079 +                       }
68080 +                       svcn = le64_to_cpu(attr->nres.svcn);
68081 +                       evcn = le64_to_cpu(attr->nres.evcn);
68082 +               }
68084 +               if (end < svcn)
68085 +                       end = svcn;
68087 +               err = attr_load_runs(attr, ni, run, &end);
68088 +               if (err)
68089 +                       goto out;
68091 +               evcn1 = evcn + 1;
68092 +               attr->nres.svcn = cpu_to_le64(next_svcn);
68093 +               err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
68094 +               if (err)
68095 +                       goto out;
68097 +               le->vcn = cpu_to_le64(next_svcn);
68098 +               ni->attr_list.dirty = true;
68099 +               mi->dirty = true;
68101 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
68102 +       }
68103 +ins_ext:
68104 +       if (evcn1 > next_svcn) {
68105 +               err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
68106 +                                           next_svcn, evcn1 - next_svcn,
68107 +                                           attr_b->flags, &attr, &mi);
68108 +               if (err)
68109 +                       goto out;
68110 +       }
68111 +ok:
68112 +       run_truncate_around(run, vcn);
68113 +out:
68114 +       up_write(&ni->file.run_lock);
68115 +       ni_unlock(ni);
68117 +       return err;
68118 +}
68120 +int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
68121 +{
68122 +       u64 vbo;
68123 +       struct ATTRIB *attr;
68124 +       u32 data_size;
68126 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
68127 +       if (!attr)
68128 +               return -EINVAL;
68130 +       if (attr->non_res)
68131 +               return E_NTFS_NONRESIDENT;
68133 +       vbo = page->index << PAGE_SHIFT;
68134 +       data_size = le32_to_cpu(attr->res.data_size);
68135 +       if (vbo < data_size) {
68136 +               const char *data = resident_data(attr);
68137 +               char *kaddr = kmap_atomic(page);
68138 +               u32 use = data_size - vbo;
68140 +               if (use > PAGE_SIZE)
68141 +                       use = PAGE_SIZE;
68143 +               memcpy(kaddr, data + vbo, use);
68144 +               memset(kaddr + use, 0, PAGE_SIZE - use);
68145 +               kunmap_atomic(kaddr);
68146 +               flush_dcache_page(page);
68147 +               SetPageUptodate(page);
68148 +       } else if (!PageUptodate(page)) {
68149 +               zero_user_segment(page, 0, PAGE_SIZE);
68150 +               SetPageUptodate(page);
68151 +       }
68153 +       return 0;
68154 +}
68156 +int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
68157 +{
68158 +       u64 vbo;
68159 +       struct mft_inode *mi;
68160 +       struct ATTRIB *attr;
68161 +       u32 data_size;
68163 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
68164 +       if (!attr)
68165 +               return -EINVAL;
68167 +       if (attr->non_res) {
68168 +               /*return special error code to check this case*/
68169 +               return E_NTFS_NONRESIDENT;
68170 +       }
68172 +       vbo = page->index << PAGE_SHIFT;
68173 +       data_size = le32_to_cpu(attr->res.data_size);
68174 +       if (vbo < data_size) {
68175 +               char *data = resident_data(attr);
68176 +               char *kaddr = kmap_atomic(page);
68177 +               u32 use = data_size - vbo;
68179 +               if (use > PAGE_SIZE)
68180 +                       use = PAGE_SIZE;
68181 +               memcpy(data + vbo, kaddr, use);
68182 +               kunmap_atomic(kaddr);
68183 +               mi->dirty = true;
68184 +       }
68185 +       ni->i_valid = data_size;
68187 +       return 0;
68188 +}
68190 +/**
68191 + * attr_load_runs_vcn
68192 + *
68193 + * load the runs that contain vcn
68194 + */
68195 +int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
68196 +                      const __le16 *name, u8 name_len, struct runs_tree *run,
68197 +                      CLST vcn)
68198 +{
68199 +       struct ATTRIB *attr;
68200 +       int err;
68201 +       CLST svcn, evcn;
68202 +       u16 ro;
68204 +       attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
68205 +       if (!attr)
68206 +               return -ENOENT;
68208 +       svcn = le64_to_cpu(attr->nres.svcn);
68209 +       evcn = le64_to_cpu(attr->nres.evcn);
68211 +       if (evcn < vcn || vcn < svcn)
68212 +               return -EINVAL;
68214 +       ro = le16_to_cpu(attr->nres.run_off);
68215 +       err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
68216 +                           Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
68217 +       if (err < 0)
68218 +               return err;
68219 +       return 0;
68220 +}
68222 +/**
68223 + * load runs for the given range [from, to)
68224 + */
68225 +int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
68226 +                        const __le16 *name, u8 name_len, struct runs_tree *run,
68227 +                        u64 from, u64 to)
68228 +{
68229 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68230 +       u8 cluster_bits = sbi->cluster_bits;
68231 +       CLST vcn = from >> cluster_bits;
68232 +       CLST vcn_last = (to - 1) >> cluster_bits;
68233 +       CLST lcn, clen;
68234 +       int err;
68236 +       for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
68237 +               if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
68238 +                       err = attr_load_runs_vcn(ni, type, name, name_len, run,
68239 +                                                vcn);
68240 +                       if (err)
68241 +                               return err;
68242 +                       clen = 0; /* next run_lookup_entry(vcn) must succeed */
68243 +               }
68244 +       }
68246 +       return 0;
68247 +}
68249 +#ifdef CONFIG_NTFS3_LZX_XPRESS
68250 +/**
68251 + * attr_wof_frame_info
68252 + *
68253 + * read header of xpress/lzx file to get info about frame
68254 + */
68255 +int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
68256 +                       struct runs_tree *run, u64 frame, u64 frames,
68257 +                       u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
68258 +{
68259 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68260 +       u64 vbo[2], off[2], wof_size;
68261 +       u32 voff;
68262 +       u8 bytes_per_off;
68263 +       char *addr;
68264 +       struct page *page;
68265 +       int i, err;
68266 +       __le32 *off32;
68267 +       __le64 *off64;
68269 +       if (ni->vfs_inode.i_size < 0x100000000ull) {
68270 +               /* file starts with array of 32 bit offsets */
68271 +               bytes_per_off = sizeof(__le32);
68272 +               vbo[1] = frame << 2;
68273 +               *vbo_data = frames << 2;
68274 +       } else {
68275 +               /* file starts with array of 64 bit offsets */
68276 +               bytes_per_off = sizeof(__le64);
68277 +               vbo[1] = frame << 3;
68278 +               *vbo_data = frames << 3;
68279 +       }
68281 +       /*
68282 +        * read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts
68283 +        * read 4/8 bytes at [vbo] == offset where compressed frame ends
68284 +        */
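+       /*
+        * e.g. with 32-bit entries and frame == 3: vbo[1] = 12, so
+        * off[0] = offsets[2] (where the frame starts) and
+        * off[1] = offsets[3] (where it ends); frame 0 starts at offset 0.
+        */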
68285 +       if (!attr->non_res) {
68286 +               if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
68287 +                       ntfs_inode_err(&ni->vfs_inode, "is corrupted");
68288 +                       return -EINVAL;
68289 +               }
68290 +               addr = resident_data(attr);
68292 +               if (bytes_per_off == sizeof(__le32)) {
68293 +                       off32 = Add2Ptr(addr, vbo[1]);
68294 +                       off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
68295 +                       off[1] = le32_to_cpu(off32[0]);
68296 +               } else {
68297 +                       off64 = Add2Ptr(addr, vbo[1]);
68298 +                       off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
68299 +                       off[1] = le64_to_cpu(off64[0]);
68300 +               }
68302 +               *vbo_data += off[0];
68303 +               *ondisk_size = off[1] - off[0];
68304 +               return 0;
68305 +       }
68307 +       wof_size = le64_to_cpu(attr->nres.data_size);
68308 +       down_write(&ni->file.run_lock);
68309 +       page = ni->file.offs_page;
68310 +       if (!page) {
68311 +               page = alloc_page(GFP_KERNEL);
68312 +               if (!page) {
68313 +                       err = -ENOMEM;
68314 +                       goto out;
68315 +               }
68316 +               page->index = -1;
68317 +               ni->file.offs_page = page;
68318 +       }
68319 +       lock_page(page);
68320 +       addr = page_address(page);
68322 +       if (vbo[1]) {
68323 +               voff = vbo[1] & (PAGE_SIZE - 1);
68324 +               vbo[0] = vbo[1] - bytes_per_off;
68325 +               i = 0;
68326 +       } else {
68327 +               voff = 0;
68328 +               vbo[0] = 0;
68329 +               off[0] = 0;
68330 +               i = 1;
68331 +       }
68333 +       do {
68334 +               pgoff_t index = vbo[i] >> PAGE_SHIFT;
68336 +               if (index != page->index) {
68337 +                       u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
68338 +                       u64 to = min(from + PAGE_SIZE, wof_size);
68340 +                       err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
68341 +                                                  ARRAY_SIZE(WOF_NAME), run,
68342 +                                                  from, to);
68343 +                       if (err)
68344 +                               goto out1;
68346 +                       err = ntfs_bio_pages(sbi, run, &page, 1, from,
68347 +                                            to - from, REQ_OP_READ);
68348 +                       if (err) {
68349 +                               page->index = -1;
68350 +                               goto out1;
68351 +                       }
68352 +                       page->index = index;
68353 +               }
68355 +               if (i) {
68356 +                       if (bytes_per_off == sizeof(__le32)) {
68357 +                               off32 = Add2Ptr(addr, voff);
68358 +                               off[1] = le32_to_cpu(*off32);
68359 +                       } else {
68360 +                               off64 = Add2Ptr(addr, voff);
68361 +                               off[1] = le64_to_cpu(*off64);
68362 +                       }
68363 +               } else if (!voff) {
68364 +                       if (bytes_per_off == sizeof(__le32)) {
68365 +                               off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
68366 +                               off[0] = le32_to_cpu(*off32);
68367 +                       } else {
68368 +                               off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
68369 +                               off[0] = le64_to_cpu(*off64);
68370 +                       }
68371 +               } else {
68372 +                       /* two values in one page */
68373 +                       if (bytes_per_off == sizeof(__le32)) {
68374 +                               off32 = Add2Ptr(addr, voff);
68375 +                               off[0] = le32_to_cpu(off32[-1]);
68376 +                               off[1] = le32_to_cpu(off32[0]);
68377 +                       } else {
68378 +                               off64 = Add2Ptr(addr, voff);
68379 +                               off[0] = le64_to_cpu(off64[-1]);
68380 +                               off[1] = le64_to_cpu(off64[0]);
68381 +                       }
68382 +                       break;
68383 +               }
68384 +       } while (++i < 2);
68386 +       *vbo_data += off[0];
68387 +       *ondisk_size = off[1] - off[0];
68389 +out1:
68390 +       unlock_page(page);
68391 +out:
68392 +       up_write(&ni->file.run_lock);
68393 +       return err;
68394 +}
68395 +#endif
68397 +/**
68398 + * attr_is_frame_compressed
68399 + *
68400 + * This function is used to detect a compressed frame
68401 + */
68402 +int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
68403 +                            CLST frame, CLST *clst_data)
68404 +{
68405 +       int err;
68406 +       u32 clst_frame;
68407 +       CLST clen, lcn, vcn, alen, slen, vcn_next;
68408 +       size_t idx;
68409 +       struct runs_tree *run;
68411 +       *clst_data = 0;
68413 +       if (!is_attr_compressed(attr))
68414 +               return 0;
68416 +       if (!attr->non_res)
68417 +               return 0;
68419 +       clst_frame = 1u << attr->nres.c_unit;
68420 +       vcn = frame * clst_frame;
68421 +       run = &ni->file.run;
68423 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
68424 +               err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
68425 +                                        attr->name_len, run, vcn);
68426 +               if (err)
68427 +                       return err;
68429 +               if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
68430 +                       return -EINVAL;
68431 +       }
68433 +       if (lcn == SPARSE_LCN) {
68434 +               /* sparse frame */
68435 +               return 0;
68436 +       }
68438 +       if (clen >= clst_frame) {
68439 +               /*
68440 +                * The frame is not compressed because
68441 +                * it does not contain any sparse clusters
68442 +                */
68443 +               *clst_data = clst_frame;
68444 +               return 0;
68445 +       }
68447 +       alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
68448 +       slen = 0;
68449 +       *clst_data = clen;
68451 +       /*
68452 +        * The frame is compressed if *clst_data + slen >= clst_frame
68453 +        * Check next fragments
68454 +        */
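+       /*
+        * e.g. clst_frame = 16: ten data clusters followed by six sparse
+        * ones mean a compressed frame with *clst_data = 10, while sixteen
+        * data clusters with no sparse tail mean an uncompressed frame.
+        */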
68455 +       while ((vcn += clen) < alen) {
68456 +               vcn_next = vcn;
68458 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
68459 +                   vcn_next != vcn) {
68460 +                       err = attr_load_runs_vcn(ni, attr->type,
68461 +                                                attr_name(attr),
68462 +                                                attr->name_len, run, vcn_next);
68463 +                       if (err)
68464 +                               return err;
68465 +                       vcn = vcn_next;
68467 +                       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
68468 +                               return -EINVAL;
68469 +               }
68471 +               if (lcn == SPARSE_LCN) {
68472 +                       slen += clen;
68473 +               } else {
68474 +                       if (slen) {
68475 +                               /*
68476 +                                * data_clusters + sparse_clusters are
68477 +                                * not enough to fill the frame
68478 +                                */
68479 +                               return -EINVAL;
68480 +                       }
68481 +                       *clst_data += clen;
68482 +               }
68484 +               if (*clst_data + slen >= clst_frame) {
68485 +                       if (!slen) {
68486 +                               /*
68487 +                                * There are no sparse clusters in this frame,
68488 +                                * so it is not compressed
68489 +                                */
68490 +                               *clst_data = clst_frame;
68491 +                       } else {
68492 +                       /* frame is compressed */
68493 +                       }
68494 +                       break;
68495 +               }
68496 +       }
68498 +       return 0;
68499 +}
68501 +/**
68502 + * attr_allocate_frame
68503 + *
68504 + * allocate/free clusters for 'frame'
68505 + * assumed: down_write(&ni->file.run_lock);
68506 + */
68507 +int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
68508 +                       u64 new_valid)
68509 +{
68510 +       int err = 0;
68511 +       struct runs_tree *run = &ni->file.run;
68512 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68513 +       struct ATTRIB *attr = NULL, *attr_b;
68514 +       struct ATTR_LIST_ENTRY *le, *le_b;
68515 +       struct mft_inode *mi, *mi_b;
68516 +       CLST svcn, evcn1, next_svcn, lcn, len;
68517 +       CLST vcn, end, clst_data;
68518 +       u64 total_size, valid_size, data_size;
68520 +       le_b = NULL;
68521 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
68522 +       if (!attr_b)
68523 +               return -ENOENT;
68525 +       if (!is_attr_ext(attr_b))
68526 +               return -EINVAL;
68528 +       vcn = frame << NTFS_LZNT_CUNIT;
68529 +       total_size = le64_to_cpu(attr_b->nres.total_size);
68531 +       svcn = le64_to_cpu(attr_b->nres.svcn);
68532 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
68533 +       data_size = le64_to_cpu(attr_b->nres.data_size);
68535 +       if (svcn <= vcn && vcn < evcn1) {
68536 +               attr = attr_b;
68537 +               le = le_b;
68538 +               mi = mi_b;
68539 +       } else if (!le_b) {
68540 +               err = -EINVAL;
68541 +               goto out;
68542 +       } else {
68543 +               le = le_b;
68544 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
68545 +                                   &mi);
68546 +               if (!attr) {
68547 +                       err = -EINVAL;
68548 +                       goto out;
68549 +               }
68550 +               svcn = le64_to_cpu(attr->nres.svcn);
68551 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
68552 +       }
68554 +       err = attr_load_runs(attr, ni, run, NULL);
68555 +       if (err)
68556 +               goto out;
68558 +       err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
68559 +       if (err)
68560 +               goto out;
68562 +       total_size -= (u64)clst_data << sbi->cluster_bits;
68564 +       len = bytes_to_cluster(sbi, compr_size);
68566 +       if (len == clst_data)
68567 +               goto out;
68569 +       if (len < clst_data) {
68570 +               err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
68571 +                                       NULL, true);
68572 +               if (err)
68573 +                       goto out;
68575 +               if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
68576 +                                  false)) {
68577 +                       err = -ENOMEM;
68578 +                       goto out;
68579 +               }
68580 +               end = vcn + clst_data;
68581 +               /* run contains updated range [vcn + len : end) */
68582 +       } else {
68583 +               CLST alen, hint = 0;
68584 +               /* Get the last lcn to allocate from */
68585 +               if (vcn + clst_data &&
68586 +                   !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
68587 +                                     NULL)) {
68588 +                       hint = -1;
68589 +               }
68591 +               err = attr_allocate_clusters(sbi, run, vcn + clst_data,
68592 +                                            hint + 1, len - clst_data, NULL, 0,
68593 +                                            &alen, 0, &lcn);
68594 +               if (err)
68595 +                       goto out;
68597 +               end = vcn + len;
68598 +               /* run contains updated range [vcn + clst_data : end) */
68599 +       }
68601 +       total_size += (u64)len << sbi->cluster_bits;
68603 +repack:
68604 +       err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
68605 +       if (err)
68606 +               goto out;
68608 +       attr_b->nres.total_size = cpu_to_le64(total_size);
68609 +       inode_set_bytes(&ni->vfs_inode, total_size);
68611 +       mi_b->dirty = true;
68612 +       mark_inode_dirty(&ni->vfs_inode);
68614 +       /* stored [vcn : next_svcn) from [vcn : end) */
68615 +       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
68617 +       if (end <= evcn1) {
68618 +               if (next_svcn == evcn1) {
68619 +                       /* Normal way. Update attribute and exit. */
68620 +                       goto ok;
68621 +               }
68622 +               /* add new segment [next_svcn : evcn1 - next_svcn) */
68623 +               if (!ni->attr_list.size) {
68624 +                       err = ni_create_attr_list(ni);
68625 +                       if (err)
68626 +                               goto out;
68627 +                       /* layout of records is changed */
68628 +                       le_b = NULL;
68629 +                       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
68630 +                                             0, NULL, &mi_b);
68631 +                       if (!attr_b) {
68632 +                               err = -ENOENT;
68633 +                               goto out;
68634 +                       }
68636 +                       attr = attr_b;
68637 +                       le = le_b;
68638 +                       mi = mi_b;
68639 +                       goto repack;
68640 +               }
68641 +       }
68643 +       svcn = evcn1;
68645 +       /* Estimate next attribute */
68646 +       attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
68648 +       if (attr) {
68649 +               CLST alloc = bytes_to_cluster(
68650 +                       sbi, le64_to_cpu(attr_b->nres.alloc_size));
68651 +               CLST evcn = le64_to_cpu(attr->nres.evcn);
68653 +               if (end < next_svcn)
68654 +                       end = next_svcn;
68655 +               while (end > evcn) {
68656 +                       /* remove segment [svcn : evcn) */
68657 +                       mi_remove_attr(mi, attr);
68659 +                       if (!al_remove_le(ni, le)) {
68660 +                               err = -EINVAL;
68661 +                               goto out;
68662 +                       }
68664 +                       if (evcn + 1 >= alloc) {
68665 +                               /* last attribute segment */
68666 +                               evcn1 = evcn + 1;
68667 +                               goto ins_ext;
68668 +                       }
68670 +                       if (ni_load_mi(ni, le, &mi)) {
68671 +                               attr = NULL;
68672 +                               goto out;
68673 +                       }
68675 +                       attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
68676 +                                           &le->id);
68677 +                       if (!attr) {
68678 +                               err = -EINVAL;
68679 +                               goto out;
68680 +                       }
68681 +                       svcn = le64_to_cpu(attr->nres.svcn);
68682 +                       evcn = le64_to_cpu(attr->nres.evcn);
68683 +               }
68685 +               if (end < svcn)
68686 +                       end = svcn;
68688 +               err = attr_load_runs(attr, ni, run, &end);
68689 +               if (err)
68690 +                       goto out;
68692 +               evcn1 = evcn + 1;
68693 +               attr->nres.svcn = cpu_to_le64(next_svcn);
68694 +               err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
68695 +               if (err)
68696 +                       goto out;
68698 +               le->vcn = cpu_to_le64(next_svcn);
68699 +               ni->attr_list.dirty = true;
68700 +               mi->dirty = true;
68702 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
68703 +       }
68704 +ins_ext:
68705 +       if (evcn1 > next_svcn) {
68706 +               err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
68707 +                                           next_svcn, evcn1 - next_svcn,
68708 +                                           attr_b->flags, &attr, &mi);
68709 +               if (err)
68710 +                       goto out;
68711 +       }
68712 +ok:
68713 +       run_truncate_around(run, vcn);
68714 +out:
68715 +       if (new_valid > data_size)
68716 +               new_valid = data_size;
68718 +       valid_size = le64_to_cpu(attr_b->nres.valid_size);
68719 +       if (new_valid != valid_size) {
68720 +               attr_b->nres.valid_size = cpu_to_le64(new_valid);
68721 +               mi_b->dirty = true;
68722 +       }
68724 +       return err;
68725 +}
68727 +/* Collapse range in file */
68728 +int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
68729 +{
68730 +       int err = 0;
68731 +       struct runs_tree *run = &ni->file.run;
68732 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68733 +       struct ATTRIB *attr = NULL, *attr_b;
68734 +       struct ATTR_LIST_ENTRY *le, *le_b;
68735 +       struct mft_inode *mi, *mi_b;
68736 +       CLST svcn, evcn1, len, dealloc, alen;
68737 +       CLST vcn, end;
68738 +       u64 valid_size, data_size, alloc_size, total_size;
68739 +       u32 mask;
68740 +       __le16 a_flags;
68742 +       if (!bytes)
68743 +               return 0;
68745 +       le_b = NULL;
68746 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
68747 +       if (!attr_b)
68748 +               return -ENOENT;
68750 +       if (!attr_b->non_res) {
68751 +               /* Attribute is resident. Nothing to do? */
68752 +               return 0;
68753 +       }
68755 +       data_size = le64_to_cpu(attr_b->nres.data_size);
68756 +       alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
68757 +       a_flags = attr_b->flags;
68759 +       if (is_attr_ext(attr_b)) {
68760 +               total_size = le64_to_cpu(attr_b->nres.total_size);
68761 +               mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
68762 +       } else {
68763 +               total_size = alloc_size;
68764 +               mask = sbi->cluster_mask;
68765 +       }
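+       /*
+        * e.g. 4K clusters with c_unit = 4 give mask = 64K - 1, so vbo and
+        * bytes must cover whole 64K compression frames; normal attributes
+        * only need plain cluster (4K) alignment.
+        */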
68767 +       if ((vbo & mask) || (bytes & mask)) {
68768 +               /* only cluster-aligned ranges can be collapsed */
68769 +               return -EINVAL;
68770 +       }
68772 +       if (vbo > data_size)
68773 +               return -EINVAL;
68775 +       down_write(&ni->file.run_lock);
68777 +       if (vbo + bytes >= data_size) {
68778 +               u64 new_valid = min(ni->i_valid, vbo);
68780 +               /* Simply truncate the file at 'vbo' */
68781 +               truncate_setsize(&ni->vfs_inode, vbo);
68782 +               err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
68783 +                                   &new_valid, true, NULL);
68785 +               if (!err && new_valid < ni->i_valid)
68786 +                       ni->i_valid = new_valid;
68788 +               goto out;
68789 +       }
68791 +       /*
68792 +        * Enumerate all attribute segments and collapse
68793 +        */
68794 +       alen = alloc_size >> sbi->cluster_bits;
68795 +       vcn = vbo >> sbi->cluster_bits;
68796 +       len = bytes >> sbi->cluster_bits;
68797 +       end = vcn + len;
68798 +       dealloc = 0;
68800 +       svcn = le64_to_cpu(attr_b->nres.svcn);
68801 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
68803 +       if (svcn <= vcn && vcn < evcn1) {
68804 +               attr = attr_b;
68805 +               le = le_b;
68806 +               mi = mi_b;
68807 +       } else if (!le_b) {
68808 +               err = -EINVAL;
68809 +               goto out;
68810 +       } else {
68811 +               le = le_b;
68812 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
68813 +                                   &mi);
68814 +               if (!attr) {
68815 +                       err = -EINVAL;
68816 +                       goto out;
68817 +               }
68819 +               svcn = le64_to_cpu(attr->nres.svcn);
68820 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
68821 +       }
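+       /*
+        * Each segment below is handled in one of three ways: it lies
+        * entirely after the removed range (shift svcn/evcn left by len),
+        * it partially overlaps (deallocate and collapse the overlap, then
+        * repack), or it lies wholly inside (deallocate and delete the
+        * whole segment).
+        */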
68823 +       for (;;) {
68824 +               if (svcn >= end) {
68825 +                       /* shift vcn */
68826 +                       attr->nres.svcn = cpu_to_le64(svcn - len);
68827 +                       attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
68828 +                       if (le) {
68829 +                               le->vcn = attr->nres.svcn;
68830 +                               ni->attr_list.dirty = true;
68831 +                       }
68832 +                       mi->dirty = true;
68833 +               } else if (svcn < vcn || end < evcn1) {
68834 +                       CLST vcn1, eat, next_svcn;
68836 +                       /* collapse a part of this attribute segment */
68837 +                       err = attr_load_runs(attr, ni, run, &svcn);
68838 +                       if (err)
68839 +                               goto out;
68840 +                       vcn1 = max(vcn, svcn);
68841 +                       eat = min(end, evcn1) - vcn1;
68843 +                       err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
68844 +                                               true);
68845 +                       if (err)
68846 +                               goto out;
68848 +                       if (!run_collapse_range(run, vcn1, eat)) {
68849 +                               err = -ENOMEM;
68850 +                               goto out;
68851 +                       }
68853 +                       if (svcn >= vcn) {
68854 +                               /* shift vcn */
68855 +                               attr->nres.svcn = cpu_to_le64(vcn);
68856 +                               if (le) {
68857 +                                       le->vcn = attr->nres.svcn;
68858 +                                       ni->attr_list.dirty = true;
68859 +                               }
68860 +                       }
68862 +                       err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
68863 +                       if (err)
68864 +                               goto out;
68866 +                       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
68867 +                       if (next_svcn + eat < evcn1) {
68868 +                               err = ni_insert_nonresident(
68869 +                                       ni, ATTR_DATA, NULL, 0, run, next_svcn,
68870 +                                       evcn1 - eat - next_svcn, a_flags, &attr,
68871 +                                       &mi);
68872 +                               if (err)
68873 +                                       goto out;
68875 +                               /* layout of records may be changed */
68876 +                               attr_b = NULL;
68877 +                               le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
68878 +                                               &next_svcn);
68879 +                               if (!le) {
68880 +                                       err = -EINVAL;
68881 +                                       goto out;
68882 +                               }
68883 +                       }
68885 +                       /* free all allocated memory */
68886 +                       run_truncate(run, 0);
68887 +               } else {
68888 +                       u16 le_sz;
68889 +                       u16 roff = le16_to_cpu(attr->nres.run_off);
68891 +                       /* run == 1 means unpack and deallocate */
68892 +                       run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
68893 +                                     evcn1 - 1, svcn, Add2Ptr(attr, roff),
68894 +                                     le32_to_cpu(attr->size) - roff);
68896 +                       /* delete this attribute segment */
68897 +                       mi_remove_attr(mi, attr);
68898 +                       if (!le)
68899 +                               break;
68901 +                       le_sz = le16_to_cpu(le->size);
68902 +                       if (!al_remove_le(ni, le)) {
68903 +                               err = -EINVAL;
68904 +                               goto out;
68905 +                       }
68907 +                       if (evcn1 >= alen)
68908 +                               break;
68910 +                       if (!svcn) {
68911 +                               /* Load next record that contains this attribute */
68912 +                               if (ni_load_mi(ni, le, &mi)) {
68913 +                                       err = -EINVAL;
68914 +                                       goto out;
68915 +                               }
68917 +                               /* Look for required attribute */
68918 +                               attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
68919 +                                                   0, &le->id);
68920 +                               if (!attr) {
68921 +                                       err = -EINVAL;
68922 +                                       goto out;
68923 +                               }
68924 +                               goto next_attr;
68925 +                       }
68926 +                       le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
68927 +               }
68929 +               if (evcn1 >= alen)
68930 +                       break;
68932 +               attr = ni_enum_attr_ex(ni, attr, &le, &mi);
68933 +               if (!attr) {
68934 +                       err = -EINVAL;
68935 +                       goto out;
68936 +               }
68938 +next_attr:
68939 +               svcn = le64_to_cpu(attr->nres.svcn);
68940 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
68941 +       }
68943 +       if (!attr_b) {
68944 +               le_b = NULL;
68945 +               attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
68946 +                                     &mi_b);
68947 +               if (!attr_b) {
68948 +                       err = -ENOENT;
68949 +                       goto out;
68950 +               }
68951 +       }
68953 +       data_size -= bytes;
68954 +       valid_size = ni->i_valid;
68955 +       if (vbo + bytes <= valid_size)
68956 +               valid_size -= bytes;
68957 +       else if (vbo < valid_size)
68958 +               valid_size = vbo;
68960 +       attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
68961 +       attr_b->nres.data_size = cpu_to_le64(data_size);
68962 +       attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
68963 +       total_size -= (u64)dealloc << sbi->cluster_bits;
68964 +       if (is_attr_ext(attr_b))
68965 +               attr_b->nres.total_size = cpu_to_le64(total_size);
68966 +       mi_b->dirty = true;
68968 +       /* update inode size */
68969 +       ni->i_valid = valid_size;
68970 +       ni->vfs_inode.i_size = data_size;
68971 +       inode_set_bytes(&ni->vfs_inode, total_size);
68972 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
68973 +       mark_inode_dirty(&ni->vfs_inode);
68975 +out:
68976 +       up_write(&ni->file.run_lock);
68977 +       if (err)
68978 +               make_bad_inode(&ni->vfs_inode);
68980 +       return err;
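
The `svcn >= end` branch above shifts whole segments down past the collapsed range. A toy userspace model of that VCN arithmetic (a sketch with hypothetical numbers, not part of the patch):

	#include <assert.h>
	#include <stddef.h>

	/* After removing 'len' clusters at 'vcn', a segment that starts at
	 * or beyond the removed range moves down by 'len'. */
	int main(void)
	{
		size_t vcn = 8, len = 4;	/* collapsed range [8, 12) */
		size_t svcn = 12, evcn1 = 20;	/* a following segment [12, 20) */

		if (svcn >= vcn + len) {	/* segment lies past the hole */
			svcn -= len;
			evcn1 -= len;
		}
		assert(svcn == 8 && evcn1 == 16);
		return 0;
	}
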
68983 +/* not for normal files: only sparse/compressed attributes are supported */
68984 +int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes)
68986 +       int err = 0;
68987 +       struct runs_tree *run = &ni->file.run;
68988 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68989 +       struct ATTRIB *attr = NULL, *attr_b;
68990 +       struct ATTR_LIST_ENTRY *le, *le_b;
68991 +       struct mft_inode *mi, *mi_b;
68992 +       CLST svcn, evcn1, vcn, len, end, alen, dealloc;
68993 +       u64 total_size, alloc_size;
68995 +       if (!bytes)
68996 +               return 0;
68998 +       le_b = NULL;
68999 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
69000 +       if (!attr_b)
69001 +               return -ENOENT;
69003 +       if (!attr_b->non_res) {
69004 +               u32 data_size = le32_to_cpu(attr_b->res.data_size);
69005 +               u32 from, to;
69007 +               if (vbo > data_size)
69008 +                       return 0;
69010 +               from = vbo;
69011 +               to = (vbo + bytes) < data_size ? (vbo + bytes) : data_size;
69012 +               memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
69013 +               return 0;
69014 +       }
69016 +       /* TODO: add support for normal files too */
69017 +       if (!is_attr_ext(attr_b))
69018 +               return -EOPNOTSUPP;
69020 +       alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
69021 +       total_size = le64_to_cpu(attr_b->nres.total_size);
69023 +       if (vbo >= alloc_size) {
69024 +               // NOTE: it is allowed
69025 +               return 0;
69026 +       }
69028 +       if (vbo + bytes > alloc_size)
69029 +               bytes = alloc_size - vbo;
69031 +       down_write(&ni->file.run_lock);
69032 +       /*
69033 +        * Enumerate all attribute segments and punch hole where necessary
69034 +        */
69035 +       alen = alloc_size >> sbi->cluster_bits;
69036 +       vcn = vbo >> sbi->cluster_bits;
69037 +       len = bytes >> sbi->cluster_bits;
69038 +       end = vcn + len;
69039 +       dealloc = 0;
69041 +       svcn = le64_to_cpu(attr_b->nres.svcn);
69042 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
69044 +       if (svcn <= vcn && vcn < evcn1) {
69045 +               attr = attr_b;
69046 +               le = le_b;
69047 +               mi = mi_b;
69048 +       } else if (!le_b) {
69049 +               err = -EINVAL;
69050 +               goto out;
69051 +       } else {
69052 +               le = le_b;
69053 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
69054 +                                   &mi);
69055 +               if (!attr) {
69056 +                       err = -EINVAL;
69057 +                       goto out;
69058 +               }
69060 +               svcn = le64_to_cpu(attr->nres.svcn);
69061 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
69062 +       }
69064 +       while (svcn < end) {
69065 +               CLST vcn1, zero, dealloc2;
69067 +               err = attr_load_runs(attr, ni, run, &svcn);
69068 +               if (err)
69069 +                       goto out;
69070 +               vcn1 = max(vcn, svcn);
69071 +               zero = min(end, evcn1) - vcn1;
69073 +               dealloc2 = dealloc;
69074 +               err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
69075 +               if (err)
69076 +                       goto out;
69078 +               if (dealloc2 == dealloc) {
69079 +                       /* looks like the required range is already sparse */
69080 +               } else {
69081 +                       if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
69082 +                                          false)) {
69083 +                               err = -ENOMEM;
69084 +                               goto out;
69085 +                       }
69087 +                       err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
69088 +                       if (err)
69089 +                               goto out;
69090 +               }
69091 +               /* free all allocated memory */
69092 +               run_truncate(run, 0);
69094 +               if (evcn1 >= alen)
69095 +                       break;
69097 +               attr = ni_enum_attr_ex(ni, attr, &le, &mi);
69098 +               if (!attr) {
69099 +                       err = -EINVAL;
69100 +                       goto out;
69101 +               }
69103 +               svcn = le64_to_cpu(attr->nres.svcn);
69104 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
69105 +       }
69107 +       total_size -= (u64)dealloc << sbi->cluster_bits;
69108 +       attr_b->nres.total_size = cpu_to_le64(total_size);
69109 +       mi_b->dirty = true;
69111 +       /* update inode size */
69112 +       inode_set_bytes(&ni->vfs_inode, total_size);
69113 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
69114 +       mark_inode_dirty(&ni->vfs_inode);
69116 +out:
69117 +       up_write(&ni->file.run_lock);
69118 +       if (err)
69119 +               make_bad_inode(&ni->vfs_inode);
69121 +       return err;
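
A quick userspace check (a sketch with hypothetical sizes) of the byte-to-cluster conversion attr_punch_hole performs before its loop: byte offsets shift right by cluster_bits, truncating toward zero:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t cluster_bits = 12;		/* 4 KiB clusters */
		uint64_t vbo = 8192, bytes = 16384;	/* cluster-aligned hole */

		uint64_t vcn = vbo >> cluster_bits;	/* first cluster: 2 */
		uint64_t len = bytes >> cluster_bits;	/* clusters to punch: 4 */
		uint64_t end = vcn + len;		/* one past the last: 6 */

		assert(vcn == 2 && len == 4 && end == 6);
		return 0;
	}
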
69123 diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
69124 new file mode 100644
69125 index 000000000000..ea561361b576
69126 --- /dev/null
69127 +++ b/fs/ntfs3/attrlist.c
69128 @@ -0,0 +1,456 @@
69129 +// SPDX-License-Identifier: GPL-2.0
69131 + *
69132 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
69133 + *
69134 + */
69136 +#include <linux/blkdev.h>
69137 +#include <linux/buffer_head.h>
69138 +#include <linux/fs.h>
69139 +#include <linux/nls.h>
69141 +#include "debug.h"
69142 +#include "ntfs.h"
69143 +#include "ntfs_fs.h"
69145 +/* Returns true if le is valid */
69146 +static inline bool al_is_valid_le(const struct ntfs_inode *ni,
69147 +                                 struct ATTR_LIST_ENTRY *le)
69149 +       if (!le || !ni->attr_list.le || !ni->attr_list.size)
69150 +               return false;
69152 +       return PtrOffset(ni->attr_list.le, le) + le16_to_cpu(le->size) <=
69153 +              ni->attr_list.size;
69156 +void al_destroy(struct ntfs_inode *ni)
69158 +       run_close(&ni->attr_list.run);
69159 +       ntfs_free(ni->attr_list.le);
69160 +       ni->attr_list.le = NULL;
69161 +       ni->attr_list.size = 0;
69162 +       ni->attr_list.dirty = false;
69166 + * ntfs_load_attr_list
69167 + *
69168 + * This function makes sure that the ATTRIB list, if present,
69169 + * has been properly set up.
69170 + */
69171 +int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
69173 +       int err;
69174 +       size_t lsize;
69175 +       void *le = NULL;
69177 +       if (ni->attr_list.size)
69178 +               return 0;
69180 +       if (!attr->non_res) {
69181 +               lsize = le32_to_cpu(attr->res.data_size);
69182 +               le = ntfs_malloc(al_aligned(lsize));
69183 +               if (!le) {
69184 +                       err = -ENOMEM;
69185 +                       goto out;
69186 +               }
69187 +               memcpy(le, resident_data(attr), lsize);
69188 +       } else if (attr->nres.svcn) {
69189 +               err = -EINVAL;
69190 +               goto out;
69191 +       } else {
69192 +               u16 run_off = le16_to_cpu(attr->nres.run_off);
69194 +               lsize = le64_to_cpu(attr->nres.data_size);
69196 +               run_init(&ni->attr_list.run);
69198 +               err = run_unpack_ex(&ni->attr_list.run, ni->mi.sbi, ni->mi.rno,
69199 +                                   0, le64_to_cpu(attr->nres.evcn), 0,
69200 +                                   Add2Ptr(attr, run_off),
69201 +                                   le32_to_cpu(attr->size) - run_off);
69202 +               if (err < 0)
69203 +                       goto out;
69205 +               le = ntfs_malloc(al_aligned(lsize));
69206 +               if (!le) {
69207 +                       err = -ENOMEM;
69208 +                       goto out;
69209 +               }
69211 +               err = ntfs_read_run_nb(ni->mi.sbi, &ni->attr_list.run, 0, le,
69212 +                                      lsize, NULL);
69213 +               if (err)
69214 +                       goto out;
69215 +       }
69217 +       ni->attr_list.size = lsize;
69218 +       ni->attr_list.le = le;
69220 +       return 0;
69222 +out:
69223 +       ni->attr_list.le = le;
69224 +       al_destroy(ni);
69226 +       return err;
69230 + * al_enumerate
69231 + *
69232 + * Returns the next list entry 'le';
69233 + * if 'le' is NULL, returns the first 'le'
69234 + */
69235 +struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
69236 +                                    struct ATTR_LIST_ENTRY *le)
69238 +       size_t off;
69239 +       u16 sz;
69241 +       if (!le) {
69242 +               le = ni->attr_list.le;
69243 +       } else {
69244 +               sz = le16_to_cpu(le->size);
69245 +               if (sz < sizeof(struct ATTR_LIST_ENTRY)) {
69246 +                       /* Impossible, because we should never have returned such a 'le' */
69247 +                       return NULL;
69248 +               }
69249 +               le = Add2Ptr(le, sz);
69250 +       }
69252 +       /* Check boundary */
69253 +       off = PtrOffset(ni->attr_list.le, le);
69254 +       if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) {
69255 +               // The regular end of list
69256 +               return NULL;
69257 +       }
69259 +       sz = le16_to_cpu(le->size);
69261 +       /* Check 'le' for errors */
69262 +       if (sz < sizeof(struct ATTR_LIST_ENTRY) ||
69263 +           off + sz > ni->attr_list.size ||
69264 +           sz < le->name_off + le->name_len * sizeof(short)) {
69265 +               return NULL;
69266 +       }
69268 +       return le;
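
A minimal iteration sketch built on the contract above (assuming 'ni' is a loaded ntfs_inode): passing NULL yields the first entry, and each subsequent call yields the next one until NULL marks the end of the list:

	struct ATTR_LIST_ENTRY *le = NULL;

	while ((le = al_enumerate(ni, le))) {
		/* inspect le->type, le->vcn, le->ref, ... */
	}
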
69272 + * al_find_le
69273 + *
69274 + * finds the first 'le' in the list which matches type, name and vcn
69275 + * Returns NULL if not found
69276 + */
69277 +struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni,
69278 +                                  struct ATTR_LIST_ENTRY *le,
69279 +                                  const struct ATTRIB *attr)
69281 +       CLST svcn = attr_svcn(attr);
69283 +       return al_find_ex(ni, le, attr->type, attr_name(attr), attr->name_len,
69284 +                         &svcn);
69288 + * al_find_ex
69289 + *
69290 + * finds the first 'le' in the list which matches type, name and vcn
69291 + * Returns NULL if not found
69292 + */
69293 +struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
69294 +                                  struct ATTR_LIST_ENTRY *le,
69295 +                                  enum ATTR_TYPE type, const __le16 *name,
69296 +                                  u8 name_len, const CLST *vcn)
69298 +       struct ATTR_LIST_ENTRY *ret = NULL;
69299 +       u32 type_in = le32_to_cpu(type);
69301 +       while ((le = al_enumerate(ni, le))) {
69302 +               u64 le_vcn;
69303 +               int diff = le32_to_cpu(le->type) - type_in;
69305 +               /* List entries are sorted by type, name and vcn */
69306 +               if (diff < 0)
69307 +                       continue;
69309 +               if (diff > 0)
69310 +                       return ret;
69312 +               if (le->name_len != name_len)
69313 +                       continue;
69315 +               le_vcn = le64_to_cpu(le->vcn);
69316 +               if (!le_vcn) {
69317 +                       /*
69318 +                        * compare entry names only for entry with vcn == 0
69319 +                        */
69320 +                       diff = ntfs_cmp_names(le_name(le), name_len, name,
69321 +                                             name_len, ni->mi.sbi->upcase,
69322 +                                             true);
69323 +                       if (diff < 0)
69324 +                               continue;
69326 +                       if (diff > 0)
69327 +                               return ret;
69328 +               }
69330 +               if (!vcn)
69331 +                       return le;
69333 +               if (*vcn == le_vcn)
69334 +                       return le;
69336 +               if (*vcn < le_vcn)
69337 +                       return ret;
69339 +               ret = le;
69340 +       }
69342 +       return ret;
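
Because entries are sorted by (type, name, vcn) and 'ret' trails the last candidate with a smaller vcn, al_find_ex can locate the list entry whose segment covers a given vcn. A hedged usage sketch for the unnamed $DATA attribute (the vcn value is hypothetical):

	CLST vcn = 42;
	struct ATTR_LIST_ENTRY *le =
		al_find_ex(ni, NULL, ATTR_DATA, NULL, 0, &vcn);
	/* le is the exact match, or the closest entry with a lower vcn */
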
69346 + * al_find_le_to_insert
69347 + *
69348 + * finds the first list entry which matches type, name and vcn
69349 + */
69350 +static struct ATTR_LIST_ENTRY *al_find_le_to_insert(struct ntfs_inode *ni,
69351 +                                                   enum ATTR_TYPE type,
69352 +                                                   const __le16 *name,
69353 +                                                   u8 name_len, CLST vcn)
69355 +       struct ATTR_LIST_ENTRY *le = NULL, *prev;
69356 +       u32 type_in = le32_to_cpu(type);
69358 +       /* List entries are sorted by type, name, vcn */
69359 +       while ((le = al_enumerate(ni, prev = le))) {
69360 +               int diff = le32_to_cpu(le->type) - type_in;
69362 +               if (diff < 0)
69363 +                       continue;
69365 +               if (diff > 0)
69366 +                       return le;
69368 +               if (!le->vcn) {
69369 +                       /*
69370 +                        * compare entry names only for entry with vcn == 0
69371 +                        */
69372 +                       diff = ntfs_cmp_names(le_name(le), le->name_len, name,
69373 +                                             name_len, ni->mi.sbi->upcase,
69374 +                                             true);
69375 +                       if (diff < 0)
69376 +                               continue;
69378 +                       if (diff > 0)
69379 +                               return le;
69380 +               }
69382 +               if (le64_to_cpu(le->vcn) >= vcn)
69383 +                       return le;
69384 +       }
69386 +       return prev ? Add2Ptr(prev, le16_to_cpu(prev->size)) : ni->attr_list.le;
69390 + * al_add_le
69391 + *
69392 + * adds an "attribute list entry" to the list.
69393 + */
69394 +int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
69395 +             u8 name_len, CLST svcn, __le16 id, const struct MFT_REF *ref,
69396 +             struct ATTR_LIST_ENTRY **new_le)
69398 +       int err;
69399 +       struct ATTRIB *attr;
69400 +       struct ATTR_LIST_ENTRY *le;
69401 +       size_t off;
69402 +       u16 sz;
69403 +       size_t asize, new_asize;
69404 +       u64 new_size;
69405 +       typeof(ni->attr_list) *al = &ni->attr_list;
69407 +       /*
69408 +        * Compute the size of the new 'le'
69409 +        */
69410 +       sz = le_size(name_len);
69411 +       new_size = al->size + sz;
69412 +       asize = al_aligned(al->size);
69413 +       new_asize = al_aligned(new_size);
69415 +       /* Scan forward to the point at which the new 'le' should be inserted. */
69416 +       le = al_find_le_to_insert(ni, type, name, name_len, svcn);
69417 +       off = PtrOffset(al->le, le);
69419 +       if (new_size > asize) {
69420 +               void *ptr = ntfs_malloc(new_asize);
69422 +               if (!ptr)
69423 +                       return -ENOMEM;
69425 +               memcpy(ptr, al->le, off);
69426 +               memcpy(Add2Ptr(ptr, off + sz), le, al->size - off);
69427 +               le = Add2Ptr(ptr, off);
69428 +               ntfs_free(al->le);
69429 +               al->le = ptr;
69430 +       } else {
69431 +               memmove(Add2Ptr(le, sz), le, al->size - off);
69432 +       }
69434 +       al->size = new_size;
69436 +       le->type = type;
69437 +       le->size = cpu_to_le16(sz);
69438 +       le->name_len = name_len;
69439 +       le->name_off = offsetof(struct ATTR_LIST_ENTRY, name);
69440 +       le->vcn = cpu_to_le64(svcn);
69441 +       le->ref = *ref;
69442 +       le->id = id;
69443 +       memcpy(le->name, name, sizeof(short) * name_len);
69445 +       al->dirty = true;
69447 +       err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, new_size,
69448 +                           &new_size, true, &attr);
69449 +       if (err)
69450 +               return err;
69452 +       if (attr && attr->non_res) {
69453 +               err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
69454 +                                       al->size);
69455 +               if (err)
69456 +                       return err;
69457 +       }
69459 +       al->dirty = false;
69460 +       *new_le = le;
69462 +       return 0;
69466 + * al_remove_le
69467 + *
69468 + * removes 'le' from attribute list
69469 + */
69470 +bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le)
69472 +       u16 size;
69473 +       size_t off;
69474 +       typeof(ni->attr_list) *al = &ni->attr_list;
69476 +       if (!al_is_valid_le(ni, le))
69477 +               return false;
69479 +       /* Save on stack the size of 'le' */
69480 +       size = le16_to_cpu(le->size);
69481 +       off = PtrOffset(al->le, le);
69483 +       memmove(le, Add2Ptr(le, size), al->size - (off + size));
69485 +       al->size -= size;
69486 +       al->dirty = true;
69488 +       return true;
69492 + * al_delete_le
69493 + *
69494 + * deletes from the list the first 'le' which matches its parameters.
69495 + */
69496 +bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
69497 +                 const __le16 *name, size_t name_len,
69498 +                 const struct MFT_REF *ref)
69500 +       u16 size;
69501 +       struct ATTR_LIST_ENTRY *le;
69502 +       size_t off;
69503 +       typeof(ni->attr_list) *al = &ni->attr_list;
69505 +       /* Scan forward to the first 'le' that matches the input */
69506 +       le = al_find_ex(ni, NULL, type, name, name_len, &vcn);
69507 +       if (!le)
69508 +               return false;
69510 +       off = PtrOffset(al->le, le);
69512 +next:
69513 +       if (off >= al->size)
69514 +               return false;
69515 +       if (le->type != type)
69516 +               return false;
69517 +       if (le->name_len != name_len)
69518 +               return false;
69519 +       if (name_len && ntfs_cmp_names(le_name(le), name_len, name, name_len,
69520 +                                      ni->mi.sbi->upcase, true))
69521 +               return false;
69522 +       if (le64_to_cpu(le->vcn) != vcn)
69523 +               return false;
69525 +       /*
69526 +        * The caller specified a segment reference, so we have to
69527 +        * scan through the matching entries until we find that segment
69528 +        * reference or we run out of matching entries.
69529 +        */
69530 +       if (ref && memcmp(ref, &le->ref, sizeof(*ref))) {
69531 +               off += le16_to_cpu(le->size);
69532 +               le = Add2Ptr(al->le, off);
69533 +               goto next;
69534 +       }
69536 +       /* Save on stack the size of 'le' */
69537 +       size = le16_to_cpu(le->size);
69538 +       /* Delete 'le'. */
69539 +       memmove(le, Add2Ptr(le, size), al->size - (off + size));
69541 +       al->size -= size;
69542 +       al->dirty = true;
69544 +       return true;
69548 + * al_update
69549 + */
69550 +int al_update(struct ntfs_inode *ni)
69552 +       int err;
69553 +       struct ATTRIB *attr;
69554 +       typeof(ni->attr_list) *al = &ni->attr_list;
69556 +       if (!al->dirty || !al->size)
69557 +               return 0;
69559 +       /*
69560 +        * the attribute list is grown on demand in al_add_le;
69561 +        * it is shrunk here
69562 +        */
69563 +       err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, al->size, NULL,
69564 +                           false, &attr);
69565 +       if (err)
69566 +               goto out;
69568 +       if (!attr->non_res) {
69569 +               memcpy(resident_data(attr), al->le, al->size);
69570 +       } else {
69571 +               err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
69572 +                                       al->size);
69573 +               if (err)
69574 +                       goto out;
69576 +               attr->nres.valid_size = attr->nres.data_size;
69577 +       }
69579 +       ni->mi.dirty = true;
69580 +       al->dirty = false;
69582 +out:
69583 +       return err;
69585 diff --git a/fs/ntfs3/bitfunc.c b/fs/ntfs3/bitfunc.c
69586 new file mode 100644
69587 index 000000000000..2de5faef2721
69588 --- /dev/null
69589 +++ b/fs/ntfs3/bitfunc.c
69590 @@ -0,0 +1,135 @@
69591 +// SPDX-License-Identifier: GPL-2.0
69593 + *
69594 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
69595 + *
69596 + */
69597 +#include <linux/blkdev.h>
69598 +#include <linux/buffer_head.h>
69599 +#include <linux/fs.h>
69600 +#include <linux/nls.h>
69602 +#include "debug.h"
69603 +#include "ntfs.h"
69604 +#include "ntfs_fs.h"
69606 +#define BITS_IN_SIZE_T (sizeof(size_t) * 8)
69609 + * fill_mask[i] - first i bits are '1', i = 0,1,2,3,4,5,6,7,8
69610 + * fill_mask[i] = 0xFF >> (8-i)
69611 + */
69612 +static const u8 fill_mask[] = { 0x00, 0x01, 0x03, 0x07, 0x0F,
69613 +                               0x1F, 0x3F, 0x7F, 0xFF };
69616 + * zero_mask[i] - first i bits are '0', i = 0,1,2,3,4,5,6,7,8
69617 + * zero_mask[i] = 0xFF << i
69618 + */
69619 +static const u8 zero_mask[] = { 0xFF, 0xFE, 0xFC, 0xF8, 0xF0,
69620 +                               0xE0, 0xC0, 0x80, 0x00 };
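
These two tables combine so that fill_mask[pos + nbits] & zero_mask[pos] selects exactly bits [pos, pos + nbits) of a byte, with bit 0 the least significant. A small userspace check of that identity (a sketch, not part of the patch):

	#include <assert.h>

	int main(void)
	{
		unsigned pos, nbits, i;

		for (pos = 0; pos <= 8; pos++) {
			for (nbits = 0; pos + nbits <= 8; nbits++) {
				unsigned char m = (0xFF >> (8 - (pos + nbits))) &
						  (0xFF << pos);
				unsigned char want = 0;

				for (i = pos; i < pos + nbits; i++)
					want |= 1u << i;
				assert(m == want);
			}
		}
		return 0;
	}
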
69623 + * are_bits_clear
69624 + *
69625 + * Returns true if all bits [bit, bit+nbits) are zeros "0"
69626 + */
69627 +bool are_bits_clear(const ulong *lmap, size_t bit, size_t nbits)
69629 +       size_t pos = bit & 7;
69630 +       const u8 *map = (u8 *)lmap + (bit >> 3);
69632 +       if (pos) {
69633 +               if (8 - pos >= nbits)
69634 +                       return !nbits || !(*map & fill_mask[pos + nbits] &
69635 +                                          zero_mask[pos]);
69637 +               if (*map++ & zero_mask[pos])
69638 +                       return false;
69639 +               nbits -= 8 - pos;
69640 +       }
69642 +       pos = ((size_t)map) & (sizeof(size_t) - 1);
69643 +       if (pos) {
69644 +               pos = sizeof(size_t) - pos;
69645 +               if (nbits >= pos * 8) {
69646 +                       for (nbits -= pos * 8; pos; pos--, map++) {
69647 +                               if (*map)
69648 +                                       return false;
69649 +                       }
69650 +               }
69651 +       }
69653 +       for (pos = nbits / BITS_IN_SIZE_T; pos; pos--, map += sizeof(size_t)) {
69654 +               if (*((size_t *)map))
69655 +                       return false;
69656 +       }
69658 +       for (pos = (nbits % BITS_IN_SIZE_T) >> 3; pos; pos--, map++) {
69659 +               if (*map)
69660 +                       return false;
69661 +       }
69663 +       pos = nbits & 7;
69664 +       if (pos && (*map & fill_mask[pos]))
69665 +               return false;
69667 +       // All bits are zero
69668 +       return true;
69672 + * are_bits_set
69673 + *
69674 + * Returns true if all bits [bit, bit+nbits) are ones "1"
69675 + */
69676 +bool are_bits_set(const ulong *lmap, size_t bit, size_t nbits)
69678 +       u8 mask;
69679 +       size_t pos = bit & 7;
69680 +       const u8 *map = (u8 *)lmap + (bit >> 3);
69682 +       if (pos) {
69683 +               if (8 - pos >= nbits) {
69684 +                       mask = fill_mask[pos + nbits] & zero_mask[pos];
69685 +                       return !nbits || (*map & mask) == mask;
69686 +               }
69688 +               mask = zero_mask[pos];
69689 +               if ((*map++ & mask) != mask)
69690 +                       return false;
69691 +               nbits -= 8 - pos;
69692 +       }
69694 +       pos = ((size_t)map) & (sizeof(size_t) - 1);
69695 +       if (pos) {
69696 +               pos = sizeof(size_t) - pos;
69697 +               if (nbits >= pos * 8) {
69698 +                       for (nbits -= pos * 8; pos; pos--, map++) {
69699 +                               if (*map != 0xFF)
69700 +                                       return false;
69701 +                       }
69702 +               }
69703 +       }
69705 +       for (pos = nbits / BITS_IN_SIZE_T; pos; pos--, map += sizeof(size_t)) {
69706 +               if (*((size_t *)map) != MINUS_ONE_T)
69707 +                       return false;
69708 +       }
69710 +       for (pos = (nbits % BITS_IN_SIZE_T) >> 3; pos; pos--, map++) {
69711 +               if (*map != 0xFF)
69712 +                       return false;
69713 +       }
69715 +       pos = nbits & 7;
69716 +       if (pos) {
69717 +               u8 mask = fill_mask[pos];
69719 +               if ((*map & mask) != mask)
69720 +                       return false;
69721 +       }
69723 +       // All bits are ones
69724 +       return true;
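
For cross-checking the word-at-a-time fast paths above, a naive byte-walking reference (userspace sketch) using the same LSB-first bit order:

	#include <stdbool.h>
	#include <stddef.h>

	static bool are_bits_set_ref(const unsigned char *map, size_t bit,
				     size_t nbits)
	{
		size_t i;

		for (i = bit; i < bit + nbits; i++)
			if (!(map[i >> 3] & (1u << (i & 7))))
				return false;
		return true;	/* every bit in [bit, bit + nbits) is one */
	}
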
69726 diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
69727 new file mode 100644
69728 index 000000000000..32aab0031221
69729 --- /dev/null
69730 +++ b/fs/ntfs3/bitmap.c
69731 @@ -0,0 +1,1519 @@
69732 +// SPDX-License-Identifier: GPL-2.0
69734 + *
69735 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
69736 + *
69737 + * This code builds two trees of free clusters extents.
69738 + * Trees are sorted by start of extent and by length of extent.
69739 + * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in trees.
69740 + * In extreme case code reads on-disk bitmap to find free clusters
69741 + *
69742 + */
69744 +#include <linux/blkdev.h>
69745 +#include <linux/buffer_head.h>
69746 +#include <linux/fs.h>
69747 +#include <linux/nls.h>
69749 +#include "debug.h"
69750 +#include "ntfs.h"
69751 +#include "ntfs_fs.h"
69754 + * Maximum number of extents in tree.
69755 + */
69756 +#define NTFS_MAX_WND_EXTENTS (32u * 1024u)
69758 +struct rb_node_key {
69759 +       struct rb_node node;
69760 +       size_t key;
69764 + * Tree is sorted by start (key)
69765 + */
69766 +struct e_node {
69767 +       struct rb_node_key start; /* Tree sorted by start */
69768 +       struct rb_node_key count; /* Tree sorted by len */
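
Each free extent is thus indexed twice; starting from a node of either tree, the owning e_node is recovered with rb_entry(), as the code below does:

	struct e_node *e = rb_entry(n, struct e_node, start.node);
	size_t start = e->start.key;	/* first free cluster of the extent */
	size_t count = e->count.key;	/* extent length in clusters */
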
69771 +static int wnd_rescan(struct wnd_bitmap *wnd);
69772 +static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
69773 +static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);
69775 +static struct kmem_cache *ntfs_enode_cachep;
69777 +int __init ntfs3_init_bitmap(void)
69779 +       ntfs_enode_cachep =
69780 +               kmem_cache_create("ntfs3_enode_cache", sizeof(struct e_node), 0,
69781 +                                 SLAB_RECLAIM_ACCOUNT, NULL);
69782 +       return ntfs_enode_cachep ? 0 : -ENOMEM;
69785 +void ntfs3_exit_bitmap(void)
69787 +       kmem_cache_destroy(ntfs_enode_cachep);
69790 +static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
69792 +       return i + 1 == wnd->nwnd ? wnd->bits_last : wnd->sb->s_blocksize * 8;
69796 + * b_pos + b_len - biggest free fragment found so far
69797 + * Scan range [wpos, wend) of window 'buf'
69798 + * Returns -1 if not found
69799 + */
69800 +static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
69801 +                      size_t to_alloc, size_t *prev_tail, size_t *b_pos,
69802 +                      size_t *b_len)
69804 +       while (wpos < wend) {
69805 +               size_t free_len;
69806 +               u32 free_bits, end;
69807 +               u32 used = find_next_zero_bit(buf, wend, wpos);
69809 +               if (used >= wend) {
69810 +                       if (*b_len < *prev_tail) {
69811 +                               *b_pos = wbit - *prev_tail;
69812 +                               *b_len = *prev_tail;
69813 +                       }
69815 +                       *prev_tail = 0;
69816 +                       return -1;
69817 +               }
69819 +               if (used > wpos) {
69820 +                       wpos = used;
69821 +                       if (*b_len < *prev_tail) {
69822 +                               *b_pos = wbit - *prev_tail;
69823 +                               *b_len = *prev_tail;
69824 +                       }
69826 +                       *prev_tail = 0;
69827 +               }
69829 +               /*
69830 +                * Now we have a fragment [wpos, wend) starting with 0
69831 +                */
69832 +               end = wpos + to_alloc - *prev_tail;
69833 +               free_bits = find_next_bit(buf, min(end, wend), wpos);
69835 +               free_len = *prev_tail + free_bits - wpos;
69837 +               if (*b_len < free_len) {
69838 +                       *b_pos = wbit + wpos - *prev_tail;
69839 +                       *b_len = free_len;
69840 +               }
69842 +               if (free_len >= to_alloc)
69843 +                       return wbit + wpos - *prev_tail;
69845 +               if (free_bits >= wend) {
69846 +                       *prev_tail += free_bits - wpos;
69847 +                       return -1;
69848 +               }
69850 +               wpos = free_bits + 1;
69852 +               *prev_tail = 0;
69853 +       }
69855 +       return -1;
69859 + * wnd_close
69860 + *
69861 + * Frees all resources
69862 + */
69863 +void wnd_close(struct wnd_bitmap *wnd)
69865 +       struct rb_node *node, *next;
69867 +       ntfs_free(wnd->free_bits);
69868 +       run_close(&wnd->run);
69870 +       node = rb_first(&wnd->start_tree);
69872 +       while (node) {
69873 +               next = rb_next(node);
69874 +               rb_erase(node, &wnd->start_tree);
69875 +               kmem_cache_free(ntfs_enode_cachep,
69876 +                               rb_entry(node, struct e_node, start.node));
69877 +               node = next;
69878 +       }
69881 +static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
69883 +       struct rb_node **p = &root->rb_node;
69884 +       struct rb_node *r = NULL;
69886 +       while (*p) {
69887 +               struct rb_node_key *k;
69889 +               k = rb_entry(*p, struct rb_node_key, node);
69890 +               if (v < k->key) {
69891 +                       p = &(*p)->rb_left;
69892 +               } else if (v > k->key) {
69893 +                       r = &k->node;
69894 +                       p = &(*p)->rb_right;
69895 +               } else {
69896 +                       return &k->node;
69897 +               }
69898 +       }
69900 +       return r;
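
rb_lookup() is a floor-style search: it returns the node whose key equals v, otherwise the rightmost node with key < v, otherwise NULL. Its typical use in this file (sketch) is finding the free extent at or before a given bit:

	struct rb_node *n = rb_lookup(&wnd->start_tree, bit);

	if (n) {
		struct e_node *e = rb_entry(n, struct e_node, start.node);
		/* e->start.key <= bit at this point */
	}
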
69904 + * rb_insert_count
69905 + *
69906 + * Helper function to insert into the special 'count' tree
69907 + */
69908 +static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
69910 +       struct rb_node **p = &root->rb_node;
69911 +       struct rb_node *parent = NULL;
69912 +       size_t e_ckey = e->count.key;
69913 +       size_t e_skey = e->start.key;
69915 +       while (*p) {
69916 +               struct e_node *k =
69917 +                       rb_entry(parent = *p, struct e_node, count.node);
69919 +               if (e_ckey > k->count.key) {
69920 +                       p = &(*p)->rb_left;
69921 +               } else if (e_ckey < k->count.key) {
69922 +                       p = &(*p)->rb_right;
69923 +               } else if (e_skey < k->start.key) {
69924 +                       p = &(*p)->rb_left;
69925 +               } else if (e_skey > k->start.key) {
69926 +                       p = &(*p)->rb_right;
69927 +               } else {
69928 +                       WARN_ON(1);
69929 +                       return false;
69930 +               }
69931 +       }
69933 +       rb_link_node(&e->count.node, parent, p);
69934 +       rb_insert_color(&e->count.node, root);
69935 +       return true;
69939 + * rb_insert_start
69940 + *
69941 + * Helper function to insert into the special 'start' tree
69942 + */
69943 +static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
69945 +       struct rb_node **p = &root->rb_node;
69946 +       struct rb_node *parent = NULL;
69947 +       size_t e_skey = e->start.key;
69949 +       while (*p) {
69950 +               struct e_node *k;
69952 +               parent = *p;
69954 +               k = rb_entry(parent, struct e_node, start.node);
69955 +               if (e_skey < k->start.key) {
69956 +                       p = &(*p)->rb_left;
69957 +               } else if (e_skey > k->start.key) {
69958 +                       p = &(*p)->rb_right;
69959 +               } else {
69960 +                       WARN_ON(1);
69961 +                       return false;
69962 +               }
69963 +       }
69965 +       rb_link_node(&e->start.node, parent, p);
69966 +       rb_insert_color(&e->start.node, root);
69967 +       return true;
69971 + * wnd_add_free_ext
69972 + *
69973 + * adds a new extent of free space
69974 + * build == true when building the tree
69975 + */
69976 +static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
69977 +                            bool build)
69979 +       struct e_node *e, *e0 = NULL;
69980 +       size_t ib, end_in = bit + len;
69981 +       struct rb_node *n;
69983 +       if (build) {
69984 +               /* Use extent_min to filter too short extents */
69985 +               if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
69986 +                   len <= wnd->extent_min) {
69987 +                       wnd->uptodated = -1;
69988 +                       return;
69989 +               }
69990 +       } else {
69991 +               /* Try to find extent before 'bit' */
69992 +               n = rb_lookup(&wnd->start_tree, bit);
69994 +               if (!n) {
69995 +                       n = rb_first(&wnd->start_tree);
69996 +               } else {
69997 +                       e = rb_entry(n, struct e_node, start.node);
69998 +                       n = rb_next(n);
69999 +                       if (e->start.key + e->count.key == bit) {
70000 +                               /* Remove left */
70001 +                               bit = e->start.key;
70002 +                               len += e->count.key;
70003 +                               rb_erase(&e->start.node, &wnd->start_tree);
70004 +                               rb_erase(&e->count.node, &wnd->count_tree);
70005 +                               wnd->count -= 1;
70006 +                               e0 = e;
70007 +                       }
70008 +               }
70010 +               while (n) {
70011 +                       size_t next_end;
70013 +                       e = rb_entry(n, struct e_node, start.node);
70014 +                       next_end = e->start.key + e->count.key;
70015 +                       if (e->start.key > end_in)
70016 +                               break;
70018 +                       /* Remove right */
70019 +                       n = rb_next(n);
70020 +                       len += next_end - end_in;
70021 +                       end_in = next_end;
70022 +                       rb_erase(&e->start.node, &wnd->start_tree);
70023 +                       rb_erase(&e->count.node, &wnd->count_tree);
70024 +                       wnd->count -= 1;
70026 +                       if (!e0)
70027 +                               e0 = e;
70028 +                       else
70029 +                               kmem_cache_free(ntfs_enode_cachep, e);
70030 +               }
70032 +               if (wnd->uptodated != 1) {
70033 +                       /* Check bits before 'bit' */
70034 +                       ib = wnd->zone_bit == wnd->zone_end ||
70035 +                                            bit < wnd->zone_end
70036 +                                    ? 0
70037 +                                    : wnd->zone_end;
70039 +                       while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
70040 +                               bit -= 1;
70041 +                               len += 1;
70042 +                       }
70044 +                       /* Check bits after 'end_in' */
70045 +                       ib = wnd->zone_bit == wnd->zone_end ||
70046 +                                            end_in > wnd->zone_bit
70047 +                                    ? wnd->nbits
70048 +                                    : wnd->zone_bit;
70050 +                       while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
70051 +                               end_in += 1;
70052 +                               len += 1;
70053 +                       }
70054 +               }
70055 +       }
70056 +       /* Insert new fragment */
70057 +       if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
70058 +               if (e0)
70059 +                       kmem_cache_free(ntfs_enode_cachep, e0);
70061 +               wnd->uptodated = -1;
70063 +               /* Compare with smallest fragment */
70064 +               n = rb_last(&wnd->count_tree);
70065 +               e = rb_entry(n, struct e_node, count.node);
70066 +               if (len <= e->count.key)
70067 +                       goto out; /* Do not insert small fragments */
70069 +               if (build) {
70070 +                       struct e_node *e2;
70072 +                       n = rb_prev(n);
70073 +                       e2 = rb_entry(n, struct e_node, count.node);
70074 +                       /* smallest fragment will be 'e2->count.key' */
70075 +                       wnd->extent_min = e2->count.key;
70076 +               }
70078 +               /* Replace smallest fragment by new one */
70079 +               rb_erase(&e->start.node, &wnd->start_tree);
70080 +               rb_erase(&e->count.node, &wnd->count_tree);
70081 +               wnd->count -= 1;
70082 +       } else {
70083 +               e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
70084 +               if (!e) {
70085 +                       wnd->uptodated = -1;
70086 +                       goto out;
70087 +               }
70089 +               if (build && len <= wnd->extent_min)
70090 +                       wnd->extent_min = len;
70091 +       }
70092 +       e->start.key = bit;
70093 +       e->count.key = len;
70094 +       if (len > wnd->extent_max)
70095 +               wnd->extent_max = len;
70097 +       rb_insert_start(&wnd->start_tree, e);
70098 +       rb_insert_count(&wnd->count_tree, e);
70099 +       wnd->count += 1;
70101 +out:;
70105 + * wnd_remove_free_ext
70106 + *
70107 + * removes a run from the cached free space
70108 + */
70109 +static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
70111 +       struct rb_node *n, *n3;
70112 +       struct e_node *e, *e3;
70113 +       size_t end_in = bit + len;
70114 +       size_t end3, end, new_key, new_len, max_new_len;
70116 +       /* Try to find extent before 'bit' */
70117 +       n = rb_lookup(&wnd->start_tree, bit);
70119 +       if (!n)
70120 +               return;
70122 +       e = rb_entry(n, struct e_node, start.node);
70123 +       end = e->start.key + e->count.key;
70125 +       new_key = new_len = 0;
70126 +       len = e->count.key;
70128 +       /* Range [bit,end_in) must be inside 'e' or outside 'e' and 'n' */
70129 +       if (e->start.key > bit)
70130 +               ;
70131 +       else if (end_in <= end) {
70132 +               /* Range [bit,end_in) inside 'e' */
70133 +               new_key = end_in;
70134 +               new_len = end - end_in;
70135 +               len = bit - e->start.key;
70136 +       } else if (bit > end) {
70137 +               bool bmax = false;
70139 +               n3 = rb_next(n);
70141 +               while (n3) {
70142 +                       e3 = rb_entry(n3, struct e_node, start.node);
70143 +                       if (e3->start.key >= end_in)
70144 +                               break;
70146 +                       if (e3->count.key == wnd->extent_max)
70147 +                               bmax = true;
70149 +                       end3 = e3->start.key + e3->count.key;
70150 +                       if (end3 > end_in) {
70151 +                               e3->start.key = end_in;
70152 +                               rb_erase(&e3->count.node, &wnd->count_tree);
70153 +                               e3->count.key = end3 - end_in;
70154 +                               rb_insert_count(&wnd->count_tree, e3);
70155 +                               break;
70156 +                       }
70158 +                       n3 = rb_next(n3);
70159 +                       rb_erase(&e3->start.node, &wnd->start_tree);
70160 +                       rb_erase(&e3->count.node, &wnd->count_tree);
70161 +                       wnd->count -= 1;
70162 +                       kmem_cache_free(ntfs_enode_cachep, e3);
70163 +               }
70164 +               if (!bmax)
70165 +                       return;
70166 +               n3 = rb_first(&wnd->count_tree);
70167 +               wnd->extent_max =
70168 +                       n3 ? rb_entry(n3, struct e_node, count.node)->count.key
70169 +                          : 0;
70170 +               return;
70171 +       }
70173 +       if (e->count.key != wnd->extent_max) {
70174 +               ;
70175 +       } else if (rb_prev(&e->count.node)) {
70176 +               ;
70177 +       } else {
70178 +               n3 = rb_next(&e->count.node);
70179 +               max_new_len = len > new_len ? len : new_len;
70180 +               if (!n3) {
70181 +                       wnd->extent_max = max_new_len;
70182 +               } else {
70183 +                       e3 = rb_entry(n3, struct e_node, count.node);
70184 +                       wnd->extent_max = max(e3->count.key, max_new_len);
70185 +               }
70186 +       }
70188 +       if (!len) {
70189 +               if (new_len) {
70190 +                       e->start.key = new_key;
70191 +                       rb_erase(&e->count.node, &wnd->count_tree);
70192 +                       e->count.key = new_len;
70193 +                       rb_insert_count(&wnd->count_tree, e);
70194 +               } else {
70195 +                       rb_erase(&e->start.node, &wnd->start_tree);
70196 +                       rb_erase(&e->count.node, &wnd->count_tree);
70197 +                       wnd->count -= 1;
70198 +                       kmem_cache_free(ntfs_enode_cachep, e);
70199 +               }
70200 +               goto out;
70201 +       }
70202 +       rb_erase(&e->count.node, &wnd->count_tree);
70203 +       e->count.key = len;
70204 +       rb_insert_count(&wnd->count_tree, e);
70206 +       if (!new_len)
70207 +               goto out;
70209 +       if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
70210 +               wnd->uptodated = -1;
70212 +               /* Get minimal extent */
70213 +               e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
70214 +                            count.node);
70215 +               if (e->count.key > new_len)
70216 +                       goto out;
70218 +               /* Replace minimum */
70219 +               rb_erase(&e->start.node, &wnd->start_tree);
70220 +               rb_erase(&e->count.node, &wnd->count_tree);
70221 +               wnd->count -= 1;
70222 +       } else {
70223 +               e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
70224 +               if (!e)
70225 +                       wnd->uptodated = -1;
70226 +       }
70228 +       if (e) {
70229 +               e->start.key = new_key;
70230 +               e->count.key = new_len;
70231 +               rb_insert_start(&wnd->start_tree, e);
70232 +               rb_insert_count(&wnd->count_tree, e);
70233 +               wnd->count += 1;
70234 +       }
70236 +out:
70237 +       if (!wnd->count && wnd->uptodated != 1)
70238 +               wnd_rescan(wnd);
70242 + * wnd_rescan
70243 + *
70244 + * Scans the whole bitmap. Used during initialization.
70245 + */
70246 +static int wnd_rescan(struct wnd_bitmap *wnd)
70248 +       int err = 0;
70249 +       size_t prev_tail = 0;
70250 +       struct super_block *sb = wnd->sb;
70251 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
70252 +       u64 lbo, len = 0;
70253 +       u32 blocksize = sb->s_blocksize;
70254 +       u8 cluster_bits = sbi->cluster_bits;
70255 +       u32 wbits = 8 * sb->s_blocksize;
70256 +       u32 used, frb;
70257 +       const ulong *buf;
70258 +       size_t wpos, wbit, iw, vbo;
70259 +       struct buffer_head *bh = NULL;
70260 +       CLST lcn, clen;
70262 +       wnd->uptodated = 0;
70263 +       wnd->extent_max = 0;
70264 +       wnd->extent_min = MINUS_ONE_T;
70265 +       wnd->total_zeroes = 0;
70267 +       vbo = 0;
70269 +       for (iw = 0; iw < wnd->nwnd; iw++) {
70270 +               if (iw + 1 == wnd->nwnd)
70271 +                       wbits = wnd->bits_last;
70273 +               if (wnd->inited) {
70274 +                       if (!wnd->free_bits[iw]) {
70275 +                               /* all ones */
70276 +                               if (prev_tail) {
70277 +                                       wnd_add_free_ext(wnd,
70278 +                                                        vbo * 8 - prev_tail,
70279 +                                                        prev_tail, true);
70280 +                                       prev_tail = 0;
70281 +                               }
70282 +                               goto next_wnd;
70283 +                       }
70284 +                       if (wbits == wnd->free_bits[iw]) {
70285 +                               /* all zeroes */
70286 +                               prev_tail += wbits;
70287 +                               wnd->total_zeroes += wbits;
70288 +                               goto next_wnd;
70289 +                       }
70290 +               }
70292 +               if (!len) {
70293 +                       u32 off = vbo & sbi->cluster_mask;
70295 +                       if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
70296 +                                             &lcn, &clen, NULL)) {
70297 +                               err = -ENOENT;
70298 +                               goto out;
70299 +                       }
70301 +                       lbo = ((u64)lcn << cluster_bits) + off;
70302 +                       len = ((u64)clen << cluster_bits) - off;
70303 +               }
70305 +               bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
70306 +               if (!bh) {
70307 +                       err = -EIO;
70308 +                       goto out;
70309 +               }
70311 +               buf = (ulong *)bh->b_data;
70313 +               used = __bitmap_weight(buf, wbits);
70314 +               if (used < wbits) {
70315 +                       frb = wbits - used;
70316 +                       wnd->free_bits[iw] = frb;
70317 +                       wnd->total_zeroes += frb;
70318 +               }
70320 +               wpos = 0;
70321 +               wbit = vbo * 8;
70323 +               if (wbit + wbits > wnd->nbits)
70324 +                       wbits = wnd->nbits - wbit;
70326 +               do {
70327 +                       used = find_next_zero_bit(buf, wbits, wpos);
70329 +                       if (used > wpos && prev_tail) {
70330 +                               wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
70331 +                                                prev_tail, true);
70332 +                               prev_tail = 0;
70333 +                       }
70335 +                       wpos = used;
70337 +                       if (wpos >= wbits) {
70338 +                               /* No free blocks */
70339 +                               prev_tail = 0;
70340 +                               break;
70341 +                       }
70343 +                       frb = find_next_bit(buf, wbits, wpos);
70344 +                       if (frb >= wbits) {
70345 +                               /* keep last free block */
70346 +                               prev_tail += frb - wpos;
70347 +                               break;
70348 +                       }
70350 +                       wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
70351 +                                        frb + prev_tail - wpos, true);
70353 +                       /* Skip free block and first '1' */
70354 +                       wpos = frb + 1;
70355 +                       /* Reset previous tail */
70356 +                       prev_tail = 0;
70357 +               } while (wpos < wbits);
70359 +next_wnd:
70361 +               if (bh)
70362 +                       put_bh(bh);
70363 +               bh = NULL;
70365 +               vbo += blocksize;
70366 +               if (len) {
70367 +                       len -= blocksize;
70368 +                       lbo += blocksize;
70369 +               }
70370 +       }
70372 +       /* Add last block */
70373 +       if (prev_tail)
70374 +               wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);
70376 +       /*
70377 +        * Before the init cycle wnd->uptodated was 0.
70378 +        * If any error or limit occurred during initialization then
70379 +        * wnd->uptodated will be -1.
70380 +        * If 'uptodated' is still 0 then the tree is fully up to date.
70381 +        */
70382 +       if (!wnd->uptodated)
70383 +               wnd->uptodated = 1;
70385 +       if (wnd->zone_bit != wnd->zone_end) {
70386 +               size_t zlen = wnd->zone_end - wnd->zone_bit;
70388 +               wnd->zone_end = wnd->zone_bit;
70389 +               wnd_zone_set(wnd, wnd->zone_bit, zlen);
70390 +       }
70392 +out:
70393 +       return err;
70397 + * wnd_init
70398 + */
70399 +int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
70401 +       int err;
70402 +       u32 blocksize = sb->s_blocksize;
70403 +       u32 wbits = blocksize * 8;
70405 +       init_rwsem(&wnd->rw_lock);
70407 +       wnd->sb = sb;
70408 +       wnd->nbits = nbits;
70409 +       wnd->total_zeroes = nbits;
70410 +       wnd->extent_max = MINUS_ONE_T;
70411 +       wnd->zone_bit = wnd->zone_end = 0;
70412 +       wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
70413 +       wnd->bits_last = nbits & (wbits - 1);
70414 +       if (!wnd->bits_last)
70415 +               wnd->bits_last = wbits;
70417 +       wnd->free_bits = ntfs_zalloc(wnd->nwnd * sizeof(u16));
70418 +       if (!wnd->free_bits)
70419 +               return -ENOMEM;
70421 +       err = wnd_rescan(wnd);
70422 +       if (err)
70423 +               return err;
70425 +       wnd->inited = true;
70427 +       return 0;
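
A worked userspace check (hypothetical geometry) of the window layout set up above: with 4096-byte blocks each window covers wbits = 32768 bits, and bits_last holds the number of valid bits in the final window (or wbits when nbits divides evenly). nwnd is written below as a plain ceiling division, which should match bytes_to_block(sb, bitmap_size(nbits)) for a byte-rounded bitmap:

	#include <assert.h>
	#include <stddef.h>

	int main(void)
	{
		size_t nbits = 100000;
		unsigned wbits = 4096 * 8;	/* bits per window */
		size_t nwnd = (nbits + wbits - 1) / wbits;
		size_t bits_last = nbits & (wbits - 1);

		if (!bits_last)
			bits_last = wbits;

		assert(nwnd == 4 && bits_last == 1696);
		return 0;
	}
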
70431 + * wnd_map
70432 + *
70433 + * reads the block that backs the requested bitmap window
70434 + */
70435 +static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
70437 +       size_t vbo;
70438 +       CLST lcn, clen;
70439 +       struct super_block *sb = wnd->sb;
70440 +       struct ntfs_sb_info *sbi;
70441 +       struct buffer_head *bh;
70442 +       u64 lbo;
70444 +       sbi = sb->s_fs_info;
70445 +       vbo = (u64)iw << sb->s_blocksize_bits;
70447 +       if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
70448 +                             NULL)) {
70449 +               return ERR_PTR(-ENOENT);
70450 +       }
70452 +       lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);
70454 +       bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
70455 +       if (!bh)
70456 +               return ERR_PTR(-EIO);
70458 +       return bh;
70462 + * wnd_set_free
70463 + *
70464 + * Marks the bit range [bit, bit + bits) as free
70465 + */
70466 +int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
70468 +       int err = 0;
70469 +       struct super_block *sb = wnd->sb;
70470 +       size_t bits0 = bits;
70471 +       u32 wbits = 8 * sb->s_blocksize;
70472 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
70473 +       u32 wbit = bit & (wbits - 1);
70474 +       struct buffer_head *bh;
70476 +       while (iw < wnd->nwnd && bits) {
70477 +               u32 tail, op;
70478 +               ulong *buf;
70480 +               if (iw + 1 == wnd->nwnd)
70481 +                       wbits = wnd->bits_last;
70483 +               tail = wbits - wbit;
70484 +               op = tail < bits ? tail : bits;
70486 +               bh = wnd_map(wnd, iw);
70487 +               if (IS_ERR(bh)) {
70488 +                       err = PTR_ERR(bh);
70489 +                       break;
70490 +               }
70492 +               buf = (ulong *)bh->b_data;
70494 +               lock_buffer(bh);
70496 +               __bitmap_clear(buf, wbit, op);
70498 +               wnd->free_bits[iw] += op;
70500 +               set_buffer_uptodate(bh);
70501 +               mark_buffer_dirty(bh);
70502 +               unlock_buffer(bh);
70503 +               put_bh(bh);
70505 +               wnd->total_zeroes += op;
70506 +               bits -= op;
70507 +               wbit = 0;
70508 +               iw += 1;
70509 +       }
70511 +       wnd_add_free_ext(wnd, bit, bits0, false);
70513 +       return err;
70517 + * wnd_set_used
70518 + *
70519 + * Marks the bit range [bit, bit + bits) as used.
70520 + */
70521 +int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
70523 +       int err = 0;
70524 +       struct super_block *sb = wnd->sb;
70525 +       size_t bits0 = bits;
70526 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
70527 +       u32 wbits = 8 * sb->s_blocksize;
70528 +       u32 wbit = bit & (wbits - 1);
70529 +       struct buffer_head *bh;
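70530 +       /* Set up to 'op' bits per window, crossing window boundaries as needed */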
70531 +       while (iw < wnd->nwnd && bits) {
70532 +               u32 tail, op;
70533 +               ulong *buf;
70535 +               if (unlikely(iw + 1 == wnd->nwnd))
70536 +                       wbits = wnd->bits_last;
70538 +               tail = wbits - wbit;
70539 +               op = tail < bits ? tail : bits;
70541 +               bh = wnd_map(wnd, iw);
70542 +               if (IS_ERR(bh)) {
70543 +                       err = PTR_ERR(bh);
70544 +                       break;
70545 +               }
70546 +               buf = (ulong *)bh->b_data;
70548 +               lock_buffer(bh);
70550 +               __bitmap_set(buf, wbit, op);
70551 +               wnd->free_bits[iw] -= op;
70553 +               set_buffer_uptodate(bh);
70554 +               mark_buffer_dirty(bh);
70555 +               unlock_buffer(bh);
70556 +               put_bh(bh);
70558 +               wnd->total_zeroes -= op;
70559 +               bits -= op;
70560 +               wbit = 0;
70561 +               iw += 1;
70562 +       }
70564 +       if (!RB_EMPTY_ROOT(&wnd->start_tree))
70565 +               wnd_remove_free_ext(wnd, bit, bits0);
70567 +       return err;
70571 + * wnd_is_free_hlp
70572 + *
70573 + * Returns true if all clusters [bit, bit+bits) are free (bitmap only)
70574 + */
70575 +static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
70577 +       struct super_block *sb = wnd->sb;
70578 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
70579 +       u32 wbits = 8 * sb->s_blocksize;
70580 +       u32 wbit = bit & (wbits - 1);
70582 +       while (iw < wnd->nwnd && bits) {
70583 +               u32 tail, op;
70585 +               if (unlikely(iw + 1 == wnd->nwnd))
70586 +                       wbits = wnd->bits_last;
70588 +               tail = wbits - wbit;
70589 +               op = tail < bits ? tail : bits;
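70590 +               /* A window with all bits free needs no read; otherwise test the bits on disk */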
70591 +               if (wbits != wnd->free_bits[iw]) {
70592 +                       bool ret;
70593 +                       struct buffer_head *bh = wnd_map(wnd, iw);
70595 +                       if (IS_ERR(bh))
70596 +                               return false;
70598 +                       ret = are_bits_clear((ulong *)bh->b_data, wbit, op);
70600 +                       put_bh(bh);
70601 +                       if (!ret)
70602 +                               return false;
70603 +               }
70605 +               bits -= op;
70606 +               wbit = 0;
70607 +               iw += 1;
70608 +       }
70610 +       return true;
70614 + * wnd_is_free
70615 + *
70616 + * Returns true if all clusters [bit, bit+bits) are free
70617 + */
70618 +bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
70620 +       bool ret;
70621 +       struct rb_node *n;
70622 +       size_t end;
70623 +       struct e_node *e;
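70624 +       /* Try the in-memory free-extents tree first; fall back to the on-disk bitmap */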
70625 +       if (RB_EMPTY_ROOT(&wnd->start_tree))
70626 +               goto use_wnd;
70628 +       n = rb_lookup(&wnd->start_tree, bit);
70629 +       if (!n)
70630 +               goto use_wnd;
70632 +       e = rb_entry(n, struct e_node, start.node);
70634 +       end = e->start.key + e->count.key;
70636 +       if (bit < end && bit + bits <= end)
70637 +               return true;
70639 +use_wnd:
70640 +       ret = wnd_is_free_hlp(wnd, bit, bits);
70642 +       return ret;
70646 + * wnd_is_used
70647 + *
70648 + * Returns true if all clusters [bit, bit+bits) are used
70649 + */
70650 +bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
70652 +       bool ret = false;
70653 +       struct super_block *sb = wnd->sb;
70654 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
70655 +       u32 wbits = 8 * sb->s_blocksize;
70656 +       u32 wbit = bit & (wbits - 1);
70657 +       size_t end;
70658 +       struct rb_node *n;
70659 +       struct e_node *e;
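70660 +       /* If a free extent overlaps [bit, bit + bits), the range cannot be fully used */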
70661 +       if (RB_EMPTY_ROOT(&wnd->start_tree))
70662 +               goto use_wnd;
70664 +       end = bit + bits;
70665 +       n = rb_lookup(&wnd->start_tree, end - 1);
70666 +       if (!n)
70667 +               goto use_wnd;
70669 +       e = rb_entry(n, struct e_node, start.node);
70670 +       if (e->start.key + e->count.key > bit)
70671 +               return false;
70673 +use_wnd:
70674 +       while (iw < wnd->nwnd && bits) {
70675 +               u32 tail, op;
70677 +               if (unlikely(iw + 1 == wnd->nwnd))
70678 +                       wbits = wnd->bits_last;
70680 +               tail = wbits - wbit;
70681 +               op = tail < bits ? tail : bits;
70683 +               if (wnd->free_bits[iw]) {
70684 +                       bool ret;
70685 +                       struct buffer_head *bh = wnd_map(wnd, iw);
70687 +                       if (IS_ERR(bh))
70688 +                               goto out;
70690 +                       ret = are_bits_set((ulong *)bh->b_data, wbit, op);
70691 +                       put_bh(bh);
70692 +                       if (!ret)
70693 +                               goto out;
70694 +               }
70696 +               bits -= op;
70697 +               wbit = 0;
70698 +               iw += 1;
70699 +       }
70700 +       ret = true;
70702 +out:
70703 +       return ret;
70707 + * wnd_find
70708 + * - flags - BITMAP_FIND_XXX flags
70709 + *
70710 + * Looks for free space.
70711 + * Returns 0 if not found.
70712 + */
70713 +size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
70714 +               size_t flags, size_t *allocated)
70716 +       struct super_block *sb;
70717 +       u32 wbits, wpos, wzbit, wzend;
70718 +       size_t fnd, max_alloc, b_len, b_pos;
70719 +       size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
70720 +       size_t to_alloc0 = to_alloc;
70721 +       const ulong *buf;
70722 +       const struct e_node *e;
70723 +       const struct rb_node *pr, *cr;
70724 +       u8 log2_bits;
70725 +       bool fbits_valid;
70726 +       struct buffer_head *bh;
70728 +       /* Fast check for available free space */
70729 +       if (flags & BITMAP_FIND_FULL) {
70730 +               size_t zeroes = wnd_zeroes(wnd);
70732 +               zeroes -= wnd->zone_end - wnd->zone_bit;
70733 +               if (zeroes < to_alloc0)
70734 +                       goto no_space;
70736 +               if (to_alloc0 > wnd->extent_max)
70737 +                       goto no_space;
70738 +       } else {
70739 +               if (to_alloc > wnd->extent_max)
70740 +                       to_alloc = wnd->extent_max;
70741 +       }
70743 +       if (wnd->zone_bit <= hint && hint < wnd->zone_end)
70744 +               hint = wnd->zone_end;
70746 +       max_alloc = wnd->nbits;
70747 +       b_len = b_pos = 0;
70749 +       if (hint >= max_alloc)
70750 +               hint = 0;
70752 +       if (RB_EMPTY_ROOT(&wnd->start_tree)) {
70753 +               if (wnd->uptodated == 1) {
70754 +                       /* extents tree is updated -> no free space */
70755 +                       goto no_space;
70756 +               }
70757 +               goto scan_bitmap;
70758 +       }
70760 +       e = NULL;
70761 +       if (!hint)
70762 +               goto allocate_biggest;
70764 +       /* Use hint: enumerate extents by start >= hint */
70765 +       pr = NULL;
70766 +       cr = wnd->start_tree.rb_node;
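70767 +       /* Find the extent with the greatest start <= hint */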
70768 +       for (;;) {
70769 +               e = rb_entry(cr, struct e_node, start.node);
70771 +               if (e->start.key == hint)
70772 +                       break;
70774 +               if (e->start.key < hint) {
70775 +                       pr = cr;
70776 +                       cr = cr->rb_right;
70777 +                       if (!cr)
70778 +                               break;
70779 +                       continue;
70780 +               }
70782 +               cr = cr->rb_left;
70783 +               if (!cr) {
70784 +                       e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
70785 +                       break;
70786 +               }
70787 +       }
70789 +       if (!e)
70790 +               goto allocate_biggest;
70792 +       if (e->start.key + e->count.key > hint) {
70793 +               /* We have found an extent containing 'hint' */
70794 +               size_t len = e->start.key + e->count.key - hint;
70796 +               if (len >= to_alloc && hint + to_alloc <= max_alloc) {
70797 +                       fnd = hint;
70798 +                       goto found;
70799 +               }
70801 +               if (!(flags & BITMAP_FIND_FULL)) {
70802 +                       if (len > to_alloc)
70803 +                               len = to_alloc;
70805 +                       if (hint + len <= max_alloc) {
70806 +                               fnd = hint;
70807 +                               to_alloc = len;
70808 +                               goto found;
70809 +                       }
70810 +               }
70811 +       }
70813 +allocate_biggest:
70814 +       /* Allocate from biggest free extent */
70815 +       e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
70816 +       if (e->count.key != wnd->extent_max)
70817 +               wnd->extent_max = e->count.key;
70819 +       if (e->count.key < max_alloc) {
70820 +               if (e->count.key >= to_alloc) {
70821 +                       ;
70822 +               } else if (flags & BITMAP_FIND_FULL) {
70823 +                       if (e->count.key < to_alloc0) {
70824 +                               /* Biggest free block is less than requested */
70825 +                               goto no_space;
70826 +                       }
70827 +                       to_alloc = e->count.key;
70828 +               } else if (-1 != wnd->uptodated) {
70829 +                       to_alloc = e->count.key;
70830 +               } else {
70831 +                       /* Check if we can use more bits */
70832 +                       size_t op, max_check;
70833 +                       struct rb_root start_tree;
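70834 +                       /* Temporarily detach the extents tree so wnd_is_free() consults the bitmap only */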
70835 +                       memcpy(&start_tree, &wnd->start_tree,
70836 +                              sizeof(struct rb_root));
70837 +                       memset(&wnd->start_tree, 0, sizeof(struct rb_root));
70839 +                       max_check = e->start.key + to_alloc;
70840 +                       if (max_check > max_alloc)
70841 +                               max_check = max_alloc;
70842 +                       for (op = e->start.key + e->count.key; op < max_check;
70843 +                            op++) {
70844 +                               if (!wnd_is_free(wnd, op, 1))
70845 +                                       break;
70846 +                       }
70847 +                       memcpy(&wnd->start_tree, &start_tree,
70848 +                              sizeof(struct rb_root));
70849 +                       to_alloc = op - e->start.key;
70850 +               }
70852 +               /* Prepare to return */
70853 +               fnd = e->start.key;
70854 +               if (e->start.key + to_alloc > max_alloc)
70855 +                       to_alloc = max_alloc - e->start.key;
70856 +               goto found;
70857 +       }
70859 +       if (wnd->uptodated == 1) {
70860 +               /* extents tree is updated -> no free space */
70861 +               goto no_space;
70862 +       }
70864 +       b_len = e->count.key;
70865 +       b_pos = e->start.key;
70867 +scan_bitmap:
70868 +       sb = wnd->sb;
70869 +       log2_bits = sb->s_blocksize_bits + 3;
70871 +       /* At most two ranges [hint, max_alloc) + [0, hint) */
70872 +Again:
70874 +       /* TODO: optimize request for case nbits > wbits */
70875 +       iw = hint >> log2_bits;
70876 +       wbits = sb->s_blocksize * 8;
70877 +       wpos = hint & (wbits - 1);
70878 +       prev_tail = 0;
70879 +       fbits_valid = true;
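70880 +       /* prev_tail counts free bits running into the current window; b_pos/b_len track the best run so far */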
70881 +       if (max_alloc == wnd->nbits) {
70882 +               nwnd = wnd->nwnd;
70883 +       } else {
70884 +               size_t t = max_alloc + wbits - 1;
70886 +               nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
70887 +       }
70889 +       /* Enumerate all windows */
70890 +       for (; iw < nwnd; iw++) {
70891 +               wbit = iw << log2_bits;
70893 +               if (!wnd->free_bits[iw]) {
70894 +                       if (prev_tail > b_len) {
70895 +                               b_pos = wbit - prev_tail;
70896 +                               b_len = prev_tail;
70897 +                       }
70899 +                       /* Skip fully used window */
70900 +                       prev_tail = 0;
70901 +                       wpos = 0;
70902 +                       continue;
70903 +               }
70905 +               if (unlikely(iw + 1 == nwnd)) {
70906 +                       if (max_alloc == wnd->nbits) {
70907 +                               wbits = wnd->bits_last;
70908 +                       } else {
70909 +                               size_t t = max_alloc & (wbits - 1);
70911 +                               if (t) {
70912 +                                       wbits = t;
70913 +                                       fbits_valid = false;
70914 +                               }
70915 +                       }
70916 +               }
70918 +               if (wnd->zone_end > wnd->zone_bit) {
70919 +                       ebit = wbit + wbits;
70920 +                       zbit = max(wnd->zone_bit, wbit);
70921 +                       zend = min(wnd->zone_end, ebit);
70923 +                       /* Here we have a window [wbit, ebit) and zone [zbit, zend) */
70924 +                       if (zend <= zbit) {
70925 +                               /* Zone does not overlap window */
70926 +                       } else {
70927 +                               wzbit = zbit - wbit;
70928 +                               wzend = zend - wbit;
70930 +                               /* Zone overlaps window */
70931 +                               if (wnd->free_bits[iw] == wzend - wzbit) {
70932 +                                       prev_tail = 0;
70933 +                                       wpos = 0;
70934 +                                       continue;
70935 +                               }
70937 +                               /* Scan the two window ranges [wbit, zbit) and [zend, ebit) */
70938 +                               bh = wnd_map(wnd, iw);
70940 +                               if (IS_ERR(bh)) {
70941 +                                       /* TODO: error */
70942 +                                       prev_tail = 0;
70943 +                                       wpos = 0;
70944 +                                       continue;
70945 +                               }
70947 +                               buf = (ulong *)bh->b_data;
70949 +                               /* Scan range [wbit, zbit) */
70950 +                               if (wpos < wzbit) {
70951 +                                       /* Scan range [wpos, zbit) */
70952 +                                       fnd = wnd_scan(buf, wbit, wpos, wzbit,
70953 +                                                      to_alloc, &prev_tail,
70954 +                                                      &b_pos, &b_len);
70955 +                                       if (fnd != MINUS_ONE_T) {
70956 +                                               put_bh(bh);
70957 +                                               goto found;
70958 +                                       }
70959 +                               }
70961 +                               prev_tail = 0;
70963 +                               /* Scan range [zend, ebit) */
70964 +                               if (wzend < wbits) {
70965 +                                       fnd = wnd_scan(buf, wbit,
70966 +                                                      max(wzend, wpos), wbits,
70967 +                                                      to_alloc, &prev_tail,
70968 +                                                      &b_pos, &b_len);
70969 +                                       if (fnd != MINUS_ONE_T) {
70970 +                                               put_bh(bh);
70971 +                                               goto found;
70972 +                                       }
70973 +                               }
70975 +                               wpos = 0;
70976 +                               put_bh(bh);
70977 +                               continue;
70978 +                       }
70979 +               }
70981 +               /* Current window does not overlap zone */
70982 +               if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
70983 +                       /* window is empty */
70984 +                       if (prev_tail + wbits >= to_alloc) {
70985 +                               fnd = wbit + wpos - prev_tail;
70986 +                               goto found;
70987 +                       }
70989 +                       /* Increase 'prev_tail' and process next window */
70990 +                       prev_tail += wbits;
70991 +                       wpos = 0;
70992 +                       continue;
70993 +               }
70995 +               /* read window */
70996 +               bh = wnd_map(wnd, iw);
70997 +               if (IS_ERR(bh)) {
70998 +                       /* TODO: error */
70999 +                       prev_tail = 0;
71000 +                       wpos = 0;
71001 +                       continue;
71002 +               }
71004 +               buf = (ulong *)bh->b_data;
71006 +               /* Scan range [wpos, wbits) */
71007 +               fnd = wnd_scan(buf, wbit, wpos, wbits, to_alloc, &prev_tail,
71008 +                              &b_pos, &b_len);
71009 +               put_bh(bh);
71010 +               if (fnd != MINUS_ONE_T)
71011 +                       goto found;
71012 +       }
71014 +       if (b_len < prev_tail) {
71015 +               /* The last fragment */
71016 +               b_len = prev_tail;
71017 +               b_pos = max_alloc - prev_tail;
71018 +       }
71020 +       if (hint) {
71021 +               /*
71022 +                * We have scanned the range [hint, max_alloc).
71023 +                * Prepare to scan the range [0, hint + to_alloc).
71024 +                */
71025 +               size_t nextmax = hint + to_alloc;
71027 +               if (likely(nextmax >= hint) && nextmax < max_alloc)
71028 +                       max_alloc = nextmax;
71029 +               hint = 0;
71030 +               goto Again;
71031 +       }
71033 +       if (!b_len)
71034 +               goto no_space;
71036 +       wnd->extent_max = b_len;
71038 +       if (flags & BITMAP_FIND_FULL)
71039 +               goto no_space;
71041 +       fnd = b_pos;
71042 +       to_alloc = b_len;
71044 +found:
71045 +       if (flags & BITMAP_FIND_MARK_AS_USED) {
71046 +               /* TODO: optimize extent removal (pass 'e'?) */
71047 +               if (wnd_set_used(wnd, fnd, to_alloc))
71048 +                       goto no_space;
71049 +       } else if (wnd->extent_max != MINUS_ONE_T &&
71050 +                  to_alloc > wnd->extent_max) {
71051 +               wnd->extent_max = to_alloc;
71052 +       }
71054 +       *allocated = fnd;
71055 +       return to_alloc;
71057 +no_space:
71058 +       return 0;
71062 + * wnd_extend
71063 + *
71064 + * Extend bitmap ($MFT bitmap)
71065 + */
71066 +int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
71068 +       int err;
71069 +       struct super_block *sb = wnd->sb;
71070 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
71071 +       u32 blocksize = sb->s_blocksize;
71072 +       u32 wbits = blocksize * 8;
71073 +       u32 b0, new_last;
71074 +       size_t bits, iw, new_wnd;
71075 +       size_t old_bits = wnd->nbits;
71076 +       u16 *new_free;
71078 +       if (new_bits <= old_bits)
71079 +               return -EINVAL;
71081 +       /* align to 8 byte boundary */
71082 +       new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
71083 +       new_last = new_bits & (wbits - 1);
71084 +       if (!new_last)
71085 +               new_last = wbits;
71087 +       if (new_wnd != wnd->nwnd) {
71088 +               new_free = ntfs_malloc(new_wnd * sizeof(u16));
71089 +               if (!new_free)
71090 +                       return -ENOMEM;
71092 +               if (new_free != wnd->free_bits)
71093 +                       memcpy(new_free, wnd->free_bits,
71094 +                              wnd->nwnd * sizeof(u16));
71095 +               memset(new_free + wnd->nwnd, 0,
71096 +                      (new_wnd - wnd->nwnd) * sizeof(u16));
71097 +               ntfs_free(wnd->free_bits);
71098 +               wnd->free_bits = new_free;
71099 +       }
71101 +       /* Zero bits [old_bits, new_bits) */
71102 +       bits = new_bits - old_bits;
71103 +       b0 = old_bits & (wbits - 1);
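71104 +       /* Walk the windows covering [old_bits, new_bits), zeroing the newly added bits on disk */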
71105 +       for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
71106 +               u32 op;
71107 +               size_t frb;
71108 +               u64 vbo, lbo, bytes;
71109 +               struct buffer_head *bh;
71110 +               ulong *buf;
71112 +               if (iw + 1 == new_wnd)
71113 +                       wbits = new_last;
71115 +               op = b0 + bits > wbits ? wbits - b0 : bits;
71116 +               vbo = (u64)iw * blocksize;
71118 +               err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
71119 +               if (err)
71120 +                       break;
71122 +               bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
71123 +               if (!bh)
71124 +                       return -EIO;
71126 +               lock_buffer(bh);
71127 +               buf = (ulong *)bh->b_data;
71129 +               __bitmap_clear(buf, b0, blocksize * 8 - b0);
71130 +               frb = wbits - __bitmap_weight(buf, wbits);
71131 +               wnd->total_zeroes += frb - wnd->free_bits[iw];
71132 +               wnd->free_bits[iw] = frb;
71134 +               set_buffer_uptodate(bh);
71135 +               mark_buffer_dirty(bh);
71136 +               unlock_buffer(bh);
71137 +               /*err = sync_dirty_buffer(bh);*/
71139 +               b0 = 0;
71140 +               bits -= op;
71141 +       }
71143 +       wnd->nbits = new_bits;
71144 +       wnd->nwnd = new_wnd;
71145 +       wnd->bits_last = new_last;
71147 +       wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);
71149 +       return 0;
71153 + * wnd_zone_set
71154 + */
71155 +void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
71157 +       size_t zlen;
71159 +       zlen = wnd->zone_end - wnd->zone_bit;
71160 +       if (zlen)
71161 +               wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);
71163 +       if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
71164 +               wnd_remove_free_ext(wnd, lcn, len);
71166 +       wnd->zone_bit = lcn;
71167 +       wnd->zone_end = lcn + len;
71170 +int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
71172 +       int err = 0;
71173 +       struct super_block *sb = sbi->sb;
71174 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
71175 +       u32 wbits = 8 * sb->s_blocksize;
71176 +       CLST len = 0, lcn = 0, done = 0;
71177 +       CLST minlen = bytes_to_cluster(sbi, range->minlen);
71178 +       CLST lcn_from = bytes_to_cluster(sbi, range->start);
71179 +       size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
71180 +       u32 wbit = lcn_from & (wbits - 1);
71181 +       const ulong *buf;
71182 +       CLST lcn_to;
71184 +       if (!minlen)
71185 +               minlen = 1;
71187 +       if (range->len == (u64)-1)
71188 +               lcn_to = wnd->nbits;
71189 +       else
71190 +               lcn_to = bytes_to_cluster(sbi, range->start + range->len);
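71191 +       /* Scan bitmap windows for runs of free clusters of at least 'minlen' and discard them */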
71192 +       down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
71194 +       for (; iw < wnd->nbits; iw++, wbit = 0) {
71195 +               CLST lcn_wnd = iw * wbits;
71196 +               struct buffer_head *bh;
71198 +               if (lcn_wnd > lcn_to)
71199 +                       break;
71201 +               if (!wnd->free_bits[iw])
71202 +                       continue;
71204 +               if (iw + 1 == wnd->nwnd)
71205 +                       wbits = wnd->bits_last;
71207 +               if (lcn_wnd + wbits > lcn_to)
71208 +                       wbits = lcn_to - lcn_wnd;
71210 +               bh = wnd_map(wnd, iw);
71211 +               if (IS_ERR(bh)) {
71212 +                       err = PTR_ERR(bh);
71213 +                       break;
71214 +               }
71216 +               buf = (ulong *)bh->b_data;
71218 +               for (; wbit < wbits; wbit++) {
71219 +                       if (!test_bit(wbit, buf)) {
71220 +                               if (!len)
71221 +                                       lcn = lcn_wnd + wbit;
71222 +                               len += 1;
71223 +                               continue;
71224 +                       }
71225 +                       if (len >= minlen) {
71226 +                               err = ntfs_discard(sbi, lcn, len);
71227 +                               if (err)
71228 +                                       goto out;
71229 +                               done += len;
71230 +                       }
71231 +                       len = 0;
71232 +               }
71233 +               put_bh(bh);
71234 +       }
71236 +       /* Process the last fragment */
71237 +       if (len >= minlen) {
71238 +               err = ntfs_discard(sbi, lcn, len);
71239 +               if (err)
71240 +                       goto out;
71241 +               done += len;
71242 +       }
71244 +out:
71245 +       range->len = (u64)done << sbi->cluster_bits;
71247 +       up_read(&wnd->rw_lock);
71249 +       return err;
71251 diff --git a/fs/ntfs3/debug.h b/fs/ntfs3/debug.h
71252 new file mode 100644
71253 index 000000000000..dfaa4c79dc6d
71254 --- /dev/null
71255 +++ b/fs/ntfs3/debug.h
71256 @@ -0,0 +1,64 @@
71257 +/* SPDX-License-Identifier: GPL-2.0 */
71259 + *
71260 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
71261 + *
71262 + * Useful functions for debugging.
71263 + */
71265 +// clang-format off
71266 +#ifndef Add2Ptr
71267 +#define Add2Ptr(P, I)          ((void *)((u8 *)(P) + (I)))
71268 +#define PtrOffset(B, O)                ((size_t)((size_t)(O) - (size_t)(B)))
71269 +#endif
71271 +#define QuadAlign(n)           (((n) + 7u) & (~7u))
71272 +#define IsQuadAligned(n)       (!((size_t)(n)&7u))
71273 +#define Quad2Align(n)          (((n) + 15u) & (~15u))
71274 +#define IsQuad2Aligned(n)      (!((size_t)(n)&15u))
71275 +#define Quad4Align(n)          (((n) + 31u) & (~31u))
71276 +#define IsSizeTAligned(n)      (!((size_t)(n) & (sizeof(size_t) - 1)))
71277 +#define DwordAlign(n)          (((n) + 3u) & (~3u))
71278 +#define IsDwordAligned(n)      (!((size_t)(n)&3u))
71279 +#define WordAlign(n)           (((n) + 1u) & (~1u))
71280 +#define IsWordAligned(n)       (!((size_t)(n)&1u))
71282 +#ifdef CONFIG_PRINTK
71283 +__printf(2, 3)
71284 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...);
71285 +__printf(2, 3)
71286 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...);
71287 +#else
71288 +static inline __printf(2, 3)
71289 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
71293 +static inline __printf(2, 3)
71294 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
71297 +#endif
71300 + * Logging macros (thanks to Joe Perches <joe@perches.com> for the implementation)
71301 + */
71303 +#define ntfs_err(sb, fmt, ...)  ntfs_printk(sb, KERN_ERR fmt, ##__VA_ARGS__)
71304 +#define ntfs_warn(sb, fmt, ...) ntfs_printk(sb, KERN_WARNING fmt, ##__VA_ARGS__)
71305 +#define ntfs_info(sb, fmt, ...) ntfs_printk(sb, KERN_INFO fmt, ##__VA_ARGS__)
71306 +#define ntfs_notice(sb, fmt, ...)                                              \
71307 +       ntfs_printk(sb, KERN_NOTICE fmt, ##__VA_ARGS__)
71309 +#define ntfs_inode_err(inode, fmt, ...)                                        \
71310 +       ntfs_inode_printk(inode, KERN_ERR fmt, ##__VA_ARGS__)
71311 +#define ntfs_inode_warn(inode, fmt, ...)                                       \
71312 +       ntfs_inode_printk(inode, KERN_WARNING fmt, ##__VA_ARGS__)
71314 +#define ntfs_malloc(s)         kmalloc(s, GFP_NOFS)
71315 +#define ntfs_zalloc(s)         kzalloc(s, GFP_NOFS)
71316 +#define ntfs_vmalloc(s)                kvmalloc(s, GFP_KERNEL)
71317 +#define ntfs_free(p)           kfree(p)
71318 +#define ntfs_vfree(p)          kvfree(p)
71319 +#define ntfs_memdup(src, len)  kmemdup(src, len, GFP_NOFS)
71320 +// clang-format on
71321 diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
71322 new file mode 100644
71323 index 000000000000..9ec6012c405b
71324 --- /dev/null
71325 +++ b/fs/ntfs3/dir.c
71326 @@ -0,0 +1,594 @@
71327 +// SPDX-License-Identifier: GPL-2.0
71329 + *
71330 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
71331 + *
71332 + *  Directory handling functions for NTFS-based filesystems.
71333 + *
71334 + */
71335 +#include <linux/blkdev.h>
71336 +#include <linux/buffer_head.h>
71337 +#include <linux/fs.h>
71338 +#include <linux/iversion.h>
71339 +#include <linux/nls.h>
71341 +#include "debug.h"
71342 +#include "ntfs.h"
71343 +#include "ntfs_fs.h"
71346 + * Convert a little-endian UTF-16 string to an NLS string.
71347 + */
71348 +int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
71349 +                     u8 *buf, int buf_len)
71351 +       int ret, uni_len, warn;
71352 +       const __le16 *ip;
71353 +       u8 *op;
71354 +       struct nls_table *nls = sbi->options.nls;
71356 +       static_assert(sizeof(wchar_t) == sizeof(__le16));
71358 +       if (!nls) {
71359 +               /* utf16 -> utf8 */
71360 +               ret = utf16s_to_utf8s((wchar_t *)uni->name, uni->len,
71361 +                                     UTF16_LITTLE_ENDIAN, buf, buf_len);
71362 +               buf[ret] = '\0';
71363 +               return ret;
71364 +       }
71366 +       ip = uni->name;
71367 +       op = buf;
71368 +       uni_len = uni->len;
71369 +       warn = 0;
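71370 +       /* Convert one UTF-16 code unit at a time; unmappable characters become '_' (warned once) */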
71371 +       while (uni_len--) {
71372 +               u16 ec;
71373 +               int charlen;
71374 +               char dump[5];
71376 +               if (buf_len < NLS_MAX_CHARSET_SIZE) {
71377 +                       ntfs_warn(sbi->sb,
71378 +                                 "filename was truncated while converting.");
71379 +                       break;
71380 +               }
71382 +               ec = le16_to_cpu(*ip++);
71383 +               charlen = nls->uni2char(ec, op, buf_len);
71385 +               if (charlen > 0) {
71386 +                       op += charlen;
71387 +                       buf_len -= charlen;
71388 +                       continue;
71389 +               }
71391 +               *op++ = '_';
71392 +               buf_len -= 1;
71393 +               if (warn)
71394 +                       continue;
71396 +               warn = 1;
71397 +               hex_byte_pack(&dump[0], ec >> 8);
71398 +               hex_byte_pack(&dump[2], ec);
71399 +               dump[4] = 0;
71401 +               ntfs_err(sbi->sb, "failed to convert \"%s\" to %s", dump,
71402 +                        nls->charset);
71403 +       }
71405 +       *op = '\0';
71406 +       return op - buf;
71409 +// clang-format off
71410 +#define PLANE_SIZE     0x00010000
71412 +#define SURROGATE_PAIR 0x0000d800
71413 +#define SURROGATE_LOW  0x00000400
71414 +#define SURROGATE_BITS 0x000003ff
71415 +// clang-format on
71418 + * Modified version of put_utf16 from fs/nls/nls_base.c
71419 + * that is free of sparse warnings.
71420 + */
71421 +static inline void put_utf16(wchar_t *s, unsigned int c,
71422 +                            enum utf16_endian endian)
71424 +       static_assert(sizeof(wchar_t) == sizeof(__le16));
71425 +       static_assert(sizeof(wchar_t) == sizeof(__be16));
71427 +       switch (endian) {
71428 +       default:
71429 +               *s = (wchar_t)c;
71430 +               break;
71431 +       case UTF16_LITTLE_ENDIAN:
71432 +               *(__le16 *)s = __cpu_to_le16(c);
71433 +               break;
71434 +       case UTF16_BIG_ENDIAN:
71435 +               *(__be16 *)s = __cpu_to_be16(c);
71436 +               break;
71437 +       }
71441 + * Modified version of 'utf8s_to_utf16s' that allows detecting
71442 + * -ENAMETOOLONG without writing beyond the expected maximum.
71443 + */
71444 +static int _utf8s_to_utf16s(const u8 *s, int inlen, enum utf16_endian endian,
71445 +                           wchar_t *pwcs, int maxout)
71447 +       u16 *op;
71448 +       int size;
71449 +       unicode_t u;
71451 +       op = pwcs;
71452 +       while (inlen > 0 && *s) {
71453 +               if (*s & 0x80) {
71454 +                       size = utf8_to_utf32(s, inlen, &u);
71455 +                       if (size < 0)
71456 +                               return -EINVAL;
71457 +                       s += size;
71458 +                       inlen -= size;
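71459 +                       /* Code points beyond the BMP are encoded as a surrogate pair (two UTF-16 units) */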
71460 +                       if (u >= PLANE_SIZE) {
71461 +                               if (maxout < 2)
71462 +                                       return -ENAMETOOLONG;
71464 +                               u -= PLANE_SIZE;
71465 +                               put_utf16(op++,
71466 +                                         SURROGATE_PAIR |
71467 +                                                 ((u >> 10) & SURROGATE_BITS),
71468 +                                         endian);
71469 +                               put_utf16(op++,
71470 +                                         SURROGATE_PAIR | SURROGATE_LOW |
71471 +                                                 (u & SURROGATE_BITS),
71472 +                                         endian);
71473 +                               maxout -= 2;
71474 +                       } else {
71475 +                               if (maxout < 1)
71476 +                                       return -ENAMETOOLONG;
71478 +                               put_utf16(op++, u, endian);
71479 +                               maxout--;
71480 +                       }
71481 +               } else {
71482 +                       if (maxout < 1)
71483 +                               return -ENAMETOOLONG;
71485 +                       put_utf16(op++, *s++, endian);
71486 +                       inlen--;
71487 +                       maxout--;
71488 +               }
71489 +       }
71490 +       return op - pwcs;
71494 + * Convert input string to utf16
71495 + *
71496 + * name, name_len - input name
71497 + * uni, max_ulen - destination memory
71498 + * endian - endian of target utf16 string
71499 + *
71500 + * This function is called:
71501 + * - to create ntfs name
71502 + * - to create symlink
71503 + *
71504 + * Returns the UTF-16 string length, or a negative error code.
71505 + */
71506 +int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
71507 +                     struct cpu_str *uni, u32 max_ulen,
71508 +                     enum utf16_endian endian)
71510 +       int ret, slen;
71511 +       const u8 *end;
71512 +       struct nls_table *nls = sbi->options.nls;
71513 +       u16 *uname = uni->name;
71515 +       static_assert(sizeof(wchar_t) == sizeof(u16));
71517 +       if (!nls) {
71518 +               /* utf8 -> utf16 */
71519 +               ret = _utf8s_to_utf16s(name, name_len, endian, uname, max_ulen);
71520 +               uni->len = ret;
71521 +               return ret;
71522 +       }
71524 +       for (ret = 0, end = name + name_len; name < end; ret++, name += slen) {
71525 +               if (ret >= max_ulen)
71526 +                       return -ENAMETOOLONG;
71528 +               slen = nls->char2uni(name, end - name, uname + ret);
71529 +               if (!slen)
71530 +                       return -EINVAL;
71531 +               if (slen < 0)
71532 +                       return slen;
71533 +       }
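71534 +       /* nls->char2uni() produced CPU-endian units; byte-swap if the target endianness differs */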
71535 +#ifdef __BIG_ENDIAN
71536 +       if (endian == UTF16_LITTLE_ENDIAN) {
71537 +               int i = ret;
71539 +               while (i--) {
71540 +                       __cpu_to_le16s(uname);
71541 +                       uname++;
71542 +               }
71543 +       }
71544 +#else
71545 +       if (endian == UTF16_BIG_ENDIAN) {
71546 +               int i = ret;
71548 +               while (i--) {
71549 +                       __cpu_to_be16s(uname);
71550 +                       uname++;
71551 +               }
71552 +       }
71553 +#endif
71555 +       uni->len = ret;
71556 +       return ret;
71559 +/* Helper function: find the inode named 'uni' in directory 'dir' */
71560 +struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
71561 +                          struct ntfs_fnd *fnd)
71563 +       int err = 0;
71564 +       struct super_block *sb = dir->i_sb;
71565 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
71566 +       struct ntfs_inode *ni = ntfs_i(dir);
71567 +       struct NTFS_DE *e;
71568 +       int diff;
71569 +       struct inode *inode = NULL;
71570 +       struct ntfs_fnd *fnd_a = NULL;
71572 +       if (!fnd) {
71573 +               fnd_a = fnd_get();
71574 +               if (!fnd_a) {
71575 +                       err = -ENOMEM;
71576 +                       goto out;
71577 +               }
71578 +               fnd = fnd_a;
71579 +       }
71581 +       err = indx_find(&ni->dir, ni, NULL, uni, 0, sbi, &diff, &e, fnd);
71583 +       if (err)
71584 +               goto out;
71586 +       if (diff) {
71587 +               err = -ENOENT;
71588 +               goto out;
71589 +       }
71591 +       inode = ntfs_iget5(sb, &e->ref, uni);
71592 +       if (!IS_ERR(inode) && is_bad_inode(inode)) {
71593 +               iput(inode);
71594 +               err = -EINVAL;
71595 +       }
71596 +out:
71597 +       fnd_put(fnd_a);
71599 +       return err == -ENOENT ? NULL : err ? ERR_PTR(err) : inode;
71602 +static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
71603 +                              const struct NTFS_DE *e, u8 *name,
71604 +                              struct dir_context *ctx)
71606 +       const struct ATTR_FILE_NAME *fname;
71607 +       unsigned long ino;
71608 +       int name_len;
71609 +       u32 dt_type;
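71610 +       /* Skip DOS names, entries whose 'home' does not reference this record, the root, and (optionally) meta/hidden files */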
71611 +       fname = Add2Ptr(e, sizeof(struct NTFS_DE));
71613 +       if (fname->type == FILE_NAME_DOS)
71614 +               return 0;
71616 +       if (!mi_is_ref(&ni->mi, &fname->home))
71617 +               return 0;
71619 +       ino = ino_get(&e->ref);
71621 +       if (ino == MFT_REC_ROOT)
71622 +               return 0;
71624 +       /* Skip meta files (unless the option to show meta files is set) */
71625 +       if (!sbi->options.showmeta && ntfs_is_meta_file(sbi, ino))
71626 +               return 0;
71628 +       if (sbi->options.nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
71629 +               return 0;
71631 +       name_len = ntfs_utf16_to_nls(sbi, (struct le_str *)&fname->name_len,
71632 +                                    name, PATH_MAX);
71633 +       if (name_len <= 0) {
71634 +               ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
71635 +                         ino);
71636 +               return 0;
71637 +       }
71639 +       dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
71641 +       return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
71645 + * ntfs_read_hdr
71646 + *
71647 + * Helper function for 'ntfs_readdir'.
71648 + */
71649 +static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
71650 +                        const struct INDEX_HDR *hdr, u64 vbo, u64 pos,
71651 +                        u8 *name, struct dir_context *ctx)
71653 +       int err;
71654 +       const struct NTFS_DE *e;
71655 +       u32 e_size;
71656 +       u32 end = le32_to_cpu(hdr->used);
71657 +       u32 off = le32_to_cpu(hdr->de_off);
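71658 +       /* Walk the directory entries in this header; -1 signals a corrupted header */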
71659 +       for (;; off += e_size) {
71660 +               if (off + sizeof(struct NTFS_DE) > end)
71661 +                       return -1;
71663 +               e = Add2Ptr(hdr, off);
71664 +               e_size = le16_to_cpu(e->size);
71665 +               if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
71666 +                       return -1;
71668 +               if (de_is_last(e))
71669 +                       return 0;
71671 +               /* Skip entries already enumerated */
71672 +               if (vbo + off < pos)
71673 +                       continue;
71675 +               if (le16_to_cpu(e->key_size) < SIZEOF_ATTRIBUTE_FILENAME)
71676 +                       return -1;
71678 +               ctx->pos = vbo + off;
71680 +               /* Submit the name to the filldir callback. */
71681 +               err = ntfs_filldir(sbi, ni, e, name, ctx);
71682 +               if (err)
71683 +                       return err;
71684 +       }
71688 + * file_operations::iterate_shared
71689 + *
71690 + * Use unsorted enumeration.
71691 + * We have an example of a broken volume where sorted enumeration
71692 + * counts each name twice.
71693 + */
71694 +static int ntfs_readdir(struct file *file, struct dir_context *ctx)
71696 +       const struct INDEX_ROOT *root;
71697 +       u64 vbo;
71698 +       size_t bit;
71699 +       loff_t eod;
71700 +       int err = 0;
71701 +       struct inode *dir = file_inode(file);
71702 +       struct ntfs_inode *ni = ntfs_i(dir);
71703 +       struct super_block *sb = dir->i_sb;
71704 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
71705 +       loff_t i_size = i_size_read(dir);
71706 +       u32 pos = ctx->pos;
71707 +       u8 *name = NULL;
71708 +       struct indx_node *node = NULL;
71709 +       u8 index_bits = ni->dir.index_bits;
71711 +       /* 'name' is a buffer of PATH_MAX length */
71712 +       static_assert(NTFS_NAME_LEN * 4 < PATH_MAX);
71714 +       eod = i_size + sbi->record_size;
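71715 +       /* Positions below sbi->record_size index into the root header; larger positions select index blocks */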
71716 +       if (pos >= eod)
71717 +               return 0;
71719 +       if (!dir_emit_dots(file, ctx))
71720 +               return 0;
71722 +       /* allocate PATH_MAX bytes */
71723 +       name = __getname();
71724 +       if (!name)
71725 +               return -ENOMEM;
71727 +       if (!ni->mi_loaded && ni->attr_list.size) {
71728 +               /*
71729 +                * The directory inode is locked for read.
71730 +                * Load all subrecords to avoid 'write' access to 'ni'
71731 +                * during directory reading.
71732 +                */
71733 +               ni_lock(ni);
71734 +               if (!ni->mi_loaded && ni->attr_list.size) {
71735 +                       err = ni_load_all_mi(ni);
71736 +                       if (!err)
71737 +                               ni->mi_loaded = true;
71738 +               }
71739 +               ni_unlock(ni);
71740 +               if (err)
71741 +                       goto out;
71742 +       }
71744 +       root = indx_get_root(&ni->dir, ni, NULL, NULL);
71745 +       if (!root) {
71746 +               err = -EINVAL;
71747 +               goto out;
71748 +       }
71750 +       if (pos >= sbi->record_size) {
71751 +               bit = (pos - sbi->record_size) >> index_bits;
71752 +       } else {
71753 +               err = ntfs_read_hdr(sbi, ni, &root->ihdr, 0, pos, name, ctx);
71754 +               if (err)
71755 +                       goto out;
71756 +               bit = 0;
71757 +       }
71759 +       if (!i_size) {
71760 +               ctx->pos = eod;
71761 +               goto out;
71762 +       }
71764 +       for (;;) {
71765 +               vbo = (u64)bit << index_bits;
71766 +               if (vbo >= i_size) {
71767 +                       ctx->pos = eod;
71768 +                       goto out;
71769 +               }
71771 +               err = indx_used_bit(&ni->dir, ni, &bit);
71772 +               if (err)
71773 +                       goto out;
71775 +               if (bit == MINUS_ONE_T) {
71776 +                       ctx->pos = eod;
71777 +                       goto out;
71778 +               }
71780 +               vbo = (u64)bit << index_bits;
71781 +               if (vbo >= i_size) {
71782 +                       ntfs_inode_err(dir, "Looks like your dir is corrupt");
71783 +                       err = -EINVAL;
71784 +                       goto out;
71785 +               }
71787 +               err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
71788 +                               &node);
71789 +               if (err)
71790 +                       goto out;
71792 +               err = ntfs_read_hdr(sbi, ni, &node->index->ihdr,
71793 +                                   vbo + sbi->record_size, pos, name, ctx);
71794 +               if (err)
71795 +                       goto out;
71797 +               bit += 1;
71798 +       }
71800 +out:
71802 +       __putname(name);
71803 +       put_indx_node(node);
71805 +       if (err == -ENOENT) {
71806 +               err = 0;
71807 +               ctx->pos = pos;
71808 +       }
71810 +       return err;
71813 +static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
71814 +                         size_t *files)
71816 +       int err = 0;
71817 +       struct ntfs_inode *ni = ntfs_i(dir);
71818 +       struct NTFS_DE *e = NULL;
71819 +       struct INDEX_ROOT *root;
71820 +       struct INDEX_HDR *hdr;
71821 +       const struct ATTR_FILE_NAME *fname;
71822 +       u32 e_size, off, end;
71823 +       u64 vbo = 0;
71824 +       size_t drs = 0, fles = 0, bit = 0;
71825 +       loff_t i_size = ni->vfs_inode.i_size;
71826 +       struct indx_node *node = NULL;
71827 +       u8 index_bits = ni->dir.index_bits;
71829 +       if (is_empty)
71830 +               *is_empty = true;
71832 +       root = indx_get_root(&ni->dir, ni, NULL, NULL);
71833 +       if (!root)
71834 +               return -EINVAL;
71836 +       hdr = &root->ihdr;
71838 +       for (;;) {
71839 +               end = le32_to_cpu(hdr->used);
71840 +               off = le32_to_cpu(hdr->de_off);
71842 +               for (; off + sizeof(struct NTFS_DE) <= end; off += e_size) {
71843 +                       e = Add2Ptr(hdr, off);
71844 +                       e_size = le16_to_cpu(e->size);
71845 +                       if (e_size < sizeof(struct NTFS_DE) ||
71846 +                           off + e_size > end)
71847 +                               break;
71849 +                       if (de_is_last(e))
71850 +                               break;
71852 +                       fname = de_get_fname(e);
71853 +                       if (!fname)
71854 +                               continue;
71856 +                       if (fname->type == FILE_NAME_DOS)
71857 +                               continue;
71859 +                       if (is_empty) {
71860 +                               *is_empty = false;
71861 +                               if (!dirs && !files)
71862 +                                       goto out;
71863 +                       }
71865 +                       if (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY)
71866 +                               drs += 1;
71867 +                       else
71868 +                               fles += 1;
71869 +               }
71871 +               if (vbo >= i_size)
71872 +                       goto out;
71874 +               err = indx_used_bit(&ni->dir, ni, &bit);
71875 +               if (err)
71876 +                       goto out;
71878 +               if (bit == MINUS_ONE_T)
71879 +                       goto out;
71881 +               vbo = (u64)bit << index_bits;
71882 +               if (vbo >= i_size)
71883 +                       goto out;
71885 +               err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
71886 +                               &node);
71887 +               if (err)
71888 +                       goto out;
71890 +               hdr = &node->index->ihdr;
71891 +               bit += 1;
71892 +               vbo = (u64)bit << ni->dir.idx2vbn_bits;
71893 +       }
71895 +out:
71896 +       put_indx_node(node);
71897 +       if (dirs)
71898 +               *dirs = drs;
71899 +       if (files)
71900 +               *files = fles;
71902 +       return err;
71905 +bool dir_is_empty(struct inode *dir)
71907 +       bool is_empty = false;
71909 +       ntfs_dir_count(dir, &is_empty, NULL, NULL);
71911 +       return is_empty;
71914 +const struct file_operations ntfs_dir_operations = {
71915 +       .llseek = generic_file_llseek,
71916 +       .read = generic_read_dir,
71917 +       .iterate_shared = ntfs_readdir,
71918 +       .fsync = generic_file_fsync,
71919 +       .open = ntfs_file_open,
71921 diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
71922 new file mode 100644
71923 index 000000000000..347baf674008
71924 --- /dev/null
71925 +++ b/fs/ntfs3/file.c
71926 @@ -0,0 +1,1130 @@
71927 +// SPDX-License-Identifier: GPL-2.0
71929 + *
71930 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
71931 + *
71932 + *  Regular file handling primitives for NTFS-based filesystems.
71933 + */
71934 +#include <linux/backing-dev.h>
71935 +#include <linux/buffer_head.h>
71936 +#include <linux/compat.h>
71937 +#include <linux/falloc.h>
71938 +#include <linux/fiemap.h>
71939 +#include <linux/msdos_fs.h> /* FAT_IOCTL_XXX */
71940 +#include <linux/nls.h>
71942 +#include "debug.h"
71943 +#include "ntfs.h"
71944 +#include "ntfs_fs.h"
71946 +static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
71948 +       struct fstrim_range __user *user_range;
71949 +       struct fstrim_range range;
71950 +       struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
71951 +       int err;
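71952 +       /* FITRIM: requires CAP_SYS_ADMIN and a device that supports discard */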
71953 +       if (!capable(CAP_SYS_ADMIN))
71954 +               return -EPERM;
71956 +       if (!blk_queue_discard(q))
71957 +               return -EOPNOTSUPP;
71959 +       user_range = (struct fstrim_range __user *)arg;
71960 +       if (copy_from_user(&range, user_range, sizeof(range)))
71961 +               return -EFAULT;
71963 +       range.minlen = max_t(u32, range.minlen, q->limits.discard_granularity);
71965 +       err = ntfs_trim_fs(sbi, &range);
71966 +       if (err < 0)
71967 +               return err;
71969 +       if (copy_to_user(user_range, &range, sizeof(range)))
71970 +               return -EFAULT;
71972 +       return 0;
71975 +static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
71977 +       struct inode *inode = file_inode(filp);
71978 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
71979 +       u32 __user *user_attr = (u32 __user *)arg;
71981 +       switch (cmd) {
71982 +       case FAT_IOCTL_GET_ATTRIBUTES:
71983 +               return put_user(le32_to_cpu(ntfs_i(inode)->std_fa), user_attr);
71985 +       case FAT_IOCTL_GET_VOLUME_ID:
71986 +               return put_user(sbi->volume.ser_num, user_attr);
71988 +       case FITRIM:
71989 +               return ntfs_ioctl_fitrim(sbi, arg);
71990 +       }
71991 +       return -ENOTTY; /* Inappropriate ioctl for device */
71994 +#ifdef CONFIG_COMPAT
71995 +static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
71998 +       return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
72000 +#endif
72003 + * inode_operations::getattr
72004 + */
72005 +int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
72006 +                struct kstat *stat, u32 request_mask, u32 flags)
72008 +       struct inode *inode = d_inode(path->dentry);
72009 +       struct ntfs_inode *ni = ntfs_i(inode);
72011 +       if (is_compressed(ni))
72012 +               stat->attributes |= STATX_ATTR_COMPRESSED;
72014 +       if (is_encrypted(ni))
72015 +               stat->attributes |= STATX_ATTR_ENCRYPTED;
72017 +       stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;
72019 +       generic_fillattr(mnt_userns, inode, stat);
72021 +       stat->result_mask |= STATX_BTIME;
72022 +       stat->btime = ni->i_crtime;
72024 +       return 0;
72027 +static int ntfs_extend_initialized_size(struct file *file,
72028 +                                       struct ntfs_inode *ni,
72029 +                                       const loff_t valid,
72030 +                                       const loff_t new_valid)
72032 +       struct inode *inode = &ni->vfs_inode;
72033 +       struct address_space *mapping = inode->i_mapping;
72034 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
72035 +       loff_t pos = valid;
72036 +       int err;
72038 +       if (is_resident(ni)) {
72039 +               ni->i_valid = new_valid;
72040 +               return 0;
72041 +       }
72043 +       WARN_ON(is_compressed(ni));
72044 +       WARN_ON(valid >= new_valid);
72046 +       for (;;) {
72047 +               u32 zerofrom, len;
72048 +               struct page *page;
72049 +               void *fsdata;
72050 +               u8 bits;
72051 +               CLST vcn, lcn, clen;
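72052 +               /* For sparse files a hole (SPARSE_LCN) can advance i_valid without writing pages */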
72053 +               if (is_sparsed(ni)) {
72054 +                       bits = sbi->cluster_bits;
72055 +                       vcn = pos >> bits;
72057 +                       err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
72058 +                                                 NULL);
72059 +                       if (err)
72060 +                               goto out;
72062 +                       if (lcn == SPARSE_LCN) {
72063 +                               loff_t vbo = (loff_t)vcn << bits;
72064 +                               loff_t to = vbo + ((loff_t)clen << bits);
72066 +                               if (to <= new_valid) {
72067 +                                       ni->i_valid = to;
72068 +                                       pos = to;
72069 +                                       goto next;
72070 +                               }
72072 +                               if (vbo < pos) {
72073 +                                       pos = vbo;
72074 +                               } else {
72075 +                                       to = (new_valid >> bits) << bits;
72076 +                                       if (pos < to) {
72077 +                                               ni->i_valid = to;
72078 +                                               pos = to;
72079 +                                               goto next;
72080 +                                       }
72081 +                               }
72082 +                       }
72083 +               }
72085 +               zerofrom = pos & (PAGE_SIZE - 1);
72086 +               len = PAGE_SIZE - zerofrom;
72088 +               if (pos + len > new_valid)
72089 +                       len = new_valid - pos;
72091 +               err = pagecache_write_begin(file, mapping, pos, len, 0, &page,
72092 +                                           &fsdata);
72093 +               if (err)
72094 +                       goto out;
72096 +               zero_user_segment(page, zerofrom, PAGE_SIZE);
72098 +               /* This function puts the page in any case */
72099 +               err = pagecache_write_end(file, mapping, pos, len, len, page,
72100 +                                         fsdata);
72101 +               if (err < 0)
72102 +                       goto out;
72103 +               pos += len;
72105 +next:
72106 +               if (pos >= new_valid)
72107 +                       break;
72109 +               balance_dirty_pages_ratelimited(mapping);
72110 +               cond_resched();
72111 +       }
72113 +       mark_inode_dirty(inode);
72115 +       return 0;
72117 +out:
72118 +       ni->i_valid = valid;
72119 +       ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
72120 +                       new_valid);
72121 +       return err;
72125 + * ntfs_sparse_cluster
72126 + *
72127 + * Helper function to zero newly allocated clusters.
72128 + */
72129 +void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
72130 +                        CLST len)
72132 +       struct address_space *mapping = inode->i_mapping;
72133 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
72134 +       u64 vbo = (u64)vcn << sbi->cluster_bits;
72135 +       u64 bytes = (u64)len << sbi->cluster_bits;
72136 +       u32 blocksize = 1 << inode->i_blkbits;
72137 +       pgoff_t idx0 = page0 ? page0->index : -1;
72138 +       loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
72139 +       loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
72140 +       pgoff_t idx = vbo_clst >> PAGE_SHIFT;
72141 +       u32 from = vbo_clst & (PAGE_SIZE - 1);
72142 +       pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
72143 +       loff_t page_off;
72144 +       u32 to;
72145 +       bool partial;
72146 +       struct page *page;
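+       /* Walk every page overlapping the cluster-aligned range and zero it. */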
72148 +       for (; idx < idx_end; idx += 1, from = 0) {
72149 +               page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);
72151 +               if (!page)
72152 +                       continue;
72154 +               page_off = (loff_t)idx << PAGE_SHIFT;
72155 +               to = (page_off + PAGE_SIZE) > end ? (end - page_off)
72156 +                                                 : PAGE_SIZE;
72157 +               partial = false;
72159 +               if ((from || PAGE_SIZE != to) &&
72160 +                   likely(!page_has_buffers(page))) {
72161 +                       create_empty_buffers(page, blocksize, 0);
72162 +                       if (!page_has_buffers(page)) {
72163 +                               ntfs_inode_err(
72164 +                                       inode,
72165 +                                       "failed to allocate page buffers.");
72166 +                               /*err = -ENOMEM;*/
72167 +                               goto unlock_page;
72168 +                       }
72169 +               }
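+               /*
+                * Mark buffers fully inside the zeroed range uptodate and
+                * dirty; remember whether any other buffer is not uptodate.
+                */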
72171 +               if (page_has_buffers(page)) {
72172 +                       struct buffer_head *head, *bh;
72173 +                       u32 bh_off = 0;
72175 +                       bh = head = page_buffers(page);
72176 +                       do {
72177 +                               u32 bh_next = bh_off + blocksize;
72179 +                               if (from <= bh_off && bh_next <= to) {
72180 +                                       set_buffer_uptodate(bh);
72181 +                                       mark_buffer_dirty(bh);
72182 +                               } else if (!buffer_uptodate(bh)) {
72183 +                                       partial = true;
72184 +                               }
72185 +                               bh_off = bh_next;
72186 +                       } while (head != (bh = bh->b_this_page));
72187 +               }
72189 +               zero_user_segment(page, from, to);
72191 +               if (!partial) {
72192 +                       if (!PageUptodate(page))
72193 +                               SetPageUptodate(page);
72194 +                       set_page_dirty(page);
72195 +               }
72197 +unlock_page:
72198 +               if (idx != idx0) {
72199 +                       unlock_page(page);
72200 +                       put_page(page);
72201 +               }
72202 +               cond_resched();
72203 +       }
72204 +       mark_inode_dirty(inode);
72208 + * file_operations::mmap
72209 + */
72210 +static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
72212 +       struct address_space *mapping = file->f_mapping;
72213 +       struct inode *inode = mapping->host;
72214 +       struct ntfs_inode *ni = ntfs_i(inode);
72215 +       u64 to, from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
72216 +       bool rw = vma->vm_flags & VM_WRITE;
72217 +       int err;
72219 +       if (is_encrypted(ni)) {
72220 +               ntfs_inode_warn(inode,
72221 +                               "mmap is not supported for encrypted files");
72222 +               err = -EOPNOTSUPP;
72223 +               goto out;
72224 +       }
72226 +       if (!rw)
72227 +               goto do_map;
72229 +       if (is_compressed(ni)) {
72230 +               ntfs_inode_warn(
72231 +                       inode,
72232 +                       "mmap(write) is not supported for compressed files");
72233 +               err = -EOPNOTSUPP;
72234 +               goto out;
72235 +       }
72237 +       to = min_t(loff_t, i_size_read(inode),
72238 +                  from + vma->vm_end - vma->vm_start);
72240 +       if (is_sparsed(ni)) {
72241 +               /* allocate clusters for rw map */
72242 +               struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
72243 +               CLST vcn, lcn, len;
72244 +               CLST end = bytes_to_cluster(sbi, to);
72245 +               bool new;
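+               /*
+                * Instantiate every cluster backing the writable mapping
+                * and zero those that are newly allocated.
+                */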
72247 +               for (vcn = from >> sbi->cluster_bits; vcn < end; vcn += len) {
72248 +                       err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new);
72249 +                       if (err)
72250 +                               goto out;
72251 +                       if (!new)
72252 +                               continue;
72253 +                       ntfs_sparse_cluster(inode, NULL, vcn, 1);
72254 +               }
72255 +       }
72257 +       if (ni->i_valid < to) {
72258 +               inode_lock(inode);
72259 +               err = ntfs_extend_initialized_size(file, ni, ni->i_valid, to);
72260 +               inode_unlock(inode);
72261 +               if (err)
72262 +                       goto out;
72263 +       }
72265 +do_map:
72266 +       err = generic_file_mmap(file, vma);
72267 +out:
72268 +       return err;
72271 +static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
72272 +                      struct file *file)
72274 +       struct ntfs_inode *ni = ntfs_i(inode);
72275 +       struct address_space *mapping = inode->i_mapping;
72276 +       loff_t end = pos + count;
72277 +       bool extend_init = file && pos > ni->i_valid;
72278 +       int err;
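+       /* Grow i_size and/or the initialized ('valid') size as needed. */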
72280 +       if (end <= inode->i_size && !extend_init)
72281 +               return 0;
72283 +       /* Mark rw ntfs as dirty. It will be cleared at umount. */
72284 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);
72286 +       if (end > inode->i_size) {
72287 +               err = ntfs_set_size(inode, end);
72288 +               if (err)
72289 +                       goto out;
72290 +               inode->i_size = end;
72291 +       }
72293 +       if (extend_init && !is_compressed(ni)) {
72294 +               err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
72295 +               if (err)
72296 +                       goto out;
72297 +       } else {
72298 +               err = 0;
72299 +       }
72301 +       inode->i_ctime = inode->i_mtime = current_time(inode);
72302 +       mark_inode_dirty(inode);
72304 +       if (IS_SYNC(inode)) {
72305 +               int err2;
72307 +               err = filemap_fdatawrite_range(mapping, pos, end - 1);
72308 +               err2 = sync_mapping_buffers(mapping);
72309 +               if (!err)
72310 +                       err = err2;
72311 +               err2 = write_inode_now(inode, 1);
72312 +               if (!err)
72313 +                       err = err2;
72314 +               if (!err)
72315 +                       err = filemap_fdatawait_range(mapping, pos, end - 1);
72316 +       }
72318 +out:
72319 +       return err;
72322 +static int ntfs_truncate(struct inode *inode, loff_t new_size)
72324 +       struct super_block *sb = inode->i_sb;
72325 +       struct ntfs_inode *ni = ntfs_i(inode);
72326 +       int err, dirty = 0;
72327 +       u64 new_valid;
72329 +       if (!S_ISREG(inode->i_mode))
72330 +               return 0;
72332 +       if (is_compressed(ni)) {
72333 +               if (ni->i_valid > new_size)
72334 +                       ni->i_valid = new_size;
72335 +       } else {
72336 +               err = block_truncate_page(inode->i_mapping, new_size,
72337 +                                         ntfs_get_block);
72338 +               if (err)
72339 +                       return err;
72340 +       }
72342 +       new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));
72344 +       ni_lock(ni);
72346 +       truncate_setsize(inode, new_size);
72348 +       down_write(&ni->file.run_lock);
72349 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
72350 +                           &new_valid, true, NULL);
72351 +       up_write(&ni->file.run_lock);
72353 +       if (new_valid < ni->i_valid)
72354 +               ni->i_valid = new_valid;
72356 +       ni_unlock(ni);
72358 +       ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
72359 +       inode->i_ctime = inode->i_mtime = current_time(inode);
72360 +       if (!IS_DIRSYNC(inode)) {
72361 +               dirty = 1;
72362 +       } else {
72363 +               err = ntfs_sync_inode(inode);
72364 +               if (err)
72365 +                       return err;
72366 +       }
72368 +       if (dirty)
72369 +               mark_inode_dirty(inode);
72371 +       /*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/
72373 +       return 0;
72377 + * Preallocate space for a file. This implements ntfs's fallocate file
72378 + * operation, which gets called from the sys_fallocate system call. User
72379 + * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set,
72380 + * we just allocate clusters without zeroing them out. Otherwise we
72381 + * allocate and zero out clusters via an expanding truncate.
72382 + */
72383 +static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
72385 +       struct inode *inode = file->f_mapping->host;
72386 +       struct super_block *sb = inode->i_sb;
72387 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
72388 +       struct ntfs_inode *ni = ntfs_i(inode);
72389 +       loff_t end = vbo + len;
72390 +       loff_t vbo_down = round_down(vbo, PAGE_SIZE);
72391 +       loff_t i_size;
72392 +       int err;
72394 +       /* No support for dir */
72395 +       if (!S_ISREG(inode->i_mode))
72396 +               return -EOPNOTSUPP;
72398 +       /* Return error if mode is not supported */
72399 +       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
72400 +                    FALLOC_FL_COLLAPSE_RANGE))
72401 +               return -EOPNOTSUPP;
72403 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
72405 +       inode_lock(inode);
72406 +       i_size = inode->i_size;
72408 +       if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
72409 +               /* Should never be here, see ntfs_file_open. */
72410 +               err = -EOPNOTSUPP;
72411 +               goto out;
72412 +       }
72414 +       if (mode & FALLOC_FL_PUNCH_HOLE) {
72415 +               if (!(mode & FALLOC_FL_KEEP_SIZE)) {
72416 +                       err = -EINVAL;
72417 +                       goto out;
72418 +               }
72420 +               if (!is_sparsed(ni) && !is_compressed(ni)) {
72421 +                       ntfs_inode_warn(
72422 +                               inode,
72423 +                               "punch_hole only for sparsed/compressed files");
72424 +                       err = -EOPNOTSUPP;
72425 +                       goto out;
72426 +               }
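+               /*
+                * Flush dirty pages inside and after the hole before
+                * dropping them from the page cache.
+                */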
72428 +               err = filemap_write_and_wait_range(inode->i_mapping, vbo,
72429 +                                                  end - 1);
72430 +               if (err)
72431 +                       goto out;
72433 +               err = filemap_write_and_wait_range(inode->i_mapping, end,
72434 +                                                  LLONG_MAX);
72435 +               if (err)
72436 +                       goto out;
72438 +               truncate_pagecache(inode, vbo_down);
72440 +               ni_lock(ni);
72441 +               err = attr_punch_hole(ni, vbo, len);
72442 +               ni_unlock(ni);
72443 +       } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
72444 +               if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
72445 +                       err = -EINVAL;
72446 +                       goto out;
72447 +               }
72449 +               /*
72450 +                * Write the tail of the last page before the removed range,
72451 +                * since it will get removed from the page cache below.
72452 +                */
72453 +               err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
72454 +                                                  vbo);
72455 +               if (err)
72456 +                       goto out;
72458 +               /*
72459 +                * Write the data that will be shifted, to preserve it
72460 +                * when discarding the page cache below.
72461 +                */
72462 +               err = filemap_write_and_wait_range(inode->i_mapping, end,
72463 +                                                  LLONG_MAX);
72464 +               if (err)
72465 +                       goto out;
72467 +               truncate_pagecache(inode, vbo_down);
72469 +               ni_lock(ni);
72470 +               err = attr_collapse_range(ni, vbo, len);
72471 +               ni_unlock(ni);
72472 +       } else {
72473 +               /*
72474 +                * Normal file: allocate clusters, do not change the 'valid' size.
72475 +                */
72476 +               err = ntfs_set_size(inode, max(end, i_size));
72477 +               if (err)
72478 +                       goto out;
72480 +               if (is_sparsed(ni) || is_compressed(ni)) {
72481 +                       CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
72482 +                       CLST vcn = vbo >> sbi->cluster_bits;
72483 +                       CLST cend = bytes_to_cluster(sbi, end);
72484 +                       CLST lcn, clen;
72485 +                       bool new;
72487 +                       /*
72488 +                        * Allocate but do not zero new clusters (see comments below).
72489 +                        * This breaks security: one can read unused on-disk areas.
72490 +                        * Zeroing these clusters may take too long.
72491 +                        * Maybe we should check here for root rights?
72492 +                        */
72493 +                       for (; vcn < cend; vcn += clen) {
72494 +                               err = attr_data_get_block(ni, vcn, cend - vcn,
72495 +                                                         &lcn, &clen, &new);
72496 +                               if (err)
72497 +                                       goto out;
72498 +                               if (!new || vcn >= vcn_v)
72499 +                                       continue;
72501 +                               /*
72502 +                                * Unwritten area.
72503 +                                * NTFS is not able to store several unwritten areas.
72504 +                                * Activate 'ntfs_sparse_cluster' to zero newly allocated clusters.
72505 +                                *
72506 +                                * Dangerous case:
72507 +                                * 1G of sparsed clusters + 1 cluster of data =>
72508 +                                * valid_size == 1G + 1 cluster;
72509 +                                * fallocate(1G) will then zero 1G, which can take very long.
72510 +                                * xfstest 016/086 will fail without 'ntfs_sparse_cluster'.
72511 +                                */
72512 +                               /*ntfs_sparse_cluster(inode, NULL, vcn,
72513 +                                *                  min(vcn_v - vcn, clen));
72514 +                                */
72515 +                       }
72516 +               }
72518 +               if (mode & FALLOC_FL_KEEP_SIZE) {
72519 +                       ni_lock(ni);
72520 +                       /* true - keep preallocated */
72521 +                       err = attr_set_size(ni, ATTR_DATA, NULL, 0,
72522 +                                           &ni->file.run, i_size, &ni->i_valid,
72523 +                                           true, NULL);
72524 +                       ni_unlock(ni);
72525 +               }
72526 +       }
72528 +       if (!err) {
72529 +               inode->i_ctime = inode->i_mtime = current_time(inode);
72530 +               mark_inode_dirty(inode);
72531 +       }
72532 +out:
72533 +       if (err == -EFBIG)
72534 +               err = -ENOSPC;
72536 +       inode_unlock(inode);
72537 +       return err;
72541 + * inode_operations::setattr
72542 + */
72543 +int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
72544 +                 struct iattr *attr)
72546 +       struct super_block *sb = dentry->d_sb;
72547 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
72548 +       struct inode *inode = d_inode(dentry);
72549 +       struct ntfs_inode *ni = ntfs_i(inode);
72550 +       u32 ia_valid = attr->ia_valid;
72551 +       umode_t mode = inode->i_mode;
72552 +       int err;
72554 +       if (sbi->options.no_acs_rules) {
72555 +               /* "no access rules" - force any changes of time etc. */
72556 +               attr->ia_valid |= ATTR_FORCE;
72557 +               /* and disable editing of some attributes */
72558 +               attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
72559 +               ia_valid = attr->ia_valid;
72560 +       }
72562 +       err = setattr_prepare(mnt_userns, dentry, attr);
72563 +       if (err)
72564 +               goto out;
72566 +       if (ia_valid & ATTR_SIZE) {
72567 +               loff_t oldsize = inode->i_size;
72569 +               if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
72570 +                       /* Should never be here, see ntfs_file_open. */
72571 +                       err = -EOPNOTSUPP;
72572 +                       goto out;
72573 +               }
72574 +               inode_dio_wait(inode);
72576 +               if (attr->ia_size < oldsize)
72577 +                       err = ntfs_truncate(inode, attr->ia_size);
72578 +               else if (attr->ia_size > oldsize)
72579 +                       err = ntfs_extend(inode, attr->ia_size, 0, NULL);
72581 +               if (err)
72582 +                       goto out;
72584 +               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
72585 +       }
72587 +       setattr_copy(mnt_userns, inode, attr);
72589 +       if (mode != inode->i_mode) {
72590 +               err = ntfs_acl_chmod(mnt_userns, inode);
72591 +               if (err)
72592 +                       goto out;
72594 +               /* Linux 'w' -> Windows 'ro' */
72595 +               if (0222 & inode->i_mode)
72596 +                       ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
72597 +               else
72598 +                       ni->std_fa |= FILE_ATTRIBUTE_READONLY;
72599 +       }
72601 +       mark_inode_dirty(inode);
72602 +out:
72603 +       return err;
72606 +static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
72608 +       ssize_t err;
72609 +       size_t count = iov_iter_count(iter);
72610 +       struct file *file = iocb->ki_filp;
72611 +       struct inode *inode = file->f_mapping->host;
72612 +       struct ntfs_inode *ni = ntfs_i(inode);
72614 +       if (is_encrypted(ni)) {
72615 +               ntfs_inode_warn(inode, "encrypted i/o not supported");
72616 +               return -EOPNOTSUPP;
72617 +       }
72619 +       if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
72620 +               ntfs_inode_warn(inode, "direct i/o + compressed not supported");
72621 +               return -EOPNOTSUPP;
72622 +       }
72624 +#ifndef CONFIG_NTFS3_LZX_XPRESS
72625 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
72626 +               ntfs_inode_warn(
72627 +                       inode,
72628 +                       "activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
72629 +               return -EOPNOTSUPP;
72630 +       }
72631 +#endif
72633 +       if (is_dedup(ni)) {
72634 +               ntfs_inode_warn(inode, "read deduplicated not supported");
72635 +               return -EOPNOTSUPP;
72636 +       }
72638 +       err = count ? generic_file_read_iter(iocb, iter) : 0;
72640 +       return err;
72643 +/* Returns an array of locked pages. */
72644 +static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
72645 +                               struct page **pages, u32 pages_per_frame,
72646 +                               bool *frame_uptodate)
72648 +       gfp_t gfp_mask = mapping_gfp_mask(mapping);
72649 +       u32 npages;
72651 +       *frame_uptodate = true;
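+       /* Lock all pages of the frame; on failure release those already taken. */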
72653 +       for (npages = 0; npages < pages_per_frame; npages++, index++) {
72654 +               struct page *page;
72656 +               page = find_or_create_page(mapping, index, gfp_mask);
72657 +               if (!page) {
72658 +                       while (npages--) {
72659 +                               page = pages[npages];
72660 +                               unlock_page(page);
72661 +                               put_page(page);
72662 +                       }
72664 +                       return -ENOMEM;
72665 +               }
72667 +               if (!PageUptodate(page))
72668 +                       *frame_uptodate = false;
72670 +               pages[npages] = page;
72671 +       }
72673 +       return 0;
72676 +/* Helper for ntfs_file_write_iter (compressed files). */
72677 +static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
72679 +       int err;
72680 +       struct file *file = iocb->ki_filp;
72681 +       size_t count = iov_iter_count(from);
72682 +       loff_t pos = iocb->ki_pos;
72683 +       struct inode *inode = file_inode(file);
72684 +       loff_t i_size = inode->i_size;
72685 +       struct address_space *mapping = inode->i_mapping;
72686 +       struct ntfs_inode *ni = ntfs_i(inode);
72687 +       u64 valid = ni->i_valid;
72688 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
72689 +       struct page *page, **pages = NULL;
72690 +       size_t written = 0;
72691 +       u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
72692 +       u32 frame_size = 1u << frame_bits;
72693 +       u32 pages_per_frame = frame_size >> PAGE_SHIFT;
72694 +       u32 ip, off;
72695 +       CLST frame;
72696 +       u64 frame_vbo;
72697 +       pgoff_t index;
72698 +       bool frame_uptodate;
72700 +       if (frame_size < PAGE_SIZE) {
72701 +               /*
72702 +                * frame_size == 8K if cluster 512
72703 +                * frame_size == 64K if cluster 4096
72704 +                */
72705 +               ntfs_inode_warn(inode, "page size is bigger than frame size");
72706 +               return -EOPNOTSUPP;
72707 +       }
72709 +       pages = ntfs_malloc(pages_per_frame * sizeof(struct page *));
72710 +       if (!pages)
72711 +               return -ENOMEM;
72713 +       current->backing_dev_info = inode_to_bdi(inode);
72714 +       err = file_remove_privs(file);
72715 +       if (err)
72716 +               goto out;
72718 +       err = file_update_time(file);
72719 +       if (err)
72720 +               goto out;
72722 +       /* zero range [valid : pos) */
72723 +       while (valid < pos) {
72724 +               CLST lcn, clen;
72726 +               frame = valid >> frame_bits;
72727 +               frame_vbo = valid & ~(frame_size - 1);
72728 +               off = valid & (frame_size - 1);
72730 +               err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
72731 +                                         &clen, NULL);
72732 +               if (err)
72733 +                       goto out;
72735 +               if (lcn == SPARSE_LCN) {
72736 +                       ni->i_valid = valid =
72737 +                               frame_vbo + ((u64)clen << sbi->cluster_bits);
72738 +                       continue;
72739 +               }
72741 +               /* Load full frame */
72742 +               err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
72743 +                                          pages, pages_per_frame,
72744 +                                          &frame_uptodate);
72745 +               if (err)
72746 +                       goto out;
72748 +               if (!frame_uptodate && off) {
72749 +                       err = ni_read_frame(ni, frame_vbo, pages,
72750 +                                           pages_per_frame);
72751 +                       if (err) {
72752 +                               for (ip = 0; ip < pages_per_frame; ip++) {
72753 +                                       page = pages[ip];
72754 +                                       unlock_page(page);
72755 +                                       put_page(page);
72756 +                               }
72757 +                               goto out;
72758 +                       }
72759 +               }
72761 +               ip = off >> PAGE_SHIFT;
72762 +               off = offset_in_page(valid);
72763 +               for (; ip < pages_per_frame; ip++, off = 0) {
72764 +                       page = pages[ip];
72765 +                       zero_user_segment(page, off, PAGE_SIZE);
72766 +                       flush_dcache_page(page);
72767 +                       SetPageUptodate(page);
72768 +               }
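+               /* The frame is fully uptodate now; compress and write it back. */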
72770 +               ni_lock(ni);
72771 +               err = ni_write_frame(ni, pages, pages_per_frame);
72772 +               ni_unlock(ni);
72774 +               for (ip = 0; ip < pages_per_frame; ip++) {
72775 +                       page = pages[ip];
72776 +                       SetPageUptodate(page);
72777 +                       unlock_page(page);
72778 +                       put_page(page);
72779 +               }
72781 +               if (err)
72782 +                       goto out;
72784 +               ni->i_valid = valid = frame_vbo + frame_size;
72785 +       }
72787 +       /* copy user data [pos : pos + count) */
72788 +       while (count) {
72789 +               size_t copied, bytes;
72791 +               off = pos & (frame_size - 1);
72792 +               bytes = frame_size - off;
72793 +               if (bytes > count)
72794 +                       bytes = count;
72796 +               frame = pos >> frame_bits;
72797 +               frame_vbo = pos & ~(frame_size - 1);
72798 +               index = frame_vbo >> PAGE_SHIFT;
72800 +               if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
72801 +                       err = -EFAULT;
72802 +                       goto out;
72803 +               }
72805 +               /* Load full frame */
72806 +               err = ntfs_get_frame_pages(mapping, index, pages,
72807 +                                          pages_per_frame, &frame_uptodate);
72808 +               if (err)
72809 +                       goto out;
72811 +               if (!frame_uptodate) {
72812 +                       loff_t to = pos + bytes;
72814 +                       if (off || (to < i_size && (to & (frame_size - 1)))) {
72815 +                               err = ni_read_frame(ni, frame_vbo, pages,
72816 +                                                   pages_per_frame);
72817 +                               if (err) {
72818 +                                       for (ip = 0; ip < pages_per_frame;
72819 +                                            ip++) {
72820 +                                               page = pages[ip];
72821 +                                               unlock_page(page);
72822 +                                               put_page(page);
72823 +                                       }
72824 +                                       goto out;
72825 +                               }
72826 +                       }
72827 +               }
72829 +               WARN_ON(!bytes);
72830 +               copied = 0;
72831 +               ip = off >> PAGE_SHIFT;
72832 +               off = offset_in_page(pos);
72834 +               /* copy user data to pages */
72835 +               for (;;) {
72836 +                       size_t cp, tail = PAGE_SIZE - off;
72838 +                       page = pages[ip];
72839 +                       cp = iov_iter_copy_from_user_atomic(page, from, off,
72840 +                                                           min(tail, bytes));
72841 +                       flush_dcache_page(page);
72842 +                       iov_iter_advance(from, cp);
72843 +                       copied += cp;
72844 +                       bytes -= cp;
72845 +                       if (!bytes || !cp)
72846 +                               break;
72848 +                       if (cp < tail) {
72849 +                               off += cp;
72850 +                       } else {
72851 +                               ip++;
72852 +                               off = 0;
72853 +                       }
72854 +               }
72856 +               ni_lock(ni);
72857 +               err = ni_write_frame(ni, pages, pages_per_frame);
72858 +               ni_unlock(ni);
72860 +               for (ip = 0; ip < pages_per_frame; ip++) {
72861 +                       page = pages[ip];
72862 +                       ClearPageDirty(page);
72863 +                       SetPageUptodate(page);
72864 +                       unlock_page(page);
72865 +                       put_page(page);
72866 +               }
72868 +               if (err)
72869 +                       goto out;
72871 +               /*
72872 +                * We can loop for a long time in here. Be nice and allow
72873 +                * us to schedule out to avoid soft lockups if preemption
72874 +                * is disabled.
72875 +                */
72876 +               cond_resched();
72878 +               pos += copied;
72879 +               written += copied;
72881 +               count = iov_iter_count(from);
72882 +       }
72884 +out:
72885 +       ntfs_free(pages);
72887 +       current->backing_dev_info = NULL;
72889 +       if (err < 0)
72890 +               return err;
72892 +       iocb->ki_pos += written;
72893 +       if (iocb->ki_pos > ni->i_valid)
72894 +               ni->i_valid = iocb->ki_pos;
72896 +       return written;
72900 + * file_operations::write_iter
72901 + */
72902 +static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
72904 +       struct file *file = iocb->ki_filp;
72905 +       struct address_space *mapping = file->f_mapping;
72906 +       struct inode *inode = mapping->host;
72907 +       ssize_t ret;
72908 +       struct ntfs_inode *ni = ntfs_i(inode);
72910 +       if (is_encrypted(ni)) {
72911 +               ntfs_inode_warn(inode, "encrypted i/o not supported");
72912 +               return -EOPNOTSUPP;
72913 +       }
72915 +       if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
72916 +               ntfs_inode_warn(inode, "direct i/o + compressed not supported");
72917 +               return -EOPNOTSUPP;
72918 +       }
72920 +       if (is_dedup(ni)) {
72921 +               ntfs_inode_warn(inode, "write into deduplicated not supported");
72922 +               return -EOPNOTSUPP;
72923 +       }
72925 +       if (!inode_trylock(inode)) {
72926 +               if (iocb->ki_flags & IOCB_NOWAIT)
72927 +                       return -EAGAIN;
72928 +               inode_lock(inode);
72929 +       }
72931 +       ret = generic_write_checks(iocb, from);
72932 +       if (ret <= 0)
72933 +               goto out;
72935 +       if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
72936 +               /* Should never be here, see ntfs_file_open. */
72937 +               ret = -EOPNOTSUPP;
72938 +               goto out;
72939 +       }
72941 +       ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
72942 +       if (ret)
72943 +               goto out;
72945 +       ret = is_compressed(ni) ? ntfs_compress_write(iocb, from)
72946 +                               : __generic_file_write_iter(iocb, from);
72948 +out:
72949 +       inode_unlock(inode);
72951 +       if (ret > 0)
72952 +               ret = generic_write_sync(iocb, ret);
72954 +       return ret;
72958 + * file_operations::open
72959 + */
72960 +int ntfs_file_open(struct inode *inode, struct file *file)
72962 +       struct ntfs_inode *ni = ntfs_i(inode);
72964 +       if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
72965 +                    (file->f_flags & O_DIRECT))) {
72966 +               return -EOPNOTSUPP;
72967 +       }
72969 +       /* Decompress "external compressed" file if opened for rw */
72970 +       if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
72971 +           (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
72972 +#ifdef CONFIG_NTFS3_LZX_XPRESS
72973 +               int err = ni_decompress_file(ni);
72975 +               if (err)
72976 +                       return err;
72977 +#else
72978 +               ntfs_inode_warn(
72979 +                       inode,
72980 +                       "activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
72981 +               return -EOPNOTSUPP;
72982 +#endif
72983 +       }
72985 +       return generic_file_open(inode, file);
72989 + * file_operations::release
72990 + */
72991 +static int ntfs_file_release(struct inode *inode, struct file *file)
72993 +       struct ntfs_inode *ni = ntfs_i(inode);
72994 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
72995 +       int err = 0;
72997 +       /* if we are the last writer on the inode, drop the block reservation */
72998 +       if (sbi->options.prealloc && ((file->f_mode & FMODE_WRITE) &&
72999 +                                     atomic_read(&inode->i_writecount) == 1)) {
73000 +               ni_lock(ni);
73001 +               down_write(&ni->file.run_lock);
73003 +               err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
73004 +                                   inode->i_size, &ni->i_valid, false, NULL);
73006 +               up_write(&ni->file.run_lock);
73007 +               ni_unlock(ni);
73008 +       }
73009 +       return err;
73012 +/* file_operations::fiemap */
73013 +int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
73014 +               __u64 start, __u64 len)
73016 +       int err;
73017 +       struct ntfs_inode *ni = ntfs_i(inode);
73019 +       if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
73020 +               return -EOPNOTSUPP;
73022 +       ni_lock(ni);
73024 +       err = ni_fiemap(ni, fieinfo, start, len);
73026 +       ni_unlock(ni);
73028 +       return err;
73031 +const struct inode_operations ntfs_file_inode_operations = {
73032 +       .getattr = ntfs_getattr,
73033 +       .setattr = ntfs3_setattr,
73034 +       .listxattr = ntfs_listxattr,
73035 +       .permission = ntfs_permission,
73036 +       .get_acl = ntfs_get_acl,
73037 +       .set_acl = ntfs_set_acl,
73038 +       .fiemap = ntfs_fiemap,
73041 +const struct file_operations ntfs_file_operations = {
73042 +       .llseek = generic_file_llseek,
73043 +       .read_iter = ntfs_file_read_iter,
73044 +       .write_iter = ntfs_file_write_iter,
73045 +       .unlocked_ioctl = ntfs_ioctl,
73046 +#ifdef CONFIG_COMPAT
73047 +       .compat_ioctl = ntfs_compat_ioctl,
73048 +#endif
73049 +       .splice_read = generic_file_splice_read,
73050 +       .mmap = ntfs_file_mmap,
73051 +       .open = ntfs_file_open,
73052 +       .fsync = generic_file_fsync,
73053 +       .splice_write = iter_file_splice_write,
73054 +       .fallocate = ntfs_fallocate,
73055 +       .release = ntfs_file_release,
73057 diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
73058 new file mode 100644
73059 index 000000000000..c3121bf9c62f
73060 --- /dev/null
73061 +++ b/fs/ntfs3/frecord.c
73062 @@ -0,0 +1,3071 @@
73063 +// SPDX-License-Identifier: GPL-2.0
73065 + *
73066 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
73067 + *
73068 + */
73070 +#include <linux/blkdev.h>
73071 +#include <linux/buffer_head.h>
73072 +#include <linux/fiemap.h>
73073 +#include <linux/fs.h>
73074 +#include <linux/nls.h>
73075 +#include <linux/vmalloc.h>
73077 +#include "debug.h"
73078 +#include "ntfs.h"
73079 +#include "ntfs_fs.h"
73080 +#ifdef CONFIG_NTFS3_LZX_XPRESS
73081 +#include "lib/lib.h"
73082 +#endif
73084 +static struct mft_inode *ni_ins_mi(struct ntfs_inode *ni, struct rb_root *tree,
73085 +                                  CLST ino, struct rb_node *ins)
73087 +       struct rb_node **p = &tree->rb_node;
73088 +       struct rb_node *pr = NULL;
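+       /*
+        * Descend the rbtree: return the existing mft_inode for 'ino',
+        * or link 'ins' at the found position if it is provided.
+        */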
73090 +       while (*p) {
73091 +               struct mft_inode *mi;
73093 +               pr = *p;
73094 +               mi = rb_entry(pr, struct mft_inode, node);
73095 +               if (mi->rno > ino)
73096 +                       p = &pr->rb_left;
73097 +               else if (mi->rno < ino)
73098 +                       p = &pr->rb_right;
73099 +               else
73100 +                       return mi;
73101 +       }
73103 +       if (!ins)
73104 +               return NULL;
73106 +       rb_link_node(ins, pr, p);
73107 +       rb_insert_color(ins, tree);
73108 +       return rb_entry(ins, struct mft_inode, node);
73112 + * ni_find_mi
73113 + *
73114 + * finds mft_inode by record number
73115 + */
73116 +static struct mft_inode *ni_find_mi(struct ntfs_inode *ni, CLST rno)
73118 +       return ni_ins_mi(ni, &ni->mi_tree, rno, NULL);
73122 + * ni_add_mi
73123 + *
73124 + * adds new mft_inode into ntfs_inode
73125 + */
73126 +static void ni_add_mi(struct ntfs_inode *ni, struct mft_inode *mi)
73128 +       ni_ins_mi(ni, &ni->mi_tree, mi->rno, &mi->node);
73132 + * ni_remove_mi
73133 + *
73134 + * removes mft_inode from ntfs_inode
73135 + */
73136 +void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi)
73138 +       rb_erase(&mi->node, &ni->mi_tree);
73142 + * ni_std
73143 + *
73144 + * returns pointer into std_info from primary record
73145 + */
73146 +struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
73148 +       const struct ATTRIB *attr;
73150 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
73151 +       return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO))
73152 +                   : NULL;
73156 + * ni_std5
73157 + *
73158 + * returns pointer into std_info from primary record
73159 + */
73160 +struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
73162 +       const struct ATTRIB *attr;
73164 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
73166 +       return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5))
73167 +                   : NULL;
73171 + * ni_clear
73172 + *
73173 + * clears resources allocated by ntfs_inode
73174 + */
73175 +void ni_clear(struct ntfs_inode *ni)
73177 +       struct rb_node *node;
73179 +       if (!ni->vfs_inode.i_nlink && is_rec_inuse(ni->mi.mrec))
73180 +               ni_delete_all(ni);
73182 +       al_destroy(ni);
73184 +       for (node = rb_first(&ni->mi_tree); node;) {
73185 +               struct rb_node *next = rb_next(node);
73186 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
73188 +               rb_erase(node, &ni->mi_tree);
73189 +               mi_put(mi);
73190 +               node = next;
73191 +       }
73193 +       /* bad inode always has mode == S_IFREG */
73194 +       if (ni->ni_flags & NI_FLAG_DIR)
73195 +               indx_clear(&ni->dir);
73196 +       else {
73197 +               run_close(&ni->file.run);
73198 +#ifdef CONFIG_NTFS3_LZX_XPRESS
73199 +               if (ni->file.offs_page) {
73200 +                       /* on-demand allocated page for offsets */
73201 +                       put_page(ni->file.offs_page);
73202 +                       ni->file.offs_page = NULL;
73203 +               }
73204 +#endif
73205 +       }
73207 +       mi_clear(&ni->mi);
73211 + * ni_load_mi_ex
73212 + *
73213 + * finds mft_inode by record number.
73214 + */
73215 +int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
73217 +       int err;
73218 +       struct mft_inode *r;
73220 +       r = ni_find_mi(ni, rno);
73221 +       if (r)
73222 +               goto out;
73224 +       err = mi_get(ni->mi.sbi, rno, &r);
73225 +       if (err)
73226 +               return err;
73228 +       ni_add_mi(ni, r);
73230 +out:
73231 +       if (mi)
73232 +               *mi = r;
73233 +       return 0;
73237 + * ni_load_mi
73238 + *
73239 + * loads the mft_inode corresponding to the given list_entry
73240 + */
73241 +int ni_load_mi(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
73242 +              struct mft_inode **mi)
73244 +       CLST rno;
73246 +       if (!le) {
73247 +               *mi = &ni->mi;
73248 +               return 0;
73249 +       }
73251 +       rno = ino_get(&le->ref);
73252 +       if (rno == ni->mi.rno) {
73253 +               *mi = &ni->mi;
73254 +               return 0;
73255 +       }
73256 +       return ni_load_mi_ex(ni, rno, mi);
73260 + * ni_find_attr
73261 + *
73262 + * returns attribute and record this attribute belongs to
73263 + */
73264 +struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
73265 +                           struct ATTR_LIST_ENTRY **le_o, enum ATTR_TYPE type,
73266 +                           const __le16 *name, u8 name_len, const CLST *vcn,
73267 +                           struct mft_inode **mi)
73269 +       struct ATTR_LIST_ENTRY *le;
73270 +       struct mft_inode *m;
73272 +       if (!ni->attr_list.size ||
73273 +           (!name_len && (type == ATTR_LIST || type == ATTR_STD))) {
73274 +               if (le_o)
73275 +                       *le_o = NULL;
73276 +               if (mi)
73277 +                       *mi = &ni->mi;
73279 +               /* Look for required attribute in primary record */
73280 +               return mi_find_attr(&ni->mi, attr, type, name, name_len, NULL);
73281 +       }
73283 +       /* first look for list entry of required type */
73284 +       le = al_find_ex(ni, le_o ? *le_o : NULL, type, name, name_len, vcn);
73285 +       if (!le)
73286 +               return NULL;
73288 +       if (le_o)
73289 +               *le_o = le;
73291 +       /* Load record that contains this attribute */
73292 +       if (ni_load_mi(ni, le, &m))
73293 +               return NULL;
73295 +       /* Look for required attribute */
73296 +       attr = mi_find_attr(m, NULL, type, name, name_len, &le->id);
73298 +       if (!attr)
73299 +               goto out;
73301 +       if (!attr->non_res) {
73302 +               if (vcn && *vcn)
73303 +                       goto out;
73304 +       } else if (!vcn) {
73305 +               if (attr->nres.svcn)
73306 +                       goto out;
73307 +       } else if (le64_to_cpu(attr->nres.svcn) > *vcn ||
73308 +                  *vcn > le64_to_cpu(attr->nres.evcn)) {
73309 +               goto out;
73310 +       }
73312 +       if (mi)
73313 +               *mi = m;
73314 +       return attr;
73316 +out:
73317 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
73318 +       return NULL;
73322 + * ni_enum_attr_ex
73323 + *
73324 + * enumerates attributes in ntfs_inode
73325 + */
73326 +struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
73327 +                              struct ATTR_LIST_ENTRY **le,
73328 +                              struct mft_inode **mi)
73330 +       struct mft_inode *mi2;
73331 +       struct ATTR_LIST_ENTRY *le2;
73333 +       /* Do we have an attribute list? */
73334 +       if (!ni->attr_list.size) {
73335 +               *le = NULL;
73336 +               if (mi)
73337 +                       *mi = &ni->mi;
73338 +               /* Enum attributes in primary record */
73339 +               return mi_enum_attr(&ni->mi, attr);
73340 +       }
73342 +       /* get next list entry */
73343 +       le2 = *le = al_enumerate(ni, attr ? *le : NULL);
73344 +       if (!le2)
73345 +               return NULL;
73347 +       /* Load record that contains the required attribute */
73348 +       if (ni_load_mi(ni, le2, &mi2))
73349 +               return NULL;
73351 +       if (mi)
73352 +               *mi = mi2;
73354 +       /* Find attribute in loaded record */
73355 +       return rec_find_attr_le(mi2, le2);
73359 + * ni_load_attr
73360 + *
73361 + * loads attribute that contains given vcn
73362 + */
73363 +struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
73364 +                           const __le16 *name, u8 name_len, CLST vcn,
73365 +                           struct mft_inode **pmi)
73367 +       struct ATTR_LIST_ENTRY *le;
73368 +       struct ATTRIB *attr;
73369 +       struct mft_inode *mi;
73370 +       struct ATTR_LIST_ENTRY *next;
73372 +       if (!ni->attr_list.size) {
73373 +               if (pmi)
73374 +                       *pmi = &ni->mi;
73375 +               return mi_find_attr(&ni->mi, NULL, type, name, name_len, NULL);
73376 +       }
73378 +       le = al_find_ex(ni, NULL, type, name, name_len, NULL);
73379 +       if (!le)
73380 +               return NULL;
73382 +       /*
73383 +        * Unfortunately, ATTR_LIST_ENTRY contains only the starting vcn,
73384 +        * so to find the ATTRIB segment that contains 'vcn' we have to
73385 +        * enumerate some entries.
73386 +        */
73387 +       if (vcn) {
73388 +               for (;; le = next) {
73389 +                       next = al_find_ex(ni, le, type, name, name_len, NULL);
73390 +                       if (!next || le64_to_cpu(next->vcn) > vcn)
73391 +                               break;
73392 +               }
73393 +       }
73395 +       if (ni_load_mi(ni, le, &mi))
73396 +               return NULL;
73398 +       if (pmi)
73399 +               *pmi = mi;
73401 +       attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
73402 +       if (!attr)
73403 +               return NULL;
73405 +       if (!attr->non_res)
73406 +               return attr;
73408 +       if (le64_to_cpu(attr->nres.svcn) <= vcn &&
73409 +           vcn <= le64_to_cpu(attr->nres.evcn))
73410 +               return attr;
73412 +       return NULL;
73416 + * ni_load_all_mi
73417 + *
73418 + * loads all subrecords
73419 + */
73420 +int ni_load_all_mi(struct ntfs_inode *ni)
73422 +       int err;
73423 +       struct ATTR_LIST_ENTRY *le;
73425 +       if (!ni->attr_list.size)
73426 +               return 0;
73428 +       le = NULL;
73430 +       while ((le = al_enumerate(ni, le))) {
73431 +               CLST rno = ino_get(&le->ref);
73433 +               if (rno == ni->mi.rno)
73434 +                       continue;
73436 +               err = ni_load_mi_ex(ni, rno, NULL);
73437 +               if (err)
73438 +                       return err;
73439 +       }
73441 +       return 0;
73445 + * ni_add_subrecord
73446 + *
73447 + * allocates, formats and attaches a new subrecord
73448 + */
73449 +bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
73451 +       struct mft_inode *m;
73453 +       m = ntfs_zalloc(sizeof(struct mft_inode));
73454 +       if (!m)
73455 +               return false;
73457 +       if (mi_format_new(m, ni->mi.sbi, rno, 0, ni->mi.rno == MFT_REC_MFT)) {
73458 +               mi_put(m);
73459 +               return false;
73460 +       }
73462 +       mi_get_ref(&ni->mi, &m->mrec->parent_ref);
73464 +       ni_add_mi(ni, m);
73465 +       *mi = m;
73466 +       return true;
73470 + * ni_remove_attr
73471 + *
73472 + * removes all attributes for the given type/name/id
73473 + */
73474 +int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
73475 +                  const __le16 *name, size_t name_len, bool base_only,
73476 +                  const __le16 *id)
73478 +       int err;
73479 +       struct ATTRIB *attr;
73480 +       struct ATTR_LIST_ENTRY *le;
73481 +       struct mft_inode *mi;
73482 +       u32 type_in;
73483 +       int diff;
73485 +       if (base_only || type == ATTR_LIST || !ni->attr_list.size) {
73486 +               attr = mi_find_attr(&ni->mi, NULL, type, name, name_len, id);
73487 +               if (!attr)
73488 +                       return -ENOENT;
73490 +               mi_remove_attr(&ni->mi, attr);
73491 +               return 0;
73492 +       }
73494 +       type_in = le32_to_cpu(type);
73495 +       le = NULL;
73497 +       for (;;) {
73498 +               le = al_enumerate(ni, le);
73499 +               if (!le)
73500 +                       return 0;
73502 +next_le2:
73503 +               diff = le32_to_cpu(le->type) - type_in;
73504 +               if (diff < 0)
73505 +                       continue;
73507 +               if (diff > 0)
73508 +                       return 0;
73510 +               if (le->name_len != name_len)
73511 +                       continue;
73513 +               if (name_len &&
73514 +                   memcmp(le_name(le), name, name_len * sizeof(short)))
73515 +                       continue;
73517 +               if (id && le->id != *id)
73518 +                       continue;
73519 +               err = ni_load_mi(ni, le, &mi);
73520 +               if (err)
73521 +                       return err;
73523 +               al_remove_le(ni, le);
73525 +               attr = mi_find_attr(mi, NULL, type, name, name_len, id);
73526 +               if (!attr)
73527 +                       return -ENOENT;
73529 +               mi_remove_attr(mi, attr);
73531 +               if (PtrOffset(ni->attr_list.le, le) >= ni->attr_list.size)
73532 +                       return 0;
73533 +               goto next_le2;
73534 +       }
73538 + * ni_ins_new_attr
73539 + *
73540 + * inserts the attribute into the record.
73541 + * Returns a not fully constructed attribute, or NULL if it cannot be created.
73542 + */
73543 +static struct ATTRIB *ni_ins_new_attr(struct ntfs_inode *ni,
73544 +                                     struct mft_inode *mi,
73545 +                                     struct ATTR_LIST_ENTRY *le,
73546 +                                     enum ATTR_TYPE type, const __le16 *name,
73547 +                                     u8 name_len, u32 asize, u16 name_off,
73548 +                                     CLST svcn)
73550 +       int err;
73551 +       struct ATTRIB *attr;
73552 +       bool le_added = false;
73553 +       struct MFT_REF ref;
73555 +       mi_get_ref(mi, &ref);
73557 +       if (type != ATTR_LIST && !le && ni->attr_list.size) {
73558 +               err = al_add_le(ni, type, name, name_len, svcn, cpu_to_le16(-1),
73559 +                               &ref, &le);
73560 +               if (err) {
73561 +                       /* no memory or no space */
73562 +                       return NULL;
73563 +               }
73564 +               le_added = true;
73566 +               /*
73567 +                * al_add_le -> attr_set_size (list) -> ni_expand_list
73568 +                * which moves some attributes out of the primary record.
73569 +                * This means that 'name' may point into moved memory,
73570 +                * so reinitialize 'name' from the le.
73571 +                */
73572 +               name = le->name;
73573 +       }
73575 +       attr = mi_insert_attr(mi, type, name, name_len, asize, name_off);
73576 +       if (!attr) {
73577 +               if (le_added)
73578 +                       al_remove_le(ni, le);
73579 +               return NULL;
73580 +       }
73582 +       if (type == ATTR_LIST) {
73583 +               /* The attribute list is not in the list entry array. */
73584 +               goto out;
73585 +       }
73587 +       if (!le)
73588 +               goto out;
73590 +       /* Update ATTRIB Id and record reference */
73591 +       le->id = attr->id;
73592 +       ni->attr_list.dirty = true;
73593 +       le->ref = ref;
73595 +out:
73596 +       return attr;
73600 + * Random write access to a sparsed or compressed file may result in
73601 + * poorly packed runs.
73602 + * This is the place to optimize them.
73603 + */
73604 +static int ni_repack(struct ntfs_inode *ni)
73606 +       int err = 0;
73607 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
73608 +       struct mft_inode *mi, *mi_p = NULL;
73609 +       struct ATTRIB *attr = NULL, *attr_p;
73610 +       struct ATTR_LIST_ENTRY *le = NULL, *le_p;
73611 +       CLST alloc = 0;
73612 +       u8 cluster_bits = sbi->cluster_bits;
73613 +       CLST svcn, evcn = 0, svcn_p, evcn_p, next_svcn;
73614 +       u32 roff, rs = sbi->record_size;
73615 +       struct runs_tree run;
73617 +       run_init(&run);
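+       /*
+        * Walk the non-resident attribute segments, merging the runs of
+        * adjacent segments and repacking them pairwise.
+        */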
73619 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi))) {
73620 +               if (!attr->non_res)
73621 +                       continue;
73623 +               svcn = le64_to_cpu(attr->nres.svcn);
73624 +               if (svcn != le64_to_cpu(le->vcn)) {
73625 +                       err = -EINVAL;
73626 +                       break;
73627 +               }
73629 +               if (!svcn) {
73630 +                       alloc = le64_to_cpu(attr->nres.alloc_size) >>
73631 +                               cluster_bits;
73632 +                       mi_p = NULL;
73633 +               } else if (svcn != evcn + 1) {
73634 +                       err = -EINVAL;
73635 +                       break;
73636 +               }
73638 +               evcn = le64_to_cpu(attr->nres.evcn);
73640 +               if (svcn > evcn + 1) {
73641 +                       err = -EINVAL;
73642 +                       break;
73643 +               }
73645 +               if (!mi_p) {
73646 +                       /* do not try if too little free space */
73647 +                       if (le32_to_cpu(mi->mrec->used) + 8 >= rs)
73648 +                               continue;
73650 +                       /* do not try if last attribute segment */
73651 +                       if (evcn + 1 == alloc)
73652 +                               continue;
73653 +                       run_close(&run);
73654 +               }
73656 +               roff = le16_to_cpu(attr->nres.run_off);
73657 +               err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn,
73658 +                                Add2Ptr(attr, roff),
73659 +                                le32_to_cpu(attr->size) - roff);
73660 +               if (err < 0)
73661 +                       break;
73663 +               if (!mi_p) {
73664 +                       mi_p = mi;
73665 +                       attr_p = attr;
73666 +                       svcn_p = svcn;
73667 +                       evcn_p = evcn;
73668 +                       le_p = le;
73669 +                       err = 0;
73670 +                       continue;
73671 +               }
73673 +               /*
73674 +                * The run now contains data from two records: mi_p and mi.
73675 +                * Try to pack it into one record.
73676 +                */
73677 +               err = mi_pack_runs(mi_p, attr_p, &run, evcn + 1 - svcn_p);
73678 +               if (err)
73679 +                       break;
73681 +               next_svcn = le64_to_cpu(attr_p->nres.evcn) + 1;
73683 +               if (next_svcn >= evcn + 1) {
73684 +                       /* we can remove this attribute segment */
73685 +                       al_remove_le(ni, le);
73686 +                       mi_remove_attr(mi, attr);
73687 +                       le = le_p;
73688 +                       continue;
73689 +               }
73691 +               attr->nres.svcn = le->vcn = cpu_to_le64(next_svcn);
73692 +               mi->dirty = true;
73693 +               ni->attr_list.dirty = true;
73695 +               if (evcn + 1 == alloc) {
73696 +                       err = mi_pack_runs(mi, attr, &run,
73697 +                                          evcn + 1 - next_svcn);
73698 +                       if (err)
73699 +                               break;
73700 +                       mi_p = NULL;
73701 +               } else {
73702 +                       mi_p = mi;
73703 +                       attr_p = attr;
73704 +                       svcn_p = next_svcn;
73705 +                       evcn_p = evcn;
73706 +                       le_p = le;
73707 +                       run_truncate_head(&run, next_svcn);
73708 +               }
73709 +       }
73711 +       if (err) {
73712 +               ntfs_inode_warn(&ni->vfs_inode, "repack problem");
73713 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
73715 +               /* Pack loaded but not packed runs */
73716 +               if (mi_p)
73717 +                       mi_pack_runs(mi_p, attr_p, &run, evcn_p + 1 - svcn_p);
73718 +       }
73720 +       run_close(&run);
73721 +       return err;
73725 + * ni_try_remove_attr_list
73726 + *
73727 + * Can we remove the attribute list?
73728 + * Check whether the primary record contains enough space for all attributes.
73729 + */
73730 +static int ni_try_remove_attr_list(struct ntfs_inode *ni)
73732 +       int err = 0;
73733 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
73734 +       struct ATTRIB *attr, *attr_list, *attr_ins;
73735 +       struct ATTR_LIST_ENTRY *le;
73736 +       struct mft_inode *mi;
73737 +       u32 asize, free;
73738 +       struct MFT_REF ref;
73739 +       __le16 id;
73741 +       if (!ni->attr_list.dirty)
73742 +               return 0;
73744 +       err = ni_repack(ni);
73745 +       if (err)
73746 +               return err;
73748 +       attr_list = mi_find_attr(&ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
73749 +       if (!attr_list)
73750 +               return 0;
73752 +       asize = le32_to_cpu(attr_list->size);
73754 +       /* Free space in the primary record if the attribute list were removed. */
73755 +       free = sbi->record_size - le32_to_cpu(ni->mi.mrec->used) + asize;
73756 +       mi_get_ref(&ni->mi, &ref);
73758 +       le = NULL;
73759 +       while ((le = al_enumerate(ni, le))) {
73760 +               if (!memcmp(&le->ref, &ref, sizeof(ref)))
73761 +                       continue;
73763 +               if (le->vcn)
73764 +                       return 0;
73766 +               mi = ni_find_mi(ni, ino_get(&le->ref));
73767 +               if (!mi)
73768 +                       return 0;
73770 +               attr = mi_find_attr(mi, NULL, le->type, le_name(le),
73771 +                                   le->name_len, &le->id);
73772 +               if (!attr)
73773 +                       return 0;
73775 +               asize = le32_to_cpu(attr->size);
73776 +               if (asize > free)
73777 +                       return 0;
73779 +               free -= asize;
73780 +       }
73782 +       /* It seems the attribute list can be removed from the primary record. */
73783 +       mi_remove_attr(&ni->mi, attr_list);
73785 +       /*
73786 +        * Repeat the cycle above and move all attributes to primary record.
73787 +        * It should succeed: we have just checked that everything fits.
73788 +        */
73789 +       le = NULL;
73790 +       while ((le = al_enumerate(ni, le))) {
73791 +               if (!memcmp(&le->ref, &ref, sizeof(ref)))
73792 +                       continue;
73794 +               mi = ni_find_mi(ni, ino_get(&le->ref));
73796 +               attr = mi_find_attr(mi, NULL, le->type, le_name(le),
73797 +                                   le->name_len, &le->id);
73798 +               asize = le32_to_cpu(attr->size);
73800 +               /* insert into primary record */
73801 +               attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
73802 +                                         le->name_len, asize,
73803 +                                         le16_to_cpu(attr->name_off));
73804 +               id = attr_ins->id;
73806 +               /* copy all except id */
73807 +               memcpy(attr_ins, attr, asize);
73808 +               attr_ins->id = id;
73810 +               /* remove from original record */
73811 +               mi_remove_attr(mi, attr);
73812 +       }
73814 +       run_deallocate(sbi, &ni->attr_list.run, true);
73815 +       run_close(&ni->attr_list.run);
73816 +       ni->attr_list.size = 0;
73817 +       ntfs_free(ni->attr_list.le);
73818 +       ni->attr_list.le = NULL;
73819 +       ni->attr_list.dirty = false;
73821 +       return 0;
73825 + * ni_create_attr_list
73826 + *
73827 + * generates an attribute list for this primary record
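+ * and moves up to ARRAY_SIZE(arr_move) attributes into a newly
+ * allocated child record until enough space is freed for the list itself.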
73828 + */
73829 +int ni_create_attr_list(struct ntfs_inode *ni)
73831 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
73832 +       int err;
73833 +       u32 lsize;
73834 +       struct ATTRIB *attr;
73835 +       struct ATTRIB *arr_move[7];
73836 +       struct ATTR_LIST_ENTRY *le, *le_b[7];
73837 +       struct MFT_REC *rec;
73838 +       bool is_mft;
73839 +       CLST rno = 0;
73840 +       struct mft_inode *mi;
73841 +       u32 free_b, nb, to_free, rs;
73842 +       u16 sz;
73844 +       is_mft = ni->mi.rno == MFT_REC_MFT;
73845 +       rec = ni->mi.mrec;
73846 +       rs = sbi->record_size;
73848 +       /*
73849 +        * Skip estimating the exact memory requirement;
73850 +        * one record_size always appears to be enough.
73851 +        */
73852 +       le = ntfs_malloc(al_aligned(rs));
73853 +       if (!le) {
73854 +               err = -ENOMEM;
73855 +               goto out;
73856 +       }
73858 +       mi_get_ref(&ni->mi, &le->ref);
73859 +       ni->attr_list.le = le;
73861 +       attr = NULL;
73862 +       nb = 0;
73863 +       free_b = 0;
73866 +       for (; (attr = mi_enum_attr(&ni->mi, attr)); le = Add2Ptr(le, sz)) {
73867 +               sz = le_size(attr->name_len);
73868 +               le->type = attr->type;
73869 +               le->size = cpu_to_le16(sz);
73870 +               le->name_len = attr->name_len;
73871 +               le->name_off = offsetof(struct ATTR_LIST_ENTRY, name);
73872 +               le->vcn = 0;
73873 +               if (le != ni->attr_list.le)
73874 +                       le->ref = ni->attr_list.le->ref;
73875 +               le->id = attr->id;
73877 +               if (attr->name_len)
73878 +                       memcpy(le->name, attr_name(attr),
73879 +                              sizeof(short) * attr->name_len);
73880 +               else if (attr->type == ATTR_STD)
73881 +                       continue;
73882 +               else if (attr->type == ATTR_LIST)
73883 +                       continue;
73884 +               else if (is_mft && attr->type == ATTR_DATA)
73885 +                       continue;
73887 +               if (!nb || nb < ARRAY_SIZE(arr_move)) {
73888 +                       le_b[nb] = le;
73889 +                       arr_move[nb++] = attr;
73890 +                       free_b += le32_to_cpu(attr->size);
73891 +               }
73892 +       }
73894 +       lsize = PtrOffset(ni->attr_list.le, le);
73895 +       ni->attr_list.size = lsize;
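+       /*
+        * The attribute list will be stored as a resident attribute, so
+        * the record must hold its current contents plus lsize +
+        * SIZEOF_RESIDENT; whatever does not fit ('to_free') is moved
+        * to a child record first.
+        */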
73897 +       to_free = le32_to_cpu(rec->used) + lsize + SIZEOF_RESIDENT;
73898 +       if (to_free <= rs) {
73899 +               to_free = 0;
73900 +       } else {
73901 +               to_free -= rs;
73903 +               if (to_free > free_b) {
73904 +                       err = -EINVAL;
73905 +                       goto out1;
73906 +               }
73907 +       }
73909 +       /* Allocate child mft. */
73910 +       err = ntfs_look_free_mft(sbi, &rno, is_mft, ni, &mi);
73911 +       if (err)
73912 +               goto out1;
73914 +       /* Call 'mi_remove_attr' in reverse order to keep the pointers in 'arr_move' valid. */
73915 +       while (to_free > 0) {
73916 +               struct ATTRIB *b = arr_move[--nb];
73917 +               u32 asize = le32_to_cpu(b->size);
73918 +               u16 name_off = le16_to_cpu(b->name_off);
73920 +               attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
73921 +                                     b->name_len, asize, name_off);
73922 +               WARN_ON(!attr);
73924 +               mi_get_ref(mi, &le_b[nb]->ref);
73925 +               le_b[nb]->id = attr->id;
73927 +               /* copy all except id */
73928 +               memcpy(attr, b, asize);
73929 +               attr->id = le_b[nb]->id;
73931 +               WARN_ON(!mi_remove_attr(&ni->mi, b));
73933 +               if (to_free <= asize)
73934 +                       break;
73935 +               to_free -= asize;
73936 +               WARN_ON(!nb);
73937 +       }
73939 +       attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
73940 +                             lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
73941 +       WARN_ON(!attr);
73943 +       attr->non_res = 0;
73944 +       attr->flags = 0;
73945 +       attr->res.data_size = cpu_to_le32(lsize);
73946 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
73947 +       attr->res.flags = 0;
73948 +       attr->res.res = 0;
73950 +       memcpy(resident_data_ex(attr, lsize), ni->attr_list.le, lsize);
73952 +       ni->attr_list.dirty = false;
73954 +       mark_inode_dirty(&ni->vfs_inode);
73955 +       goto out;
73957 +out1:
73958 +       ntfs_free(ni->attr_list.le);
73959 +       ni->attr_list.le = NULL;
73960 +       ni->attr_list.size = 0;
73962 +out:
73963 +       return err;
73967 + * ni_ins_attr_ext
73968 + *
73969 + * This method adds an external attribute to the ntfs_inode.
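+ * It tries each already-loaded subrecord first; only when none can
+ * take the attribute (or 'force_ext' is set) is a new child allocated.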
73970 + */
73971 +static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
73972 +                          enum ATTR_TYPE type, const __le16 *name, u8 name_len,
73973 +                          u32 asize, CLST svcn, u16 name_off, bool force_ext,
73974 +                          struct ATTRIB **ins_attr, struct mft_inode **ins_mi)
73976 +       struct ATTRIB *attr;
73977 +       struct mft_inode *mi;
73978 +       CLST rno;
73979 +       u64 vbo;
73980 +       struct rb_node *node;
73981 +       int err;
73982 +       bool is_mft, is_mft_data;
73983 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
73985 +       is_mft = ni->mi.rno == MFT_REC_MFT;
73986 +       is_mft_data = is_mft && type == ATTR_DATA && !name_len;
73988 +       if (asize > sbi->max_bytes_per_attr) {
73989 +               err = -EINVAL;
73990 +               goto out;
73991 +       }
73993 +       /*
73994 +        * Standard information and attr_list cannot be made external.
73995 +        * The $LogFile cannot have any external attributes.
73996 +        */
73997 +       if (type == ATTR_STD || type == ATTR_LIST ||
73998 +           ni->mi.rno == MFT_REC_LOG) {
73999 +               err = -EINVAL;
74000 +               goto out;
74001 +       }
74003 +       /* Create the attribute list if it does not already exist. */
74004 +       if (!ni->attr_list.size) {
74005 +               err = ni_create_attr_list(ni);
74006 +               if (err)
74007 +                       goto out;
74008 +       }
74010 +       vbo = is_mft_data ? ((u64)svcn << sbi->cluster_bits) : 0;
74012 +       if (force_ext)
74013 +               goto insert_ext;
74015 +       /* Load all subrecords into memory. */
74016 +       err = ni_load_all_mi(ni);
74017 +       if (err)
74018 +               goto out;
74020 +       /* Check each of the loaded subrecords. */
74021 +       for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
74022 +               mi = rb_entry(node, struct mft_inode, node);
74024 +               if (is_mft_data &&
74025 +                   (mi_enum_attr(mi, NULL) ||
74026 +                    vbo <= ((u64)mi->rno << sbi->record_bits))) {
74027 +                       /* We can't accept this record because of the MFT's bootstrapping. */
74028 +                       continue;
74029 +               }
74030 +               if (is_mft &&
74031 +                   mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
74032 +                       /*
74033 +                        * This child record already has an ATTR_DATA.
74034 +                        * So it can't accept any other attributes.
74035 +                        */
74036 +                       continue;
74037 +               }
74039 +               if ((type != ATTR_NAME || name_len) &&
74040 +                   mi_find_attr(mi, NULL, type, name, name_len, NULL)) {
74041 +                       /* Only indexed attributes can share the same record. */
74042 +                       continue;
74043 +               }
74045 +               /* Try to insert attribute into this subrecord */
74046 +               attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
74047 +                                      name_off, svcn);
74048 +               if (!attr)
74049 +                       continue;
74051 +               if (ins_attr)
74052 +                       *ins_attr = attr;
74053 +               return 0;
74054 +       }
74056 +insert_ext:
74057 +       /* We have to allocate a new child subrecord. */
74058 +       err = ntfs_look_free_mft(sbi, &rno, is_mft_data, ni, &mi);
74059 +       if (err)
74060 +               goto out;
74062 +       if (is_mft_data && vbo <= ((u64)rno << sbi->record_bits)) {
74063 +               err = -EINVAL;
74064 +               goto out1;
74065 +       }
74067 +       attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
74068 +                              name_off, svcn);
74069 +       if (!attr)
74070 +               goto out2;
74072 +       if (ins_attr)
74073 +               *ins_attr = attr;
74074 +       if (ins_mi)
74075 +               *ins_mi = mi;
74077 +       return 0;
74079 +out2:
74080 +       ni_remove_mi(ni, mi);
74081 +       mi_put(mi);
74082 +       err = -EINVAL;
74084 +out1:
74085 +       ntfs_mark_rec_free(sbi, rno);
74087 +out:
74088 +       return err;
74092 + * ni_insert_attr
74093 + *
74094 + * inserts an attribute into the file.
74095 + *
74096 + * If the primary record has room, it will just insert the attribute.
74097 + * If not, it may make the attribute external.
74098 + * For $MFT::Data it may make room for the attribute by
74099 + * making other attributes external.
74100 + *
74101 + * NOTE:
74102 + * The ATTR_LIST and ATTR_STD cannot be made external.
74103 + * This function does not fill the new attribute completely;
74104 + * it only fills the 'size'/'type'/'id'/'name_len' fields.
74105 + */
74106 +static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
74107 +                         const __le16 *name, u8 name_len, u32 asize,
74108 +                         u16 name_off, CLST svcn, struct ATTRIB **ins_attr,
74109 +                         struct mft_inode **ins_mi)
74111 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74112 +       int err;
74113 +       struct ATTRIB *attr, *eattr;
74114 +       struct MFT_REC *rec;
74115 +       bool is_mft;
74116 +       struct ATTR_LIST_ENTRY *le;
74117 +       u32 list_reserve, max_free, free, used, t32;
74118 +       __le16 id;
74119 +       u16 t16;
74121 +       is_mft = ni->mi.rno == MFT_REC_MFT;
74122 +       rec = ni->mi.mrec;
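+       /*
+        * Space kept back for a nonresident attribute list: the header
+        * plus, apparently, three packed run pairs (one length byte and
+        * two u32s each).
+        */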
74124 +       list_reserve = SIZEOF_NONRESIDENT + 3 * (1 + 2 * sizeof(u32));
74125 +       used = le32_to_cpu(rec->used);
74126 +       free = sbi->record_size - used;
74128 +       if (is_mft && type != ATTR_LIST) {
74129 +               /* Reserve space for the ATTRIB List. */
74130 +               if (free < list_reserve)
74131 +                       free = 0;
74132 +               else
74133 +                       free -= list_reserve;
74134 +       }
74136 +       if (asize <= free) {
74137 +               attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len,
74138 +                                      asize, name_off, svcn);
74139 +               if (attr) {
74140 +                       if (ins_attr)
74141 +                               *ins_attr = attr;
74142 +                       if (ins_mi)
74143 +                               *ins_mi = &ni->mi;
74144 +                       err = 0;
74145 +                       goto out;
74146 +               }
74147 +       }
74149 +       if (!is_mft || type != ATTR_DATA || svcn) {
74150 +               /* This ATTRIB will be external. */
74151 +               err = ni_ins_attr_ext(ni, NULL, type, name, name_len, asize,
74152 +                                     svcn, name_off, false, ins_attr, ins_mi);
74153 +               goto out;
74154 +       }
74156 +       /*
74157 +        * Here we have: "is_mft && type == ATTR_DATA && !svcn".
74158 +        *
74159 +        * The first chunk of the $MFT::Data ATTRIB must reside in the base record.
74160 +        * Evict as many other attributes as possible.
74161 +        */
74162 +       max_free = free;
74164 +       /* Estimate the result of moving all possible attributes away. */
74165 +       attr = NULL;
74167 +       while ((attr = mi_enum_attr(&ni->mi, attr))) {
74168 +               if (attr->type == ATTR_STD)
74169 +                       continue;
74170 +               if (attr->type == ATTR_LIST)
74171 +                       continue;
74172 +               max_free += le32_to_cpu(attr->size);
74173 +       }
74175 +       if (max_free < asize + list_reserve) {
74176 +               /* Impossible to insert this attribute into primary record */
74177 +               err = -EINVAL;
74178 +               goto out;
74179 +       }
74181 +       /* Start real attribute moving */
74182 +       attr = NULL;
74184 +       for (;;) {
74185 +               attr = mi_enum_attr(&ni->mi, attr);
74186 +               if (!attr) {
74187 +                       /* We should never be here 'cause we have already checked this case. */
74188 +                       err = -EINVAL;
74189 +                       goto out;
74190 +               }
74192 +               /* Skip attributes that MUST stay in the primary record. */
74193 +               if (attr->type == ATTR_STD || attr->type == ATTR_LIST)
74194 +                       continue;
74196 +               le = NULL;
74197 +               if (ni->attr_list.size) {
74198 +                       le = al_find_le(ni, NULL, attr);
74199 +                       if (!le) {
74200 +                               /* Really this is a serious bug */
74201 +                               err = -EINVAL;
74202 +                               goto out;
74203 +                       }
74204 +               }
74206 +               t32 = le32_to_cpu(attr->size);
74207 +               t16 = le16_to_cpu(attr->name_off);
74208 +               err = ni_ins_attr_ext(ni, le, attr->type, Add2Ptr(attr, t16),
74209 +                                     attr->name_len, t32, attr_svcn(attr), t16,
74210 +                                     false, &eattr, NULL);
74211 +               if (err)
74212 +                       return err;
74214 +               id = eattr->id;
74215 +               memcpy(eattr, attr, t32);
74216 +               eattr->id = id;
74218 +               /* remove attrib from primary record */
74219 +               mi_remove_attr(&ni->mi, attr);
74221 +               /* 'attr' now points to the next attribute. */
74222 +               if (attr->type == ATTR_END)
74223 +                       goto out;
74224 +       }
74228 +       attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len, asize,
74229 +                              name_off, svcn);
74230 +       if (!attr) {
74231 +               err = -EINVAL;
74232 +               goto out;
74233 +       }
74235 +       if (ins_attr)
74236 +               *ins_attr = attr;
74237 +       if (ins_mi)
74238 +               *ins_mi = &ni->mi;
74240 +out:
74241 +       return err;
74245 + * ni_expand_mft_list
74246 + *
74247 + * This method splits the ATTR_DATA of $MFT.
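+ * The tail of its run list is moved into a child record, shrinking
+ * the mapping pairs stored in the primary record.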
74248 + */
74249 +static int ni_expand_mft_list(struct ntfs_inode *ni)
74251 +       int err = 0;
74252 +       struct runs_tree *run = &ni->file.run;
74253 +       u32 asize, run_size, done = 0;
74254 +       struct ATTRIB *attr;
74255 +       struct rb_node *node;
74256 +       CLST mft_min, mft_new, svcn, evcn, plen;
74257 +       struct mft_inode *mi, *mi_min, *mi_new;
74258 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74260 +       /* Find the nearest MFT record. */
74261 +       mft_min = 0;
74262 +       mft_new = 0;
74263 +       mi_min = NULL;
74265 +       for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
74266 +               mi = rb_entry(node, struct mft_inode, node);
74268 +               attr = mi_enum_attr(mi, NULL);
74270 +               if (!attr) {
74271 +                       mft_min = mi->rno;
74272 +                       mi_min = mi;
74273 +                       break;
74274 +               }
74275 +       }
74277 +       if (ntfs_look_free_mft(sbi, &mft_new, true, ni, &mi_new)) {
74278 +               mft_new = 0;
74279 +               // really this is not critical
74280 +       } else if (mft_min > mft_new) {
74281 +               mft_min = mft_new;
74282 +               mi_min = mi_new;
74283 +       } else {
74284 +               ntfs_mark_rec_free(sbi, mft_new);
74285 +               mft_new = 0;
74286 +               ni_remove_mi(ni, mi_new);
74287 +       }
74289 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
74290 +       if (!attr) {
74291 +               err = -EINVAL;
74292 +               goto out;
74293 +       }
74295 +       asize = le32_to_cpu(attr->size);
74297 +       evcn = le64_to_cpu(attr->nres.evcn);
74298 +       svcn = bytes_to_cluster(sbi, (u64)(mft_min + 1) << sbi->record_bits);
74299 +       if (evcn + 1 >= svcn) {
74300 +               err = -EINVAL;
74301 +               goto out;
74302 +       }
74304 +       /*
74305 +        * Split the primary attribute [0, evcn] in two parts: [0, svcn) + [svcn, evcn].
74306 +        *
74307 +        * Update the first part of ATTR_DATA in the primary MFT record.
74308 +        */
74309 +       err = run_pack(run, 0, svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
74310 +                      asize - SIZEOF_NONRESIDENT, &plen);
74311 +       if (err < 0)
74312 +               goto out;
74314 +       run_size = QuadAlign(err);
74315 +       err = 0;
74317 +       if (plen < svcn) {
74318 +               err = -EINVAL;
74319 +               goto out;
74320 +       }
74322 +       attr->nres.evcn = cpu_to_le64(svcn - 1);
74323 +       attr->size = cpu_to_le32(run_size + SIZEOF_NONRESIDENT);
74324 +       /* 'done' - how many bytes of the primary MFT record become free. */
74325 +       done = asize - run_size - SIZEOF_NONRESIDENT;
74326 +       le32_sub_cpu(&ni->mi.mrec->used, done);
74328 +       /* Estimate the size of second part: run_buf=NULL */
74329 +       err = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size,
74330 +                      &plen);
74331 +       if (err < 0)
74332 +               goto out;
74334 +       run_size = QuadAlign(err);
74335 +       err = 0;
74337 +       if (plen < evcn + 1 - svcn) {
74338 +               err = -EINVAL;
74339 +               goto out;
74340 +       }
74342 +       /*
74343 +        * This call may implicitly expand the attribute list.
74344 +        * Insert the second part of ATTR_DATA in 'mi_min'.
74345 +        */
74346 +       attr = ni_ins_new_attr(ni, mi_min, NULL, ATTR_DATA, NULL, 0,
74347 +                              SIZEOF_NONRESIDENT + run_size,
74348 +                              SIZEOF_NONRESIDENT, svcn);
74349 +       if (!attr) {
74350 +               err = -EINVAL;
74351 +               goto out;
74352 +       }
74354 +       attr->non_res = 1;
74355 +       attr->name_off = SIZEOF_NONRESIDENT_LE;
74356 +       attr->flags = 0;
74358 +       run_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
74359 +                run_size, &plen);
74361 +       attr->nres.svcn = cpu_to_le64(svcn);
74362 +       attr->nres.evcn = cpu_to_le64(evcn);
74363 +       attr->nres.run_off = cpu_to_le16(SIZEOF_NONRESIDENT);
74365 +out:
74366 +       if (mft_new) {
74367 +               ntfs_mark_rec_free(sbi, mft_new);
74368 +               ni_remove_mi(ni, mi_new);
74369 +       }
74371 +       return !err && !done ? -EOPNOTSUPP : err;
74375 + * ni_expand_list
74376 + *
74377 + * This method moves all possible attributes out of the primary record.
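+ * Each attribute still stored in the base record is re-inserted into
+ * a new child record; for $MFT the data attribute is split instead.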
74378 + */
74379 +int ni_expand_list(struct ntfs_inode *ni)
74381 +       int err = 0;
74382 +       u32 asize, done = 0;
74383 +       struct ATTRIB *attr, *ins_attr;
74384 +       struct ATTR_LIST_ENTRY *le;
74385 +       bool is_mft = ni->mi.rno == MFT_REC_MFT;
74386 +       struct MFT_REF ref;
74388 +       mi_get_ref(&ni->mi, &ref);
74389 +       le = NULL;
74391 +       while ((le = al_enumerate(ni, le))) {
74392 +               if (le->type == ATTR_STD)
74393 +                       continue;
74395 +               if (memcmp(&ref, &le->ref, sizeof(struct MFT_REF)))
74396 +                       continue;
74398 +               if (is_mft && le->type == ATTR_DATA)
74399 +                       continue;
74401 +               /* Find attribute in primary record */
74402 +               attr = rec_find_attr_le(&ni->mi, le);
74403 +               if (!attr) {
74404 +                       err = -EINVAL;
74405 +                       goto out;
74406 +               }
74408 +               asize = le32_to_cpu(attr->size);
74410 +               /* Always insert into a new record to avoid collisions (deep recursion). */
74411 +               err = ni_ins_attr_ext(ni, le, attr->type, attr_name(attr),
74412 +                                     attr->name_len, asize, attr_svcn(attr),
74413 +                                     le16_to_cpu(attr->name_off), true,
74414 +                                     &ins_attr, NULL);
74416 +               if (err)
74417 +                       goto out;
74419 +               memcpy(ins_attr, attr, asize);
74420 +               ins_attr->id = le->id;
74421 +               mi_remove_attr(&ni->mi, attr);
74423 +               done += asize;
74424 +               goto out;
74425 +       }
74427 +       if (!is_mft) {
74428 +               err = -EFBIG; /* attr list is too big(?) */
74429 +               goto out;
74430 +       }
74432 +       /* Split MFT data as much as possible. */
74433 +       err = ni_expand_mft_list(ni);
74434 +       if (err)
74435 +               goto out;
74437 +out:
74438 +       return !err && !done ? -EOPNOTSUPP : err;
74442 + * ni_insert_nonresident
74443 + *
74444 + * inserts new nonresident attribute
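+ * The run list is packed twice: first with a NULL buffer to compute
+ * the mapping-pairs size, then for real into the new attribute.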
74445 + */
74446 +int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
74447 +                         const __le16 *name, u8 name_len,
74448 +                         const struct runs_tree *run, CLST svcn, CLST len,
74449 +                         __le16 flags, struct ATTRIB **new_attr,
74450 +                         struct mft_inode **mi)
74452 +       int err;
74453 +       CLST plen;
74454 +       struct ATTRIB *attr;
74455 +       bool is_ext =
74456 +               (flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) && !svcn;
74457 +       u32 name_size = QuadAlign(name_len * sizeof(short));
74458 +       u32 name_off = is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT;
74459 +       u32 run_off = name_off + name_size;
74460 +       u32 run_size, asize;
74461 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74463 +       err = run_pack(run, svcn, len, NULL, sbi->max_bytes_per_attr - run_off,
74464 +                      &plen);
74465 +       if (err < 0)
74466 +               goto out;
74468 +       run_size = QuadAlign(err);
74470 +       if (plen < len) {
74471 +               err = -EINVAL;
74472 +               goto out;
74473 +       }
74475 +       asize = run_off + run_size;
74477 +       if (asize > sbi->max_bytes_per_attr) {
74478 +               err = -EINVAL;
74479 +               goto out;
74480 +       }
74482 +       err = ni_insert_attr(ni, type, name, name_len, asize, name_off, svcn,
74483 +                            &attr, mi);
74485 +       if (err)
74486 +               goto out;
74488 +       attr->non_res = 1;
74489 +       attr->name_off = cpu_to_le16(name_off);
74490 +       attr->flags = flags;
74492 +       run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size, &plen);
74494 +       attr->nres.svcn = cpu_to_le64(svcn);
74495 +       attr->nres.evcn = cpu_to_le64((u64)svcn + len - 1);
74497 +       err = 0;
74498 +       if (new_attr)
74499 +               *new_attr = attr;
74501 +       *(__le64 *)&attr->nres.run_off = cpu_to_le64(run_off);
74503 +       attr->nres.alloc_size =
74504 +               svcn ? 0 : cpu_to_le64((u64)len << ni->mi.sbi->cluster_bits);
74505 +       attr->nres.data_size = attr->nres.alloc_size;
74506 +       attr->nres.valid_size = attr->nres.alloc_size;
74508 +       if (is_ext) {
74509 +               if (flags & ATTR_FLAG_COMPRESSED)
74510 +                       attr->nres.c_unit = COMPRESSION_UNIT;
74511 +               attr->nres.total_size = attr->nres.alloc_size;
74512 +       }
74514 +out:
74515 +       return err;
74519 + * ni_insert_resident
74520 + *
74521 + * inserts new resident attribute
74522 + */
74523 +int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
74524 +                      enum ATTR_TYPE type, const __le16 *name, u8 name_len,
74525 +                      struct ATTRIB **new_attr, struct mft_inode **mi)
74527 +       int err;
74528 +       u32 name_size = QuadAlign(name_len * sizeof(short));
74529 +       u32 asize = SIZEOF_RESIDENT + name_size + QuadAlign(data_size);
74530 +       struct ATTRIB *attr;
74532 +       err = ni_insert_attr(ni, type, name, name_len, asize, SIZEOF_RESIDENT,
74533 +                            0, &attr, mi);
74534 +       if (err)
74535 +               return err;
74537 +       attr->non_res = 0;
74538 +       attr->flags = 0;
74540 +       attr->res.data_size = cpu_to_le32(data_size);
74541 +       attr->res.data_off = cpu_to_le16(SIZEOF_RESIDENT + name_size);
74542 +       if (type == ATTR_NAME)
74543 +               attr->res.flags = RESIDENT_FLAG_INDEXED;
74544 +       attr->res.res = 0;
74546 +       if (new_attr)
74547 +               *new_attr = attr;
74549 +       return 0;
74553 + * ni_remove_attr_le
74554 + *
74555 + * removes attribute from record
74556 + */
74557 +int ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
74558 +                     struct ATTR_LIST_ENTRY *le)
74560 +       int err;
74561 +       struct mft_inode *mi;
74563 +       err = ni_load_mi(ni, le, &mi);
74564 +       if (err)
74565 +               return err;
74567 +       mi_remove_attr(mi, attr);
74569 +       if (le)
74570 +               al_remove_le(ni, le);
74572 +       return 0;
74576 + * ni_delete_all
74577 + *
74578 + * removes all attributes and frees allocated space
74579 + * ntfs_evict_inode->ntfs_clear_inode->ni_delete_all (if no links)
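+ * Nonresident runs are unpacked with RUN_DEALLOCATE so that their
+ * clusters return to the volume bitmap before the records are freed.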
74580 + */
74581 +int ni_delete_all(struct ntfs_inode *ni)
74583 +       int err;
74584 +       struct ATTR_LIST_ENTRY *le = NULL;
74585 +       struct ATTRIB *attr = NULL;
74586 +       struct rb_node *node;
74587 +       u16 roff;
74588 +       u32 asize;
74589 +       CLST svcn, evcn;
74590 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74591 +       bool nt3 = is_ntfs3(sbi);
74592 +       struct MFT_REF ref;
74594 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
74595 +               if (!nt3 || attr->name_len) {
74596 +                       ;
74597 +               } else if (attr->type == ATTR_REPARSE) {
74598 +                       mi_get_ref(&ni->mi, &ref);
74599 +                       ntfs_remove_reparse(sbi, 0, &ref);
74600 +               } else if (attr->type == ATTR_ID && !attr->non_res &&
74601 +                          le32_to_cpu(attr->res.data_size) >=
74602 +                                  sizeof(struct GUID)) {
74603 +                       ntfs_objid_remove(sbi, resident_data(attr));
74604 +               }
74606 +               if (!attr->non_res)
74607 +                       continue;
74609 +               svcn = le64_to_cpu(attr->nres.svcn);
74610 +               evcn = le64_to_cpu(attr->nres.evcn);
74612 +               if (evcn + 1 <= svcn)
74613 +                       continue;
74615 +               asize = le32_to_cpu(attr->size);
74616 +               roff = le16_to_cpu(attr->nres.run_off);
74618 +               /* RUN_DEALLOCATE: unpack the run and deallocate its clusters. */
74619 +               run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
74620 +                             Add2Ptr(attr, roff), asize - roff);
74621 +       }
74623 +       if (ni->attr_list.size) {
74624 +               run_deallocate(ni->mi.sbi, &ni->attr_list.run, true);
74625 +               al_destroy(ni);
74626 +       }
74628 +       /* Free all subrecords */
74629 +       for (node = rb_first(&ni->mi_tree); node;) {
74630 +               struct rb_node *next = rb_next(node);
74631 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
74633 +               clear_rec_inuse(mi->mrec);
74634 +               mi->dirty = true;
74635 +               mi_write(mi, 0);
74637 +               ntfs_mark_rec_free(sbi, mi->rno);
74638 +               ni_remove_mi(ni, mi);
74639 +               mi_put(mi);
74640 +               node = next;
74641 +       }
74643 +       // Free base record
74644 +       clear_rec_inuse(ni->mi.mrec);
74645 +       ni->mi.dirty = true;
74646 +       err = mi_write(&ni->mi, 0);
74648 +       ntfs_mark_rec_free(sbi, ni->mi.rno);
74650 +       return err;
74654 + * ni_fname_name
74655 + *
74656 + * returns file name attribute by its value
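+ * Matches each name against 'uni' and, when 'home_dir' is given,
+ * against the parent directory reference stored in the attribute.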
74657 + */
74658 +struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
74659 +                                    const struct cpu_str *uni,
74660 +                                    const struct MFT_REF *home_dir,
74661 +                                    struct ATTR_LIST_ENTRY **le)
74663 +       struct ATTRIB *attr = NULL;
74664 +       struct ATTR_FILE_NAME *fname;
74666 +       *le = NULL;
74668 +       /* Enumerate all names */
74669 +next:
74670 +       attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL, NULL);
74671 +       if (!attr)
74672 +               return NULL;
74674 +       fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
74675 +       if (!fname)
74676 +               goto next;
74678 +       if (home_dir && memcmp(home_dir, &fname->home, sizeof(*home_dir)))
74679 +               goto next;
74681 +       if (!uni)
74682 +               goto next;
74684 +       if (uni->len != fname->name_len)
74685 +               goto next;
74687 +       if (ntfs_cmp_names_cpu(uni, (struct le_str *)&fname->name_len, NULL,
74688 +                              false))
74689 +               goto next;
74691 +       return fname;
74695 + * ni_fname_type
74696 + *
74697 + * returns file name attribute with given type
74698 + */
74699 +struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
74700 +                                    struct ATTR_LIST_ENTRY **le)
74702 +       struct ATTRIB *attr = NULL;
74703 +       struct ATTR_FILE_NAME *fname;
74705 +       *le = NULL;
74707 +       /* Enumerate all names */
74708 +       for (;;) {
74709 +               attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL,
74710 +                                   NULL);
74711 +               if (!attr)
74712 +                       return NULL;
74714 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
74715 +               if (fname && name_type == fname->type)
74716 +                       return fname;
74717 +       }
74721 + * Process compressed/sparse flags in a special way.
74722 + * NOTE: you need to set ni->std_fa = new_fa
74723 + * after this function to keep internal structures consistent.
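+ * For nonresident data, only empty files may change these flags; the
+ * attribute header is resized in place and i_mapping->a_ops switched.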
74724 + */
74725 +int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa)
74727 +       struct ATTRIB *attr;
74728 +       struct mft_inode *mi;
74729 +       __le16 new_aflags;
74730 +       u32 new_asize;
74732 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
74733 +       if (!attr)
74734 +               return -EINVAL;
74736 +       new_aflags = attr->flags;
74738 +       if (new_fa & FILE_ATTRIBUTE_SPARSE_FILE)
74739 +               new_aflags |= ATTR_FLAG_SPARSED;
74740 +       else
74741 +               new_aflags &= ~ATTR_FLAG_SPARSED;
74743 +       if (new_fa & FILE_ATTRIBUTE_COMPRESSED)
74744 +               new_aflags |= ATTR_FLAG_COMPRESSED;
74745 +       else
74746 +               new_aflags &= ~ATTR_FLAG_COMPRESSED;
74748 +       if (new_aflags == attr->flags)
74749 +               return 0;
74751 +       if ((new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ==
74752 +           (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) {
74753 +               ntfs_inode_warn(&ni->vfs_inode,
74754 +                               "file can't be sparsed and compressed");
74755 +               return -EOPNOTSUPP;
74756 +       }
74758 +       if (!attr->non_res)
74759 +               goto out;
74761 +       if (attr->nres.data_size) {
74762 +               ntfs_inode_warn(
74763 +                       &ni->vfs_inode,
74764 +                       "one can change sparsed/compressed only for empty files");
74765 +               return -EOPNOTSUPP;
74766 +       }
74768 +       /* Resize the nonresident empty attribute in-place only. */
74769 +       new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED))
74770 +                           ? (SIZEOF_NONRESIDENT_EX + 8)
74771 +                           : (SIZEOF_NONRESIDENT + 8);
74773 +       if (!mi_resize_attr(mi, attr, new_asize - le32_to_cpu(attr->size)))
74774 +               return -EOPNOTSUPP;
74776 +       if (new_aflags & ATTR_FLAG_SPARSED) {
74777 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
74778 +               /* Windows uses 16 clusters per frame but supports one cluster per frame too. */
74779 +               attr->nres.c_unit = 0;
74780 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
74781 +       } else if (new_aflags & ATTR_FLAG_COMPRESSED) {
74782 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
74783 +               /* The only allowed value: 16 clusters per frame. */
74784 +               attr->nres.c_unit = NTFS_LZNT_CUNIT;
74785 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops_cmpr;
74786 +       } else {
74787 +               attr->name_off = SIZEOF_NONRESIDENT_LE;
74788 +               /* normal files */
74789 +               attr->nres.c_unit = 0;
74790 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
74791 +       }
74792 +       attr->nres.run_off = attr->name_off;
74793 +out:
74794 +       attr->flags = new_aflags;
74795 +       mi->dirty = true;
74797 +       return 0;
74801 + * ni_parse_reparse
74802 + *
74803 + * buffer is at least 24 bytes
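+ * Returns REPARSE_LINK for name surrogates and symlinks,
+ * REPARSE_COMPRESSED for WOF-compressed files, REPARSE_DEDUPLICATED
+ * for deduplicated ones, and REPARSE_NONE otherwise.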
74804 + */
74805 +enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
74806 +                                  void *buffer)
74808 +       const struct REPARSE_DATA_BUFFER *rp = NULL;
74809 +       u8 bits;
74810 +       u16 len;
74811 +       typeof(rp->CompressReparseBuffer) *cmpr;
74813 +       static_assert(sizeof(struct REPARSE_DATA_BUFFER) <= 24);
74815 +       /* Try to fetch the reparse point data. */
74816 +       if (!attr->non_res) {
74817 +               rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
74818 +       } else if (le64_to_cpu(attr->nres.data_size) >=
74819 +                  sizeof(struct REPARSE_DATA_BUFFER)) {
74820 +               struct runs_tree run;
74822 +               run_init(&run);
74824 +               if (!attr_load_runs_vcn(ni, ATTR_REPARSE, NULL, 0, &run, 0) &&
74825 +                   !ntfs_read_run_nb(ni->mi.sbi, &run, 0, buffer,
74826 +                                     sizeof(struct REPARSE_DATA_BUFFER),
74827 +                                     NULL)) {
74828 +                       rp = buffer;
74829 +               }
74831 +               run_close(&run);
74832 +       }
74834 +       if (!rp)
74835 +               return REPARSE_NONE;
74837 +       len = le16_to_cpu(rp->ReparseDataLength);
74838 +       switch (rp->ReparseTag) {
74839 +       case (IO_REPARSE_TAG_MICROSOFT | IO_REPARSE_TAG_SYMBOLIC_LINK):
74840 +               break; /* Symbolic link */
74841 +       case IO_REPARSE_TAG_MOUNT_POINT:
74842 +               break; /* Mount points and junctions */
74843 +       case IO_REPARSE_TAG_SYMLINK:
74844 +               break;
74845 +       case IO_REPARSE_TAG_COMPRESS:
74846 +               /*
74847 +                * WOF - Windows Overlay Filter - used to compress files with lzx/xpress
74848 +                * Unlike native NTFS file compression, the Windows Overlay Filter supports
74849 +                * only read operations. This means that it doesn't need to sector-align each
74850 +                * compressed chunk, so the compressed data can be packed more tightly together.
74851 +                * If you open the file for writing, the Windows Overlay Filter just decompresses
74852 +                * the entire file, turning it back into a plain file.
74853 +                *
74854 +                * ntfs3 driver decompresses the entire file only on write or change size requests
74855 +                */
74857 +               cmpr = &rp->CompressReparseBuffer;
74858 +               if (len < sizeof(*cmpr) ||
74859 +                   cmpr->WofVersion != WOF_CURRENT_VERSION ||
74860 +                   cmpr->WofProvider != WOF_PROVIDER_SYSTEM ||
74861 +                   cmpr->ProviderVer != WOF_PROVIDER_CURRENT_VERSION) {
74862 +                       return REPARSE_NONE;
74863 +               }
74865 +               switch (cmpr->CompressionFormat) {
74866 +               case WOF_COMPRESSION_XPRESS4K:
74867 +                       bits = 0xc; // 4k
74868 +                       break;
74869 +               case WOF_COMPRESSION_XPRESS8K:
74870 +                       bits = 0xd; // 8k
74871 +                       break;
74872 +               case WOF_COMPRESSION_XPRESS16K:
74873 +                       bits = 0xe; // 16k
74874 +                       break;
74875 +               case WOF_COMPRESSION_LZX32K:
74876 +                       bits = 0xf; // 32k
74877 +                       break;
74878 +               default:
74879 +                       bits = 0x10; // 64k
74880 +                       break;
74881 +               }
74882 +               ni_set_ext_compress_bits(ni, bits);
74883 +               return REPARSE_COMPRESSED;
74885 +       case IO_REPARSE_TAG_DEDUP:
74886 +               ni->ni_flags |= NI_FLAG_DEDUPLICATED;
74887 +               return REPARSE_DEDUPLICATED;
74889 +       default:
74890 +               if (rp->ReparseTag & IO_REPARSE_TAG_NAME_SURROGATE)
74891 +                       break;
74893 +               return REPARSE_NONE;
74894 +       }
74896 +       /* Looks like normal symlink */
74897 +       return REPARSE_LINK;
74901 + * Helper for file_fiemap().
74902 + * Assumes ni_lock is held.
74903 + * TODO: less aggressive locks.
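+ * Walks the vcn -> lcn runs and reports each mapped range as an
+ * extent, splitting at 'valid' so the tail beyond the valid size is
+ * flagged FIEMAP_EXTENT_UNWRITTEN.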
74904 + */
74905 +int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
74906 +             __u64 vbo, __u64 len)
74908 +       int err = 0;
74909 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
74910 +       u8 cluster_bits = sbi->cluster_bits;
74911 +       struct runs_tree *run;
74912 +       struct rw_semaphore *run_lock;
74913 +       struct ATTRIB *attr;
74914 +       CLST vcn = vbo >> cluster_bits;
74915 +       CLST lcn, clen;
74916 +       u64 valid = ni->i_valid;
74917 +       u64 lbo, bytes;
74918 +       u64 end, alloc_size;
74919 +       size_t idx = -1;
74920 +       u32 flags;
74921 +       bool ok;
74923 +       if (S_ISDIR(ni->vfs_inode.i_mode)) {
74924 +               run = &ni->dir.alloc_run;
74925 +               attr = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, I30_NAME,
74926 +                                   ARRAY_SIZE(I30_NAME), NULL, NULL);
74927 +               run_lock = &ni->dir.run_lock;
74928 +       } else {
74929 +               run = &ni->file.run;
74930 +               attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
74931 +                                   NULL);
74932 +               if (!attr) {
74933 +                       err = -EINVAL;
74934 +                       goto out;
74935 +               }
74936 +               if (is_attr_compressed(attr)) {
74937 +                       /* Unfortunately, 'cp -r' incorrectly treats compressed clusters. */
74938 +                       err = -EOPNOTSUPP;
74939 +                       ntfs_inode_warn(
74940 +                               &ni->vfs_inode,
74941 +                               "fiemap is not supported for compressed file (cp -r)");
74942 +                       goto out;
74943 +               }
74944 +               run_lock = &ni->file.run_lock;
74945 +       }
74947 +       if (!attr || !attr->non_res) {
74948 +               err = fiemap_fill_next_extent(
74949 +                       fieinfo, 0, 0,
74950 +                       attr ? le32_to_cpu(attr->res.data_size) : 0,
74951 +                       FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST |
74952 +                               FIEMAP_EXTENT_MERGED);
74953 +               goto out;
74954 +       }
74956 +       end = vbo + len;
74957 +       alloc_size = le64_to_cpu(attr->nres.alloc_size);
74958 +       if (end > alloc_size)
74959 +               end = alloc_size;
74961 +       down_read(run_lock);
74963 +       while (vbo < end) {
74964 +               if (idx == -1) {
74965 +                       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
74966 +               } else {
74967 +                       CLST vcn_next = vcn;
74969 +                       ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) &&
74970 +                            vcn == vcn_next;
74971 +                       if (!ok)
74972 +                               vcn = vcn_next;
74973 +               }
74975 +               if (!ok) {
74976 +                       up_read(run_lock);
74977 +                       down_write(run_lock);
74979 +                       err = attr_load_runs_vcn(ni, attr->type,
74980 +                                                attr_name(attr),
74981 +                                                attr->name_len, run, vcn);
74983 +                       up_write(run_lock);
74984 +                       down_read(run_lock);
74986 +                       if (err)
74987 +                               break;
74989 +                       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
74991 +                       if (!ok) {
74992 +                               err = -EINVAL;
74993 +                               break;
74994 +                       }
74995 +               }
74997 +               if (!clen) {
74998 +                       err = -EINVAL; // ?
74999 +                       break;
75000 +               }
75002 +               if (lcn == SPARSE_LCN) {
75003 +                       vcn += clen;
75004 +                       vbo = (u64)vcn << cluster_bits;
75005 +                       continue;
75006 +               }
75008 +               flags = FIEMAP_EXTENT_MERGED;
75009 +               if (S_ISDIR(ni->vfs_inode.i_mode)) {
75010 +                       ;
75011 +               } else if (is_attr_compressed(attr)) {
75012 +                       CLST clst_data;
75014 +                       err = attr_is_frame_compressed(
75015 +                               ni, attr, vcn >> attr->nres.c_unit, &clst_data);
75016 +                       if (err)
75017 +                               break;
75018 +                       if (clst_data < NTFS_LZNT_CLUSTERS)
75019 +                               flags |= FIEMAP_EXTENT_ENCODED;
75020 +               } else if (is_attr_encrypted(attr)) {
75021 +                       flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
75022 +               }
75024 +               vbo = (u64)vcn << cluster_bits;
75025 +               bytes = (u64)clen << cluster_bits;
75026 +               lbo = (u64)lcn << cluster_bits;
75028 +               vcn += clen;
75030 +               if (vbo + bytes >= end) {
75031 +                       bytes = end - vbo;
75032 +                       flags |= FIEMAP_EXTENT_LAST;
75033 +               }
75035 +               if (vbo + bytes <= valid) {
75036 +                       ;
75037 +               } else if (vbo >= valid) {
75038 +                       flags |= FIEMAP_EXTENT_UNWRITTEN;
75039 +               } else {
75040 +                       /* vbo < valid && valid < vbo + bytes */
75041 +                       u64 dlen = valid - vbo;
75043 +                       err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
75044 +                                                     flags);
75045 +                       if (err < 0)
75046 +                               break;
75047 +                       if (err == 1) {
75048 +                               err = 0;
75049 +                               break;
75050 +                       }
75052 +                       vbo = valid;
75053 +                       bytes -= dlen;
75054 +                       if (!bytes)
75055 +                               continue;
75057 +                       lbo += dlen;
75058 +                       flags |= FIEMAP_EXTENT_UNWRITTEN;
75059 +               }
75061 +               err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
75062 +               if (err < 0)
75063 +                       break;
75064 +               if (err == 1) {
75065 +                       err = 0;
75066 +                       break;
75067 +               }
75069 +               vbo += bytes;
75070 +       }
75072 +       up_read(run_lock);
75074 +out:
75075 +       return err;
75079 + * When decompressing, we typically obtain more than one page per reference.
75080 + * We inject the additional pages into the page cache.
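+ * The whole compression frame containing the requested page is read:
+ * every sibling page is locked and ni_read_frame() decompresses the
+ * frame into all of them at once.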
75081 + */
75082 +int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
75084 +       int err;
75085 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
75086 +       struct address_space *mapping = page->mapping;
75087 +       pgoff_t index = page->index;
75088 +       u64 frame_vbo, vbo = (u64)index << PAGE_SHIFT;
75089 +       struct page **pages = NULL; /* Array of at most 16 pages; stack? */
75090 +       u8 frame_bits;
75091 +       CLST frame;
75092 +       u32 i, idx, frame_size, pages_per_frame;
75093 +       gfp_t gfp_mask;
75094 +       struct page *pg;
75096 +       if (vbo >= ni->vfs_inode.i_size) {
75097 +               SetPageUptodate(page);
75098 +               err = 0;
75099 +               goto out;
75100 +       }
75102 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
75103 +               /* xpress or lzx */
75104 +               frame_bits = ni_ext_compress_bits(ni);
75105 +       } else {
75106 +               /* LZNT compression. */
75107 +               frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
75108 +       }
75109 +       frame_size = 1u << frame_bits;
75110 +       frame = vbo >> frame_bits;
75111 +       frame_vbo = (u64)frame << frame_bits;
75112 +       idx = (vbo - frame_vbo) >> PAGE_SHIFT;
75114 +       pages_per_frame = frame_size >> PAGE_SHIFT;
75115 +       pages = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
75116 +       if (!pages) {
75117 +               err = -ENOMEM;
75118 +               goto out;
75119 +       }
75121 +       pages[idx] = page;
75122 +       index = frame_vbo >> PAGE_SHIFT;
75123 +       gfp_mask = mapping_gfp_mask(mapping);
75125 +       for (i = 0; i < pages_per_frame; i++, index++) {
75126 +               if (i == idx)
75127 +                       continue;
75129 +               pg = find_or_create_page(mapping, index, gfp_mask);
75130 +               if (!pg) {
75131 +                       err = -ENOMEM;
75132 +                       goto out1;
75133 +               }
75134 +               pages[i] = pg;
75135 +       }
75137 +       err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame);
75139 +out1:
75140 +       if (err)
75141 +               SetPageError(page);
75143 +       for (i = 0; i < pages_per_frame; i++) {
75144 +               pg = pages[i];
75145 +               if (i == idx)
75146 +                       continue;
75147 +               unlock_page(pg);
75148 +               put_page(pg);
75149 +       }
75151 +out:
75152 +       /* At this point, err contains 0 or -EIO depending on the "critical" page */
75153 +       ntfs_free(pages);
75154 +       unlock_page(page);
75156 +       return err;
75159 +#ifdef CONFIG_NTFS3_LZX_XPRESS
75161 + * decompress lzx/xpress compressed file
75162 + * remove ATTR_DATA::WofCompressedData
75163 + * remove ATTR_REPARSE
75164 + */
75165 +int ni_decompress_file(struct ntfs_inode *ni)
75167 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
75168 +       struct inode *inode = &ni->vfs_inode;
75169 +       loff_t i_size = inode->i_size;
75170 +       struct address_space *mapping = inode->i_mapping;
75171 +       gfp_t gfp_mask = mapping_gfp_mask(mapping);
75172 +       struct page **pages = NULL;
75173 +       struct ATTR_LIST_ENTRY *le;
75174 +       struct ATTRIB *attr;
75175 +       CLST vcn, cend, lcn, clen, end;
75176 +       pgoff_t index;
75177 +       u64 vbo;
75178 +       u8 frame_bits;
75179 +       u32 i, frame_size, pages_per_frame, bytes;
75180 +       struct mft_inode *mi;
75181 +       int err;
75183 +       /* Clusters for the decompressed data. */
75184 +       cend = bytes_to_cluster(sbi, i_size);
75186 +       if (!i_size)
75187 +               goto remove_wof;
75189 +       /* check in advance */
75190 +       if (cend > wnd_zeroes(&sbi->used.bitmap)) {
75191 +               err = -ENOSPC;
75192 +               goto out;
75193 +       }
75195 +       frame_bits = ni_ext_compress_bits(ni);
75196 +       frame_size = 1u << frame_bits;
75197 +       pages_per_frame = frame_size >> PAGE_SHIFT;
75198 +       pages = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
75199 +       if (!pages) {
75200 +               err = -ENOMEM;
75201 +               goto out;
75202 +       }
75204 +       /*
75205 +        * Step 1: decompress data and copy to new allocated clusters
75206 +        */
75207 +       index = 0;
75208 +       for (vbo = 0; vbo < i_size; vbo += bytes) {
75209 +               u32 nr_pages;
75210 +               bool new;
75212 +               if (vbo + frame_size > i_size) {
75213 +                       bytes = i_size - vbo;
75214 +                       nr_pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
75215 +               } else {
75216 +                       nr_pages = pages_per_frame;
75217 +                       bytes = frame_size;
75218 +               }
75220 +               end = bytes_to_cluster(sbi, vbo + bytes);
75222 +               for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
75223 +                       err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
75224 +                                                 &clen, &new);
75225 +                       if (err)
75226 +                               goto out;
75227 +               }
75229 +               for (i = 0; i < pages_per_frame; i++, index++) {
75230 +                       struct page *pg;
75232 +                       pg = find_or_create_page(mapping, index, gfp_mask);
75233 +                       if (!pg) {
75234 +                               while (i--) {
75235 +                                       unlock_page(pages[i]);
75236 +                                       put_page(pages[i]);
75237 +                               }
75238 +                               err = -ENOMEM;
75239 +                               goto out;
75240 +                       }
75241 +                       pages[i] = pg;
75242 +               }
75244 +               err = ni_read_frame(ni, vbo, pages, pages_per_frame);
75246 +               if (!err) {
75247 +                       down_read(&ni->file.run_lock);
75248 +                       err = ntfs_bio_pages(sbi, &ni->file.run, pages,
75249 +                                            nr_pages, vbo, bytes,
75250 +                                            REQ_OP_WRITE);
75251 +                       up_read(&ni->file.run_lock);
75252 +               }
75254 +               for (i = 0; i < pages_per_frame; i++) {
75255 +                       unlock_page(pages[i]);
75256 +                       put_page(pages[i]);
75257 +               }
75259 +               if (err)
75260 +                       goto out;
75262 +               cond_resched();
75263 +       }
75265 +remove_wof:
75266 +       /*
75267 +        * Step 2: deallocate attributes ATTR_DATA::WofCompressedData and ATTR_REPARSE
75268 +        */
75269 +       attr = NULL;
75270 +       le = NULL;
75271 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
75272 +               CLST svcn, evcn;
75273 +               u32 asize, roff;
75275 +               if (attr->type == ATTR_REPARSE) {
75276 +                       struct MFT_REF ref;
75278 +                       mi_get_ref(&ni->mi, &ref);
75279 +                       ntfs_remove_reparse(sbi, 0, &ref);
75280 +               }
75282 +               if (!attr->non_res)
75283 +                       continue;
75285 +               if (attr->type != ATTR_REPARSE &&
75286 +                   (attr->type != ATTR_DATA ||
75287 +                    attr->name_len != ARRAY_SIZE(WOF_NAME) ||
75288 +                    memcmp(attr_name(attr), WOF_NAME, sizeof(WOF_NAME))))
75289 +                       continue;
75291 +               svcn = le64_to_cpu(attr->nres.svcn);
75292 +               evcn = le64_to_cpu(attr->nres.evcn);
75294 +               if (evcn + 1 <= svcn)
75295 +                       continue;
75297 +               asize = le32_to_cpu(attr->size);
75298 +               roff = le16_to_cpu(attr->nres.run_off);
75300 +               /* RUN_DEALLOCATE: unpack the run and deallocate its clusters. */
75301 +               run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
75302 +                             Add2Ptr(attr, roff), asize - roff);
75303 +       }
75305 +       /*
75306 +        * Step 3: remove attribute ATTR_DATA::WofCompressedData
75307 +        */
75308 +       err = ni_remove_attr(ni, ATTR_DATA, WOF_NAME, ARRAY_SIZE(WOF_NAME),
75309 +                            false, NULL);
75310 +       if (err)
75311 +               goto out;
75313 +       /*
75314 +        * Step 4: remove ATTR_REPARSE
75315 +        */
75316 +       err = ni_remove_attr(ni, ATTR_REPARSE, NULL, 0, false, NULL);
75317 +       if (err)
75318 +               goto out;
75320 +       /*
75321 +        * Step 5: remove sparse flag from data attribute
75322 +        */
75323 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
75324 +       if (!attr) {
75325 +               err = -EINVAL;
75326 +               goto out;
75327 +       }
75329 +       if (attr->non_res && is_attr_sparsed(attr)) {
75330 +               /* Sparsed attribute header is 8 bytes bigger than normal. */
75331 +               struct MFT_REC *rec = mi->mrec;
75332 +               u32 used = le32_to_cpu(rec->used);
75333 +               u32 asize = le32_to_cpu(attr->size);
75334 +               u16 roff = le16_to_cpu(attr->nres.run_off);
75335 +               char *rbuf = Add2Ptr(attr, roff);
75337 +               memmove(rbuf - 8, rbuf, used - PtrOffset(rec, rbuf));
75338 +               attr->size = cpu_to_le32(asize - 8);
75339 +               attr->flags &= ~ATTR_FLAG_SPARSED;
75340 +               attr->nres.run_off = cpu_to_le16(roff - 8);
75341 +               attr->nres.c_unit = 0;
75342 +               rec->used = cpu_to_le32(used - 8);
75343 +               mi->dirty = true;
75344 +               ni->std_fa &= ~(FILE_ATTRIBUTE_SPARSE_FILE |
75345 +                               FILE_ATTRIBUTE_REPARSE_POINT);
75347 +               mark_inode_dirty(inode);
75348 +       }
75350 +       /* clear cached flag */
75351 +       ni->ni_flags &= ~NI_FLAG_COMPRESSED_MASK;
75352 +       if (ni->file.offs_page) {
75353 +               put_page(ni->file.offs_page);
75354 +               ni->file.offs_page = NULL;
75355 +       }
75356 +       mapping->a_ops = &ntfs_aops;
75358 +out:
75359 +       ntfs_free(pages);
75360 +       if (err) {
75361 +               make_bad_inode(inode);
75362 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
75363 +       }
75365 +       return err;
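+/*
+ * The lzx/xpress decompressor contexts below are created lazily on first
+ * use and cached in the superblock; each algorithm's mutex covers both
+ * the lazy allocation and the decompress call itself, so a context is
+ * only ever used by one thread at a time.
+ */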
75368 +/* external compression lzx/xpress */
75369 +static int decompress_lzx_xpress(struct ntfs_sb_info *sbi, const char *cmpr,
75370 +                                size_t cmpr_size, void *unc, size_t unc_size,
75371 +                                u32 frame_size)
75373 +       int err;
75374 +       void *ctx;
75376 +       if (cmpr_size == unc_size) {
75377 +               /* frame not compressed */
75378 +               memcpy(unc, cmpr, unc_size);
75379 +               return 0;
75380 +       }
75382 +       err = 0;
75383 +       if (frame_size == 0x8000) {
75384 +               mutex_lock(&sbi->compress.mtx_lzx);
75385 +               /* LZX: frame compressed */
75386 +               ctx = sbi->compress.lzx;
75387 +               if (!ctx) {
75388 +                       /* Lazy initialize lzx decompress context */
75389 +                       ctx = lzx_allocate_decompressor();
75390 +                       if (!ctx) {
75391 +                               err = -ENOMEM;
75392 +                               goto out1;
75393 +                       }
75395 +                       sbi->compress.lzx = ctx;
75396 +               }
75398 +               if (lzx_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
75399 +                       /* treat all errors as "invalid argument" */
75400 +                       err = -EINVAL;
75401 +               }
75402 +out1:
75403 +               mutex_unlock(&sbi->compress.mtx_lzx);
75404 +       } else {
75405 +               /* XPRESS: frame compressed */
75406 +               mutex_lock(&sbi->compress.mtx_xpress);
75407 +               ctx = sbi->compress.xpress;
75408 +               if (!ctx) {
75409 +                       /* Lazy initialize xpress decompress context */
75410 +                       ctx = xpress_allocate_decompressor();
75411 +                       if (!ctx) {
75412 +                               err = -ENOMEM;
75413 +                               goto out2;
75414 +                       }
75416 +                       sbi->compress.xpress = ctx;
75417 +               }
75419 +               if (xpress_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
75420 +                       /* treat all errors as "invalid argument" */
75421 +                       err = -EINVAL;
75422 +               }
75423 +out2:
75424 +               mutex_unlock(&sbi->compress.mtx_xpress);
75425 +       }
75426 +       return err;
75428 +#endif
75431 + * ni_read_frame
75432 + *
75433 + * pages - array of locked pages
75434 + */
75435 +int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
75436 +                 u32 pages_per_frame)
75438 +       int err;
75439 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
75440 +       u8 cluster_bits = sbi->cluster_bits;
75441 +       char *frame_ondisk = NULL;
75442 +       char *frame_mem = NULL;
75443 +       struct page **pages_disk = NULL;
75444 +       struct ATTR_LIST_ENTRY *le = NULL;
75445 +       struct runs_tree *run = &ni->file.run;
75446 +       u64 valid_size = ni->i_valid;
75447 +       u64 vbo_disk;
75448 +       size_t unc_size;
75449 +       u32 frame_size, i, npages_disk, ondisk_size;
75450 +       struct page *pg;
75451 +       struct ATTRIB *attr;
75452 +       CLST frame, clst_data;
75454 +       /*
75455 +        * To simplify the decompress algorithm, vmap the source and target pages.
75456 +        */
75457 +       for (i = 0; i < pages_per_frame; i++)
75458 +               kmap(pages[i]);
75460 +       frame_size = pages_per_frame << PAGE_SHIFT;
75461 +       frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL);
75462 +       if (!frame_mem) {
75463 +               err = -ENOMEM;
75464 +               goto out;
75465 +       }
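+       /*
+        * From here on the whole frame is visible as one contiguous buffer
+        * (frame_mem), so the decompressors never have to deal with page
+        * boundaries.
+        */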
75467 +       attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, NULL);
75468 +       if (!attr) {
75469 +               err = -ENOENT;
75470 +               goto out1;
75471 +       }
75473 +       if (!attr->non_res) {
75474 +               u32 data_size = le32_to_cpu(attr->res.data_size);
75476 +               memset(frame_mem, 0, frame_size);
75477 +               if (frame_vbo < data_size) {
75478 +                       ondisk_size = data_size - frame_vbo;
75479 +                       memcpy(frame_mem, resident_data(attr) + frame_vbo,
75480 +                              min(ondisk_size, frame_size));
75481 +               }
75482 +               err = 0;
75483 +               goto out1;
75484 +       }
75486 +       if (frame_vbo >= valid_size) {
75487 +               memset(frame_mem, 0, frame_size);
75488 +               err = 0;
75489 +               goto out1;
75490 +       }
75492 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
75493 +#ifndef CONFIG_NTFS3_LZX_XPRESS
75494 +               err = -EOPNOTSUPP;
75495 +               goto out1;
75496 +#else
75497 +               u32 frame_bits = ni_ext_compress_bits(ni);
75498 +               u64 frame64 = frame_vbo >> frame_bits;
75499 +               u64 frames, vbo_data;
75501 +               if (frame_size != (1u << frame_bits)) {
75502 +                       err = -EINVAL;
75503 +                       goto out1;
75504 +               }
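+               /*
+                * Only the chunk sizes WOF is known to use are accepted:
+                * XPRESS with 4K/8K/16K chunks, or LZX with 32K chunks
+                * (hence 0x8000 selects LZX in decompress_lzx_xpress).
+                */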
75505 +               switch (frame_size) {
75506 +               case 0x1000:
75507 +               case 0x2000:
75508 +               case 0x4000:
75509 +               case 0x8000:
75510 +                       break;
75511 +               default:
75512 +                       /* unknown compression */
75513 +                       err = -EOPNOTSUPP;
75514 +                       goto out1;
75515 +               }
75517 +               attr = ni_find_attr(ni, attr, &le, ATTR_DATA, WOF_NAME,
75518 +                                   ARRAY_SIZE(WOF_NAME), NULL, NULL);
75519 +               if (!attr) {
75520 +                       ntfs_inode_err(
75521 +                               &ni->vfs_inode,
75522 +                               "external compressed file should contain data attribute \"WofCompressedData\"");
75523 +                       err = -EINVAL;
75524 +                       goto out1;
75525 +               }
75527 +               if (!attr->non_res) {
75528 +                       run = NULL;
75529 +               } else {
75530 +                       run = run_alloc();
75531 +                       if (!run) {
75532 +                               err = -ENOMEM;
75533 +                               goto out1;
75534 +                       }
75535 +               }
75537 +               frames = (ni->vfs_inode.i_size - 1) >> frame_bits;
75539 +               err = attr_wof_frame_info(ni, attr, run, frame64, frames,
75540 +                                         frame_bits, &ondisk_size, &vbo_data);
75541 +               if (err)
75542 +                       goto out2;
75544 +               if (frame64 == frames) {
75545 +                       unc_size = 1 + ((ni->vfs_inode.i_size - 1) &
75546 +                                       (frame_size - 1));
75547 +                       ondisk_size = attr_size(attr) - vbo_data;
75548 +               } else {
75549 +                       unc_size = frame_size;
75550 +               }
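+               /*
+                * Example: i_size = 0x12345 with 32K frames leaves a last
+                * frame of 1 + ((0x12345 - 1) & 0x7fff) = 0x2345 bytes;
+                * all earlier frames decompress to a full frame_size.
+                */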
75552 +               if (ondisk_size > frame_size) {
75553 +                       err = -EINVAL;
75554 +                       goto out2;
75555 +               }
75557 +               if (!attr->non_res) {
75558 +                       if (vbo_data + ondisk_size >
75559 +                           le32_to_cpu(attr->res.data_size)) {
75560 +                               err = -EINVAL;
75561 +                               goto out1;
75562 +                       }
75564 +                       err = decompress_lzx_xpress(
75565 +                               sbi, Add2Ptr(resident_data(attr), vbo_data),
75566 +                               ondisk_size, frame_mem, unc_size, frame_size);
75567 +                       goto out1;
75568 +               }
75569 +               vbo_disk = vbo_data;
75570 +               /* Load all runs needed to read [vbo_disk, vbo_data + ondisk_size). */
75571 +               err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
75572 +                                          ARRAY_SIZE(WOF_NAME), run, vbo_disk,
75573 +                                          vbo_data + ondisk_size);
75574 +               if (err)
75575 +                       goto out2;
75576 +               npages_disk = (ondisk_size + (vbo_disk & (PAGE_SIZE - 1)) +
75577 +                              PAGE_SIZE - 1) >>
75578 +                             PAGE_SHIFT;
75579 +#endif
75580 +       } else if (is_attr_compressed(attr)) {
75581 +               /* LZNT compression */
75582 +               if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
75583 +                       err = -EOPNOTSUPP;
75584 +                       goto out1;
75585 +               }
75587 +               if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
75588 +                       err = -EOPNOTSUPP;
75589 +                       goto out1;
75590 +               }
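+               /*
+                * An LZNT frame covers 2^NTFS_LZNT_CUNIT (16, with the
+                * standard compression unit) clusters. Three cases follow:
+                * no clusters allocated (sparse frame, reads as zeros),
+                * all of them allocated (stored raw), or a partial
+                * allocation (stored compressed).
+                */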
75592 +               down_write(&ni->file.run_lock);
75593 +               run_truncate_around(run, le64_to_cpu(attr->nres.svcn));
75594 +               frame = frame_vbo >> (cluster_bits + NTFS_LZNT_CUNIT);
75595 +               err = attr_is_frame_compressed(ni, attr, frame, &clst_data);
75596 +               up_write(&ni->file.run_lock);
75597 +               if (err)
75598 +                       goto out1;
75600 +               if (!clst_data) {
75601 +                       memset(frame_mem, 0, frame_size);
75602 +                       goto out1;
75603 +               }
75605 +               frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
75606 +               ondisk_size = clst_data << cluster_bits;
75608 +               if (clst_data >= NTFS_LZNT_CLUSTERS) {
75609 +                       /* frame is not compressed */
75610 +                       down_read(&ni->file.run_lock);
75611 +                       err = ntfs_bio_pages(sbi, run, pages, pages_per_frame,
75612 +                                            frame_vbo, ondisk_size,
75613 +                                            REQ_OP_READ);
75614 +                       up_read(&ni->file.run_lock);
75615 +                       goto out1;
75616 +               }
75617 +               vbo_disk = frame_vbo;
75618 +               npages_disk = (ondisk_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
75619 +       } else {
75620 +               /* Should never get here: the frame must be WOF- or LZNT-compressed. */
75621 +               err = -EINVAL;
75622 +               goto out1;
75623 +       }
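+       /*
+        * Common tail for both formats: read the compressed frame
+        * (ondisk_size bytes at vbo_disk) into temporary pages, map them
+        * contiguously, and decompress into frame_mem.
+        */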
75625 +       pages_disk = ntfs_zalloc(npages_disk * sizeof(struct page *));
75626 +       if (!pages_disk) {
75627 +               err = -ENOMEM;
75628 +               goto out2;
75629 +       }
75631 +       for (i = 0; i < npages_disk; i++) {
75632 +               pg = alloc_page(GFP_KERNEL);
75633 +               if (!pg) {
75634 +                       err = -ENOMEM;
75635 +                       goto out3;
75636 +               }
75637 +               pages_disk[i] = pg;
75638 +               lock_page(pg);
75639 +               kmap(pg);
75640 +       }
75642 +       /* read 'ondisk_size' bytes from disk */
75643 +       down_read(&ni->file.run_lock);
75644 +       err = ntfs_bio_pages(sbi, run, pages_disk, npages_disk, vbo_disk,
75645 +                            ondisk_size, REQ_OP_READ);
75646 +       up_read(&ni->file.run_lock);
75647 +       if (err)
75648 +               goto out3;
75650 +       /*
75651 +        * To simplify the decompress algorithm, vmap the source and target pages.
75652 +        */
75653 +       frame_ondisk = vmap(pages_disk, npages_disk, VM_MAP, PAGE_KERNEL_RO);
75654 +       if (!frame_ondisk) {
75655 +               err = -ENOMEM;
75656 +               goto out3;
75657 +       }
75659 +       /* decompress: frame_ondisk -> frame_mem */
75660 +#ifdef CONFIG_NTFS3_LZX_XPRESS
75661 +       if (run != &ni->file.run) {
75662 +               /* LZX or XPRESS */
75663 +               err = decompress_lzx_xpress(
75664 +                       sbi, frame_ondisk + (vbo_disk & (PAGE_SIZE - 1)),
75665 +                       ondisk_size, frame_mem, unc_size, frame_size);
75666 +       } else
75667 +#endif
75668 +       {
75669 +               /* LZNT - native ntfs compression */
75670 +               unc_size = decompress_lznt(frame_ondisk, ondisk_size, frame_mem,
75671 +                                          frame_size);
75672 +               if ((ssize_t)unc_size < 0)
75673 +                       err = unc_size;
75674 +               else if (!unc_size || unc_size > frame_size)
75675 +                       err = -EINVAL;
75676 +       }
75677 +       if (!err && valid_size < frame_vbo + frame_size) {
75678 +               size_t ok = valid_size - frame_vbo;
75680 +               memset(frame_mem + ok, 0, frame_size - ok);
75681 +       }
75683 +       vunmap(frame_ondisk);
75685 +out3:
75686 +       for (i = 0; i < npages_disk; i++) {
75687 +               pg = pages_disk[i];
75688 +               if (pg) {
75689 +                       kunmap(pg);
75690 +                       unlock_page(pg);
75691 +                       put_page(pg);
75692 +               }
75693 +       }
75694 +       ntfs_free(pages_disk);
75696 +out2:
75697 +#ifdef CONFIG_NTFS3_LZX_XPRESS
75698 +       if (run != &ni->file.run)
75699 +               run_free(run);
75700 +#endif
75701 +out1:
75702 +       vunmap(frame_mem);
75703 +out:
75704 +       for (i = 0; i < pages_per_frame; i++) {
75705 +               pg = pages[i];
75706 +               kunmap(pg);
75707 +               ClearPageError(pg);
75708 +               SetPageUptodate(pg);
75709 +       }
75711 +       return err;
75715 + * ni_write_frame
75716 + *
75717 + * pages - array of locked pages
75718 + */
75719 +int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
75720 +                  u32 pages_per_frame)
75722 +       int err;
75723 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
75724 +       u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
75725 +       u32 frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
75726 +       u64 frame_vbo = (u64)pages[0]->index << PAGE_SHIFT;
75727 +       CLST frame = frame_vbo >> frame_bits;
75728 +       char *frame_ondisk = NULL;
75729 +       struct page **pages_disk = NULL;
75730 +       struct ATTR_LIST_ENTRY *le = NULL;
75731 +       char *frame_mem;
75732 +       struct ATTRIB *attr;
75733 +       struct mft_inode *mi;
75734 +       u32 i;
75735 +       struct page *pg;
75736 +       size_t compr_size, ondisk_size;
75737 +       struct lznt *lznt;
75739 +       attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
75740 +       if (!attr) {
75741 +               err = -ENOENT;
75742 +               goto out;
75743 +       }
75745 +       if (WARN_ON(!is_attr_compressed(attr))) {
75746 +               err = -EINVAL;
75747 +               goto out;
75748 +       }
75750 +       if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
75751 +               err = -EOPNOTSUPP;
75752 +               goto out;
75753 +       }
75755 +       if (!attr->non_res) {
75756 +               down_write(&ni->file.run_lock);
75757 +               err = attr_make_nonresident(ni, attr, le, mi,
75758 +                                           le32_to_cpu(attr->res.data_size),
75759 +                                           &ni->file.run, &attr, pages[0]);
75760 +               up_write(&ni->file.run_lock);
75761 +               if (err)
75762 +                       goto out;
75763 +       }
75765 +       if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
75766 +               err = -EOPNOTSUPP;
75767 +               goto out;
75768 +       }
75770 +       pages_disk = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
75771 +       if (!pages_disk) {
75772 +               err = -ENOMEM;
75773 +               goto out;
75774 +       }
75776 +       for (i = 0; i < pages_per_frame; i++) {
75777 +               pg = alloc_page(GFP_KERNEL);
75778 +               if (!pg) {
75779 +                       err = -ENOMEM;
75780 +                       goto out1;
75781 +               }
75782 +               pages_disk[i] = pg;
75783 +               lock_page(pg);
75784 +               kmap(pg);
75785 +       }
75787 +       /*
75788 +        * To simplify the compress algorithm, vmap the source and target pages.
75789 +        */
75790 +       frame_ondisk = vmap(pages_disk, pages_per_frame, VM_MAP, PAGE_KERNEL);
75791 +       if (!frame_ondisk) {
75792 +               err = -ENOMEM;
75793 +               goto out1;
75794 +       }
75796 +       for (i = 0; i < pages_per_frame; i++)
75797 +               kmap(pages[i]);
75799 +       /* map in-memory frame for read-only */
75800 +       frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO);
75801 +       if (!frame_mem) {
75802 +               err = -ENOMEM;
75803 +               goto out2;
75804 +       }
75806 +       mutex_lock(&sbi->compress.mtx_lznt);
75807 +       lznt = NULL;
75808 +       if (!sbi->compress.lznt) {
75809 +               /*
75810 +                * lznt implements two levels of compression:
75811 +                * 0 - standard compression
75812 +                * 1 - best compression (requires far more CPU time)
75813 +                * TODO: select the level via a mount option?
75814 +                */
75815 +               lznt = get_lznt_ctx(0);
75816 +               if (!lznt) {
75817 +                       mutex_unlock(&sbi->compress.mtx_lznt);
75818 +                       err = -ENOMEM;
75819 +                       goto out3;
75820 +               }
75822 +               sbi->compress.lznt = lznt;
75823 +               lznt = NULL;
75824 +       }
75826 +       /* compress: frame_mem -> frame_ondisk */
75827 +       compr_size = compress_lznt(frame_mem, frame_size, frame_ondisk,
75828 +                                  frame_size, sbi->compress.lznt);
75829 +       mutex_unlock(&sbi->compress.mtx_lznt);
75830 +       ntfs_free(lznt);
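+       /*
+        * Keep the compressed form only if it saves at least one whole
+        * cluster, since allocation is cluster-granular: e.g. with 4K
+        * clusters and a 64K frame, compr_size must be at most 60K for
+        * the frame to be stored compressed.
+        */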
75832 +       if (compr_size + sbi->cluster_size > frame_size) {
75833 +               /* frame is not compressed */
75834 +               compr_size = frame_size;
75835 +               ondisk_size = frame_size;
75836 +       } else if (compr_size) {
75837 +               /* frame is compressed */
75838 +               ondisk_size = ntfs_up_cluster(sbi, compr_size);
75839 +               memset(frame_ondisk + compr_size, 0, ondisk_size - compr_size);
75840 +       } else {
75841 +               /* frame is sparsed */
75842 +               ondisk_size = 0;
75843 +       }
75845 +       down_write(&ni->file.run_lock);
75846 +       run_truncate_around(&ni->file.run, le64_to_cpu(attr->nres.svcn));
75847 +       err = attr_allocate_frame(ni, frame, compr_size, ni->i_valid);
75848 +       up_write(&ni->file.run_lock);
75849 +       if (err)
75850 +               goto out2;
75852 +       if (!ondisk_size)
75853 +               goto out2;
75855 +       down_read(&ni->file.run_lock);
75856 +       err = ntfs_bio_pages(sbi, &ni->file.run,
75857 +                            ondisk_size < frame_size ? pages_disk : pages,
75858 +                            pages_per_frame, frame_vbo, ondisk_size,
75859 +                            REQ_OP_WRITE);
75860 +       up_read(&ni->file.run_lock);
75862 +out3:
75863 +       vunmap(frame_mem);
75865 +out2:
75866 +       for (i = 0; i < pages_per_frame; i++)
75867 +               kunmap(pages[i]);
75869 +       vunmap(frame_ondisk);
75870 +out1:
75871 +       for (i = 0; i < pages_per_frame; i++) {
75872 +               pg = pages_disk[i];
75873 +               if (pg) {
75874 +                       kunmap(pg);
75875 +                       unlock_page(pg);
75876 +                       put_page(pg);
75877 +               }
75878 +       }
75879 +       ntfs_free(pages_disk);
75880 +out:
75881 +       return err;
75885 + * update duplicate info of ATTR_FILE_NAME in MFT and in parent directories
75886 + */
75887 +static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
75888 +                            int sync)
75890 +       struct ATTRIB *attr;
75891 +       struct mft_inode *mi;
75892 +       struct ATTR_LIST_ENTRY *le = NULL;
75893 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
75894 +       struct super_block *sb = sbi->sb;
75895 +       bool re_dirty = false;
75896 +       bool active = sb->s_flags & SB_ACTIVE;
75897 +       bool upd_parent = ni->ni_flags & NI_FLAG_UPDATE_PARENT;
75899 +       if (ni->mi.mrec->flags & RECORD_FLAG_DIR) {
75900 +               dup->fa |= FILE_ATTRIBUTE_DIRECTORY;
75901 +               attr = NULL;
75902 +               dup->alloc_size = 0;
75903 +               dup->data_size = 0;
75904 +       } else {
75905 +               dup->fa &= ~FILE_ATTRIBUTE_DIRECTORY;
75907 +               attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL,
75908 +                                   &mi);
75909 +               if (!attr) {
75910 +                       dup->alloc_size = dup->data_size = 0;
75911 +               } else if (!attr->non_res) {
75912 +                       u32 data_size = le32_to_cpu(attr->res.data_size);
75914 +                       dup->alloc_size = cpu_to_le64(QuadAlign(data_size));
75915 +                       dup->data_size = cpu_to_le64(data_size);
75916 +               } else {
75917 +                       u64 new_valid = ni->i_valid;
75918 +                       u64 data_size = le64_to_cpu(attr->nres.data_size);
75919 +                       __le64 valid_le;
75921 +                       dup->alloc_size = is_attr_ext(attr)
75922 +                                                 ? attr->nres.total_size
75923 +                                                 : attr->nres.alloc_size;
75924 +                       dup->data_size = attr->nres.data_size;
75926 +                       if (new_valid > data_size)
75927 +                               new_valid = data_size;
75929 +                       valid_le = cpu_to_le64(new_valid);
75930 +                       if (valid_le != attr->nres.valid_size) {
75931 +                               attr->nres.valid_size = valid_le;
75932 +                               mi->dirty = true;
75933 +                       }
75934 +               }
75935 +       }
75937 +       /* TODO: fill reparse info */
75938 +       dup->reparse = 0;
75939 +       dup->ea_size = 0;
75941 +       if (ni->ni_flags & NI_FLAG_EA) {
75942 +               attr = ni_find_attr(ni, attr, &le, ATTR_EA_INFO, NULL, 0, NULL,
75943 +                                   NULL);
75944 +               if (attr) {
75945 +                       const struct EA_INFO *info;
75947 +                       info = resident_data_ex(attr, sizeof(struct EA_INFO));
75948 +                       dup->ea_size = info->size_pack;
75949 +               }
75950 +       }
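+       /*
+        * Push the refreshed duplicate info into every ATTR_FILE_NAME
+        * (one per hard link) and, via indx_update_dup, into the matching
+        * entry of each parent directory index.
+        */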
75952 +       attr = NULL;
75953 +       le = NULL;
75955 +       while ((attr = ni_find_attr(ni, attr, &le, ATTR_NAME, NULL, 0, NULL,
75956 +                                   &mi))) {
75957 +               struct inode *dir;
75958 +               struct ATTR_FILE_NAME *fname;
75960 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
75961 +               if (!fname)
75962 +                       continue;
75964 +               if (memcmp(&fname->dup, dup, sizeof(fname->dup))) {
75965 +                       memcpy(&fname->dup, dup, sizeof(fname->dup));
75966 +                       mi->dirty = true;
75967 +               } else if (!upd_parent) {
75968 +                       continue;
75969 +               }
75971 +               if (!active)
75972 +               continue; /* avoid __wait_on_freeing_inode(inode) */
75974 +               /* ntfs_iget5 may sleep */
75975 +               dir = ntfs_iget5(sb, &fname->home, NULL);
75976 +               if (IS_ERR(dir)) {
75977 +                       ntfs_inode_warn(
75978 +                               &ni->vfs_inode,
75979 +                               "failed to open parent directory r=%lx to update",
75980 +                               (long)ino_get(&fname->home));
75981 +                       continue;
75982 +               }
75984 +               if (!is_bad_inode(dir)) {
75985 +                       struct ntfs_inode *dir_ni = ntfs_i(dir);
75987 +                       if (!ni_trylock(dir_ni)) {
75988 +                               re_dirty = true;
75989 +                       } else {
75990 +                               indx_update_dup(dir_ni, sbi, fname, dup, sync);
75991 +                               ni_unlock(dir_ni);
75992 +                       }
75993 +               }
75994 +               iput(dir);
75995 +       }
75997 +       return re_dirty;
76001 + * ni_write_inode
76002 + *
76003 + * write mft base record and all subrecords to disk
76004 + */
76005 +int ni_write_inode(struct inode *inode, int sync, const char *hint)
76007 +       int err = 0, err2;
76008 +       struct ntfs_inode *ni = ntfs_i(inode);
76009 +       struct super_block *sb = inode->i_sb;
76010 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
76011 +       bool re_dirty = false;
76012 +       struct ATTR_STD_INFO *std;
76013 +       struct rb_node *node, *next;
76014 +       struct NTFS_DUP_INFO dup;
76016 +       if (is_bad_inode(inode) || sb_rdonly(sb))
76017 +               return 0;
76019 +       if (!ni_trylock(ni)) {
76020 +               /* 'ni' is under modification, skip for now */
76021 +               mark_inode_dirty_sync(inode);
76022 +               return 0;
76023 +       }
76025 +       if (is_rec_inuse(ni->mi.mrec) &&
76026 +           !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) {
76027 +               bool modified = false;
76029 +               /* update times in standard attribute */
76030 +               std = ni_std(ni);
76031 +               if (!std) {
76032 +                       err = -EINVAL;
76033 +                       goto out;
76034 +               }
76036 +               /* Update the access times if they have changed. */
76037 +               dup.m_time = kernel2nt(&inode->i_mtime);
76038 +               if (std->m_time != dup.m_time) {
76039 +                       std->m_time = dup.m_time;
76040 +                       modified = true;
76041 +               }
76043 +               dup.c_time = kernel2nt(&inode->i_ctime);
76044 +               if (std->c_time != dup.c_time) {
76045 +                       std->c_time = dup.c_time;
76046 +                       modified = true;
76047 +               }
76049 +               dup.a_time = kernel2nt(&inode->i_atime);
76050 +               if (std->a_time != dup.a_time) {
76051 +                       std->a_time = dup.a_time;
76052 +                       modified = true;
76053 +               }
76055 +               dup.fa = ni->std_fa;
76056 +               if (std->fa != dup.fa) {
76057 +                       std->fa = dup.fa;
76058 +                       modified = true;
76059 +               }
76061 +               if (modified)
76062 +                       ni->mi.dirty = true;
76064 +               if (!ntfs_is_meta_file(sbi, inode->i_ino) &&
76065 +                   (modified || (ni->ni_flags & NI_FLAG_UPDATE_PARENT))) {
76066 +                       dup.cr_time = std->cr_time;
76067 +                       /* Not critical if this function fails. */
76068 +                       re_dirty = ni_update_parent(ni, &dup, sync);
76070 +                       if (re_dirty)
76071 +                               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
76072 +                       else
76073 +                               ni->ni_flags &= ~NI_FLAG_UPDATE_PARENT;
76074 +               }
76076 +               /* update attribute list */
76077 +               if (ni->attr_list.size && ni->attr_list.dirty) {
76078 +                       if (inode->i_ino != MFT_REC_MFT || sync) {
76079 +                               err = ni_try_remove_attr_list(ni);
76080 +                               if (err)
76081 +                                       goto out;
76082 +                       }
76084 +                       err = al_update(ni);
76085 +                       if (err)
76086 +                               goto out;
76087 +               }
76088 +       }
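+       /*
+        * Flush all dirty subrecords; a subrecord left without attributes
+        * is released back to the MFT and dropped from the in-memory tree.
+        */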
76090 +       for (node = rb_first(&ni->mi_tree); node; node = next) {
76091 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
76092 +               bool is_empty;
76094 +               next = rb_next(node);
76096 +               if (!mi->dirty)
76097 +                       continue;
76099 +               is_empty = !mi_enum_attr(mi, NULL);
76101 +               if (is_empty)
76102 +                       clear_rec_inuse(mi->mrec);
76104 +               err2 = mi_write(mi, sync);
76105 +               if (!err && err2)
76106 +                       err = err2;
76108 +               if (is_empty) {
76109 +                       ntfs_mark_rec_free(sbi, mi->rno);
76110 +                       rb_erase(node, &ni->mi_tree);
76111 +                       mi_put(mi);
76112 +               }
76113 +       }
76115 +       if (ni->mi.dirty) {
76116 +               err2 = mi_write(&ni->mi, sync);
76117 +               if (!err && err2)
76118 +                       err = err2;
76119 +       }
76120 +out:
76121 +       ni_unlock(ni);
76123 +       if (err) {
76124 +               ntfs_err(sb, "%s r=%lx failed, %d.", hint, inode->i_ino, err);
76125 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
76126 +               return err;
76127 +       }
76129 +       if (re_dirty && (sb->s_flags & SB_ACTIVE))
76130 +               mark_inode_dirty_sync(inode);
76132 +       return 0;
76134 diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
76135 new file mode 100644
76136 index 000000000000..53da12252408
76137 --- /dev/null
76138 +++ b/fs/ntfs3/fslog.c
76139 @@ -0,0 +1,5181 @@
76140 +// SPDX-License-Identifier: GPL-2.0
76142 + *
76143 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
76144 + *
76145 + */
76147 +#include <linux/blkdev.h>
76148 +#include <linux/buffer_head.h>
76149 +#include <linux/fs.h>
76150 +#include <linux/hash.h>
76151 +#include <linux/nls.h>
76152 +#include <linux/random.h>
76153 +#include <linux/ratelimit.h>
76154 +#include <linux/slab.h>
76156 +#include "debug.h"
76157 +#include "ntfs.h"
76158 +#include "ntfs_fs.h"
76161 + * LOG FILE structs
76162 + */
76164 +// clang-format off
76166 +#define MaxLogFileSize     0x100000000ull
76167 +#define DefaultLogPageSize 4096
76168 +#define MinLogRecordPages  0x30
76170 +struct RESTART_HDR {
76171 +       struct NTFS_RECORD_HEADER rhdr; // 'RSTR'
76172 +       __le32 sys_page_size; // 0x10: Page size of the system which initialized the log
76173 +       __le32 page_size;     // 0x14: Log page size used for this log file
76174 +       __le16 ra_off;        // 0x18:
76175 +       __le16 minor_ver;     // 0x1A:
76176 +       __le16 major_ver;     // 0x1C:
76177 +       __le16 fixups[];
76180 +#define LFS_NO_CLIENT 0xffff
76181 +#define LFS_NO_CLIENT_LE cpu_to_le16(0xffff)
76183 +struct CLIENT_REC {
76184 +       __le64 oldest_lsn;
76185 +       __le64 restart_lsn; // 0x08:
76186 +       __le16 prev_client; // 0x10:
76187 +       __le16 next_client; // 0x12:
76188 +       __le16 seq_num;     // 0x14:
76189 +       u8 align[6];        // 0x16
76190 +       __le32 name_bytes;  // 0x1C: in bytes
76191 +       __le16 name[32];    // 0x20: name of client
76194 +static_assert(sizeof(struct CLIENT_REC) == 0x60);
76196 +/* Two copies of these will exist at the beginning of the log file */
76197 +struct RESTART_AREA {
76198 +       __le64 current_lsn;    // 0x00: Current logical end of log file
76199 +       __le16 log_clients;    // 0x08: Maximum number of clients
76200 +       __le16 client_idx[2];  // 0x0A: free/use index into the client record arrays
76201 +       __le16 flags;          // 0x0E: See RESTART_SINGLE_PAGE_IO
76202 +       __le32 seq_num_bits;   // 0x10: the number of bits in sequence number.
76203 +       __le16 ra_len;         // 0x14:
76204 +       __le16 client_off;     // 0x16:
76205 +       __le64 l_size;         // 0x18: Usable log file size.
76206 +       __le32 last_lsn_data_len; // 0x20:
76207 +       __le16 rec_hdr_len;    // 0x24: log record header length
76208 +       __le16 data_off;       // 0x26: log page data offset
76209 +       __le32 open_log_count; // 0x28:
76210 +       __le32 align[5];       // 0x2C:
76211 +       struct CLIENT_REC clients[]; // 0x40:
76214 +struct LOG_REC_HDR {
76215 +       __le16 redo_op;      // 0x00:  NTFS_LOG_OPERATION
76216 +       __le16 undo_op;      // 0x02:  NTFS_LOG_OPERATION
76217 +       __le16 redo_off;     // 0x04:  Offset to Redo record
76218 +       __le16 redo_len;     // 0x06:  Redo length
76219 +       __le16 undo_off;     // 0x08:  Offset to Undo record
76220 +       __le16 undo_len;     // 0x0A:  Undo length
76221 +       __le16 target_attr;  // 0x0C:
76222 +       __le16 lcns_follow;  // 0x0E:
76223 +       __le16 record_off;   // 0x10:
76224 +       __le16 attr_off;     // 0x12:
76225 +       __le16 cluster_off;  // 0x14:
76226 +       __le16 reserved;     // 0x16:
76227 +       __le64 target_vcn;   // 0x18:
76228 +       __le64 page_lcns[];  // 0x20:
76231 +static_assert(sizeof(struct LOG_REC_HDR) == 0x20);
76233 +#define RESTART_ENTRY_ALLOCATED    0xFFFFFFFF
76234 +#define RESTART_ENTRY_ALLOCATED_LE cpu_to_le32(0xFFFFFFFF)
76236 +struct RESTART_TABLE {
76237 +       __le16 size;       // 0x00:  In bytes
76238 +       __le16 used;       // 0x02: entries
76239 +       __le16 total;      // 0x04: entries
76240 +       __le16 res[3];     // 0x06:
76241 +       __le32 free_goal;  // 0x0C:
76242 +       __le32 first_free; // 0x10
76243 +       __le32 last_free;  // 0x14
76247 +static_assert(sizeof(struct RESTART_TABLE) == 0x18);
76249 +struct ATTR_NAME_ENTRY {
76250 +       __le16 off; // offset in the Open attribute Table
76251 +       __le16 name_bytes;
76252 +       __le16 name[];
76255 +struct OPEN_ATTR_ENRTY {
76256 +       __le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
76257 +       __le32 bytes_per_index; // 0x04:
76258 +       enum ATTR_TYPE type;    // 0x08:
76259 +       u8 is_dirty_pages;      // 0x0C:
76260 +       u8 is_attr_name;        // 0x0D: Faked field to manage 'ptr'
76261 +       u8 name_len;            // 0x0E: Faked field to manage 'ptr'
76262 +       u8 res;
76263 +       struct MFT_REF ref; // 0x10: File Reference of file containing attribute
76264 +       __le64 open_record_lsn; // 0x18:
76265 +       void *ptr;              // 0x20:
76268 +/* 32 bit version of 'struct OPEN_ATTR_ENRTY' */
76269 +struct OPEN_ATTR_ENRTY_32 {
76270 +       __le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
76271 +       __le32 ptr;             // 0x04:
76272 +       struct MFT_REF ref;     // 0x08:
76273 +       __le64 open_record_lsn; // 0x10:
76274 +       u8 is_dirty_pages;      // 0x18:
76275 +       u8 is_attr_name;        // 0x19
76276 +       u8 res1[2];
76277 +       enum ATTR_TYPE type;    // 0x1C:
76278 +       u8 name_len;            // 0x20:  in wchar
76279 +       u8 res2[3];
76280 +       __le32 AttributeName;   // 0x24:
76281 +       __le32 bytes_per_index; // 0x28:
76284 +#define SIZEOF_OPENATTRIBUTEENTRY0 0x2c
76285 +// static_assert( 0x2C == sizeof(struct OPEN_ATTR_ENRTY_32) );
76286 +static_assert(sizeof(struct OPEN_ATTR_ENRTY) < SIZEOF_OPENATTRIBUTEENTRY0);
76289 + * One entry exists in the Dirty Pages Table for each page which is dirty at the
76290 + * time the Restart Area is written
76291 + */
76292 +struct DIR_PAGE_ENTRY {
76293 +       __le32 next;         // 0x00:  RESTART_ENTRY_ALLOCATED if allocated
76294 +       __le32 target_attr;  // 0x04:  Index into the Open attribute Table
76295 +       __le32 transfer_len; // 0x08:
76296 +       __le32 lcns_follow;  // 0x0C:
76297 +       __le64 vcn;          // 0x10:  Vcn of dirty page
76298 +       __le64 oldest_lsn;   // 0x18:
76299 +       __le64 page_lcns[];  // 0x20:
76302 +static_assert(sizeof(struct DIR_PAGE_ENTRY) == 0x20);
76304 +/* 32 bit version of 'struct DIR_PAGE_ENTRY' */
76305 +struct DIR_PAGE_ENTRY_32 {
76306 +       __le32 next;         // 0x00:  RESTART_ENTRY_ALLOCATED if allocated
76307 +       __le32 target_attr;  // 0x04:  Index into the Open attribute Table
76308 +       __le32 transfer_len; // 0x08:
76309 +       __le32 lcns_follow;  // 0x0C:
76310 +       __le32 reserved;     // 0x10:
76311 +       __le32 vcn_low;      // 0x14:  Vcn of dirty page
76312 +       __le32 vcn_hi;       // 0x18:  Vcn of dirty page
76313 +       __le32 oldest_lsn_low; // 0x1C:
76314 +       __le32 oldest_lsn_hi; // 0x20:
76315 +       __le32 page_lcns_low; // 0x24:
76316 +       __le32 page_lcns_hi; // 0x28:
76319 +static_assert(offsetof(struct DIR_PAGE_ENTRY_32, vcn_low) == 0x14);
76320 +static_assert(sizeof(struct DIR_PAGE_ENTRY_32) == 0x2c);
76322 +enum transact_state {
76323 +       TransactionUninitialized = 0,
76324 +       TransactionActive,
76325 +       TransactionPrepared,
76326 +       TransactionCommitted
76329 +struct TRANSACTION_ENTRY {
76330 +       __le32 next;          // 0x00: RESTART_ENTRY_ALLOCATED if allocated
76331 +       u8 transact_state;    // 0x04:
76332 +       u8 reserved[3];       // 0x05:
76333 +       __le64 first_lsn;     // 0x08:
76334 +       __le64 prev_lsn;      // 0x10:
76335 +       __le64 undo_next_lsn; // 0x18:
76336 +       __le32 undo_records;  // 0x20: Number of undo log records pending abort
76337 +       __le32 undo_len;      // 0x24: Total undo size
76340 +static_assert(sizeof(struct TRANSACTION_ENTRY) == 0x28);
76342 +struct NTFS_RESTART {
76343 +       __le32 major_ver;             // 0x00:
76344 +       __le32 minor_ver;             // 0x04:
76345 +       __le64 check_point_start;     // 0x08:
76346 +       __le64 open_attr_table_lsn;   // 0x10:
76347 +       __le64 attr_names_lsn;        // 0x18:
76348 +       __le64 dirty_pages_table_lsn; // 0x20:
76349 +       __le64 transact_table_lsn;    // 0x28:
76350 +       __le32 open_attr_len;         // 0x30: In bytes
76351 +       __le32 attr_names_len;        // 0x34: In bytes
76352 +       __le32 dirty_pages_len;       // 0x38: In bytes
76353 +       __le32 transact_table_len;    // 0x3C: In bytes
76356 +static_assert(sizeof(struct NTFS_RESTART) == 0x40);
76358 +struct NEW_ATTRIBUTE_SIZES {
76359 +       __le64 alloc_size;
76360 +       __le64 valid_size;
76361 +       __le64 data_size;
76362 +       __le64 total_size;
76365 +struct BITMAP_RANGE {
76366 +       __le32 bitmap_off;
76367 +       __le32 bits;
76370 +struct LCN_RANGE {
76371 +       __le64 lcn;
76372 +       __le64 len;
76375 +/* The following type defines the different log record types */
76376 +#define LfsClientRecord  cpu_to_le32(1)
76377 +#define LfsClientRestart cpu_to_le32(2)
76379 +/* This is used to uniquely identify a client for a particular log file */
76380 +struct CLIENT_ID {
76381 +       __le16 seq_num;
76382 +       __le16 client_idx;
76385 +/* This is the header that begins every Log Record in the log file */
76386 +struct LFS_RECORD_HDR {
76387 +       __le64 this_lsn;    // 0x00:
76388 +       __le64 client_prev_lsn;  // 0x08:
76389 +       __le64 client_undo_next_lsn; // 0x10:
76390 +       __le32 client_data_len;  // 0x18:
76391 +       struct CLIENT_ID client; // 0x1C: Owner of this log record
76392 +       __le32 record_type; // 0x20: LfsClientRecord or LfsClientRestart
76393 +       __le32 transact_id; // 0x24:
76394 +       __le16 flags;       // 0x28:    LOG_RECORD_MULTI_PAGE
76395 +       u8 align[6];        // 0x2A:
76398 +#define LOG_RECORD_MULTI_PAGE cpu_to_le16(1)
76400 +static_assert(sizeof(struct LFS_RECORD_HDR) == 0x30);
76402 +struct LFS_RECORD {
76403 +       __le16 next_record_off; // 0x00: Offset of the free space in the page
76404 +       u8 align[6];         // 0x02:
76405 +       __le64 last_end_lsn; // 0x08: lsn for the last log record which ends on the page
76408 +static_assert(sizeof(struct LFS_RECORD) == 0x10);
76410 +struct RECORD_PAGE_HDR {
76411 +       struct NTFS_RECORD_HEADER rhdr; // 'RCRD'
76412 +       __le32 rflags;     // 0x10:  See LOG_PAGE_LOG_RECORD_END
76413 +       __le16 page_count; // 0x14:
76414 +       __le16 page_pos;   // 0x16:
76415 +       struct LFS_RECORD record_hdr; // 0x18
76416 +       __le16 fixups[10]; // 0x28
76417 +       __le32 file_off;   // 0x3c: used when major version >= 2
76420 +// clang-format on
76422 +// Page contains the end of a log record
76423 +#define LOG_PAGE_LOG_RECORD_END cpu_to_le32(0x00000001)
76425 +static inline bool is_log_record_end(const struct RECORD_PAGE_HDR *hdr)
76427 +       return hdr->rflags & LOG_PAGE_LOG_RECORD_END;
76430 +static_assert(offsetof(struct RECORD_PAGE_HDR, file_off) == 0x3c);
76433 + * END of NTFS LOG structures
76434 + */
76436 +/* Define some tuning parameters to keep the restart tables a reasonable size */
76437 +#define INITIAL_NUMBER_TRANSACTIONS 5
76439 +enum NTFS_LOG_OPERATION {
76441 +       Noop = 0x00,
76442 +       CompensationLogRecord = 0x01,
76443 +       InitializeFileRecordSegment = 0x02,
76444 +       DeallocateFileRecordSegment = 0x03,
76445 +       WriteEndOfFileRecordSegment = 0x04,
76446 +       CreateAttribute = 0x05,
76447 +       DeleteAttribute = 0x06,
76448 +       UpdateResidentValue = 0x07,
76449 +       UpdateNonresidentValue = 0x08,
76450 +       UpdateMappingPairs = 0x09,
76451 +       DeleteDirtyClusters = 0x0A,
76452 +       SetNewAttributeSizes = 0x0B,
76453 +       AddIndexEntryRoot = 0x0C,
76454 +       DeleteIndexEntryRoot = 0x0D,
76455 +       AddIndexEntryAllocation = 0x0E,
76456 +       DeleteIndexEntryAllocation = 0x0F,
76457 +       WriteEndOfIndexBuffer = 0x10,
76458 +       SetIndexEntryVcnRoot = 0x11,
76459 +       SetIndexEntryVcnAllocation = 0x12,
76460 +       UpdateFileNameRoot = 0x13,
76461 +       UpdateFileNameAllocation = 0x14,
76462 +       SetBitsInNonresidentBitMap = 0x15,
76463 +       ClearBitsInNonresidentBitMap = 0x16,
76464 +       HotFix = 0x17,
76465 +       EndTopLevelAction = 0x18,
76466 +       PrepareTransaction = 0x19,
76467 +       CommitTransaction = 0x1A,
76468 +       ForgetTransaction = 0x1B,
76469 +       OpenNonresidentAttribute = 0x1C,
76470 +       OpenAttributeTableDump = 0x1D,
76471 +       AttributeNamesDump = 0x1E,
76472 +       DirtyPageTableDump = 0x1F,
76473 +       TransactionTableDump = 0x20,
76474 +       UpdateRecordDataRoot = 0x21,
76475 +       UpdateRecordDataAllocation = 0x22,
76477 +       UpdateRelativeDataInIndex =
76478 +               0x23, // NtOfsRestartUpdateRelativeDataInIndex
76479 +       UpdateRelativeDataInIndex2 = 0x24,
76480 +       ZeroEndOfFileRecord = 0x25,
76484 + * Array for log records which require a target attribute
76485 + * A set bit indicates that the corresponding log operation requires a target attribute
76486 + */
76487 +static const u8 AttributeRequired[] = {
76488 +       0xFC, 0xFB, 0xFF, 0x10, 0x06,
76491 +static inline bool is_target_required(u16 op)
76493 +       bool ret = op <= UpdateRecordDataAllocation &&
76494 +                  (AttributeRequired[op >> 3] >> (op & 7) & 1);
76495 +       return ret;
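+/*
+ * Example: InitializeFileRecordSegment (0x02) tests bit 2 of
+ * AttributeRequired[0] == 0xFC == 0b11111100, which is set, so it
+ * requires a target attribute; Noop (0x00) tests bit 0, which is clear.
+ */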
76498 +static inline bool can_skip_action(enum NTFS_LOG_OPERATION op)
76500 +       switch (op) {
76501 +       case Noop:
76502 +       case DeleteDirtyClusters:
76503 +       case HotFix:
76504 +       case EndTopLevelAction:
76505 +       case PrepareTransaction:
76506 +       case CommitTransaction:
76507 +       case ForgetTransaction:
76508 +       case CompensationLogRecord:
76509 +       case OpenNonresidentAttribute:
76510 +       case OpenAttributeTableDump:
76511 +       case AttributeNamesDump:
76512 +       case DirtyPageTableDump:
76513 +       case TransactionTableDump:
76514 +               return true;
76515 +       default:
76516 +               return false;
76517 +       }
76520 +enum { lcb_ctx_undo_next, lcb_ctx_prev, lcb_ctx_next };
76522 +/* bytes per restart table */
76523 +static inline u32 bytes_per_rt(const struct RESTART_TABLE *rt)
76525 +       return le16_to_cpu(rt->used) * le16_to_cpu(rt->size) +
76526 +              sizeof(struct RESTART_TABLE);
76529 +/* log record length */
76530 +static inline u32 lrh_length(const struct LOG_REC_HDR *lr)
76532 +       u16 t16 = le16_to_cpu(lr->lcns_follow);
76534 +       return struct_size(lr, page_lcns, max_t(u16, 1, t16));
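+/*
+ * E.g. lcns_follow == 2 gives sizeof(struct LOG_REC_HDR) + 2 * 8 = 0x30
+ * bytes; lcns_follow == 0 still reserves room for one lcn entry.
+ */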
76537 +struct lcb {
76538 +       struct LFS_RECORD_HDR *lrh; // Log record header of the current lsn
76539 +       struct LOG_REC_HDR *log_rec;
76540 +       u32 ctx_mode; // lcb_ctx_undo_next/lcb_ctx_prev/lcb_ctx_next
76541 +       struct CLIENT_ID client;
76542 +       bool alloc; // if true then we should deallocate 'log_rec'
76545 +static void lcb_put(struct lcb *lcb)
76547 +       if (lcb->alloc)
76548 +               ntfs_free(lcb->log_rec);
76549 +       ntfs_free(lcb->lrh);
76550 +       ntfs_free(lcb);
76554 + * oldest_client_lsn
76555 + *
76556 + * find the oldest lsn from active clients.
76557 + */
76558 +static inline void oldest_client_lsn(const struct CLIENT_REC *ca,
76559 +                                    __le16 next_client, u64 *oldest_lsn)
76561 +       while (next_client != LFS_NO_CLIENT_LE) {
76562 +               const struct CLIENT_REC *cr = ca + le16_to_cpu(next_client);
76563 +               u64 lsn = le64_to_cpu(cr->oldest_lsn);
76565 +               /* ignore this block if its oldest lsn is 0 */
76566 +               if (lsn && lsn < *oldest_lsn)
76567 +                       *oldest_lsn = lsn;
76569 +               next_client = cr->next_client;
76570 +       }
76573 +static inline bool is_rst_page_hdr_valid(u32 file_off,
76574 +                                        const struct RESTART_HDR *rhdr)
76576 +       u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
76577 +       u32 page_size = le32_to_cpu(rhdr->page_size);
76578 +       u32 end_usa;
76579 +       u16 ro;
76581 +       if (sys_page < SECTOR_SIZE || page_size < SECTOR_SIZE ||
76582 +           sys_page & (sys_page - 1) || page_size & (page_size - 1)) {
76583 +               return false;
76584 +       }
76586 +       /* Check that if the file offset isn't 0, it is the system page size */
76587 +       if (file_off && file_off != sys_page)
76588 +               return false;
76590 +       /* Check the version: only 1.1 through 2.x is supported */
76591 +       if (le16_to_cpu(rhdr->major_ver) <= 1 && !rhdr->minor_ver)
76592 +               return false;
76594 +       if (le16_to_cpu(rhdr->major_ver) > 2)
76595 +               return false;
76597 +       ro = le16_to_cpu(rhdr->ra_off);
76598 +       if (!IsQuadAligned(ro) || ro > sys_page)
76599 +               return false;
76601 +       end_usa = ((sys_page >> SECTOR_SHIFT) + 1) * sizeof(short);
76602 +       end_usa += le16_to_cpu(rhdr->rhdr.fix_off);
76604 +       if (ro < end_usa)
76605 +               return false;
76607 +       return true;
76610 +static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
76612 +       const struct RESTART_AREA *ra;
76613 +       u16 cl, fl, ul;
76614 +       u32 off, l_size, file_dat_bits, file_size_round;
76615 +       u16 ro = le16_to_cpu(rhdr->ra_off);
76616 +       u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
76618 +       if (ro + offsetof(struct RESTART_AREA, l_size) >
76619 +           SECTOR_SIZE - sizeof(short))
76620 +               return false;
76622 +       ra = Add2Ptr(rhdr, ro);
76623 +       cl = le16_to_cpu(ra->log_clients);
76625 +       if (cl > 1)
76626 +               return false;
76628 +       off = le16_to_cpu(ra->client_off);
76630 +       if (!IsQuadAligned(off) || ro + off > SECTOR_SIZE - sizeof(short))
76631 +               return false;
76633 +       off += cl * sizeof(struct CLIENT_REC);
76635 +       if (off > sys_page)
76636 +               return false;
76638 +       /*
76639 +        * Check the restart length field and whether the entire
76640 +        * restart area is contained within that length
76641 +        */
76642 +       if (le16_to_cpu(rhdr->ra_off) + le16_to_cpu(ra->ra_len) > sys_page ||
76643 +           off > le16_to_cpu(ra->ra_len)) {
76644 +               return false;
76645 +       }
76647 +       /*
76648 +        * As a final check make sure that the use list and the free list
76649 +        * are either empty or point to a valid client
76650 +        */
76651 +       fl = le16_to_cpu(ra->client_idx[0]);
76652 +       ul = le16_to_cpu(ra->client_idx[1]);
76653 +       if ((fl != LFS_NO_CLIENT && fl >= cl) ||
76654 +           (ul != LFS_NO_CLIENT && ul >= cl))
76655 +               return false;
76657 +       /* Make sure the sequence number bits match the log file size */
76658 +       l_size = le64_to_cpu(ra->l_size);
76660 +       file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
76661 +       file_size_round = 1u << (file_dat_bits + 3);
76662 +       if (file_size_round != l_size &&
76663 +           (file_size_round < l_size || (file_size_round / 2) > l_size)) {
76664 +               return false;
76665 +       }
76667 +       /* The log page data offset and record header length must be quad-aligned */
76668 +       if (!IsQuadAligned(le16_to_cpu(ra->data_off)) ||
76669 +           !IsQuadAligned(le16_to_cpu(ra->rec_hdr_len)))
76670 +               return false;
76672 +       return true;
76675 +static inline bool is_client_area_valid(const struct RESTART_HDR *rhdr,
76676 +                                       bool usa_error)
76678 +       u16 ro = le16_to_cpu(rhdr->ra_off);
76679 +       const struct RESTART_AREA *ra = Add2Ptr(rhdr, ro);
76680 +       u16 ra_len = le16_to_cpu(ra->ra_len);
76681 +       const struct CLIENT_REC *ca;
76682 +       u32 i;
76684 +       if (usa_error && ra_len + ro > SECTOR_SIZE - sizeof(short))
76685 +               return false;
76687 +       /* Find the start of the client array */
76688 +       ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
76690 +       /*
76691 +        * Start with the free list
76692 +        * Check that all the clients are valid and that there isn't a cycle
76693 +        * Do the in-use list on the second pass
76694 +        */
76695 +       for (i = 0; i < 2; i++) {
76696 +               u16 client_idx = le16_to_cpu(ra->client_idx[i]);
76697 +               bool first_client = true;
76698 +               u16 clients = le16_to_cpu(ra->log_clients);
76700 +               while (client_idx != LFS_NO_CLIENT) {
76701 +                       const struct CLIENT_REC *cr;
76703 +                       if (!clients ||
76704 +                           client_idx >= le16_to_cpu(ra->log_clients))
76705 +                               return false;
76707 +                       clients -= 1;
76708 +                       cr = ca + client_idx;
76710 +                       client_idx = le16_to_cpu(cr->next_client);
76712 +                       if (first_client) {
76713 +                               first_client = false;
76714 +                               if (cr->prev_client != LFS_NO_CLIENT_LE)
76715 +                                       return false;
76716 +                       }
76717 +               }
76718 +       }
76720 +       return true;
76724 + * remove_client
76725 + *
76726 + * remove a client record from a client record list in a restart area
76727 + */
76728 +static inline void remove_client(struct CLIENT_REC *ca,
76729 +                                const struct CLIENT_REC *cr, __le16 *head)
76731 +       if (cr->prev_client == LFS_NO_CLIENT_LE)
76732 +               *head = cr->next_client;
76733 +       else
76734 +               ca[le16_to_cpu(cr->prev_client)].next_client = cr->next_client;
76736 +       if (cr->next_client != LFS_NO_CLIENT_LE)
76737 +               ca[le16_to_cpu(cr->next_client)].prev_client = cr->prev_client;
76741 + * add_client
76742 + *
76743 + * add a client record to the start of a list
76744 + */
76745 +static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head)
76747 +       struct CLIENT_REC *cr = ca + index;
76749 +       cr->prev_client = LFS_NO_CLIENT_LE;
76750 +       cr->next_client = *head;
76752 +       if (*head != LFS_NO_CLIENT_LE)
76753 +               ca[le16_to_cpu(*head)].prev_client = cpu_to_le16(index);
76755 +       *head = cpu_to_le16(index);
76759 + * enum_rstbl
76760 + * Walk the allocated entries of a restart table ('c' == NULL starts the walk).
76761 + */
76762 +static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c)
76764 +       __le32 *e;
76765 +       u32 bprt;
76766 +       u16 rsize = t ? le16_to_cpu(t->size) : 0;
76768 +       if (!c) {
76769 +               if (!t || !t->total)
76770 +                       return NULL;
76771 +               e = Add2Ptr(t, sizeof(struct RESTART_TABLE));
76772 +       } else {
76773 +               e = Add2Ptr(c, rsize);
76774 +       }
76776 +       /* Loop until we hit the first one allocated, or the end of the list */
76777 +       for (bprt = bytes_per_rt(t); PtrOffset(t, e) < bprt;
76778 +            e = Add2Ptr(e, rsize)) {
76779 +               if (*e == RESTART_ENTRY_ALLOCATED_LE)
76780 +                       return e;
76781 +       }
76782 +       return NULL;
76786 + * find_dp
76787 + *
76788 + * searches for 'vcn' in the Dirty Page Table.
76789 + */
76790 +static inline struct DIR_PAGE_ENTRY *find_dp(struct RESTART_TABLE *dptbl,
76791 +                                            u32 target_attr, u64 vcn)
76793 +       __le32 ta = cpu_to_le32(target_attr);
76794 +       struct DIR_PAGE_ENTRY *dp = NULL;
76796 +       while ((dp = enum_rstbl(dptbl, dp))) {
76797 +               u64 dp_vcn = le64_to_cpu(dp->vcn);
76799 +               if (dp->target_attr == ta && vcn >= dp_vcn &&
76800 +                   vcn < dp_vcn + le32_to_cpu(dp->lcns_follow)) {
76801 +                       return dp;
76802 +               }
76803 +       }
76804 +       return NULL;
76807 +static inline u32 norm_file_page(u32 page_size, u32 *l_size, bool use_default)
76809 +       if (use_default)
76810 +               page_size = DefaultLogPageSize;
76812 +       /* Round the file size down to a system page boundary */
76813 +       *l_size &= ~(page_size - 1);
76815 +       /* File should contain at least 2 restart pages and MinLogRecordPages pages */
76816 +       if (*l_size < (MinLogRecordPages + 2) * page_size)
76817 +               return 0;
76819 +       return page_size;
76822 +static bool check_log_rec(const struct LOG_REC_HDR *lr, u32 bytes, u32 tr,
76823 +                         u32 bytes_per_attr_entry)
76825 +       u16 t16;
76827 +       if (bytes < sizeof(struct LOG_REC_HDR))
76828 +               return false;
76829 +       if (!tr)
76830 +               return false;
76832 +       if ((tr - sizeof(struct RESTART_TABLE)) %
76833 +           sizeof(struct TRANSACTION_ENTRY))
76834 +               return false;
76836 +       if (le16_to_cpu(lr->redo_off) & 7)
76837 +               return false;
76839 +       if (le16_to_cpu(lr->undo_off) & 7)
76840 +               return false;
76842 +       if (lr->target_attr)
76843 +               goto check_lcns;
76845 +       if (is_target_required(le16_to_cpu(lr->redo_op)))
76846 +               return false;
76848 +       if (is_target_required(le16_to_cpu(lr->undo_op)))
76849 +               return false;
76851 +check_lcns:
76852 +       if (!lr->lcns_follow)
76853 +               goto check_length;
76855 +       t16 = le16_to_cpu(lr->target_attr);
76856 +       if ((t16 - sizeof(struct RESTART_TABLE)) % bytes_per_attr_entry)
76857 +               return false;
76859 +check_length:
76860 +       if (bytes < lrh_length(lr))
76861 +               return false;
76863 +       return true;
76864 +}
76865 +
76866 +static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
76867 +{
76868 +       u32 ts;
76869 +       u32 i, off;
76870 +       u16 rsize = le16_to_cpu(rt->size);
76871 +       u16 ne = le16_to_cpu(rt->used);
76872 +       u32 ff = le32_to_cpu(rt->first_free);
76873 +       u32 lf = le32_to_cpu(rt->last_free);
76875 +       ts = rsize * ne + sizeof(struct RESTART_TABLE);
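+       /*
+        * ts is the byte offset just past the last used entry; e.g. with
+        * rsize == 40 and ne == 16 it is sizeof(struct RESTART_TABLE) + 640.
+        */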
76877 +       if (!rsize || rsize > bytes ||
76878 +           rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
76879 +           le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
76880 +           (ff && ff < sizeof(struct RESTART_TABLE)) ||
76881 +           (lf && lf < sizeof(struct RESTART_TABLE))) {
76882 +               return false;
76883 +       }
76885 +       /* Verify each entry is either allocated or points
76886 +        * to a valid offset in the table
76887 +        */
76888 +       for (i = 0; i < ne; i++) {
76889 +               off = le32_to_cpu(*(__le32 *)Add2Ptr(
76890 +                       rt, i * rsize + sizeof(struct RESTART_TABLE)));
76892 +               if (off != RESTART_ENTRY_ALLOCATED && off &&
76893 +                   (off < sizeof(struct RESTART_TABLE) ||
76894 +                    ((off - sizeof(struct RESTART_TABLE)) % rsize))) {
76895 +                       return false;
76896 +               }
76897 +       }
76899 +       /* Walk through the list headed by the first entry to make
76900 +        * sure none of the entries are currently being used
76901 +        */
76902 +       for (off = ff; off;) {
76903 +               if (off == RESTART_ENTRY_ALLOCATED)
76904 +                       return false;
76906 +               off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
76907 +       }
76909 +       return true;
76910 +}
76911 +
76912 +/*
76913 + * free_rsttbl_idx
76914 + *
76915 + * frees a previously allocated index in a Restart Table.
76916 + */
76917 +static inline void free_rsttbl_idx(struct RESTART_TABLE *rt, u32 off)
76918 +{
76919 +       __le32 *e;
76920 +       u32 lf = le32_to_cpu(rt->last_free);
76921 +       __le32 off_le = cpu_to_le32(off);
76923 +       e = Add2Ptr(rt, off);
76925 +       if (off < le32_to_cpu(rt->free_goal)) {
76926 +               *e = rt->first_free;
76927 +               rt->first_free = off_le;
76928 +               if (!lf)
76929 +                       rt->last_free = off_le;
76930 +       } else {
76931 +               if (lf)
76932 +                       *(__le32 *)Add2Ptr(rt, lf) = off_le;
76933 +               else
76934 +                       rt->first_free = off_le;
76936 +               rt->last_free = off_le;
76937 +               *e = 0;
76938 +       }
76940 +       le16_sub_cpu(&rt->total, 1);
76941 +}
76942 +
76943 +static inline struct RESTART_TABLE *init_rsttbl(u16 esize, u16 used)
76944 +{
76945 +       __le32 *e, *last_free;
76946 +       u32 off;
76947 +       u32 bytes = esize * used + sizeof(struct RESTART_TABLE);
76948 +       u32 lf = sizeof(struct RESTART_TABLE) + (used - 1) * esize;
76949 +       struct RESTART_TABLE *t = ntfs_zalloc(bytes);
76951 +       t->size = cpu_to_le16(esize);
76952 +       t->used = cpu_to_le16(used);
76953 +       t->free_goal = cpu_to_le32(~0u);
76954 +       t->first_free = cpu_to_le32(sizeof(struct RESTART_TABLE));
76955 +       t->last_free = cpu_to_le32(lf);
76957 +       e = (__le32 *)(t + 1);
76958 +       last_free = Add2Ptr(t, lf);
76960 +       for (off = sizeof(struct RESTART_TABLE) + esize; e < last_free;
76961 +            e = Add2Ptr(e, esize), off += esize) {
76962 +               *e = cpu_to_le32(off);
76963 +       }
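+       /*
+        * Each free entry now holds the offset of the next one; the last
+        * entry keeps the zero written by ntfs_zalloc, and first_free /
+        * last_free bracket the resulting chain.
+        */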
76964 +       return t;
76965 +}
76966 +
76967 +static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
76968 +                                                 u32 add, u32 free_goal)
76969 +{
76970 +       u16 esize = le16_to_cpu(tbl->size);
76971 +       __le32 osize = cpu_to_le32(bytes_per_rt(tbl));
76972 +       u32 used = le16_to_cpu(tbl->used);
76973 +       struct RESTART_TABLE *rt = init_rsttbl(esize, used + add);
76975 +       memcpy(rt + 1, tbl + 1, esize * used);
76977 +       rt->free_goal = free_goal == ~0u
76978 +                               ? cpu_to_le32(~0u)
76979 +                               : cpu_to_le32(sizeof(struct RESTART_TABLE) +
76980 +                                             free_goal * esize);
76982 +       if (tbl->first_free) {
76983 +               rt->first_free = tbl->first_free;
76984 +               *(__le32 *)Add2Ptr(rt, le32_to_cpu(tbl->last_free)) = osize;
76985 +       } else {
76986 +               rt->first_free = osize;
76987 +       }
76989 +       rt->total = tbl->total;
76991 +       ntfs_free(tbl);
76992 +       return rt;
76993 +}
76994 +
76995 +/*
76996 + * alloc_rsttbl_idx
76997 + *
76998 + * allocates an index from within a previously initialized Restart Table
76999 + */
77000 +static inline void *alloc_rsttbl_idx(struct RESTART_TABLE **tbl)
77001 +{
77002 +       u32 off;
77003 +       __le32 *e;
77004 +       struct RESTART_TABLE *t = *tbl;
77006 +       if (!t->first_free)
77007 +               *tbl = t = extend_rsttbl(t, 16, ~0u);
77009 +       off = le32_to_cpu(t->first_free);
77011 +       /* Dequeue this entry and zero it. */
77012 +       e = Add2Ptr(t, off);
77014 +       t->first_free = *e;
77016 +       memset(e, 0, le16_to_cpu(t->size));
77018 +       *e = RESTART_ENTRY_ALLOCATED_LE;
77020 +       /* If the list is now empty, then we fix the last_free as well. */
77021 +       if (!t->first_free)
77022 +               t->last_free = 0;
77024 +       le16_add_cpu(&t->total, 1);
77026 +       return Add2Ptr(t, off);
77027 +}
77028 +
77029 +/*
77030 + * alloc_rsttbl_from_idx
77031 + *
77032 + * allocates a specific index from within a previously initialized Restart Table
77033 + */
77034 +static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
77035 +{
77036 +       u32 off;
77037 +       __le32 *e;
77038 +       struct RESTART_TABLE *rt = *tbl;
77039 +       u32 bytes = bytes_per_rt(rt);
77040 +       u16 esize = le16_to_cpu(rt->size);
77042 +       /* If the entry is not in the table, we will have to extend the table */
77043 +       if (vbo >= bytes) {
77044 +               /*
77045 +                * extend the size by computing the number of entries between
77046 +                * the existing size and the desired index and adding
77047 +                * 1 to that
77048 +                */
77049 +               u32 bytes2idx = vbo - bytes;
77051 +               /* There should always be an integral number of entries being added */
77052 +               /* Now extend the table */
77053 +               *tbl = rt = extend_rsttbl(rt, bytes2idx / esize + 1, bytes);
77054 +               if (!rt)
77055 +                       return NULL;
77056 +       }
77058 +       /* see if the entry is already allocated, and just return if it is. */
77059 +       e = Add2Ptr(rt, vbo);
77061 +       if (*e == RESTART_ENTRY_ALLOCATED_LE)
77062 +               return e;
77064 +       /*
77065 +        * Walk through the table, looking for the entry we're
77066 +        * interested in and the previous entry
77067 +        */
77068 +       off = le32_to_cpu(rt->first_free);
77069 +       e = Add2Ptr(rt, off);
77071 +       if (off == vbo) {
77072 +               /* this is a match */
77073 +               rt->first_free = *e;
77074 +               goto skip_looking;
77075 +       }
77077 +       /*
77078 +        * need to walk through the list looking for the predecessor of our entry
77079 +        */
77080 +       for (;;) {
77081 +               /* Remember the entry just found */
77082 +               u32 last_off = off;
77083 +               __le32 *last_e = e;
77085 +               /* Should never run out of entries. */
77087 +               /* Look up the next entry in the list */
77088 +               off = le32_to_cpu(*last_e);
77089 +               e = Add2Ptr(rt, off);
77091 +               /* If this is our match we are done */
77092 +               if (off == vbo) {
77093 +                       *last_e = *e;
77095 +                       /* If this was the last entry, we update the table as well */
77096 +                       if (le32_to_cpu(rt->last_free) == off)
77097 +                               rt->last_free = cpu_to_le32(last_off);
77098 +                       break;
77099 +               }
77100 +       }
77102 +skip_looking:
77103 +       /* If the list is now empty, we fix the last_free as well */
77104 +       if (!rt->first_free)
77105 +               rt->last_free = 0;
77107 +       /* Zero this entry */
77108 +       memset(e, 0, esize);
77109 +       *e = RESTART_ENTRY_ALLOCATED_LE;
77111 +       le16_add_cpu(&rt->total, 1);
77113 +       return e;
77114 +}
77115 +
77116 +#define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)
77118 +#define NTFSLOG_WRAPPED 0x00000001
77119 +#define NTFSLOG_MULTIPLE_PAGE_IO 0x00000002
77120 +#define NTFSLOG_NO_LAST_LSN 0x00000004
77121 +#define NTFSLOG_REUSE_TAIL 0x00000010
77122 +#define NTFSLOG_NO_OLDEST_LSN 0x00000020
77123 +
77124 +/*
77125 + * Helper struct to work with NTFS LogFile
77126 + */
77127 +struct ntfs_log {
77128 +       struct ntfs_inode *ni;
77130 +       u32 l_size;
77131 +       u32 sys_page_size;
77132 +       u32 sys_page_mask;
77133 +       u32 page_size;
77134 +       u32 page_mask; // page_size - 1
77135 +       u8 page_bits;
77136 +       struct RECORD_PAGE_HDR *one_page_buf;
77138 +       struct RESTART_TABLE *open_attr_tbl;
77139 +       u32 transaction_id;
77140 +       u32 clst_per_page;
77142 +       u32 first_page;
77143 +       u32 next_page;
77144 +       u32 ra_off;
77145 +       u32 data_off;
77146 +       u32 restart_size;
77147 +       u32 data_size;
77148 +       u16 record_header_len;
77149 +       u64 seq_num;
77150 +       u32 seq_num_bits;
77151 +       u32 file_data_bits;
77152 +       u32 seq_num_mask; /* (1 << file_data_bits) - 1 */
77154 +       struct RESTART_AREA *ra; /* in-memory image of the next restart area */
77155 +       u32 ra_size; /* the usable size of the restart area */
77157 +       /*
77158 +        * If true, then the in-memory restart area is to be written
77159 +        * to the first position on the disk
77160 +        */
77161 +       bool init_ra;
77162 +       bool set_dirty; /* true if we need to set dirty flag */
77164 +       u64 oldest_lsn;
77166 +       u32 oldest_lsn_off;
77167 +       u64 last_lsn;
77169 +       u32 total_avail;
77170 +       u32 total_avail_pages;
77171 +       u32 total_undo_commit;
77172 +       u32 max_current_avail;
77173 +       u32 current_avail;
77174 +       u32 reserved;
77176 +       short major_ver;
77177 +       short minor_ver;
77179 +       u32 l_flags; /* See NTFSLOG_XXX */
77180 +       u32 current_openlog_count; /* On-disk value for open_log_count */
77182 +       struct CLIENT_ID client_id;
77183 +       u32 client_undo_commit;
77184 +};
77185 +
77186 +static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
77187 +{
77188 +       u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3);
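+       /*
+        * The low file_data_bits bits of an lsn are the file offset in
+        * quadwords and the high seq_num_bits bits are the wrap count, so
+        * the shifts strip the sequence number and multiply by 8
+        * (vbo_to_lsn below is the inverse).
+        */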
77190 +       return vbo;
77191 +}
77192 +
77193 +/* compute the offset in the log file of the next log page */
77194 +static inline u32 next_page_off(struct ntfs_log *log, u32 off)
77195 +{
77196 +       off = (off & ~log->sys_page_mask) + log->page_size;
77197 +       return off >= log->l_size ? log->first_page : off;
77198 +}
77199 +
77200 +static inline u32 lsn_to_page_off(struct ntfs_log *log, u64 lsn)
77201 +{
77202 +       return (((u32)lsn) << 3) & log->page_mask;
77203 +}
77204 +
77205 +static inline u64 vbo_to_lsn(struct ntfs_log *log, u32 off, u64 Seq)
77206 +{
77207 +       return (off >> 3) + (Seq << log->file_data_bits);
77208 +}
77209 +
77210 +static inline bool is_lsn_in_file(struct ntfs_log *log, u64 lsn)
77211 +{
77212 +       return lsn >= log->oldest_lsn &&
77213 +              lsn <= le64_to_cpu(log->ra->current_lsn);
77214 +}
77215 +
77216 +static inline u32 hdr_file_off(struct ntfs_log *log,
77217 +                              struct RECORD_PAGE_HDR *hdr)
77218 +{
77219 +       if (log->major_ver < 2)
77220 +               return le64_to_cpu(hdr->rhdr.lsn);
77222 +       return le32_to_cpu(hdr->file_off);
77223 +}
77224 +
77225 +static inline u64 base_lsn(struct ntfs_log *log,
77226 +                          const struct RECORD_PAGE_HDR *hdr, u64 lsn)
77227 +{
77228 +       u64 h_lsn = le64_to_cpu(hdr->rhdr.lsn);
77229 +       u64 ret = (((h_lsn >> log->file_data_bits) +
77230 +                   (lsn < (lsn_to_vbo(log, h_lsn) & ~log->page_mask) ? 1 : 0))
77231 +                  << log->file_data_bits) +
77232 +                 ((((is_log_record_end(hdr) &&
77233 +                     h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn))
77234 +                            ? le16_to_cpu(hdr->record_hdr.next_record_off)
77235 +                            : log->page_size) +
77236 +                   lsn) >>
77237 +                  3);
77239 +       return ret;
77240 +}
77241 +
77242 +static inline bool verify_client_lsn(struct ntfs_log *log,
77243 +                                    const struct CLIENT_REC *client, u64 lsn)
77244 +{
77245 +       return lsn >= le64_to_cpu(client->oldest_lsn) &&
77246 +              lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
77247 +}
77248 +
77249 +struct restart_info {
77250 +       u64 last_lsn;
77251 +       struct RESTART_HDR *r_page;
77252 +       u32 vbo;
77253 +       bool chkdsk_was_run;
77254 +       bool valid_page;
77255 +       bool initialized;
77256 +       bool restart;
77257 +};
77258 +
77259 +static int read_log_page(struct ntfs_log *log, u32 vbo,
77260 +                        struct RECORD_PAGE_HDR **buffer, bool *usa_error)
77261 +{
77262 +       int err = 0;
77263 +       u32 page_idx = vbo >> log->page_bits;
77264 +       u32 page_off = vbo & log->page_mask;
77265 +       u32 bytes = log->page_size - page_off;
77266 +       void *to_free = NULL;
77267 +       u32 page_vbo = page_idx << log->page_bits;
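+       /*
+        * For example, assuming 0x1000-byte log pages, vbo == 0x3210 gives
+        * page_idx == 3, page_off == 0x210, page_vbo == 0x3000 and
+        * bytes == 0xdf0 (the rest of that page).
+        */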
77268 +       struct RECORD_PAGE_HDR *page_buf;
77269 +       struct ntfs_inode *ni = log->ni;
77270 +       bool bBAAD;
77272 +       if (vbo >= log->l_size)
77273 +               return -EINVAL;
77275 +       if (!*buffer) {
77276 +               to_free = ntfs_malloc(bytes);
77277 +               if (!to_free)
77278 +                       return -ENOMEM;
77279 +               *buffer = to_free;
77280 +       }
77282 +       page_buf = page_off ? log->one_page_buf : *buffer;
77284 +       err = ntfs_read_run_nb(ni->mi.sbi, &ni->file.run, page_vbo, page_buf,
77285 +                              log->page_size, NULL);
77286 +       if (err)
77287 +               goto out;
77289 +       if (page_buf->rhdr.sign != NTFS_FFFF_SIGNATURE)
77290 +               ntfs_fix_post_read(&page_buf->rhdr, PAGE_SIZE, false);
77292 +       if (page_buf != *buffer)
77293 +               memcpy(*buffer, Add2Ptr(page_buf, page_off), bytes);
77295 +       bBAAD = page_buf->rhdr.sign == NTFS_BAAD_SIGNATURE;
77297 +       if (usa_error)
77298 +               *usa_error = bBAAD;
77299 +       /* Check that the update sequence array for this page is valid */
77300 +       /* If we don't allow errors, raise an error status */
77301 +       else if (bBAAD)
77302 +               err = -EINVAL;
77304 +out:
77305 +       if (err && to_free) {
77306 +               ntfs_free(to_free);
77307 +               *buffer = NULL;
77308 +       }
77310 +       return err;
77311 +}
77312 +
77313 +/*
77314 + * log_read_rst
77315 + *
77316 + * It walks through 512 blocks of the file looking for a valid restart page header.
77317 + * It will stop the first time we find a valid page header.
77318 + */
77319 +static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
77320 +                       struct restart_info *info)
77321 +{
77322 +       u32 skip, vbo;
77323 +       struct RESTART_HDR *r_page = ntfs_malloc(DefaultLogPageSize);
77325 +       if (!r_page)
77326 +               return -ENOMEM;
77328 +       memset(info, 0, sizeof(struct restart_info));
77330 +       /* Determine which restart area we are looking for */
77331 +       if (first) {
77332 +               vbo = 0;
77333 +               skip = 512;
77334 +       } else {
77335 +               vbo = 512;
77336 +               skip = 0;
77337 +       }
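+       /*
+        * With vbo = 2 * vbo + skip this probes offsets 0, 512, 1024,
+        * 2048, ... for the first area and 512, 1024, 2048, ... for the
+        * second, doubling the candidate page size on every pass.
+        */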
77339 +       /* loop continuously until we succeed */
77340 +       for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
77341 +               bool usa_error;
77342 +               u32 sys_page_size;
77343 +               bool brst, bchk;
77344 +               struct RESTART_AREA *ra;
77346 +               /* Read a page header at the current offset */
77347 +               if (read_log_page(log, vbo, (struct RECORD_PAGE_HDR **)&r_page,
77348 +                                 &usa_error)) {
77349 +                       /* ignore any errors */
77350 +                       continue;
77351 +               }
77353 +               /* exit if the signature is a log record page */
77354 +               if (r_page->rhdr.sign == NTFS_RCRD_SIGNATURE) {
77355 +                       info->initialized = true;
77356 +                       break;
77357 +               }
77359 +               brst = r_page->rhdr.sign == NTFS_RSTR_SIGNATURE;
77360 +               bchk = r_page->rhdr.sign == NTFS_CHKD_SIGNATURE;
77362 +               if (!bchk && !brst) {
77363 +                       if (r_page->rhdr.sign != NTFS_FFFF_SIGNATURE) {
77364 +                               /*
77365 +                                * Remember if the signature does not
77366 +                                * indicate uninitialized file
77367 +                                */
77368 +                               info->initialized = true;
77369 +                       }
77370 +                       continue;
77371 +               }
77373 +               ra = NULL;
77374 +               info->valid_page = false;
77375 +               info->initialized = true;
77376 +               info->vbo = vbo;
77378 +               /* Let's check the restart area if this is a valid page */
77379 +               if (!is_rst_page_hdr_valid(vbo, r_page))
77380 +                       goto check_result;
77381 +               ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
77383 +               if (!is_rst_area_valid(r_page))
77384 +                       goto check_result;
77386 +               /*
77387 +                * We have a valid restart page header and restart area.
77388 +                * If chkdsk was run or we have no clients then we have
77389 +                * no more checking to do
77390 +                */
77391 +               if (bchk || ra->client_idx[1] == LFS_NO_CLIENT_LE) {
77392 +                       info->valid_page = true;
77393 +                       goto check_result;
77394 +               }
77396 +               /* Read the entire restart area */
77397 +               sys_page_size = le32_to_cpu(r_page->sys_page_size);
77398 +               if (DefaultLogPageSize != sys_page_size) {
77399 +                       ntfs_free(r_page);
77400 +                       r_page = ntfs_zalloc(sys_page_size);
77401 +                       if (!r_page)
77402 +                               return -ENOMEM;
77404 +                       if (read_log_page(log, vbo,
77405 +                                         (struct RECORD_PAGE_HDR **)&r_page,
77406 +                                         &usa_error)) {
77407 +                               /* ignore any errors */
77408 +                               ntfs_free(r_page);
77409 +                               r_page = NULL;
77410 +                               continue;
77411 +                       }
77412 +               }
77414 +               if (is_client_area_valid(r_page, usa_error)) {
77415 +                       info->valid_page = true;
77416 +                       ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
77417 +               }
77419 +check_result:
77420 +               /* If chkdsk was run then update the caller's values and return */
77421 +               if (r_page->rhdr.sign == NTFS_CHKD_SIGNATURE) {
77422 +                       info->chkdsk_was_run = true;
77423 +                       info->last_lsn = le64_to_cpu(r_page->rhdr.lsn);
77424 +                       info->restart = true;
77425 +                       info->r_page = r_page;
77426 +                       return 0;
77427 +               }
77429 +               /* If we have a valid page then copy the values we need from it */
77430 +               if (info->valid_page) {
77431 +                       info->last_lsn = le64_to_cpu(ra->current_lsn);
77432 +                       info->restart = true;
77433 +                       info->r_page = r_page;
77434 +                       return 0;
77435 +               }
77436 +       }
77438 +       ntfs_free(r_page);
77440 +       return 0;
77441 +}
77442 +
77443 +/*
77444 + * log_init_pg_hdr
77445 + *
77446 + * init "log" from restart page header
77447 + */
77448 +static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
77449 +                           u32 page_size, u16 major_ver, u16 minor_ver)
77450 +{
77451 +       log->sys_page_size = sys_page_size;
77452 +       log->sys_page_mask = sys_page_size - 1;
77453 +       log->page_size = page_size;
77454 +       log->page_mask = page_size - 1;
77455 +       log->page_bits = blksize_bits(page_size);
77457 +       log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits;
77458 +       if (!log->clst_per_page)
77459 +               log->clst_per_page = 1;
77461 +       log->first_page = major_ver >= 2
77462 +                                 ? 0x22 * page_size
77463 +                                 : ((sys_page_size << 1) + (page_size << 1));
77464 +       log->major_ver = major_ver;
77465 +       log->minor_ver = minor_ver;
77466 +}
77467 +
77468 +/*
77469 + * log_create
77470 + *
77471 + * init "log" in cases when we don't have a restart area to use
77472 + */
77473 +static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn,
77474 +                      u32 open_log_count, bool wrapped, bool use_multi_page)
77475 +{
77476 +       log->l_size = l_size;
77477 +       /* All file offsets must be quadword aligned */
77478 +       log->file_data_bits = blksize_bits(l_size) - 3;
77479 +       log->seq_num_mask = (8 << log->file_data_bits) - 1;
77480 +       log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits;
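+       /*
+        * For example, assuming a 4 MiB log file: blksize_bits() returns
+        * 22, so file_data_bits == 19, seq_num_mask covers all 4 MiB and
+        * the remaining seq_num_bits == 45 high bits of an lsn carry the
+        * sequence (wrap) number.
+        */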
77481 +       log->seq_num = (last_lsn >> log->file_data_bits) + 2;
77482 +       log->next_page = log->first_page;
77483 +       log->oldest_lsn = log->seq_num << log->file_data_bits;
77484 +       log->oldest_lsn_off = 0;
77485 +       log->last_lsn = log->oldest_lsn;
77487 +       log->l_flags |= NTFSLOG_NO_LAST_LSN | NTFSLOG_NO_OLDEST_LSN;
77489 +       /* Set the correct flags for the I/O and indicate if we have wrapped */
77490 +       if (wrapped)
77491 +               log->l_flags |= NTFSLOG_WRAPPED;
77493 +       if (use_multi_page)
77494 +               log->l_flags |= NTFSLOG_MULTIPLE_PAGE_IO;
77496 +       /* Compute the log page values */
77497 +       log->data_off = QuadAlign(
77498 +               offsetof(struct RECORD_PAGE_HDR, fixups) +
77499 +               sizeof(short) * ((log->page_size >> SECTOR_SHIFT) + 1));
77500 +       log->data_size = log->page_size - log->data_off;
77501 +       log->record_header_len = sizeof(struct LFS_RECORD_HDR);
77503 +       /* Remember the different page sizes for reservation */
77504 +       log->reserved = log->data_size - log->record_header_len;
77506 +       /* Compute the restart page values. */
77507 +       log->ra_off = QuadAlign(
77508 +               offsetof(struct RESTART_HDR, fixups) +
77509 +               sizeof(short) * ((log->sys_page_size >> SECTOR_SHIFT) + 1));
77510 +       log->restart_size = log->sys_page_size - log->ra_off;
77511 +       log->ra_size = struct_size(log->ra, clients, 1);
77512 +       log->current_openlog_count = open_log_count;
77514 +       /*
77515 +        * The total available log file space is the number of
77516 +        * log file pages times the space available on each page
77517 +        */
77518 +       log->total_avail_pages = log->l_size - log->first_page;
77519 +       log->total_avail = log->total_avail_pages >> log->page_bits;
77521 +       /*
77522 +        * We assume that we can't use the end of the page less than
77523 +        * the file record size
77524 +        * Then we won't need to reserve more than the caller asks for
77525 +        */
77526 +       log->max_current_avail = log->total_avail * log->reserved;
77527 +       log->total_avail = log->total_avail * log->data_size;
77528 +       log->current_avail = log->max_current_avail;
77529 +}
77530 +
77531 +/*
77532 + * log_create_ra
77533 + *
77534 + * This routine is called to fill a restart area from the values stored in 'log'
77535 + */
77536 +static struct RESTART_AREA *log_create_ra(struct ntfs_log *log)
77537 +{
77538 +       struct CLIENT_REC *cr;
77539 +       struct RESTART_AREA *ra = ntfs_zalloc(log->restart_size);
77541 +       if (!ra)
77542 +               return NULL;
77544 +       ra->current_lsn = cpu_to_le64(log->last_lsn);
77545 +       ra->log_clients = cpu_to_le16(1);
77546 +       ra->client_idx[1] = LFS_NO_CLIENT_LE;
77547 +       if (log->l_flags & NTFSLOG_MULTIPLE_PAGE_IO)
77548 +               ra->flags = RESTART_SINGLE_PAGE_IO;
77549 +       ra->seq_num_bits = cpu_to_le32(log->seq_num_bits);
77550 +       ra->ra_len = cpu_to_le16(log->ra_size);
77551 +       ra->client_off = cpu_to_le16(offsetof(struct RESTART_AREA, clients));
77552 +       ra->l_size = cpu_to_le64(log->l_size);
77553 +       ra->rec_hdr_len = cpu_to_le16(log->record_header_len);
77554 +       ra->data_off = cpu_to_le16(log->data_off);
77555 +       ra->open_log_count = cpu_to_le32(log->current_openlog_count + 1);
77557 +       cr = ra->clients;
77559 +       cr->prev_client = LFS_NO_CLIENT_LE;
77560 +       cr->next_client = LFS_NO_CLIENT_LE;
77562 +       return ra;
77563 +}
77564 +
77565 +static u32 final_log_off(struct ntfs_log *log, u64 lsn, u32 data_len)
77566 +{
77567 +       u32 base_vbo = lsn << 3;
77568 +       u32 final_log_off = (base_vbo & log->seq_num_mask) & ~log->page_mask;
77569 +       u32 page_off = base_vbo & log->page_mask;
77570 +       u32 tail = log->page_size - page_off;
77572 +       page_off -= 1;
77574 +       /* Add the length of the header */
77575 +       data_len += log->record_header_len;
77577 +       /*
77578 +        * If this lsn is contained in this log page we are done
77579 +        * Otherwise we need to walk through several log pages
77580 +        */
77581 +       if (data_len > tail) {
77582 +               data_len -= tail;
77583 +               tail = log->data_size;
77584 +               page_off = log->data_off - 1;
77586 +               for (;;) {
77587 +                       final_log_off = next_page_off(log, final_log_off);
77589 +                       /* We are done if the remaining bytes fit on this page */
77590 +                       if (data_len <= tail)
77591 +                               break;
77592 +                       data_len -= tail;
77593 +               }
77594 +       }
77596 +       /*
77597 +        * We add the remaining bytes to our starting position on this page
77598 +        * and then add that value to the file offset of this log page
77599 +        */
77600 +       return final_log_off + data_len + page_off;
77601 +}
77602 +
77603 +static int next_log_lsn(struct ntfs_log *log, const struct LFS_RECORD_HDR *rh,
77604 +                       u64 *lsn)
77605 +{
77606 +       int err;
77607 +       u64 this_lsn = le64_to_cpu(rh->this_lsn);
77608 +       u32 vbo = lsn_to_vbo(log, this_lsn);
77609 +       u32 end =
77610 +               final_log_off(log, this_lsn, le32_to_cpu(rh->client_data_len));
77611 +       u32 hdr_off = end & ~log->sys_page_mask;
77612 +       u64 seq = this_lsn >> log->file_data_bits;
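+       /*
+        * end is the file offset just past this record's data, hdr_off is
+        * the start of the system page containing it and seq is the
+        * sequence number this lsn was written under.
+        */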
77613 +       struct RECORD_PAGE_HDR *page = NULL;
77615 +       /* Remember if we wrapped */
77616 +       if (end <= vbo)
77617 +               seq += 1;
77619 +       /* log page header for this page */
77620 +       err = read_log_page(log, hdr_off, &page, NULL);
77621 +       if (err)
77622 +               return err;
77624 +       /*
77625 +        * If the lsn we were given was not the last lsn on this page,
77626 +        * then the starting offset for the next lsn is on a quad word
77627 +        * boundary following the last file offset for the current lsn
77628 +        * Otherwise the file offset is the start of the data on the next page
77629 +        */
77630 +       if (this_lsn == le64_to_cpu(page->rhdr.lsn)) {
77631 +               /* If we wrapped, we need to increment the sequence number */
77632 +               hdr_off = next_page_off(log, hdr_off);
77633 +               if (hdr_off == log->first_page)
77634 +                       seq += 1;
77636 +               vbo = hdr_off + log->data_off;
77637 +       } else {
77638 +               vbo = QuadAlign(end);
77639 +       }
77641 +       /* Compute the lsn based on the file offset and the sequence count */
77642 +       *lsn = vbo_to_lsn(log, vbo, seq);
77644 +       /*
77645 +        * If this lsn is within the legal range for the file, we return true
77646 +        * Otherwise false indicates that there are no more lsn's
77647 +        */
77648 +       if (!is_lsn_in_file(log, *lsn))
77649 +               *lsn = 0;
77651 +       ntfs_free(page);
77653 +       return 0;
77654 +}
77655 +
77656 +/*
77657 + * current_log_avail
77658 + *
77659 + * calculate the number of bytes available for log records
77660 + */
77661 +static u32 current_log_avail(struct ntfs_log *log)
77662 +{
77663 +       u32 oldest_off, next_free_off, free_bytes;
77665 +       if (log->l_flags & NTFSLOG_NO_LAST_LSN) {
77666 +               /* The entire file is available */
77667 +               return log->max_current_avail;
77668 +       }
77670 +       /*
77671 +        * If there is a last lsn in the restart area then we know that we will
77672 +        * have to compute the free range
77673 +        * If there is no oldest lsn then start at the first page of the file
77674 +        */
77675 +       oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN)
77676 +                            ? log->first_page
77677 +                            : (log->oldest_lsn_off & ~log->sys_page_mask);
77679 +       /*
77680 +        * We will use the next log page offset to compute the next free page.
77681 +        * If we are going to reuse this page go to the next page
77682 +        * If we are at the first page then use the end of the file
77683 +        */
77684 +       next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL)
77685 +                               ? log->next_page + log->page_size
77686 +                       : log->next_page == log->first_page ? log->l_size
77687 +                                                           : log->next_page;
77689 +       /* If the two offsets are the same then there is no available space */
77690 +       if (oldest_off == next_free_off)
77691 +               return 0;
77692 +       /*
77693 +        * If the free offset follows the oldest offset then subtract
77694 +        * this range from the total available pages
77695 +        */
77696 +       free_bytes =
77697 +               oldest_off < next_free_off
77698 +                       ? log->total_avail_pages - (next_free_off - oldest_off)
77699 +                       : oldest_off - next_free_off;
77701 +       free_bytes >>= log->page_bits;
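+       /* free_bytes is now a page count; scale by the usable bytes per page */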
77702 +       return free_bytes * log->reserved;
77703 +}
77704 +
77705 +static bool check_subseq_log_page(struct ntfs_log *log,
77706 +                                 const struct RECORD_PAGE_HDR *rp, u32 vbo,
77707 +                                 u64 seq)
77708 +{
77709 +       u64 lsn_seq;
77710 +       const struct NTFS_RECORD_HEADER *rhdr = &rp->rhdr;
77711 +       u64 lsn = le64_to_cpu(rhdr->lsn);
77713 +       if (rhdr->sign == NTFS_FFFF_SIGNATURE || !rhdr->sign)
77714 +               return false;
77716 +       /*
77717 +        * If the last lsn on the page was written after the page
77718 +        * that caused the original error then we have a fatal error
77719 +        */
77720 +       lsn_seq = lsn >> log->file_data_bits;
77722 +       /*
77723 +        * If the sequence number for the lsn in the page is equal to or
77724 +        * greater than the lsn we expect, then this is a subsequent write
77725 +        */
77726 +       return lsn_seq >= seq ||
77727 +              (lsn_seq == seq - 1 && log->first_page == vbo &&
77728 +               vbo != (lsn_to_vbo(log, lsn) & ~log->page_mask));
77729 +}
77730 +
77731 +/*
77732 + * last_log_lsn
77733 + *
77734 + * This routine walks through the log pages for a file, searching for the
77735 + * last log page written to the file
77736 + */
77737 +static int last_log_lsn(struct ntfs_log *log)
77738 +{
77739 +       int err;
77740 +       bool usa_error = false;
77741 +       bool replace_page = false;
77742 +       bool reuse_page = log->l_flags & NTFSLOG_REUSE_TAIL;
77743 +       bool wrapped_file, wrapped;
77745 +       u32 page_cnt = 1, page_pos = 1;
77746 +       u32 page_off = 0, page_off1 = 0, saved_off = 0;
77747 +       u32 final_off, second_off, final_off_prev = 0, second_off_prev = 0;
77748 +       u32 first_file_off = 0, second_file_off = 0;
77749 +       u32 part_io_count = 0;
77750 +       u32 tails = 0;
77751 +       u32 this_off, curpage_off, nextpage_off, remain_pages;
77753 +       u64 expected_seq, seq_base = 0, lsn_base = 0;
77754 +       u64 best_lsn, best_lsn1, best_lsn2;
77755 +       u64 lsn_cur, lsn1, lsn2;
77756 +       u64 last_ok_lsn = reuse_page ? log->last_lsn : 0;
77758 +       u16 cur_pos, best_page_pos;
77760 +       struct RECORD_PAGE_HDR *page = NULL;
77761 +       struct RECORD_PAGE_HDR *tst_page = NULL;
77762 +       struct RECORD_PAGE_HDR *first_tail = NULL;
77763 +       struct RECORD_PAGE_HDR *second_tail = NULL;
77764 +       struct RECORD_PAGE_HDR *tail_page = NULL;
77765 +       struct RECORD_PAGE_HDR *second_tail_prev = NULL;
77766 +       struct RECORD_PAGE_HDR *first_tail_prev = NULL;
77767 +       struct RECORD_PAGE_HDR *page_bufs = NULL;
77768 +       struct RECORD_PAGE_HDR *best_page;
77770 +       if (log->major_ver >= 2) {
77771 +               final_off = 0x02 * log->page_size;
77772 +               second_off = 0x12 * log->page_size;
77774 +               // 0x10 == 0x12 - 0x2
77775 +               page_bufs = ntfs_malloc(log->page_size * 0x10);
77776 +               if (!page_bufs)
77777 +                       return -ENOMEM;
77778 +       } else {
77779 +               second_off = log->first_page - log->page_size;
77780 +               final_off = second_off - log->page_size;
77781 +       }
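+       /*
+        * For v2.x logs the tail copies are the 0x10 pages starting at
+        * page 2 (first copy) and at page 0x12 (second copy); for older
+        * logs the two single tail pages sit just below first_page.
+        */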
77783 +next_tail:
77784 +       /* Read second tail page (at pos 3/0x12000) */
77785 +       if (read_log_page(log, second_off, &second_tail, &usa_error) ||
77786 +           usa_error || second_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
77787 +               ntfs_free(second_tail);
77788 +               second_tail = NULL;
77789 +               second_file_off = 0;
77790 +               lsn2 = 0;
77791 +       } else {
77792 +               second_file_off = hdr_file_off(log, second_tail);
77793 +               lsn2 = le64_to_cpu(second_tail->record_hdr.last_end_lsn);
77794 +       }
77796 +       /* Read first tail page (at pos 2/0x2000) */
77797 +       if (read_log_page(log, final_off, &first_tail, &usa_error) ||
77798 +           usa_error || first_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
77799 +               ntfs_free(first_tail);
77800 +               first_tail = NULL;
77801 +               first_file_off = 0;
77802 +               lsn1 = 0;
77803 +       } else {
77804 +               first_file_off = hdr_file_off(log, first_tail);
77805 +               lsn1 = le64_to_cpu(first_tail->record_hdr.last_end_lsn);
77806 +       }
77808 +       if (log->major_ver < 2) {
77809 +               int best_page;
77811 +               first_tail_prev = first_tail;
77812 +               final_off_prev = first_file_off;
77813 +               second_tail_prev = second_tail;
77814 +               second_off_prev = second_file_off;
77815 +               tails = 1;
77817 +               if (!first_tail && !second_tail)
77818 +                       goto tail_read;
77820 +               if (first_tail && second_tail)
77821 +                       best_page = lsn1 < lsn2 ? 1 : 0;
77822 +               else if (first_tail)
77823 +                       best_page = 0;
77824 +               else
77825 +                       best_page = 1;
77827 +               page_off = best_page ? second_file_off : first_file_off;
77828 +               seq_base = (best_page ? lsn2 : lsn1) >> log->file_data_bits;
77829 +               goto tail_read;
77830 +       }
77832 +       best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0;
77833 +       best_lsn2 =
77834 +               second_tail ? base_lsn(log, second_tail, second_file_off) : 0;
77836 +       if (first_tail && second_tail) {
77837 +               if (best_lsn1 > best_lsn2) {
77838 +                       best_lsn = best_lsn1;
77839 +                       best_page = first_tail;
77840 +                       this_off = first_file_off;
77841 +               } else {
77842 +                       best_lsn = best_lsn2;
77843 +                       best_page = second_tail;
77844 +                       this_off = second_file_off;
77845 +               }
77846 +       } else if (first_tail) {
77847 +               best_lsn = best_lsn1;
77848 +               best_page = first_tail;
77849 +               this_off = first_file_off;
77850 +       } else if (second_tail) {
77851 +               best_lsn = best_lsn2;
77852 +               best_page = second_tail;
77853 +               this_off = second_file_off;
77854 +       } else {
77855 +               goto tail_read;
77856 +       }
77858 +       best_page_pos = le16_to_cpu(best_page->page_pos);
77860 +       if (!tails) {
77861 +               if (best_page_pos == page_pos) {
77862 +                       seq_base = best_lsn >> log->file_data_bits;
77863 +                       saved_off = page_off = le32_to_cpu(best_page->file_off);
77864 +                       lsn_base = best_lsn;
77866 +                       memmove(page_bufs, best_page, log->page_size);
77868 +                       page_cnt = le16_to_cpu(best_page->page_count);
77869 +                       if (page_cnt > 1)
77870 +                               page_pos += 1;
77872 +                       tails = 1;
77873 +               }
77874 +       } else if (seq_base == (best_lsn >> log->file_data_bits) &&
77875 +                  saved_off + log->page_size == this_off &&
77876 +                  lsn_base < best_lsn &&
77877 +                  (page_pos != page_cnt || best_page_pos == page_pos ||
77878 +                   best_page_pos == 1) &&
77879 +                  (page_pos >= page_cnt || best_page_pos == page_pos)) {
77880 +               u16 bppc = le16_to_cpu(best_page->page_count);
77882 +               saved_off += log->page_size;
77883 +               lsn_base = best_lsn;
77885 +               memmove(Add2Ptr(page_bufs, tails * log->page_size), best_page,
77886 +                       log->page_size);
77888 +               tails += 1;
77890 +               if (best_page_pos != bppc) {
77891 +                       page_cnt = bppc;
77892 +                       page_pos = best_page_pos;
77894 +                       if (page_cnt > 1)
77895 +                               page_pos += 1;
77896 +               } else {
77897 +                       page_pos = page_cnt = 1;
77898 +               }
77899 +       } else {
77900 +               ntfs_free(first_tail);
77901 +               ntfs_free(second_tail);
77902 +               goto tail_read;
77903 +       }
77905 +       ntfs_free(first_tail_prev);
77906 +       first_tail_prev = first_tail;
77907 +       final_off_prev = first_file_off;
77908 +       first_tail = NULL;
77910 +       ntfs_free(second_tail_prev);
77911 +       second_tail_prev = second_tail;
77912 +       second_off_prev = second_file_off;
77913 +       second_tail = NULL;
77915 +       final_off += log->page_size;
77916 +       second_off += log->page_size;
77918 +       if (tails < 0x10)
77919 +               goto next_tail;
77920 +tail_read:
77921 +       first_tail = first_tail_prev;
77922 +       final_off = final_off_prev;
77924 +       second_tail = second_tail_prev;
77925 +       second_off = second_off_prev;
77927 +       page_cnt = page_pos = 1;
77929 +       curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off)
77930 +                                              : log->next_page;
77932 +       wrapped_file =
77933 +               curpage_off == log->first_page &&
77934 +               !(log->l_flags & (NTFSLOG_NO_LAST_LSN | NTFSLOG_REUSE_TAIL));
77936 +       expected_seq = wrapped_file ? (log->seq_num + 1) : log->seq_num;
77938 +       nextpage_off = curpage_off;
77940 +next_page:
77941 +       tail_page = NULL;
77942 +       /* Read the next log page */
77943 +       err = read_log_page(log, curpage_off, &page, &usa_error);
77945 +       /* Compute the next log page offset in the file */
77946 +       nextpage_off = next_page_off(log, curpage_off);
77947 +       wrapped = nextpage_off == log->first_page;
77949 +       if (tails > 1) {
77950 +               struct RECORD_PAGE_HDR *cur_page =
77951 +                       Add2Ptr(page_bufs, curpage_off - page_off);
77953 +               if (curpage_off == saved_off) {
77954 +                       tail_page = cur_page;
77955 +                       goto use_tail_page;
77956 +               }
77958 +               if (page_off > curpage_off || curpage_off >= saved_off)
77959 +                       goto use_tail_page;
77961 +               if (page_off1)
77962 +                       goto use_cur_page;
77964 +               if (!err && !usa_error &&
77965 +                   page->rhdr.sign == NTFS_RCRD_SIGNATURE &&
77966 +                   cur_page->rhdr.lsn == page->rhdr.lsn &&
77967 +                   cur_page->record_hdr.next_record_off ==
77968 +                           page->record_hdr.next_record_off &&
77969 +                   ((page_pos == page_cnt &&
77970 +                     le16_to_cpu(page->page_pos) == 1) ||
77971 +                    (page_pos != page_cnt &&
77972 +                     le16_to_cpu(page->page_pos) == page_pos + 1 &&
77973 +                     le16_to_cpu(page->page_count) == page_cnt))) {
77974 +                       cur_page = NULL;
77975 +                       goto use_tail_page;
77976 +               }
77978 +               page_off1 = page_off;
77980 +use_cur_page:
77982 +               lsn_cur = le64_to_cpu(cur_page->rhdr.lsn);
77984 +               if (last_ok_lsn !=
77985 +                           le64_to_cpu(cur_page->record_hdr.last_end_lsn) &&
77986 +                   ((lsn_cur >> log->file_data_bits) +
77987 +                    ((curpage_off <
77988 +                      (lsn_to_vbo(log, lsn_cur) & ~log->page_mask))
77989 +                             ? 1
77990 +                             : 0)) != expected_seq) {
77991 +                       goto check_tail;
77992 +               }
77994 +               if (!is_log_record_end(cur_page)) {
77995 +                       tail_page = NULL;
77996 +                       last_ok_lsn = lsn_cur;
77997 +                       goto next_page_1;
77998 +               }
78000 +               log->seq_num = expected_seq;
78001 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
78002 +               log->last_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
78003 +               log->ra->current_lsn = cur_page->record_hdr.last_end_lsn;
78005 +               if (log->record_header_len <=
78006 +                   log->page_size -
78007 +                           le16_to_cpu(cur_page->record_hdr.next_record_off)) {
78008 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
78009 +                       log->next_page = curpage_off;
78010 +               } else {
78011 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
78012 +                       log->next_page = nextpage_off;
78013 +               }
78015 +               if (wrapped_file)
78016 +                       log->l_flags |= NTFSLOG_WRAPPED;
78018 +               last_ok_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
78019 +               goto next_page_1;
78020 +       }
78022 +       /*
78023 +        * If we are at the expected first page of a transfer check to see
78024 +        * if either tail copy is at this offset
78025 +        * If this page is the last page of a transfer, check if we wrote
78026 +        * a subsequent tail copy
78027 +        */
78028 +       if (page_cnt == page_pos || page_cnt == page_pos + 1) {
78029 +               /*
78030 +                * Check if the offset matches either the first or second
78031 +                * tail copy. It is possible it will match both
78032 +                */
78033 +               if (curpage_off == final_off)
78034 +                       tail_page = first_tail;
78036 +               /*
78037 +                * If we already matched on the first page then
78038 +                * check the ending lsn's.
78039 +                */
78040 +               if (curpage_off == second_off) {
78041 +                       if (!tail_page ||
78042 +                           (second_tail &&
78043 +                            le64_to_cpu(second_tail->record_hdr.last_end_lsn) >
78044 +                                    le64_to_cpu(first_tail->record_hdr
78045 +                                                        .last_end_lsn))) {
78046 +                               tail_page = second_tail;
78047 +                       }
78048 +               }
78049 +       }
78051 +use_tail_page:
78052 +       if (tail_page) {
78053 +               /* we have a candidate for a tail copy */
78054 +               lsn_cur = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
78056 +               if (last_ok_lsn < lsn_cur) {
78057 +                       /*
78058 +                        * If the sequence number is not expected,
78059 +                        * then don't use the tail copy
78060 +                        */
78061 +                       if (expected_seq != (lsn_cur >> log->file_data_bits))
78062 +                               tail_page = NULL;
78063 +               } else if (last_ok_lsn > lsn_cur) {
78064 +                       /*
78065 +                        * If the last lsn is greater than the one on
78066 +                        * this page then forget this tail
78067 +                        */
78068 +                       tail_page = NULL;
78069 +               }
78070 +       }
78072 +       /* If we have an error on the current page, we will break out of this loop */
78073 +       if (err || usa_error)
78074 +               goto check_tail;
78076 +       /*
78077 +        * Done if the last lsn on this page doesn't match the previous known
78078 +        * last lsn or the sequence number is not expected
78079 +        */
78080 +       lsn_cur = le64_to_cpu(page->rhdr.lsn);
78081 +       if (last_ok_lsn != lsn_cur &&
78082 +           expected_seq != (lsn_cur >> log->file_data_bits)) {
78083 +               goto check_tail;
78084 +       }
78086 +       /*
78087 +        * Check that the page position and page count values are correct
78088 +        * If this is the first page of a transfer the position must be 1
78089 +        * and the count will be unknown
78090 +        */
78091 +       if (page_cnt == page_pos) {
78092 +               if (page->page_pos != cpu_to_le16(1) &&
78093 +                   (!reuse_page || page->page_pos != page->page_count)) {
78094 +                       /*
78095 +                        * If the current page is the first page we are
78096 +                        * looking at and we are reusing this page then
78097 +                        * it can be either the first or last page of a
78098 +                        * transfer. Otherwise it can only be the first.
78099 +                        */
78100 +                       goto check_tail;
78101 +               }
78102 +       } else if (le16_to_cpu(page->page_count) != page_cnt ||
78103 +                  le16_to_cpu(page->page_pos) != page_pos + 1) {
78104 +               /*
78105 +                * The page position better be 1 more than the last page
78106 +                * position and the page count better match
78107 +                */
78108 +               goto check_tail;
78109 +       }
78111 +       /*
78112 +        * We have a valid page in the file and may have a valid page in
78113 +        * the tail copy area
78114 +        * If the tail page was written after the page in the file then
78115 +        * break out of the loop
78116 +        */
78117 +       if (tail_page &&
78118 +           le64_to_cpu(tail_page->record_hdr.last_end_lsn) > lsn_cur) {
78119 +               /* Remember if we will replace the page */
78120 +               replace_page = true;
78121 +               goto check_tail;
78122 +       }
78124 +       tail_page = NULL;
78126 +       if (is_log_record_end(page)) {
78127 +               /*
78128 +                * Since we have read this page we know the sequence number
78129 +                * is the same as our expected value
78130 +                */
78131 +               log->seq_num = expected_seq;
78132 +               log->last_lsn = le64_to_cpu(page->record_hdr.last_end_lsn);
78133 +               log->ra->current_lsn = page->record_hdr.last_end_lsn;
78134 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
78136 +               /*
78137 +                * If there is room on this page for another header then
78138 +                * remember we want to reuse the page
78139 +                */
78140 +               if (log->record_header_len <=
78141 +                   log->page_size -
78142 +                           le16_to_cpu(page->record_hdr.next_record_off)) {
78143 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
78144 +                       log->next_page = curpage_off;
78145 +               } else {
78146 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
78147 +                       log->next_page = nextpage_off;
78148 +               }
78150 +               /* Remember if we wrapped the log file */
78151 +               if (wrapped_file)
78152 +                       log->l_flags |= NTFSLOG_WRAPPED;
78153 +       }
78155 +       /*
78156 +        * Remember the last page count and position.
78157 +        * Also remember the last known lsn
78158 +        */
78159 +       page_cnt = le16_to_cpu(page->page_count);
78160 +       page_pos = le16_to_cpu(page->page_pos);
78161 +       last_ok_lsn = le64_to_cpu(page->rhdr.lsn);
78163 +next_page_1:
78165 +       if (wrapped) {
78166 +               expected_seq += 1;
78167 +               wrapped_file = 1;
78168 +       }
78170 +       curpage_off = nextpage_off;
78171 +       ntfs_free(page);
78172 +       page = NULL;
78173 +       reuse_page = 0;
78174 +       goto next_page;
78176 +check_tail:
78177 +       if (tail_page) {
78178 +               log->seq_num = expected_seq;
78179 +               log->last_lsn = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
78180 +               log->ra->current_lsn = tail_page->record_hdr.last_end_lsn;
78181 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
78183 +               if (log->page_size -
78184 +                           le16_to_cpu(
78185 +                                   tail_page->record_hdr.next_record_off) >=
78186 +                   log->record_header_len) {
78187 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
78188 +                       log->next_page = curpage_off;
78189 +               } else {
78190 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
78191 +                       log->next_page = nextpage_off;
78192 +               }
78194 +               if (wrapped)
78195 +                       log->l_flags |= NTFSLOG_WRAPPED;
78196 +       }
78198 +       /* Remember that the partial IO will start at the next page */
78199 +       second_off = nextpage_off;
78201 +       /*
78202 +        * If the next page is the first page of the file then update
78203 +        * the sequence number for log records which begin on the next page
78204 +        */
78205 +       if (wrapped)
78206 +               expected_seq += 1;
78208 +       /*
78209 +        * If we have a tail copy or are performing single page I/O we can
78210 +        * immediately look at the next page
78211 +        */
78212 +       if (replace_page || (log->ra->flags & RESTART_SINGLE_PAGE_IO)) {
78213 +               page_cnt = 2;
78214 +               page_pos = 1;
78215 +               goto check_valid;
78216 +       }
78218 +       if (page_pos != page_cnt)
78219 +               goto check_valid;
78220 +       /*
78221 +        * If the next page causes us to wrap to the beginning of the log
78222 +        * file then we know which page to check next.
78223 +        */
78224 +       if (wrapped) {
78225 +               page_cnt = 2;
78226 +               page_pos = 1;
78227 +               goto check_valid;
78228 +       }
78230 +       cur_pos = 2;
78232 +next_test_page:
78233 +       ntfs_free(tst_page);
78234 +       tst_page = NULL;
78236 +       /* Walk through the file, reading log pages */
78237 +       err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
78239 +       /*
78240 +        * If we get a USA error then assume that we correctly found
78241 +        * the end of the original transfer
78242 +        */
78243 +       if (usa_error)
78244 +               goto file_is_valid;
78246 +       /*
78247 +        * If we were able to read the page, we examine it to see if it
78248 +        * is the same or different Io block
78249 +        */
78250 +       if (err)
78251 +               goto next_test_page_1;
78253 +       if (le16_to_cpu(tst_page->page_pos) == cur_pos &&
78254 +           check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
78255 +               page_cnt = le16_to_cpu(tst_page->page_count) + 1;
78256 +               page_pos = le16_to_cpu(tst_page->page_pos);
78257 +               goto check_valid;
78258 +       } else {
78259 +               goto file_is_valid;
78260 +       }
78262 +next_test_page_1:
78264 +       nextpage_off = next_page_off(log, curpage_off);
78265 +       wrapped = nextpage_off == log->first_page;
78267 +       if (wrapped) {
78268 +               expected_seq += 1;
78269 +               page_cnt = 2;
78270 +               page_pos = 1;
78271 +       }
78273 +       cur_pos += 1;
78274 +       part_io_count += 1;
78275 +       if (!wrapped)
78276 +               goto next_test_page;
78278 +check_valid:
78279 +       /* Skip over the remaining pages this transfer */
78280 +       remain_pages = page_cnt - page_pos - 1;
78281 +       part_io_count += remain_pages;
78283 +       while (remain_pages--) {
78284 +               nextpage_off = next_page_off(log, curpage_off);
78285 +               wrapped = nextpage_off == log->first_page;
78287 +               if (wrapped)
78288 +                       expected_seq += 1;
78289 +       }
78291 +       /* Call our routine to check this log page */
78292 +       ntfs_free(tst_page);
78293 +       tst_page = NULL;
78295 +       err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
78296 +       if (!err && !usa_error &&
78297 +           check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
78298 +               err = -EINVAL;
78299 +               goto out;
78300 +       }
78302 +file_is_valid:
78304 +       /* We have a valid file */
78305 +       if (page_off1 || tail_page) {
78306 +               struct RECORD_PAGE_HDR *tmp_page;
78308 +               if (sb_rdonly(log->ni->mi.sbi->sb)) {
78309 +                       err = -EROFS;
78310 +                       goto out;
78311 +               }
78313 +               if (page_off1) {
78314 +                       tmp_page = Add2Ptr(page_bufs, page_off1 - page_off);
78315 +                       tails -= (page_off1 - page_off) / log->page_size;
78316 +                       if (!tail_page)
78317 +                               tails -= 1;
78318 +               } else {
78319 +                       tmp_page = tail_page;
78320 +                       tails = 1;
78321 +               }
78323 +               while (tails--) {
78324 +                       u64 off = hdr_file_off(log, tmp_page);
78326 +                       if (!page) {
78327 +                               page = ntfs_malloc(log->page_size);
78328 +                               if (!page)
78329 +                                       return -ENOMEM;
78330 +                       }
78332 +                       /*
78333 +                        * Correct the page and copy the data from this
78334 +                        * page into it, then flush it to disk.
78335 +                        */
78336 +                       memcpy(page, tmp_page, log->page_size);
78338 +                       /* Fill the last flushed lsn value and flush the page */
78339 +                       if (log->major_ver < 2)
78340 +                               page->rhdr.lsn = page->record_hdr.last_end_lsn;
78341 +                       else
78342 +                               page->file_off = 0;
78344 +                       page->page_pos = page->page_count = cpu_to_le16(1);
78346 +                       ntfs_fix_pre_write(&page->rhdr, log->page_size);
78348 +                       err = ntfs_sb_write_run(log->ni->mi.sbi,
78349 +                                               &log->ni->file.run, off, page,
78350 +                                               log->page_size);
78352 +                       if (err)
78353 +                               goto out;
78355 +                       if (part_io_count && second_off == off) {
78356 +                               second_off += log->page_size;
78357 +                               part_io_count -= 1;
78358 +                       }
78360 +                       tmp_page = Add2Ptr(tmp_page, log->page_size);
78361 +               }
78362 +       }
78364 +       if (part_io_count) {
78365 +               if (sb_rdonly(log->ni->mi.sbi->sb)) {
78366 +                       err = -EROFS;
78367 +                       goto out;
78368 +               }
78369 +       }
78371 +out:
78372 +       ntfs_free(second_tail);
78373 +       ntfs_free(first_tail);
78374 +       ntfs_free(page);
78375 +       ntfs_free(tst_page);
78376 +       ntfs_free(page_bufs);
78378 +       return err;
78379 +}
78380 +
78381 +/*
78382 + * read_log_rec_buf
78383 + *
78384 + * copies a log record from the file to a buffer
78385 + * The log record may span several log pages and may even wrap the file
78386 + */
78387 +static int read_log_rec_buf(struct ntfs_log *log,
78388 +                           const struct LFS_RECORD_HDR *rh, void *buffer)
78389 +{
78390 +       int err;
78391 +       struct RECORD_PAGE_HDR *ph = NULL;
78392 +       u64 lsn = le64_to_cpu(rh->this_lsn);
78393 +       u32 vbo = lsn_to_vbo(log, lsn) & ~log->page_mask;
78394 +       u32 off = lsn_to_page_off(log, lsn) + log->record_header_len;
78395 +       u32 data_len = le32_to_cpu(rh->client_data_len);
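+       /*
+        * vbo is the page-aligned offset of the log page holding this
+        * lsn; off is the offset of the client data inside that page,
+        * just past the LFS record header.
+        */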
78397 +       /*
78398 +        * While there are more bytes to transfer,
78399 +        * keep attempting the read.
78400 +        */
78401 +       for (;;) {
78402 +               bool usa_error;
78403 +               u32 tail = log->page_size - off;
78405 +               if (tail >= data_len)
78406 +                       tail = data_len;
78408 +               data_len -= tail;
78410 +               err = read_log_page(log, vbo, &ph, &usa_error);
78411 +               if (err)
78412 +                       goto out;
78414 +               /*
78415 +                * The last lsn on this page must be greater than or
78416 +                * equal to the lsn we are copying.
78417 +                */
78418 +               if (lsn > le64_to_cpu(ph->rhdr.lsn)) {
78419 +                       err = -EINVAL;
78420 +                       goto out;
78421 +               }
78423 +               memcpy(buffer, Add2Ptr(ph, off), tail);
78425 +               /* If there are no more bytes to transfer, we exit the loop */
78426 +               if (!data_len) {
78427 +                       if (!is_log_record_end(ph) ||
78428 +                           lsn > le64_to_cpu(ph->record_hdr.last_end_lsn)) {
78429 +                               err = -EINVAL;
78430 +                               goto out;
78431 +                       }
78432 +                       break;
78433 +               }
78435 +               if (ph->rhdr.lsn == ph->record_hdr.last_end_lsn ||
78436 +                   lsn > le64_to_cpu(ph->rhdr.lsn)) {
78437 +                       err = -EINVAL;
78438 +                       goto out;
78439 +               }
78441 +               vbo = next_page_off(log, vbo);
78442 +               off = log->data_off;
78444 +               /*
78445 +                * Adjust our pointer into the user's buffer to
78446 +                * transfer the next block to.
78447 +                */
78448 +               buffer = Add2Ptr(buffer, tail);
78449 +       }
78451 +out:
78452 +       ntfs_free(ph);
78453 +       return err;
78454 +}
78455 +
78456 +static int read_rst_area(struct ntfs_log *log, struct NTFS_RESTART **rst_,
78457 +                        u64 *lsn)
78458 +{
78459 +       int err;
78460 +       struct LFS_RECORD_HDR *rh = NULL;
78461 +       const struct CLIENT_REC *cr =
78462 +               Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
78463 +       u64 lsnr, lsnc = le64_to_cpu(cr->restart_lsn);
78464 +       u32 len;
78465 +       struct NTFS_RESTART *rst;
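+       /*
+        * lsnc is the restart lsn recorded in the client record; the log
+        * record it points at must exist and carry the same lsn.
+        */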
78467 +       *lsn = 0;
78468 +       *rst_ = NULL;
78470 +       /* If the client doesn't have a restart area, exit now */
78471 +       if (!lsnc)
78472 +               return 0;
78474 +       err = read_log_page(log, lsn_to_vbo(log, lsnc),
78475 +                           (struct RECORD_PAGE_HDR **)&rh, NULL);
78476 +       if (err)
78477 +               return err;
78479 +       rst = NULL;
78480 +       lsnr = le64_to_cpu(rh->this_lsn);
78482 +       if (lsnc != lsnr) {
78483 +               /* If the lsn values don't match, then the disk is corrupt */
78484 +               err = -EINVAL;
78485 +               goto out;
78486 +       }
78488 +       *lsn = lsnr;
78489 +       len = le32_to_cpu(rh->client_data_len);
78491 +       if (!len) {
78492 +               err = 0;
78493 +               goto out;
78494 +       }
78496 +       if (len < sizeof(struct NTFS_RESTART)) {
78497 +               err = -EINVAL;
78498 +               goto out;
78499 +       }
78501 +       rst = ntfs_malloc(len);
78502 +       if (!rst) {
78503 +               err = -ENOMEM;
78504 +               goto out;
78505 +       }
78507 +       /* Copy the data into the 'rst' buffer */
78508 +       err = read_log_rec_buf(log, rh, rst);
78509 +       if (err)
78510 +               goto out;
78512 +       *rst_ = rst;
78513 +       rst = NULL;
78515 +out:
78516 +       ntfs_free(rh);
78517 +       ntfs_free(rst);
78519 +       return err;
78520 +}
78521 +
78522 +static int find_log_rec(struct ntfs_log *log, u64 lsn, struct lcb *lcb)
78523 +{
78524 +       int err;
78525 +       struct LFS_RECORD_HDR *rh = lcb->lrh;
78526 +       u32 rec_len, len;
78528 +       /* Read the record header for this lsn */
78529 +       if (!rh) {
78530 +               err = read_log_page(log, lsn_to_vbo(log, lsn),
78531 +                                   (struct RECORD_PAGE_HDR **)&rh, NULL);
78533 +               lcb->lrh = rh;
78534 +               if (err)
78535 +                       return err;
78536 +       }
78538 +       /*
78539 +        * If the lsn of the log record doesn't match the desired
78540 +        * lsn, then the disk is corrupt.
78541 +        */
78542 +       if (lsn != le64_to_cpu(rh->this_lsn))
78543 +               return -EINVAL;
78545 +       len = le32_to_cpu(rh->client_data_len);
78547 +       /*
78548 +        * Check that the length field isn't greater than the total
78549 +        * available space in the log file.
78550 +        */
78551 +       rec_len = len + log->record_header_len;
78552 +       if (rec_len >= log->total_avail)
78553 +               return -EINVAL;
78555 +       /*
78556 +        * If the log record spans several log pages, copy it into a
78557 +        * buffer; otherwise point the context block into this page.
78558 +        */
78559 +       if (rh->flags & LOG_RECORD_MULTI_PAGE) {
78560 +               void *lr = ntfs_malloc(len);
78562 +               if (!lr)
78563 +                       return -ENOMEM;
78565 +               lcb->log_rec = lr;
78566 +               lcb->alloc = true;
78568 +               /* Copy the data into the buffer returned */
78569 +               err = read_log_rec_buf(log, rh, lr);
78570 +               if (err)
78571 +                       return err;
78572 +       } else {
78573 +               /* If beyond the end of the current page -> an error */
78574 +               u32 page_off = lsn_to_page_off(log, lsn);
78576 +               if (page_off + len + log->record_header_len > log->page_size)
78577 +                       return -EINVAL;
78579 +               lcb->log_rec = Add2Ptr(rh, sizeof(struct LFS_RECORD_HDR));
78580 +               lcb->alloc = false;
78581 +       }
78583 +       return 0;
78584 +}
78585 +
78586 +/*
78587 + * read_log_rec_lcb
78588 + *
78589 + * initiates the query operation.
78590 + */
78591 +static int read_log_rec_lcb(struct ntfs_log *log, u64 lsn, u32 ctx_mode,
78592 +                           struct lcb **lcb_)
78593 +{
78594 +       int err;
78595 +       const struct CLIENT_REC *cr;
78596 +       struct lcb *lcb;
78598 +       switch (ctx_mode) {
78599 +       case lcb_ctx_undo_next:
78600 +       case lcb_ctx_prev:
78601 +       case lcb_ctx_next:
78602 +               break;
78603 +       default:
78604 +               return -EINVAL;
78605 +       }
78607 +       /* Check that the given lsn is in the legal range for this client */
78608 +       cr = Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
78610 +       if (!verify_client_lsn(log, cr, lsn))
78611 +               return -EINVAL;
78613 +       lcb = ntfs_zalloc(sizeof(struct lcb));
78614 +       if (!lcb)
78615 +               return -ENOMEM;
78616 +       lcb->client = log->client_id;
78617 +       lcb->ctx_mode = ctx_mode;
78619 +       /* Find the log record indicated by the given lsn */
78620 +       err = find_log_rec(log, lsn, lcb);
78621 +       if (err)
78622 +               goto out;
78624 +       *lcb_ = lcb;
78625 +       return 0;
78627 +out:
78628 +       lcb_put(lcb);
78629 +       *lcb_ = NULL;
78630 +       return err;
78631 +}
78632 +
78633 +/*
78634 + * find_client_next_lsn
78635 + *
78636 + * attempt to find the next lsn to return to a client based on the context mode.
78637 + */
78638 +static int find_client_next_lsn(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
78639 +{
78640 +       int err;
78641 +       u64 next_lsn;
78642 +       struct LFS_RECORD_HDR *hdr;
78644 +       hdr = lcb->lrh;
78645 +       *lsn = 0;
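+       /*
+        * lcb_ctx_next scans forward for the next record owned by this
+        * client; undo_next/prev instead follow the back links stored in
+        * the record header.
+        */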
78647 +       if (lcb_ctx_next != lcb->ctx_mode)
78648 +               goto check_undo_next;
78650 +       /* Loop as long as another lsn can be found */
78651 +       for (;;) {
78652 +               u64 current_lsn;
78654 +               err = next_log_lsn(log, hdr, &current_lsn);
78655 +               if (err)
78656 +                       goto out;
78658 +               if (!current_lsn)
78659 +                       break;
78661 +               if (hdr != lcb->lrh)
78662 +                       ntfs_free(hdr);
78664 +               hdr = NULL;
78665 +               err = read_log_page(log, lsn_to_vbo(log, current_lsn),
78666 +                                   (struct RECORD_PAGE_HDR **)&hdr, NULL);
78667 +               if (err)
78668 +                       goto out;
78670 +               if (memcmp(&hdr->client, &lcb->client,
78671 +                          sizeof(struct CLIENT_ID))) {
78672 +                       /*err = -EINVAL; */
78673 +               } else if (LfsClientRecord == hdr->record_type) {
78674 +                       ntfs_free(lcb->lrh);
78675 +                       lcb->lrh = hdr;
78676 +                       *lsn = current_lsn;
78677 +                       return 0;
78678 +               }
78679 +       }
78681 +out:
78682 +       if (hdr != lcb->lrh)
78683 +               ntfs_free(hdr);
78684 +       return err;
78686 +check_undo_next:
78687 +       if (lcb_ctx_undo_next == lcb->ctx_mode)
78688 +               next_lsn = le64_to_cpu(hdr->client_undo_next_lsn);
78689 +       else if (lcb_ctx_prev == lcb->ctx_mode)
78690 +               next_lsn = le64_to_cpu(hdr->client_prev_lsn);
78691 +       else
78692 +               return 0;
78694 +       if (!next_lsn)
78695 +               return 0;
78697 +       if (!verify_client_lsn(
78698 +                   log, Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)),
78699 +                   next_lsn))
78700 +               return 0;
78702 +       hdr = NULL;
78703 +       err = read_log_page(log, lsn_to_vbo(log, next_lsn),
78704 +                           (struct RECORD_PAGE_HDR **)&hdr, NULL);
78705 +       if (err)
78706 +               return err;
78707 +       ntfs_free(lcb->lrh);
78708 +       lcb->lrh = hdr;
78710 +       *lsn = next_lsn;
78712 +       return 0;
78713 +}
78714 +
78715 +static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
78716 +{
78717 +       int err;
78719 +       err = find_client_next_lsn(log, lcb, lsn);
78720 +       if (err)
78721 +               return err;
78723 +       if (!*lsn)
78724 +               return 0;
78726 +       if (lcb->alloc)
78727 +               ntfs_free(lcb->log_rec);
78729 +       lcb->log_rec = NULL;
78730 +       lcb->alloc = false;
78731 +       ntfs_free(lcb->lrh);
78732 +       lcb->lrh = NULL;
78734 +       return find_log_rec(log, *lsn, lcb);
78735 +}
78736 +
78737 +static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
78738 +{
78739 +       __le16 mask;
78740 +       u32 min_de, de_off, used, total;
78741 +       const struct NTFS_DE *e;
78743 +       if (hdr_has_subnode(hdr)) {
78744 +               min_de = sizeof(struct NTFS_DE) + sizeof(u64);
78745 +               mask = NTFS_IE_HAS_SUBNODES;
78746 +       } else {
78747 +               min_de = sizeof(struct NTFS_DE);
78748 +               mask = 0;
78749 +       }
78751 +       de_off = le32_to_cpu(hdr->de_off);
78752 +       used = le32_to_cpu(hdr->used);
78753 +       total = le32_to_cpu(hdr->total);
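+       /*
+        * de_off/used/total must describe a consistent layout within
+        * 'bytes' before we walk the entries below.
+        */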
78755 +       if (de_off > bytes - min_de || used > bytes || total > bytes ||
78756 +           de_off + min_de > used || used > total) {
78757 +               return false;
78758 +       }
78760 +       e = Add2Ptr(hdr, de_off);
78761 +       for (;;) {
78762 +               u16 esize = le16_to_cpu(e->size);
78763 +               struct NTFS_DE *next = Add2Ptr(e, esize);
78765 +               if (esize < min_de || PtrOffset(hdr, next) > used ||
78766 +                   (e->flags & NTFS_IE_HAS_SUBNODES) != mask) {
78767 +                       return false;
78768 +               }
78770 +               if (de_is_last(e))
78771 +                       break;
78773 +               e = next;
78774 +       }
78776 +       return true;
78777 +}
78778 +
78779 +static inline bool check_index_buffer(const struct INDEX_BUFFER *ib, u32 bytes)
78780 +{
78781 +       u16 fo;
78782 +       const struct NTFS_RECORD_HEADER *r = &ib->rhdr;
78784 +       if (r->sign != NTFS_INDX_SIGNATURE)
78785 +               return false;
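+       /*
+        * Update sequence sanity: the fixup array (one short per sector
+        * plus the sequence number) must fit in the first sector, and
+        * (fix_num - 1) sectors must cover the whole buffer, e.g.
+        * fix_num is 9 for a 4K index buffer.
+        */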
78787 +       fo = (SECTOR_SIZE - ((bytes >> SECTOR_SHIFT) + 1) * sizeof(short));
78789 +       if (le16_to_cpu(r->fix_off) > fo)
78790 +               return false;
78792 +       if ((le16_to_cpu(r->fix_num) - 1) * SECTOR_SIZE != bytes)
78793 +               return false;
78795 +       return check_index_header(&ib->ihdr,
78796 +                                 bytes - offsetof(struct INDEX_BUFFER, ihdr));
78797 +}
78798 +
78799 +static inline bool check_index_root(const struct ATTRIB *attr,
78800 +                                   struct ntfs_sb_info *sbi)
78801 +{
78802 +       bool ret;
78803 +       const struct INDEX_ROOT *root = resident_data(attr);
78804 +       u8 index_bits = le32_to_cpu(root->index_block_size) >= sbi->cluster_size
78805 +                               ? sbi->cluster_bits
78806 +                               : SECTOR_SHIFT;
78807 +       u8 block_clst = root->index_block_clst;
78809 +       if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) ||
78810 +           (root->type != ATTR_NAME && root->type != ATTR_ZERO) ||
78811 +           (root->type == ATTR_NAME &&
78812 +            root->rule != NTFS_COLLATION_TYPE_FILENAME) ||
78813 +           (le32_to_cpu(root->index_block_size) !=
78814 +            (block_clst << index_bits)) ||
78815 +           (block_clst != 1 && block_clst != 2 && block_clst != 4 &&
78816 +            block_clst != 8 && block_clst != 0x10 && block_clst != 0x20 &&
78817 +            block_clst != 0x40 && block_clst != 0x80)) {
78818 +               return false;
78819 +       }
78821 +       ret = check_index_header(&root->ihdr,
78822 +                                le32_to_cpu(attr->res.data_size) -
78823 +                                        offsetof(struct INDEX_ROOT, ihdr));
78824 +       return ret;
78825 +}
78826 +
78827 +static inline bool check_attr(const struct MFT_REC *rec,
78828 +                             const struct ATTRIB *attr,
78829 +                             struct ntfs_sb_info *sbi)
78830 +{
78831 +       u32 asize = le32_to_cpu(attr->size);
78832 +       u32 rsize = 0;
78833 +       u64 dsize, svcn, evcn;
78834 +       u16 run_off;
78836 +       /* Check the fixed part of the attribute record header */
78837 +       if (asize >= sbi->record_size ||
78838 +           asize + PtrOffset(rec, attr) >= sbi->record_size ||
78839 +           (attr->name_len &&
78840 +            le16_to_cpu(attr->name_off) + attr->name_len * sizeof(short) >
78841 +                    asize)) {
78842 +               return false;
78843 +       }
78845 +       /* Check the attribute fields */
78846 +       switch (attr->non_res) {
78847 +       case 0:
78848 +               rsize = le32_to_cpu(attr->res.data_size);
78849 +               if (rsize >= asize ||
78850 +                   le16_to_cpu(attr->res.data_off) + rsize > asize) {
78851 +                       return false;
78852 +               }
78853 +               break;
78855 +       case 1:
78856 +               dsize = le64_to_cpu(attr->nres.data_size);
78857 +               svcn = le64_to_cpu(attr->nres.svcn);
78858 +               evcn = le64_to_cpu(attr->nres.evcn);
78859 +               run_off = le16_to_cpu(attr->nres.run_off);
78861 +               if (svcn > evcn + 1 || run_off >= asize ||
78862 +                   le64_to_cpu(attr->nres.valid_size) > dsize ||
78863 +                   dsize > le64_to_cpu(attr->nres.alloc_size)) {
78864 +                       return false;
78865 +               }
78867 +               if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn,
78868 +                              Add2Ptr(attr, run_off), asize - run_off) < 0) {
78869 +                       return false;
78870 +               }
78872 +               return true;
78874 +       default:
78875 +               return false;
78876 +       }
78878 +       switch (attr->type) {
78879 +       case ATTR_NAME:
78880 +               if (fname_full_size(Add2Ptr(
78881 +                           attr, le16_to_cpu(attr->res.data_off))) > asize) {
78882 +                       return false;
78883 +               }
78884 +               break;
78886 +       case ATTR_ROOT:
78887 +               return check_index_root(attr, sbi);
78889 +       case ATTR_STD:
78890 +               if (rsize < sizeof(struct ATTR_STD_INFO5) &&
78891 +                   rsize != sizeof(struct ATTR_STD_INFO)) {
78892 +                       return false;
78893 +               }
78894 +               break;
78896 +       case ATTR_LIST:
78897 +       case ATTR_ID:
78898 +       case ATTR_SECURE:
78899 +       case ATTR_LABEL:
78900 +       case ATTR_VOL_INFO:
78901 +       case ATTR_DATA:
78902 +       case ATTR_ALLOC:
78903 +       case ATTR_BITMAP:
78904 +       case ATTR_REPARSE:
78905 +       case ATTR_EA_INFO:
78906 +       case ATTR_EA:
78907 +       case ATTR_PROPERTYSET:
78908 +       case ATTR_LOGGED_UTILITY_STREAM:
78909 +               break;
78911 +       default:
78912 +               return false;
78913 +       }
78915 +       return true;
78916 +}
78917 +
78918 +static inline bool check_file_record(const struct MFT_REC *rec,
78919 +                                    const struct MFT_REC *rec2,
78920 +                                    struct ntfs_sb_info *sbi)
78921 +{
78922 +       const struct ATTRIB *attr;
78923 +       u16 fo = le16_to_cpu(rec->rhdr.fix_off);
78924 +       u16 fn = le16_to_cpu(rec->rhdr.fix_num);
78925 +       u16 ao = le16_to_cpu(rec->attr_off);
78926 +       u32 rs = sbi->record_size;
78928 +       /* check the file record header for consistency */
78929 +       if (rec->rhdr.sign != NTFS_FILE_SIGNATURE ||
78930 +           fo > (SECTOR_SIZE - ((rs >> SECTOR_SHIFT) + 1) * sizeof(short)) ||
78931 +           (fn - 1) * SECTOR_SIZE != rs || ao < MFTRECORD_FIXUP_OFFSET_1 ||
78932 +           ao > sbi->record_size - SIZEOF_RESIDENT || !is_rec_inuse(rec) ||
78933 +           le32_to_cpu(rec->total) != rs) {
78934 +               return false;
78935 +       }
78937 +       /* Loop to check all of the attributes */
78938 +       for (attr = Add2Ptr(rec, ao); attr->type != ATTR_END;
78939 +            attr = Add2Ptr(attr, le32_to_cpu(attr->size))) {
78940 +               if (check_attr(rec, attr, sbi))
78941 +                       continue;
78942 +               return false;
78943 +       }
78945 +       return true;
78946 +}
78947 +
78948 +static inline int check_lsn(const struct NTFS_RECORD_HEADER *hdr,
78949 +                           const u64 *rlsn)
78950 +{
78951 +       u64 lsn;
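+       /*
+        * During the redo pass only pages whose lsn is older than the
+        * lsn being redone may be touched; a NULL rlsn (undo) always
+        * passes.
+        */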
78953 +       if (!rlsn)
78954 +               return true;
78956 +       lsn = le64_to_cpu(hdr->lsn);
78958 +       if (hdr->sign == NTFS_HOLE_SIGNATURE)
78959 +               return false;
78961 +       if (*rlsn > lsn)
78962 +               return true;
78964 +       return false;
78965 +}
78966 +
78967 +static inline bool check_if_attr(const struct MFT_REC *rec,
78968 +                                const struct LOG_REC_HDR *lrh)
78969 +{
78970 +       u16 ro = le16_to_cpu(lrh->record_off);
78971 +       u16 o = le16_to_cpu(rec->attr_off);
78972 +       const struct ATTRIB *attr = Add2Ptr(rec, o);
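+       /*
+        * Walk the attribute list; lrh->record_off is valid only if it
+        * lands exactly on an attribute boundary.
+        */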
78974 +       while (o < ro) {
78975 +               u32 asize;
78977 +               if (attr->type == ATTR_END)
78978 +                       break;
78980 +               asize = le32_to_cpu(attr->size);
78981 +               if (!asize)
78982 +                       break;
78984 +               o += asize;
78985 +               attr = Add2Ptr(attr, asize);
78986 +       }
78988 +       return o == ro;
78989 +}
78990 +
78991 +static inline bool check_if_index_root(const struct MFT_REC *rec,
78992 +                                      const struct LOG_REC_HDR *lrh)
78993 +{
78994 +       u16 ro = le16_to_cpu(lrh->record_off);
78995 +       u16 o = le16_to_cpu(rec->attr_off);
78996 +       const struct ATTRIB *attr = Add2Ptr(rec, o);
78998 +       while (o < ro) {
78999 +               u32 asize;
79001 +               if (attr->type == ATTR_END)
79002 +                       break;
79004 +               asize = le32_to_cpu(attr->size);
79005 +               if (!asize)
79006 +                       break;
79008 +               o += asize;
79009 +               attr = Add2Ptr(attr, asize);
79010 +       }
79012 +       return o == ro && attr->type == ATTR_ROOT;
79013 +}
79014 +
79015 +static inline bool check_if_root_index(const struct ATTRIB *attr,
79016 +                                      const struct INDEX_HDR *hdr,
79017 +                                      const struct LOG_REC_HDR *lrh)
79018 +{
79019 +       u16 ao = le16_to_cpu(lrh->attr_off);
79020 +       u32 de_off = le32_to_cpu(hdr->de_off);
79021 +       u32 o = PtrOffset(attr, hdr) + de_off;
79022 +       const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
79023 +       u32 asize = le32_to_cpu(attr->size);
79025 +       while (o < ao) {
79026 +               u16 esize;
79028 +               if (o >= asize)
79029 +                       break;
79031 +               esize = le16_to_cpu(e->size);
79032 +               if (!esize)
79033 +                       break;
79035 +               o += esize;
79036 +               e = Add2Ptr(e, esize);
79037 +       }
79039 +       return o == ao;
79040 +}
79041 +
79042 +static inline bool check_if_alloc_index(const struct INDEX_HDR *hdr,
79043 +                                       u32 attr_off)
79044 +{
79045 +       u32 de_off = le32_to_cpu(hdr->de_off);
79046 +       u32 o = offsetof(struct INDEX_BUFFER, ihdr) + de_off;
79047 +       const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
79048 +       u32 used = le32_to_cpu(hdr->used);
79050 +       while (o < attr_off) {
79051 +               u16 esize;
79053 +               if (de_off >= used)
79054 +                       break;
79056 +               esize = le16_to_cpu(e->size);
79057 +               if (!esize)
79058 +                       break;
79060 +               o += esize;
79061 +               de_off += esize;
79062 +               e = Add2Ptr(e, esize);
79063 +       }
79065 +       return o == attr_off;
79066 +}
79067 +
79068 +static inline void change_attr_size(struct MFT_REC *rec, struct ATTRIB *attr,
79069 +                                   u32 nsize)
79070 +{
79071 +       u32 asize = le32_to_cpu(attr->size);
79072 +       int dsize = nsize - asize;
79073 +       u8 *next = Add2Ptr(attr, asize);
79074 +       u32 used = le32_to_cpu(rec->used);
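+       /*
+        * Slide everything after this attribute to its new position and
+        * grow or shrink the record by the size delta.
+        */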
79076 +       memmove(Add2Ptr(attr, nsize), next, used - PtrOffset(rec, next));
79078 +       rec->used = cpu_to_le32(used + dsize);
79079 +       attr->size = cpu_to_le32(nsize);
79080 +}
79081 +
79082 +struct OpenAttr {
79083 +       struct ATTRIB *attr;
79084 +       struct runs_tree *run1;
79085 +       struct runs_tree run0;
79086 +       struct ntfs_inode *ni;
79087 +       // CLST rno;
79088 +};
79089 +
79090 +/* Returns 0 if a1 and a2 have the same type and name */
79091 +static inline int cmp_type_and_name(const struct ATTRIB *a1,
79092 +                                   const struct ATTRIB *a2)
79093 +{
79094 +       return a1->type != a2->type || a1->name_len != a2->name_len ||
79095 +              (a1->name_len && memcmp(attr_name(a1), attr_name(a2),
79096 +                                      a1->name_len * sizeof(short)));
79097 +}
79098 +
79099 +static struct OpenAttr *find_loaded_attr(struct ntfs_log *log,
79100 +                                        const struct ATTRIB *attr, CLST rno)
79101 +{
79102 +       struct OPEN_ATTR_ENRTY *oe = NULL;
79104 +       while ((oe = enum_rstbl(log->open_attr_tbl, oe))) {
79105 +               struct OpenAttr *op_attr;
79107 +               if (ino_get(&oe->ref) != rno)
79108 +                       continue;
79110 +               op_attr = (struct OpenAttr *)oe->ptr;
79111 +               if (!cmp_type_and_name(op_attr->attr, attr))
79112 +                       return op_attr;
79113 +       }
79114 +       return NULL;
79115 +}
79116 +
79117 +static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi,
79118 +                                            enum ATTR_TYPE type, u64 size,
79119 +                                            const u16 *name, size_t name_len,
79120 +                                            __le16 flags)
79121 +{
79122 +       struct ATTRIB *attr;
79123 +       u32 name_size = QuadAlign(name_len * sizeof(short));
79124 +       bool is_ext = flags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED);
79125 +       u32 asize = name_size +
79126 +                   (is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT);
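+       /*
+        * Layout: non-resident header (extended if compressed/sparse),
+        * then the QuadAligned name, then the run data at run_off.
+        */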
79128 +       attr = ntfs_zalloc(asize);
79129 +       if (!attr)
79130 +               return NULL;
79132 +       attr->type = type;
79133 +       attr->size = cpu_to_le32(asize);
79134 +       attr->flags = flags;
79135 +       attr->non_res = 1;
79136 +       attr->name_len = name_len;
79138 +       attr->nres.evcn = cpu_to_le64((u64)bytes_to_cluster(sbi, size) - 1);
79139 +       attr->nres.alloc_size = cpu_to_le64(ntfs_up_cluster(sbi, size));
79140 +       attr->nres.data_size = cpu_to_le64(size);
79141 +       attr->nres.valid_size = attr->nres.data_size;
79142 +       if (is_ext) {
79143 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
79144 +               if (is_attr_compressed(attr))
79145 +                       attr->nres.c_unit = COMPRESSION_UNIT;
79147 +               attr->nres.run_off =
79148 +                       cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size);
79149 +               memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT_EX), name,
79150 +                      name_len * sizeof(short));
79151 +       } else {
79152 +               attr->name_off = SIZEOF_NONRESIDENT_LE;
79153 +               attr->nres.run_off =
79154 +                       cpu_to_le16(SIZEOF_NONRESIDENT + name_size);
79155 +               memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT), name,
79156 +                      name_len * sizeof(short));
79157 +       }
79159 +       return attr;
79160 +}
79161 +
79162 +/*
79163 + * do_action
79164 + *
79165 + * common routine for the Redo and Undo Passes
79166 + * If rlsn is NULL then undo
79167 + */
79168 +static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
79169 +                    const struct LOG_REC_HDR *lrh, u32 op, void *data,
79170 +                    u32 dlen, u32 rec_len, const u64 *rlsn)
79171 +{
79172 +       int err = 0;
79173 +       struct ntfs_sb_info *sbi = log->ni->mi.sbi;
79174 +       struct inode *inode = NULL, *inode_parent;
79175 +       struct mft_inode *mi = NULL, *mi2_child = NULL;
79176 +       CLST rno = 0, rno_base = 0;
79177 +       struct INDEX_BUFFER *ib = NULL;
79178 +       struct MFT_REC *rec = NULL;
79179 +       struct ATTRIB *attr = NULL, *attr2;
79180 +       struct INDEX_HDR *hdr;
79181 +       struct INDEX_ROOT *root;
79182 +       struct NTFS_DE *e, *e1, *e2;
79183 +       struct NEW_ATTRIBUTE_SIZES *new_sz;
79184 +       struct ATTR_FILE_NAME *fname;
79185 +       struct OpenAttr *oa, *oa2;
79186 +       u32 nsize, t32, asize, used, esize, bmp_off, bmp_bits;
79187 +       u16 id, id2;
79188 +       u32 record_size = sbi->record_size;
79189 +       u64 t64;
79190 +       u16 roff = le16_to_cpu(lrh->record_off);
79191 +       u16 aoff = le16_to_cpu(lrh->attr_off);
79192 +       u64 lco = 0;
79193 +       u64 cbo = (u64)le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
79194 +       u64 tvo = le64_to_cpu(lrh->target_vcn) << sbi->cluster_bits;
79195 +       u64 vbo = cbo + tvo;
79196 +       void *buffer_le = NULL;
79197 +       u32 bytes = 0;
79198 +       bool a_dirty = false;
79199 +       u16 data_off;
79201 +       oa = oe->ptr;
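+       /*
+        * Two switches below: the first maps the log record onto an MFT
+        * record or a non-resident buffer and reads it in; the second
+        * performs the operation described by 'op'.
+        */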
79203 +       /* Big switch to prepare */
79204 +       switch (op) {
79205 +       /* ============================================================
79206 +        * Process MFT records, as described by the current log record
79207 +        * ============================================================
79208 +        */
79209 +       case InitializeFileRecordSegment:
79210 +       case DeallocateFileRecordSegment:
79211 +       case WriteEndOfFileRecordSegment:
79212 +       case CreateAttribute:
79213 +       case DeleteAttribute:
79214 +       case UpdateResidentValue:
79215 +       case UpdateMappingPairs:
79216 +       case SetNewAttributeSizes:
79217 +       case AddIndexEntryRoot:
79218 +       case DeleteIndexEntryRoot:
79219 +       case SetIndexEntryVcnRoot:
79220 +       case UpdateFileNameRoot:
79221 +       case UpdateRecordDataRoot:
79222 +       case ZeroEndOfFileRecord:
79223 +               rno = vbo >> sbi->record_bits;
79224 +               inode = ilookup(sbi->sb, rno);
79225 +               if (inode) {
79226 +                       mi = &ntfs_i(inode)->mi;
79227 +               } else if (op == InitializeFileRecordSegment) {
79228 +                       mi = ntfs_zalloc(sizeof(struct mft_inode));
79229 +                       if (!mi)
79230 +                               return -ENOMEM;
79231 +                       err = mi_format_new(mi, sbi, rno, 0, false);
79232 +                       if (err)
79233 +                               goto out;
79234 +               } else {
79235 +                       /* read from disk */
79236 +                       err = mi_get(sbi, rno, &mi);
79237 +                       if (err)
79238 +                               return err;
79239 +               }
79240 +               rec = mi->mrec;
79242 +               if (op == DeallocateFileRecordSegment)
79243 +                       goto skip_load_parent;
79245 +               if (InitializeFileRecordSegment != op) {
79246 +                       if (rec->rhdr.sign == NTFS_BAAD_SIGNATURE)
79247 +                               goto dirty_vol;
79248 +                       if (!check_lsn(&rec->rhdr, rlsn))
79249 +                               goto out;
79250 +                       if (!check_file_record(rec, NULL, sbi))
79251 +                               goto dirty_vol;
79252 +                       attr = Add2Ptr(rec, roff);
79253 +               }
79255 +               if (is_rec_base(rec) || InitializeFileRecordSegment == op) {
79256 +                       rno_base = rno;
79257 +                       goto skip_load_parent;
79258 +               }
79260 +               rno_base = ino_get(&rec->parent_ref);
79261 +               inode_parent = ntfs_iget5(sbi->sb, &rec->parent_ref, NULL);
79262 +               if (IS_ERR(inode_parent))
79263 +                       goto skip_load_parent;
79265 +               if (is_bad_inode(inode_parent)) {
79266 +                       iput(inode_parent);
79267 +                       goto skip_load_parent;
79268 +               }
79270 +               if (ni_load_mi_ex(ntfs_i(inode_parent), rno, &mi2_child)) {
79271 +                       iput(inode_parent);
79272 +               } else {
79273 +                       if (mi2_child->mrec != mi->mrec)
79274 +                               memcpy(mi2_child->mrec, mi->mrec,
79275 +                                      sbi->record_size);
79277 +                       if (inode)
79278 +                               iput(inode);
79279 +                       else if (mi)
79280 +                               mi_put(mi);
79282 +                       inode = inode_parent;
79283 +                       mi = mi2_child;
79284 +                       rec = mi2_child->mrec;
79285 +                       attr = Add2Ptr(rec, roff);
79286 +               }
79288 +skip_load_parent:
79289 +               inode_parent = NULL;
79290 +               break;
79292 +       /* ============================================================
79293 +        * Process attributes, as described by the current log record
79294 +        * ============================================================
79295 +        */
79296 +       case UpdateNonresidentValue:
79297 +       case AddIndexEntryAllocation:
79298 +       case DeleteIndexEntryAllocation:
79299 +       case WriteEndOfIndexBuffer:
79300 +       case SetIndexEntryVcnAllocation:
79301 +       case UpdateFileNameAllocation:
79302 +       case SetBitsInNonresidentBitMap:
79303 +       case ClearBitsInNonresidentBitMap:
79304 +       case UpdateRecordDataAllocation:
79305 +               attr = oa->attr;
79306 +               bytes = UpdateNonresidentValue == op ? dlen : 0;
79307 +               lco = (u64)le16_to_cpu(lrh->lcns_follow) << sbi->cluster_bits;
79309 +               if (attr->type == ATTR_ALLOC) {
79310 +                       t32 = le32_to_cpu(oe->bytes_per_index);
79311 +                       if (bytes < t32)
79312 +                               bytes = t32;
79313 +               }
79315 +               if (!bytes)
79316 +                       bytes = lco - cbo;
79318 +               bytes += roff;
79319 +               if (attr->type == ATTR_ALLOC)
79320 +                       bytes = (bytes + 511) & ~511; // align to 512-byte sectors
79322 +               buffer_le = ntfs_malloc(bytes);
79323 +               if (!buffer_le)
79324 +                       return -ENOMEM;
79326 +               err = ntfs_read_run_nb(sbi, oa->run1, vbo, buffer_le, bytes,
79327 +                                      NULL);
79328 +               if (err)
79329 +                       goto out;
79331 +               if (attr->type == ATTR_ALLOC && *(int *)buffer_le)
79332 +                       ntfs_fix_post_read(buffer_le, bytes, false);
79333 +               break;
79335 +       default:
79336 +               WARN_ON(1);
79337 +       }
79339 +       /* Big switch to do operation */
79340 +       switch (op) {
79341 +       case InitializeFileRecordSegment:
79342 +               if (roff + dlen > record_size)
79343 +                       goto dirty_vol;
79345 +               memcpy(Add2Ptr(rec, roff), data, dlen);
79346 +               mi->dirty = true;
79347 +               break;
79349 +       case DeallocateFileRecordSegment:
79350 +               clear_rec_inuse(rec);
79351 +               le16_add_cpu(&rec->seq, 1);
79352 +               mi->dirty = true;
79353 +               break;
79355 +       case WriteEndOfFileRecordSegment:
79356 +               attr2 = (struct ATTRIB *)data;
79357 +               if (!check_if_attr(rec, lrh) || roff + dlen > record_size)
79358 +                       goto dirty_vol;
79360 +               memmove(attr, attr2, dlen);
79361 +               rec->used = cpu_to_le32(QuadAlign(roff + dlen));
79363 +               mi->dirty = true;
79364 +               break;
79366 +       case CreateAttribute:
79367 +               attr2 = (struct ATTRIB *)data;
79368 +               asize = le32_to_cpu(attr2->size);
79369 +               used = le32_to_cpu(rec->used);
79371 +               if (!check_if_attr(rec, lrh) || dlen < SIZEOF_RESIDENT ||
79372 +                   !IsQuadAligned(asize) ||
79373 +                   Add2Ptr(attr2, asize) > Add2Ptr(lrh, rec_len) ||
79374 +                   dlen > record_size - used) {
79375 +                       goto dirty_vol;
79376 +               }
79378 +               memmove(Add2Ptr(attr, asize), attr, used - roff);
79379 +               memcpy(attr, attr2, asize);
79381 +               rec->used = cpu_to_le32(used + asize);
79382 +               id = le16_to_cpu(rec->next_attr_id);
79383 +               id2 = le16_to_cpu(attr2->id);
79384 +               if (id <= id2)
79385 +                       rec->next_attr_id = cpu_to_le16(id2 + 1);
79386 +               if (is_attr_indexed(attr))
79387 +                       le16_add_cpu(&rec->hard_links, 1);
79389 +               oa2 = find_loaded_attr(log, attr, rno_base);
79390 +               if (oa2) {
79391 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
79393 +                       if (p2) {
79394 +                               // run_close(oa2->run1);
79395 +                               ntfs_free(oa2->attr);
79396 +                               oa2->attr = p2;
79397 +                       }
79398 +               }
79400 +               mi->dirty = true;
79401 +               break;
79403 +       case DeleteAttribute:
79404 +               asize = le32_to_cpu(attr->size);
79405 +               used = le32_to_cpu(rec->used);
79407 +               if (!check_if_attr(rec, lrh))
79408 +                       goto dirty_vol;
79410 +               rec->used = cpu_to_le32(used - asize);
79411 +               if (is_attr_indexed(attr))
79412 +                       le16_add_cpu(&rec->hard_links, -1);
79414 +               memmove(attr, Add2Ptr(attr, asize), used - asize - roff);
79416 +               mi->dirty = true;
79417 +               break;
79419 +       case UpdateResidentValue:
79420 +               nsize = aoff + dlen;
79422 +               if (!check_if_attr(rec, lrh))
79423 +                       goto dirty_vol;
79425 +               asize = le32_to_cpu(attr->size);
79426 +               used = le32_to_cpu(rec->used);
79428 +               if (lrh->redo_len == lrh->undo_len) {
79429 +                       if (nsize > asize)
79430 +                               goto dirty_vol;
79431 +                       goto move_data;
79432 +               }
79434 +               if (nsize > asize && nsize - asize > record_size - used)
79435 +                       goto dirty_vol;
79437 +               nsize = QuadAlign(nsize);
79438 +               data_off = le16_to_cpu(attr->res.data_off);
79440 +               if (nsize < asize) {
79441 +                       memmove(Add2Ptr(attr, aoff), data, dlen);
79442 +                       data = NULL; // to skip the memmove below
79443 +               }
79445 +               memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
79446 +                       used - le16_to_cpu(lrh->record_off) - asize);
79448 +               rec->used = cpu_to_le32(used + nsize - asize);
79449 +               attr->size = cpu_to_le32(nsize);
79450 +               attr->res.data_size = cpu_to_le32(aoff + dlen - data_off);
79452 +move_data:
79453 +               if (data)
79454 +                       memmove(Add2Ptr(attr, aoff), data, dlen);
79456 +               oa2 = find_loaded_attr(log, attr, rno_base);
79457 +               if (oa2) {
79458 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
79460 +                       if (p2) {
79461 +                               // run_close(&oa2->run0);
79462 +                               oa2->run1 = &oa2->run0;
79463 +                               ntfs_free(oa2->attr);
79464 +                               oa2->attr = p2;
79465 +                       }
79466 +               }
79468 +               mi->dirty = true;
79469 +               break;
79471 +       case UpdateMappingPairs:
79472 +               nsize = aoff + dlen;
79473 +               asize = le32_to_cpu(attr->size);
79474 +               used = le32_to_cpu(rec->used);
79476 +               if (!check_if_attr(rec, lrh) || !attr->non_res ||
79477 +                   aoff < le16_to_cpu(attr->nres.run_off) || aoff > asize ||
79478 +                   (nsize > asize && nsize - asize > record_size - used)) {
79479 +                       goto dirty_vol;
79480 +               }
79482 +               nsize = QuadAlign(nsize);
79484 +               memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
79485 +                       used - le16_to_cpu(lrh->record_off) - asize);
79486 +               rec->used = cpu_to_le32(used + nsize - asize);
79487 +               attr->size = cpu_to_le32(nsize);
79488 +               memmove(Add2Ptr(attr, aoff), data, dlen);
79490 +               if (run_get_highest_vcn(le64_to_cpu(attr->nres.svcn),
79491 +                                       attr_run(attr), &t64)) {
79492 +                       goto dirty_vol;
79493 +               }
79495 +               attr->nres.evcn = cpu_to_le64(t64);
79496 +               oa2 = find_loaded_attr(log, attr, rno_base);
79497 +               if (oa2 && oa2->attr->non_res)
79498 +                       oa2->attr->nres.evcn = attr->nres.evcn;
79500 +               mi->dirty = true;
79501 +               break;
79503 +       case SetNewAttributeSizes:
79504 +               new_sz = data;
79505 +               if (!check_if_attr(rec, lrh) || !attr->non_res)
79506 +                       goto dirty_vol;
79508 +               attr->nres.alloc_size = new_sz->alloc_size;
79509 +               attr->nres.data_size = new_sz->data_size;
79510 +               attr->nres.valid_size = new_sz->valid_size;
79512 +               if (dlen >= sizeof(struct NEW_ATTRIBUTE_SIZES))
79513 +                       attr->nres.total_size = new_sz->total_size;
79515 +               oa2 = find_loaded_attr(log, attr, rno_base);
79516 +               if (oa2) {
79517 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
79519 +                       if (p2) {
79520 +                               ntfs_free(oa2->attr);
79521 +                               oa2->attr = p2;
79522 +                       }
79523 +               }
79524 +               mi->dirty = true;
79525 +               break;
79527 +       case AddIndexEntryRoot:
79528 +               e = (struct NTFS_DE *)data;
79529 +               esize = le16_to_cpu(e->size);
79530 +               root = resident_data(attr);
79531 +               hdr = &root->ihdr;
79532 +               used = le32_to_cpu(hdr->used);
79534 +               if (!check_if_index_root(rec, lrh) ||
79535 +                   !check_if_root_index(attr, hdr, lrh) ||
79536 +                   Add2Ptr(data, esize) > Add2Ptr(lrh, rec_len) ||
79537 +                   esize > le32_to_cpu(rec->total) - le32_to_cpu(rec->used)) {
79538 +                       goto dirty_vol;
79539 +               }
79541 +               e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
79543 +               change_attr_size(rec, attr, le32_to_cpu(attr->size) + esize);
79545 +               memmove(Add2Ptr(e1, esize), e1,
79546 +                       PtrOffset(e1, Add2Ptr(hdr, used)));
79547 +               memmove(e1, e, esize);
79549 +               le32_add_cpu(&attr->res.data_size, esize);
79550 +               hdr->used = cpu_to_le32(used + esize);
79551 +               le32_add_cpu(&hdr->total, esize);
79553 +               mi->dirty = true;
79554 +               break;
79556 +       case DeleteIndexEntryRoot:
79557 +               root = resident_data(attr);
79558 +               hdr = &root->ihdr;
79559 +               used = le32_to_cpu(hdr->used);
79561 +               if (!check_if_index_root(rec, lrh) ||
79562 +                   !check_if_root_index(attr, hdr, lrh)) {
79563 +                       goto dirty_vol;
79564 +               }
79566 +               e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
79567 +               esize = le16_to_cpu(e1->size);
79568 +               e2 = Add2Ptr(e1, esize);
79570 +               memmove(e1, e2, PtrOffset(e2, Add2Ptr(hdr, used)));
79572 +               le32_sub_cpu(&attr->res.data_size, esize);
79573 +               hdr->used = cpu_to_le32(used - esize);
79574 +               le32_sub_cpu(&hdr->total, esize);
79576 +               change_attr_size(rec, attr, le32_to_cpu(attr->size) - esize);
79578 +               mi->dirty = true;
79579 +               break;
79581 +       case SetIndexEntryVcnRoot:
79582 +               root = resident_data(attr);
79583 +               hdr = &root->ihdr;
79585 +               if (!check_if_index_root(rec, lrh) ||
79586 +                   !check_if_root_index(attr, hdr, lrh)) {
79587 +                       goto dirty_vol;
79588 +               }
79590 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
79592 +               de_set_vbn_le(e, *(__le64 *)data);
79593 +               mi->dirty = true;
79594 +               break;
79596 +       case UpdateFileNameRoot:
79597 +               root = resident_data(attr);
79598 +               hdr = &root->ihdr;
79600 +               if (!check_if_index_root(rec, lrh) ||
79601 +                   !check_if_root_index(attr, hdr, lrh)) {
79602 +                       goto dirty_vol;
79603 +               }
79605 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
79606 +               fname = (struct ATTR_FILE_NAME *)(e + 1);
79607 +               memmove(&fname->dup, data, sizeof(fname->dup));
79608 +               mi->dirty = true;
79609 +               break;
79611 +       case UpdateRecordDataRoot:
79612 +               root = resident_data(attr);
79613 +               hdr = &root->ihdr;
79615 +               if (!check_if_index_root(rec, lrh) ||
79616 +                   !check_if_root_index(attr, hdr, lrh)) {
79617 +                       goto dirty_vol;
79618 +               }
79620 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
79622 +               memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
79624 +               mi->dirty = true;
79625 +               break;
79627 +       case ZeroEndOfFileRecord:
79628 +               if (roff + dlen > record_size)
79629 +                       goto dirty_vol;
79631 +               memset(attr, 0, dlen);
79632 +               mi->dirty = true;
79633 +               break;
79635 +       case UpdateNonresidentValue:
79636 +               if (lco < cbo + roff + dlen)
79637 +                       goto dirty_vol;
79639 +               memcpy(Add2Ptr(buffer_le, roff), data, dlen);
79641 +               a_dirty = true;
79642 +               if (attr->type == ATTR_ALLOC)
79643 +                       ntfs_fix_pre_write(buffer_le, bytes);
79644 +               break;
79646 +       case AddIndexEntryAllocation:
79647 +               ib = Add2Ptr(buffer_le, roff);
79648 +               hdr = &ib->ihdr;
79649 +               e = data;
79650 +               esize = le16_to_cpu(e->size);
79651 +               e1 = Add2Ptr(ib, aoff);
79653 +               if (is_baad(&ib->rhdr))
79654 +                       goto dirty_vol;
79655 +               if (!check_lsn(&ib->rhdr, rlsn))
79656 +                       goto out;
79658 +               used = le32_to_cpu(hdr->used);
79660 +               if (!check_index_buffer(ib, bytes) ||
79661 +                   !check_if_alloc_index(hdr, aoff) ||
79662 +                   Add2Ptr(e, esize) > Add2Ptr(lrh, rec_len) ||
79663 +                   used + esize > le32_to_cpu(hdr->total)) {
79664 +                       goto dirty_vol;
79665 +               }
79667 +               memmove(Add2Ptr(e1, esize), e1,
79668 +                       PtrOffset(e1, Add2Ptr(hdr, used)));
79669 +               memcpy(e1, e, esize);
79671 +               hdr->used = cpu_to_le32(used + esize);
79673 +               a_dirty = true;
79675 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79676 +               break;
79678 +       case DeleteIndexEntryAllocation:
79679 +               ib = Add2Ptr(buffer_le, roff);
79680 +               hdr = &ib->ihdr;
79681 +               e = Add2Ptr(ib, aoff);
79682 +               esize = le16_to_cpu(e->size);
79684 +               if (is_baad(&ib->rhdr))
79685 +                       goto dirty_vol;
79686 +               if (!check_lsn(&ib->rhdr, rlsn))
79687 +                       goto out;
79689 +               if (!check_index_buffer(ib, bytes) ||
79690 +                   !check_if_alloc_index(hdr, aoff)) {
79691 +                       goto dirty_vol;
79692 +               }
79694 +               e1 = Add2Ptr(e, esize);
79695 +               nsize = esize;
79696 +               used = le32_to_cpu(hdr->used);
79698 +               memmove(e, e1, PtrOffset(e1, Add2Ptr(hdr, used)));
79700 +               hdr->used = cpu_to_le32(used - nsize);
79702 +               a_dirty = true;
79704 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79705 +               break;
79707 +       case WriteEndOfIndexBuffer:
79708 +               ib = Add2Ptr(buffer_le, roff);
79709 +               hdr = &ib->ihdr;
79710 +               e = Add2Ptr(ib, aoff);
79712 +               if (is_baad(&ib->rhdr))
79713 +                       goto dirty_vol;
79714 +               if (!check_lsn(&ib->rhdr, rlsn))
79715 +                       goto out;
79716 +               if (!check_index_buffer(ib, bytes) ||
79717 +                   !check_if_alloc_index(hdr, aoff) ||
79718 +                   aoff + dlen > offsetof(struct INDEX_BUFFER, ihdr) +
79719 +                                         le32_to_cpu(hdr->total)) {
79720 +                       goto dirty_vol;
79721 +               }
79723 +               hdr->used = cpu_to_le32(dlen + PtrOffset(hdr, e));
79724 +               memmove(e, data, dlen);
79726 +               a_dirty = true;
79727 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79728 +               break;
79730 +       case SetIndexEntryVcnAllocation:
79731 +               ib = Add2Ptr(buffer_le, roff);
79732 +               hdr = &ib->ihdr;
79733 +               e = Add2Ptr(ib, aoff);
79735 +               if (is_baad(&ib->rhdr))
79736 +                       goto dirty_vol;
79738 +               if (!check_lsn(&ib->rhdr, rlsn))
79739 +                       goto out;
79740 +               if (!check_index_buffer(ib, bytes) ||
79741 +                   !check_if_alloc_index(hdr, aoff)) {
79742 +                       goto dirty_vol;
79743 +               }
79745 +               de_set_vbn_le(e, *(__le64 *)data);
79747 +               a_dirty = true;
79748 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79749 +               break;
79751 +       case UpdateFileNameAllocation:
79752 +               ib = Add2Ptr(buffer_le, roff);
79753 +               hdr = &ib->ihdr;
79754 +               e = Add2Ptr(ib, aoff);
79756 +               if (is_baad(&ib->rhdr))
79757 +                       goto dirty_vol;
79759 +               if (!check_lsn(&ib->rhdr, rlsn))
79760 +                       goto out;
79761 +               if (!check_index_buffer(ib, bytes) ||
79762 +                   !check_if_alloc_index(hdr, aoff)) {
79763 +                       goto dirty_vol;
79764 +               }
79766 +               fname = (struct ATTR_FILE_NAME *)(e + 1);
79767 +               memmove(&fname->dup, data, sizeof(fname->dup));
79769 +               a_dirty = true;
79770 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79771 +               break;
79773 +       case SetBitsInNonresidentBitMap:
79774 +               bmp_off =
79775 +                       le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
79776 +               bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
79778 +               if (cbo + (bmp_off + 7) / 8 > lco ||
79779 +                   cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
79780 +                       goto dirty_vol;
79781 +               }
79783 +               __bitmap_set(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
79784 +               a_dirty = true;
79785 +               break;
79787 +       case ClearBitsInNonresidentBitMap:
79788 +               bmp_off =
79789 +                       le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
79790 +               bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
79792 +               if (cbo + (bmp_off + 7) / 8 > lco ||
79793 +                   cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
79794 +                       goto dirty_vol;
79795 +               }
79797 +               __bitmap_clear(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
79798 +               a_dirty = true;
79799 +               break;
79801 +       case UpdateRecordDataAllocation:
79802 +               ib = Add2Ptr(buffer_le, roff);
79803 +               hdr = &ib->ihdr;
79804 +               e = Add2Ptr(ib, aoff);
79806 +               if (is_baad(&ib->rhdr))
79807 +                       goto dirty_vol;
79809 +               if (!check_lsn(&ib->rhdr, rlsn))
79810 +                       goto out;
79811 +               if (!check_index_buffer(ib, bytes) ||
79812 +                   !check_if_alloc_index(hdr, aoff)) {
79813 +                       goto dirty_vol;
79814 +               }
79816 +               memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
79818 +               a_dirty = true;
79819 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
79820 +               break;
79822 +       default:
79823 +               WARN_ON(1);
79824 +       }
79826 +       if (rlsn) {
79827 +               __le64 t64 = cpu_to_le64(*rlsn);
79829 +               if (rec)
79830 +                       rec->rhdr.lsn = t64;
79831 +               if (ib)
79832 +                       ib->rhdr.lsn = t64;
79833 +       }
79835 +       if (mi && mi->dirty) {
79836 +               err = mi_write(mi, 0);
79837 +               if (err)
79838 +                       goto out;
79839 +       }
79841 +       if (a_dirty) {
79842 +               attr = oa->attr;
79843 +               err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes);
79844 +               if (err)
79845 +                       goto out;
79846 +       }
79848 +out:
79850 +       if (inode)
79851 +               iput(inode);
79852 +       else if (mi != mi2_child)
79853 +               mi_put(mi);
79855 +       ntfs_free(buffer_le);
79857 +       return err;
79859 +dirty_vol:
79860 +       log->set_dirty = true;
79861 +       goto out;
79862 +}
79863 +
79864 +/*
79865 + * log_replay
79866 + *
79867 + * this function is called during the mount operation
79868 + * it replays the log and empties it
79869 + * 'initialized' is set to false if the logfile contains '-1'
79870 + */
79871 +int log_replay(struct ntfs_inode *ni, bool *initialized)
79872 +{
79873 +       int err;
79874 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
79875 +       struct ntfs_log *log;
79877 +       struct restart_info rst_info, rst_info2;
79878 +       u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0;
79879 +       struct ATTR_NAME_ENTRY *attr_names = NULL;
79880 +       struct ATTR_NAME_ENTRY *ane;
79881 +       struct RESTART_TABLE *dptbl = NULL;
79882 +       struct RESTART_TABLE *trtbl = NULL;
79883 +       const struct RESTART_TABLE *rt;
79884 +       struct RESTART_TABLE *oatbl = NULL;
79885 +       struct inode *inode;
79886 +       struct OpenAttr *oa;
79887 +       struct ntfs_inode *ni_oe;
79888 +       struct ATTRIB *attr = NULL;
79889 +       u64 size, vcn, undo_next_lsn;
79890 +       CLST rno, lcn, lcn0, len0, clen;
79891 +       void *data;
79892 +       struct NTFS_RESTART *rst = NULL;
79893 +       struct lcb *lcb = NULL;
79894 +       struct OPEN_ATTR_ENRTY *oe;
79895 +       struct TRANSACTION_ENTRY *tr;
79896 +       struct DIR_PAGE_ENTRY *dp;
79897 +       u32 i, bytes_per_attr_entry;
79898 +       u32 l_size = ni->vfs_inode.i_size;
79899 +       u32 orig_file_size = l_size;
79900 +       u32 page_size, vbo, tail, off, dlen;
79901 +       u32 saved_len, rec_len, transact_id;
79902 +       bool use_second_page;
79903 +       struct RESTART_AREA *ra2, *ra = NULL;
79904 +       struct CLIENT_REC *ca, *cr;
79905 +       __le16 client;
79906 +       struct RESTART_HDR *rh;
79907 +       const struct LFS_RECORD_HDR *frh;
79908 +       const struct LOG_REC_HDR *lrh;
79909 +       bool is_mapped;
79910 +       bool is_ro = sb_rdonly(sbi->sb);
79911 +       u64 t64;
79912 +       u16 t16;
79913 +       u32 t32;
79915 +       /* Get the page size. NOTE: for replay we can use the default page size. */
79916 +#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
79917 +       page_size = norm_file_page(PAGE_SIZE, &l_size, true);
79918 +#else
79919 +       page_size = norm_file_page(PAGE_SIZE, &l_size, false);
79920 +#endif
79921 +       if (!page_size)
79922 +               return -EINVAL;
79924 +       log = ntfs_zalloc(sizeof(struct ntfs_log));
79925 +       if (!log)
79926 +               return -ENOMEM;
79928 +       log->ni = ni;
79929 +       log->l_size = l_size;
79930 +       log->one_page_buf = ntfs_malloc(page_size);
79932 +       if (!log->one_page_buf) {
79933 +               err = -ENOMEM;
79934 +               goto out;
79935 +       }
79937 +       log->page_size = page_size;
79938 +       log->page_mask = page_size - 1;
79939 +       log->page_bits = blksize_bits(page_size);
79941 +       /* Look for a restart area on the disk */
79942 +       err = log_read_rst(log, l_size, true, &rst_info);
79943 +       if (err)
79944 +               goto out;
79946 +       /* remember 'initialized' */
79947 +       *initialized = rst_info.initialized;
79949 +       if (!rst_info.restart) {
79950 +               if (rst_info.initialized) {
79951 +                       /* No restart area, but the file is initialized */
79952 +                       err = -EINVAL;
79953 +                       goto out;
79954 +               }
79956 +               log_init_pg_hdr(log, page_size, page_size, 1, 1);
79957 +               log_create(log, l_size, 0, get_random_int(), false, false);
79961 +               ra = log_create_ra(log);
79962 +               if (!ra) {
79963 +                       err = -ENOMEM;
79964 +                       goto out;
79965 +               }
79966 +               log->ra = ra;
79967 +               log->init_ra = true;
79969 +               goto process_log;
79970 +       }
79972 +       /*
79973 +        * If the restart offset above wasn't zero then we won't
79974 +        * look for a second restart
79975 +        */
79976 +       if (rst_info.vbo)
79977 +               goto check_restart_area;
79979 +       err = log_read_rst(log, l_size, false, &rst_info2);
79981 +       /* Determine which restart area to use */
79982 +       if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
79983 +               goto use_first_page;
79985 +       use_second_page = true;
79987 +       if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) {
79988 +               struct RECORD_PAGE_HDR *sp = NULL;
79989 +               bool usa_error;
79991 +               if (!read_log_page(log, page_size, &sp, &usa_error) &&
79992 +                   sp->rhdr.sign == NTFS_CHKD_SIGNATURE) {
79993 +                       use_second_page = false;
79994 +               }
79995 +               ntfs_free(sp);
79996 +       }
79998 +       if (use_second_page) {
79999 +               ntfs_free(rst_info.r_page);
80000 +               memcpy(&rst_info, &rst_info2, sizeof(struct restart_info));
80001 +               rst_info2.r_page = NULL;
80002 +       }
80004 +use_first_page:
80005 +       ntfs_free(rst_info2.r_page);
80007 +check_restart_area:
80008 +       /* If the restart area is at offset 0, we want to write the second restart area first */
80009 +       log->init_ra = !!rst_info.vbo;
80011 +       /* If we have a valid page then grab a pointer to the restart area */
80012 +       ra2 = rst_info.valid_page
80013 +                     ? Add2Ptr(rst_info.r_page,
80014 +                               le16_to_cpu(rst_info.r_page->ra_off))
80015 +                     : NULL;
80017 +       if (rst_info.chkdsk_was_run ||
80018 +           (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
80019 +               bool wrapped = false;
80020 +               bool use_multi_page = false;
80021 +               u32 open_log_count;
80023 +               /* Do some checks based on whether we have a valid log page */
80024 +               if (!rst_info.valid_page) {
80025 +                       open_log_count = get_random_int();
80026 +                       goto init_log_instance;
80027 +               }
80028 +               open_log_count = le32_to_cpu(ra2->open_log_count);
80030 +               /*
80031 +                * If the restart page size isn't changing then we want to
80032 +                * check how much work we need to do
80033 +                */
80034 +               if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size))
80035 +                       goto init_log_instance;
80037 +init_log_instance:
80038 +               log_init_pg_hdr(log, page_size, page_size, 1, 1);
80040 +               log_create(log, l_size, rst_info.last_lsn, open_log_count,
80041 +                          wrapped, use_multi_page);
80043 +               ra = log_create_ra(log);
80044 +               if (!ra) {
80045 +                       err = -ENOMEM;
80046 +                       goto out;
80047 +               }
80048 +               log->ra = ra;
80050 +               /* Put the restart areas and initialize the log file as required */
80051 +               goto process_log;
80052 +       }
80054 +       if (!ra2) {
80055 +               err = -EINVAL;
80056 +               goto out;
80057 +       }
80059 +       /*
80060 +        * If the log page or the system page sizes have changed, we can't
80061 +        * use the log file. We must use the system page size instead of
80062 +        * the default size if there was not a clean shutdown.
80063 +        */
80064 +       t32 = le32_to_cpu(rst_info.r_page->sys_page_size);
80065 +       if (page_size != t32) {
80066 +               l_size = orig_file_size;
80067 +               page_size =
80068 +                       norm_file_page(t32, &l_size, t32 == DefaultLogPageSize);
80069 +       }
80071 +       if (page_size != t32 ||
80072 +           page_size != le32_to_cpu(rst_info.r_page->page_size)) {
80073 +               err = -EINVAL;
80074 +               goto out;
80075 +       }
80077 +       /* If the file size has shrunk then we won't mount it */
80078 +       if (l_size < le64_to_cpu(ra2->l_size)) {
80079 +               err = -EINVAL;
80080 +               goto out;
80081 +       }
80083 +       log_init_pg_hdr(log, page_size, page_size,
80084 +                       le16_to_cpu(rst_info.r_page->major_ver),
80085 +                       le16_to_cpu(rst_info.r_page->minor_ver));
80087 +       log->l_size = le64_to_cpu(ra2->l_size);
80088 +       log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits);
80089 +       log->file_data_bits = sizeof(u64) * 8 - log->seq_num_bits;
80090 +       log->seq_num_mask = (8 << log->file_data_bits) - 1;
80091 +       log->last_lsn = le64_to_cpu(ra2->current_lsn);
80092 +       log->seq_num = log->last_lsn >> log->file_data_bits;
80093 +       log->ra_off = le16_to_cpu(rst_info.r_page->ra_off);
80094 +       log->restart_size = log->sys_page_size - log->ra_off;
80095 +       log->record_header_len = le16_to_cpu(ra2->rec_hdr_len);
80096 +       log->ra_size = le16_to_cpu(ra2->ra_len);
80097 +       log->data_off = le16_to_cpu(ra2->data_off);
80098 +       log->data_size = log->page_size - log->data_off;
80099 +       log->reserved = log->data_size - log->record_header_len;
80101 +       vbo = lsn_to_vbo(log, log->last_lsn);
80103 +       if (vbo < log->first_page) {
80104 +               /* This is a pseudo lsn */
80105 +               log->l_flags |= NTFSLOG_NO_LAST_LSN;
80106 +               log->next_page = log->first_page;
80107 +               goto find_oldest;
80108 +       }
80110 +       /* Find the end of this log record */
80111 +       off = final_log_off(log, log->last_lsn,
80112 +                           le32_to_cpu(ra2->last_lsn_data_len));
80114 +       /* If we wrapped the file then increment the sequence number */
80115 +       if (off <= vbo) {
80116 +               log->seq_num += 1;
80117 +               log->l_flags |= NTFSLOG_WRAPPED;
80118 +       }
80120 +       /* Now compute the next log page to use */
80121 +       vbo &= ~log->sys_page_mask;
80122 +       tail = log->page_size - (off & log->page_mask) - 1;
80124 +       /* If we can fit another log record on the page, move back a page in the log file */
80125 +       if (tail >= log->record_header_len) {
80126 +               log->l_flags |= NTFSLOG_REUSE_TAIL;
80127 +               log->next_page = vbo;
80128 +       } else {
80129 +               log->next_page = next_page_off(log, vbo);
80130 +       }
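The assignments above split a 64-bit lsn into a wrap counter and a file
position: the seq_num_bits high bits count wraps and the remaining
file_data_bits index data in the file. A small sketch of that arithmetic,
assuming the usual $LogFile convention that the low bits count 8-byte units
(hence the shift by 3 below):

#include <stdint.h>

/* Sketch under the stated assumption; field names mirror struct ntfs_log. */
struct log_geom {
        uint32_t seq_num_bits;   /* high bits: sequence (wrap) number */
        uint32_t file_data_bits; /* low bits: quadword index in the file */
};

static uint64_t lsn_seq_num(const struct log_geom *g, uint64_t lsn)
{
        return lsn >> g->file_data_bits;
}

static uint64_t lsn_file_offset(const struct log_geom *g, uint64_t lsn)
{
        /* Mask off the sequence number, then scale quadwords to bytes. */
        return (lsn & ((1ull << g->file_data_bits) - 1)) << 3;
}

For example, with file_data_bits = 45, lsn = (1ull << 45) + 5 yields sequence
number 1 and byte offset 40.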
80132 +find_oldest:
80133 +       /* Find the oldest client lsn. Use the last flushed lsn as a starting point */
80134 +       log->oldest_lsn = log->last_lsn;
80135 +       oldest_client_lsn(Add2Ptr(ra2, le16_to_cpu(ra2->client_off)),
80136 +                         ra2->client_idx[1], &log->oldest_lsn);
80137 +       log->oldest_lsn_off = lsn_to_vbo(log, log->oldest_lsn);
80139 +       if (log->oldest_lsn_off < log->first_page)
80140 +               log->l_flags |= NTFSLOG_NO_OLDEST_LSN;
80142 +       if (!(ra2->flags & RESTART_SINGLE_PAGE_IO))
80143 +               log->l_flags |= NTFSLOG_WRAPPED | NTFSLOG_MULTIPLE_PAGE_IO;
80145 +       log->current_openlog_count = le32_to_cpu(ra2->open_log_count);
80146 +       log->total_avail_pages = log->l_size - log->first_page;
80147 +       log->total_avail = log->total_avail_pages >> log->page_bits;
80148 +       log->max_current_avail = log->total_avail * log->reserved;
80149 +       log->total_avail = log->total_avail * log->data_size;
80151 +       log->current_avail = current_log_avail(log);
80153 +       ra = ntfs_zalloc(log->restart_size);
80154 +       if (!ra) {
80155 +               err = -ENOMEM;
80156 +               goto out;
80157 +       }
80158 +       log->ra = ra;
80160 +       t16 = le16_to_cpu(ra2->client_off);
80161 +       if (t16 == offsetof(struct RESTART_AREA, clients)) {
80162 +               memcpy(ra, ra2, log->ra_size);
80163 +       } else {
80164 +               memcpy(ra, ra2, offsetof(struct RESTART_AREA, clients));
80165 +               memcpy(ra->clients, Add2Ptr(ra2, t16),
80166 +                      le16_to_cpu(ra2->ra_len) - t16);
80168 +               log->current_openlog_count = get_random_int();
80169 +               ra->open_log_count = cpu_to_le32(log->current_openlog_count);
80170 +               log->ra_size = offsetof(struct RESTART_AREA, clients) +
80171 +                              sizeof(struct CLIENT_REC);
80172 +               ra->client_off =
80173 +                       cpu_to_le16(offsetof(struct RESTART_AREA, clients));
80174 +               ra->ra_len = cpu_to_le16(log->ra_size);
80175 +       }
80177 +       le32_add_cpu(&ra->open_log_count, 1);
80179 +       /* Now we need to walk through looking for the last lsn */
80180 +       err = last_log_lsn(log);
80181 +       if (err)
80182 +               goto out;
80184 +       log->current_avail = current_log_avail(log);
80186 +       /* Remember which restart area to write first */
80187 +       log->init_ra = rst_info.vbo;
80189 +process_log:
80190 +       /* Supported versions: 1.0, 1.1, 2.0 (log->major_ver/minor_ver are short values) */
80191 +       switch ((log->major_ver << 16) + log->minor_ver) {
80192 +       case 0x10000:
80193 +       case 0x10001:
80194 +       case 0x20000:
80195 +               break;
80196 +       default:
80197 +               ntfs_warn(sbi->sb, "\x24LogFile version %d.%d is not supported",
80198 +                         log->major_ver, log->minor_ver);
80199 +               err = -EOPNOTSUPP;
80200 +               log->set_dirty = true;
80201 +               goto out;
80202 +       }
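The switch above compares the packed pair (major << 16) + minor against the
three supported on-disk versions in a single integer comparison. A hedged
sketch of the same idiom (the helper name is mine):

#include <stdbool.h>
#include <stdint.h>

static bool log_version_supported(uint16_t major, uint16_t minor)
{
        switch (((uint32_t)major << 16) + minor) {
        case 0x10000: /* 1.0 */
        case 0x10001: /* 1.1 */
        case 0x20000: /* 2.0 */
                return true;
        default:
                return false;
        }
}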
80204 +       /* One client "NTFS" per logfile */
80205 +       ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
80207 +       for (client = ra->client_idx[1];; client = cr->next_client) {
80208 +               if (client == LFS_NO_CLIENT_LE) {
80209 +                       /* Insert "NTFS" client LogFile */
80210 +                       client = ra->client_idx[0];
80211 +                       if (client == LFS_NO_CLIENT_LE)
80212 +                               return -EINVAL;
80214 +                       t16 = le16_to_cpu(client);
80215 +                       cr = ca + t16;
80217 +                       remove_client(ca, cr, &ra->client_idx[0]);
80219 +                       cr->restart_lsn = 0;
80220 +                       cr->oldest_lsn = cpu_to_le64(log->oldest_lsn);
80221 +                       cr->name_bytes = cpu_to_le32(8);
80222 +                       cr->name[0] = cpu_to_le16('N');
80223 +                       cr->name[1] = cpu_to_le16('T');
80224 +                       cr->name[2] = cpu_to_le16('F');
80225 +                       cr->name[3] = cpu_to_le16('S');
80227 +                       add_client(ca, t16, &ra->client_idx[1]);
80228 +                       break;
80229 +               }
80231 +               cr = ca + le16_to_cpu(client);
80233 +               if (cpu_to_le32(8) == cr->name_bytes &&
80234 +                   cpu_to_le16('N') == cr->name[0] &&
80235 +                   cpu_to_le16('T') == cr->name[1] &&
80236 +                   cpu_to_le16('F') == cr->name[2] &&
80237 +                   cpu_to_le16('S') == cr->name[3])
80238 +                       break;
80239 +       }
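The loop above searches the client list for the single "NTFS" record by
comparing the byte length and four little-endian UTF-16 code units. A sketch of
that check in plain C (assumes a little-endian host so the le16 values compare
directly; the helper name is mine):

#include <stdbool.h>
#include <stdint.h>

static bool is_ntfs_client(uint32_t name_bytes, const uint16_t *name_le)
{
        static const uint16_t ntfs[4] = { 'N', 'T', 'F', 'S' };

        if (name_bytes != 8) /* four UTF-16 code units */
                return false;
        for (int i = 0; i < 4; i++)
                if (name_le[i] != ntfs[i])
                        return false;
        return true;
}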
80241 +       /* Update the client handle with the client block information */
80242 +       log->client_id.seq_num = cr->seq_num;
80243 +       log->client_id.client_idx = client;
80245 +       err = read_rst_area(log, &rst, &ra_lsn);
80246 +       if (err)
80247 +               goto out;
80249 +       if (!rst)
80250 +               goto out;
80252 +       bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28;
80254 +       checkpt_lsn = le64_to_cpu(rst->check_point_start);
80255 +       if (!checkpt_lsn)
80256 +               checkpt_lsn = ra_lsn;
80258 +       /* Allocate and Read the Transaction Table */
80259 +       if (!rst->transact_table_len)
80260 +               goto check_dirty_page_table;
80262 +       t64 = le64_to_cpu(rst->transact_table_lsn);
80263 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
80264 +       if (err)
80265 +               goto out;
80267 +       lrh = lcb->log_rec;
80268 +       frh = lcb->lrh;
80269 +       rec_len = le32_to_cpu(frh->client_data_len);
80271 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
80272 +                          bytes_per_attr_entry)) {
80273 +               err = -EINVAL;
80274 +               goto out;
80275 +       }
80277 +       t16 = le16_to_cpu(lrh->redo_off);
80279 +       rt = Add2Ptr(lrh, t16);
80280 +       t32 = rec_len - t16;
80282 +       /* Now check that this is a valid restart table */
80283 +       if (!check_rstbl(rt, t32)) {
80284 +               err = -EINVAL;
80285 +               goto out;
80286 +       }
80288 +       trtbl = ntfs_memdup(rt, t32);
80289 +       if (!trtbl) {
80290 +               err = -ENOMEM;
80291 +               goto out;
80292 +       }
80294 +       lcb_put(lcb);
80295 +       lcb = NULL;
80297 +check_dirty_page_table:
80298 +       /* The next record back should be the Dirty Pages Table */
80299 +       if (!rst->dirty_pages_len)
80300 +               goto check_attribute_names;
80302 +       t64 = le64_to_cpu(rst->dirty_pages_table_lsn);
80303 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
80304 +       if (err)
80305 +               goto out;
80307 +       lrh = lcb->log_rec;
80308 +       frh = lcb->lrh;
80309 +       rec_len = le32_to_cpu(frh->client_data_len);
80311 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
80312 +                          bytes_per_attr_entry)) {
80313 +               err = -EINVAL;
80314 +               goto out;
80315 +       }
80317 +       t16 = le16_to_cpu(lrh->redo_off);
80319 +       rt = Add2Ptr(lrh, t16);
80320 +       t32 = rec_len - t16;
80322 +       /* Now check that this is a valid restart table */
80323 +       if (!check_rstbl(rt, t32)) {
80324 +               err = -EINVAL;
80325 +               goto out;
80326 +       }
80328 +       dptbl = ntfs_memdup(rt, t32);
80329 +       if (!dptbl) {
80330 +               err = -ENOMEM;
80331 +               goto out;
80332 +       }
80334 +       /* Convert Ra version '0' into version '1' */
80335 +       if (rst->major_ver)
80336 +               goto end_conv_1;
80338 +       dp = NULL;
80339 +       while ((dp = enum_rstbl(dptbl, dp))) {
80340 +               struct DIR_PAGE_ENTRY_32 *dp0 = (struct DIR_PAGE_ENTRY_32 *)dp;
80341 +               // NOTE: Danger. Check for out-of-bounds access.
80342 +               memmove(&dp->vcn, &dp0->vcn_low,
80343 +                       2 * sizeof(u64) +
80344 +                               le32_to_cpu(dp->lcns_follow) * sizeof(u64));
80345 +       }
80347 +end_conv_1:
80348 +       lcb_put(lcb);
80349 +       lcb = NULL;
80351 +       /* Go through the table and remove the duplicates, remembering the oldest lsn values */
80352 +       if (sbi->cluster_size <= log->page_size)
80353 +               goto trace_dp_table;
80355 +       dp = NULL;
80356 +       while ((dp = enum_rstbl(dptbl, dp))) {
80357 +               struct DIR_PAGE_ENTRY *next = dp;
80359 +               while ((next = enum_rstbl(dptbl, next))) {
80360 +                       if (next->target_attr == dp->target_attr &&
80361 +                           next->vcn == dp->vcn) {
80362 +                               if (le64_to_cpu(next->oldest_lsn) <
80363 +                                   le64_to_cpu(dp->oldest_lsn)) {
80364 +                                       dp->oldest_lsn = next->oldest_lsn;
80365 +                               }
80367 +                               free_rsttbl_idx(dptbl, PtrOffset(dptbl, next));
80368 +                       }
80369 +               }
80370 +       }
80371 +trace_dp_table:
80372 +check_attribute_names:
80373 +       /* The next record should be the Attribute Names */
80374 +       if (!rst->attr_names_len)
80375 +               goto check_attr_table;
80377 +       t64 = le64_to_cpu(rst->attr_names_lsn);
80378 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
80379 +       if (err)
80380 +               goto out;
80382 +       lrh = lcb->log_rec;
80383 +       frh = lcb->lrh;
80384 +       rec_len = le32_to_cpu(frh->client_data_len);
80386 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
80387 +                          bytes_per_attr_entry)) {
80388 +               err = -EINVAL;
80389 +               goto out;
80390 +       }
80392 +       t32 = lrh_length(lrh);
80393 +       rec_len -= t32;
80395 +       attr_names = ntfs_memdup(Add2Ptr(lrh, t32), rec_len);
80397 +       lcb_put(lcb);
80398 +       lcb = NULL;
80400 +check_attr_table:
80401 +       /* The next record should be the Attribute Table */
80402 +       if (!rst->open_attr_len)
80403 +               goto check_attribute_names2;
80405 +       t64 = le64_to_cpu(rst->open_attr_table_lsn);
80406 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
80407 +       if (err)
80408 +               goto out;
80410 +       lrh = lcb->log_rec;
80411 +       frh = lcb->lrh;
80412 +       rec_len = le32_to_cpu(frh->client_data_len);
80414 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
80415 +                          bytes_per_attr_entry)) {
80416 +               err = -EINVAL;
80417 +               goto out;
80418 +       }
80420 +       t16 = le16_to_cpu(lrh->redo_off);
80422 +       rt = Add2Ptr(lrh, t16);
80423 +       t32 = rec_len - t16;
80425 +       if (!check_rstbl(rt, t32)) {
80426 +               err = -EINVAL;
80427 +               goto out;
80428 +       }
80430 +       oatbl = ntfs_memdup(rt, t32);
80431 +       if (!oatbl) {
80432 +               err = -ENOMEM;
80433 +               goto out;
80434 +       }
80436 +       log->open_attr_tbl = oatbl;
80438 +       /* Clear all of the Attr pointers */
80439 +       oe = NULL;
80440 +       while ((oe = enum_rstbl(oatbl, oe))) {
80441 +               if (!rst->major_ver) {
80442 +                       struct OPEN_ATTR_ENRTY_32 oe0;
80444 +                       /* Really 'oe' points to OPEN_ATTR_ENRTY_32 */
80445 +                       memcpy(&oe0, oe, SIZEOF_OPENATTRIBUTEENTRY0);
80447 +                       oe->bytes_per_index = oe0.bytes_per_index;
80448 +                       oe->type = oe0.type;
80449 +                       oe->is_dirty_pages = oe0.is_dirty_pages;
80450 +                       oe->name_len = 0;
80451 +                       oe->ref = oe0.ref;
80452 +                       oe->open_record_lsn = oe0.open_record_lsn;
80453 +               }
80455 +               oe->is_attr_name = 0;
80456 +               oe->ptr = NULL;
80457 +       }
80459 +       lcb_put(lcb);
80460 +       lcb = NULL;
80462 +check_attribute_names2:
80463 +       if (!rst->attr_names_len)
80464 +               goto trace_attribute_table;
80466 +       ane = attr_names;
80467 +       if (!oatbl)
80468 +               goto trace_attribute_table;
80469 +       while (ane->off) {
80470 +               /* TODO: Clear table on exit! */
80471 +               oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
80472 +               t16 = le16_to_cpu(ane->name_bytes);
80473 +               oe->name_len = t16 / sizeof(short);
80474 +               oe->ptr = ane->name;
80475 +               oe->is_attr_name = 2;
80476 +               ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16);
80477 +       }
80479 +trace_attribute_table:
80480 +       /*
80481 +        * If the checkpt_lsn is zero, then this is a freshly
80482 +        * formatted disk and we have no work to do
80483 +        */
80484 +       if (!checkpt_lsn) {
80485 +               err = 0;
80486 +               goto out;
80487 +       }
80489 +       if (!oatbl) {
80490 +               oatbl = init_rsttbl(bytes_per_attr_entry, 8);
80491 +               if (!oatbl) {
80492 +                       err = -ENOMEM;
80493 +                       goto out;
80494 +               }
80495 +       }
80497 +       log->open_attr_tbl = oatbl;
80499 +       /* Start the analysis pass from the Checkpoint lsn. */
80500 +       rec_lsn = checkpt_lsn;
80502 +       /* Read the first lsn */
80503 +       err = read_log_rec_lcb(log, checkpt_lsn, lcb_ctx_next, &lcb);
80504 +       if (err)
80505 +               goto out;
80507 +       /* Loop to read all subsequent records to the end of the log file */
80508 +next_log_record_analyze:
80509 +       err = read_next_log_rec(log, lcb, &rec_lsn);
80510 +       if (err)
80511 +               goto out;
80513 +       if (!rec_lsn)
80514 +               goto end_log_records_enumerate;
80516 +       frh = lcb->lrh;
80517 +       transact_id = le32_to_cpu(frh->transact_id);
80518 +       rec_len = le32_to_cpu(frh->client_data_len);
80519 +       lrh = lcb->log_rec;
80521 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
80522 +               err = -EINVAL;
80523 +               goto out;
80524 +       }
80526 +       /*
80527 +        * The first lsn after the previous lsn remembered in
80528 +        * the checkpoint is the first candidate for the rlsn.
80529 +        */
80530 +       if (!rlsn)
80531 +               rlsn = rec_lsn;
80533 +       if (LfsClientRecord != frh->record_type)
80534 +               goto next_log_record_analyze;
80536 +       /*
80537 +        * Now update the Transaction Table for this transaction.
80538 +        * If there is no entry present or it is unallocated, we allocate the entry.
80539 +        */
80540 +       if (!trtbl) {
80541 +               trtbl = init_rsttbl(sizeof(struct TRANSACTION_ENTRY),
80542 +                                   INITIAL_NUMBER_TRANSACTIONS);
80543 +               if (!trtbl) {
80544 +                       err = -ENOMEM;
80545 +                       goto out;
80546 +               }
80547 +       }
80549 +       tr = Add2Ptr(trtbl, transact_id);
80551 +       if (transact_id >= bytes_per_rt(trtbl) ||
80552 +           tr->next != RESTART_ENTRY_ALLOCATED_LE) {
80553 +               tr = alloc_rsttbl_from_idx(&trtbl, transact_id);
80554 +               if (!tr) {
80555 +                       err = -ENOMEM;
80556 +                       goto out;
80557 +               }
80558 +               tr->transact_state = TransactionActive;
80559 +               tr->first_lsn = cpu_to_le64(rec_lsn);
80560 +       }
80562 +       tr->prev_lsn = tr->undo_next_lsn = cpu_to_le64(rec_lsn);
80564 +       /*
80565 +        * If this is a compensation log record, then change
80566 +        * the undo_next_lsn to be the undo_next_lsn of this record
80567 +        */
80568 +       if (lrh->undo_op == cpu_to_le16(CompensationLogRecord))
80569 +               tr->undo_next_lsn = frh->client_undo_next_lsn;
80571 +       /* Dispatch to handle log record depending on type */
80572 +       switch (le16_to_cpu(lrh->redo_op)) {
80573 +       case InitializeFileRecordSegment:
80574 +       case DeallocateFileRecordSegment:
80575 +       case WriteEndOfFileRecordSegment:
80576 +       case CreateAttribute:
80577 +       case DeleteAttribute:
80578 +       case UpdateResidentValue:
80579 +       case UpdateNonresidentValue:
80580 +       case UpdateMappingPairs:
80581 +       case SetNewAttributeSizes:
80582 +       case AddIndexEntryRoot:
80583 +       case DeleteIndexEntryRoot:
80584 +       case AddIndexEntryAllocation:
80585 +       case DeleteIndexEntryAllocation:
80586 +       case WriteEndOfIndexBuffer:
80587 +       case SetIndexEntryVcnRoot:
80588 +       case SetIndexEntryVcnAllocation:
80589 +       case UpdateFileNameRoot:
80590 +       case UpdateFileNameAllocation:
80591 +       case SetBitsInNonresidentBitMap:
80592 +       case ClearBitsInNonresidentBitMap:
80593 +       case UpdateRecordDataRoot:
80594 +       case UpdateRecordDataAllocation:
80595 +       case ZeroEndOfFileRecord:
80596 +               t16 = le16_to_cpu(lrh->target_attr);
80597 +               t64 = le64_to_cpu(lrh->target_vcn);
80598 +               dp = find_dp(dptbl, t16, t64);
80600 +               if (dp)
80601 +                       goto copy_lcns;
80603 +               /*
80604 +                * Calculate the number of clusters per page for the system
80605 +                * which wrote the checkpoint, possibly creating the table.
80606 +                */
80607 +               if (dptbl) {
80608 +                       t32 = (le16_to_cpu(dptbl->size) -
80609 +                              sizeof(struct DIR_PAGE_ENTRY)) /
80610 +                             sizeof(u64);
80611 +               } else {
80612 +                       t32 = log->clst_per_page;
80613 +                       ntfs_free(dptbl);
80614 +                       dptbl = init_rsttbl(struct_size(dp, page_lcns, t32),
80615 +                                           32);
80616 +                       if (!dptbl) {
80617 +                               err = -ENOMEM;
80618 +                               goto out;
80619 +                       }
80620 +               }
80622 +               dp = alloc_rsttbl_idx(&dptbl);
80623 +               dp->target_attr = cpu_to_le32(t16);
80624 +               dp->transfer_len = cpu_to_le32(t32 << sbi->cluster_bits);
80625 +               dp->lcns_follow = cpu_to_le32(t32);
80626 +               dp->vcn = cpu_to_le64(t64 & ~((u64)t32 - 1));
80627 +               dp->oldest_lsn = cpu_to_le64(rec_lsn);
80629 +copy_lcns:
80630 +               /*
80631 +                * Copy the Lcns from the log record into the Dirty Page Entry
80632 +                * TODO: for different page size support, we must somehow make
80633 +                * the whole routine a loop, in case the Lcns do not fit below.
80634 +                */
80635 +               t16 = le16_to_cpu(lrh->lcns_follow);
80636 +               for (i = 0; i < t16; i++) {
80637 +                       size_t j = (size_t)(le64_to_cpu(lrh->target_vcn) -
80638 +                                           le64_to_cpu(dp->vcn));
80639 +                       dp->page_lcns[j + i] = lrh->page_lcns[i];
80640 +               }
80642 +               goto next_log_record_analyze;
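The copy_lcns block relies on dp->vcn having been aligned down to a
clusters-per-page boundary when the entry was created, so the slot for a given
Vcn is just the delta target_vcn - dp->vcn. A minimal sketch of that indexing
(assumes clusters-per-page is a power of two, as above):

#include <stdint.h>

/* Alignment applied when the dirty-page entry is allocated. */
static uint64_t align_vcn(uint64_t vcn, uint64_t clst_per_page)
{
        return vcn & ~(clst_per_page - 1);
}

/* Sketch: place the record's lcns into the entry's per-cluster slots. */
static void copy_lcns_sketch(uint64_t *page_lcns, uint64_t entry_vcn,
                             uint64_t target_vcn, const uint64_t *lcns,
                             uint32_t count)
{
        uint64_t j = target_vcn - entry_vcn; /* first slot for this record */

        for (uint32_t i = 0; i < count; i++)
                page_lcns[j + i] = lcns[i];
}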
80644 +       case DeleteDirtyClusters: {
80645 +               u32 range_count =
80646 +                       le16_to_cpu(lrh->redo_len) / sizeof(struct LCN_RANGE);
80647 +               const struct LCN_RANGE *r =
80648 +                       Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
80650 +               /* Loop through all of the Lcn ranges in this log record */
80651 +               for (i = 0; i < range_count; i++, r++) {
80652 +                       u64 lcn0 = le64_to_cpu(r->lcn);
80653 +                       u64 lcn_e = lcn0 + le64_to_cpu(r->len) - 1;
80655 +                       dp = NULL;
80656 +                       while ((dp = enum_rstbl(dptbl, dp))) {
80657 +                               u32 j;
80659 +                               t32 = le32_to_cpu(dp->lcns_follow);
80660 +                               for (j = 0; j < t32; j++) {
80661 +                                       t64 = le64_to_cpu(dp->page_lcns[j]);
80662 +                                       if (t64 >= lcn0 && t64 <= lcn_e)
80663 +                                               dp->page_lcns[j] = 0;
80664 +                               }
80665 +                       }
80666 +               }
80667 +               goto next_log_record_analyze;
80669 +       }
80671 +       case OpenNonresidentAttribute:
80672 +               t16 = le16_to_cpu(lrh->target_attr);
80673 +               if (t16 >= bytes_per_rt(oatbl)) {
80674 +                       /*
80675 +                        * Compute how big the table needs to be.
80676 +                        * Add 10 extra entries for some cushion
80677 +                        */
80678 +                       u32 new_e = t16 / le16_to_cpu(oatbl->size);
80680 +                       new_e += 10 - le16_to_cpu(oatbl->used);
80682 +                       oatbl = extend_rsttbl(oatbl, new_e, ~0u);
80683 +                       log->open_attr_tbl = oatbl;
80684 +                       if (!oatbl) {
80685 +                               err = -ENOMEM;
80686 +                               goto out;
80687 +                       }
80688 +               }
80690 +               /* Point to the entry being opened */
80691 +               oe = alloc_rsttbl_from_idx(&oatbl, t16);
80692 +               log->open_attr_tbl = oatbl;
80693 +               if (!oe) {
80694 +                       err = -ENOMEM;
80695 +                       goto out;
80696 +               }
80698 +               /* Initialize this entry from the log record */
80699 +               t16 = le16_to_cpu(lrh->redo_off);
80700 +               if (!rst->major_ver) {
80701 +                       /* Convert version '0' into version '1' */
80702 +                       struct OPEN_ATTR_ENRTY_32 *oe0 = Add2Ptr(lrh, t16);
80704 +                       oe->bytes_per_index = oe0->bytes_per_index;
80705 +                       oe->type = oe0->type;
80706 +                       oe->is_dirty_pages = oe0->is_dirty_pages;
80707 +                       oe->name_len = 0; //oe0.name_len;
80708 +                       oe->ref = oe0->ref;
80709 +                       oe->open_record_lsn = oe0->open_record_lsn;
80710 +               } else {
80711 +                       memcpy(oe, Add2Ptr(lrh, t16), bytes_per_attr_entry);
80712 +               }
80714 +               t16 = le16_to_cpu(lrh->undo_len);
80715 +               if (t16) {
80716 +                       oe->ptr = ntfs_malloc(t16);
80717 +                       if (!oe->ptr) {
80718 +                               err = -ENOMEM;
80719 +                               goto out;
80720 +                       }
80721 +                       oe->name_len = t16 / sizeof(short);
80722 +                       memcpy(oe->ptr,
80723 +                              Add2Ptr(lrh, le16_to_cpu(lrh->undo_off)), t16);
80724 +                       oe->is_attr_name = 1;
80725 +               } else {
80726 +                       oe->ptr = NULL;
80727 +                       oe->is_attr_name = 0;
80728 +               }
80730 +               goto next_log_record_analyze;
80732 +       case HotFix:
80733 +               t16 = le16_to_cpu(lrh->target_attr);
80734 +               t64 = le64_to_cpu(lrh->target_vcn);
80735 +               dp = find_dp(dptbl, t16, t64);
80736 +               if (dp) {
80737 +                       size_t j = le64_to_cpu(lrh->target_vcn) -
80738 +                                  le64_to_cpu(dp->vcn);
80739 +                       if (dp->page_lcns[j])
80740 +                               dp->page_lcns[j] = lrh->page_lcns[0];
80741 +               }
80742 +               goto next_log_record_analyze;
80744 +       case EndTopLevelAction:
80745 +               tr = Add2Ptr(trtbl, transact_id);
80746 +               tr->prev_lsn = cpu_to_le64(rec_lsn);
80747 +               tr->undo_next_lsn = frh->client_undo_next_lsn;
80748 +               goto next_log_record_analyze;
80750 +       case PrepareTransaction:
80751 +               tr = Add2Ptr(trtbl, transact_id);
80752 +               tr->transact_state = TransactionPrepared;
80753 +               goto next_log_record_analyze;
80755 +       case CommitTransaction:
80756 +               tr = Add2Ptr(trtbl, transact_id);
80757 +               tr->transact_state = TransactionCommitted;
80758 +               goto next_log_record_analyze;
80760 +       case ForgetTransaction:
80761 +               free_rsttbl_idx(trtbl, transact_id);
80762 +               goto next_log_record_analyze;
80764 +       case Noop:
80765 +       case OpenAttributeTableDump:
80766 +       case AttributeNamesDump:
80767 +       case DirtyPageTableDump:
80768 +       case TransactionTableDump:
80769 +               /* The following cases require no action in the Analysis Pass */
80770 +               goto next_log_record_analyze;
80772 +       default:
80773 +               /*
80774 +                * All codes will be explicitly handled.
80775 +                * If we see a code we do not expect, then we are in trouble.
80776 +                */
80777 +               goto next_log_record_analyze;
80778 +       }
80780 +end_log_records_enumerate:
80781 +       lcb_put(lcb);
80782 +       lcb = NULL;
80784 +       /*
80785 +        * Scan the Dirty Page Table and Transaction Table for
80786 +        * the lowest lsn, and return it as the Redo lsn
80787 +        */
80788 +       dp = NULL;
80789 +       while ((dp = enum_rstbl(dptbl, dp))) {
80790 +               t64 = le64_to_cpu(dp->oldest_lsn);
80791 +               if (t64 && t64 < rlsn)
80792 +                       rlsn = t64;
80793 +       }
80795 +       tr = NULL;
80796 +       while ((tr = enum_rstbl(trtbl, tr))) {
80797 +               t64 = le64_to_cpu(tr->first_lsn);
80798 +               if (t64 && t64 < rlsn)
80799 +                       rlsn = t64;
80800 +       }
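Both scans above fold the smallest nonzero oldest/first lsn into rlsn; a
compact sketch of that reduction (the helper name is mine):

#include <stdint.h>

static uint64_t min_nonzero_lsn(uint64_t rlsn, const uint64_t *lsns, int n)
{
        for (int i = 0; i < n; i++)
                if (lsns[i] && lsns[i] < rlsn)
                        rlsn = lsns[i];
        return rlsn;
}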
80802 +       /* Only proceed if the Dirty Page Table or the Transaction Table is not empty */
80803 +       if ((!dptbl || !dptbl->total) && (!trtbl || !trtbl->total))
80804 +               goto end_reply;
80806 +       sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
80807 +       if (is_ro)
80808 +               goto out;
80810 +       /* Reopen all of the attributes with dirty pages */
80811 +       oe = NULL;
80812 +next_open_attribute:
80814 +       oe = enum_rstbl(oatbl, oe);
80815 +       if (!oe) {
80816 +               err = 0;
80817 +               dp = NULL;
80818 +               goto next_dirty_page;
80819 +       }
80821 +       oa = ntfs_zalloc(sizeof(struct OpenAttr));
80822 +       if (!oa) {
80823 +               err = -ENOMEM;
80824 +               goto out;
80825 +       }
80827 +       inode = ntfs_iget5(sbi->sb, &oe->ref, NULL);
80828 +       if (IS_ERR(inode))
80829 +               goto fake_attr;
80831 +       if (is_bad_inode(inode)) {
80832 +               iput(inode);
80833 +fake_attr:
80834 +               if (oa->ni) {
80835 +                       iput(&oa->ni->vfs_inode);
80836 +                       oa->ni = NULL;
80837 +               }
80839 +               attr = attr_create_nonres_log(sbi, oe->type, 0, oe->ptr,
80840 +                                             oe->name_len, 0);
80841 +               if (!attr) {
80842 +                       ntfs_free(oa);
80843 +                       err = -ENOMEM;
80844 +                       goto out;
80845 +               }
80846 +               oa->attr = attr;
80847 +               oa->run1 = &oa->run0;
80848 +               goto final_oe;
80849 +       }
80851 +       ni_oe = ntfs_i(inode);
80852 +       oa->ni = ni_oe;
80854 +       attr = ni_find_attr(ni_oe, NULL, NULL, oe->type, oe->ptr, oe->name_len,
80855 +                           NULL, NULL);
80857 +       if (!attr)
80858 +               goto fake_attr;
80860 +       t32 = le32_to_cpu(attr->size);
80861 +       oa->attr = ntfs_memdup(attr, t32);
80862 +       if (!oa->attr)
80863 +               goto fake_attr;
80865 +       if (!S_ISDIR(inode->i_mode)) {
80866 +               if (attr->type == ATTR_DATA && !attr->name_len) {
80867 +                       oa->run1 = &ni_oe->file.run;
80868 +                       goto final_oe;
80869 +               }
80870 +       } else {
80871 +               if (attr->type == ATTR_ALLOC &&
80872 +                   attr->name_len == ARRAY_SIZE(I30_NAME) &&
80873 +                   !memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) {
80874 +                       oa->run1 = &ni_oe->dir.alloc_run;
80875 +                       goto final_oe;
80876 +               }
80877 +       }
80879 +       if (attr->non_res) {
80880 +               u16 roff = le16_to_cpu(attr->nres.run_off);
80881 +               CLST svcn = le64_to_cpu(attr->nres.svcn);
80883 +               err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn,
80884 +                                le64_to_cpu(attr->nres.evcn), svcn,
80885 +                                Add2Ptr(attr, roff), t32 - roff);
80886 +               if (err < 0) {
80887 +                       ntfs_free(oa->attr);
80888 +                       oa->attr = NULL;
80889 +                       goto fake_attr;
80890 +               }
80891 +               err = 0;
80892 +       }
80893 +       oa->run1 = &oa->run0;
80894 +       attr = oa->attr;
80896 +final_oe:
80897 +       if (oe->is_attr_name == 1)
80898 +               ntfs_free(oe->ptr);
80899 +       oe->is_attr_name = 0;
80900 +       oe->ptr = oa;
80901 +       oe->name_len = attr->name_len;
80903 +       goto next_open_attribute;
80905 +       /*
80906 +        * Now loop through the dirty page table to extract all of the Vcn/Lcn
80907 +        * mappings that we have, and insert them into the appropriate run.
80908 +        */
80909 +next_dirty_page:
80910 +       dp = enum_rstbl(dptbl, dp);
80911 +       if (!dp)
80912 +               goto do_redo_1;
80914 +       oe = Add2Ptr(oatbl, le32_to_cpu(dp->target_attr));
80916 +       if (oe->next != RESTART_ENTRY_ALLOCATED_LE)
80917 +               goto next_dirty_page;
80919 +       oa = oe->ptr;
80920 +       if (!oa)
80921 +               goto next_dirty_page;
80923 +       i = -1;
80924 +next_dirty_page_vcn:
80925 +       i += 1;
80926 +       if (i >= le32_to_cpu(dp->lcns_follow))
80927 +               goto next_dirty_page;
80929 +       vcn = le64_to_cpu(dp->vcn) + i;
80930 +       size = (vcn + 1) << sbi->cluster_bits;
80932 +       if (!dp->page_lcns[i])
80933 +               goto next_dirty_page_vcn;
80935 +       rno = ino_get(&oe->ref);
80936 +       if (rno <= MFT_REC_MIRR &&
80937 +           size < (MFT_REC_VOL + 1) * sbi->record_size &&
80938 +           oe->type == ATTR_DATA) {
80939 +               goto next_dirty_page_vcn;
80940 +       }
80942 +       lcn = le64_to_cpu(dp->page_lcns[i]);
80944 +       if ((!run_lookup_entry(oa->run1, vcn, &lcn0, &len0, NULL) ||
80945 +            lcn0 != lcn) &&
80946 +           !run_add_entry(oa->run1, vcn, lcn, 1, false)) {
80947 +               err = -ENOMEM;
80948 +               goto out;
80949 +       }
80950 +       attr = oa->attr;
80951 +       t64 = le64_to_cpu(attr->nres.alloc_size);
80952 +       if (size > t64) {
80953 +               attr->nres.valid_size = attr->nres.data_size =
80954 +                       attr->nres.alloc_size = cpu_to_le64(size);
80955 +       }
80956 +       goto next_dirty_page_vcn;
80958 +do_redo_1:
80959 +       /*
80960 +        * Perform the Redo Pass, to restore all of the dirty pages to the same
80961 +        * contents that they had immediately before the crash
80962 +        * If the dirty page table is empty, then we can skip the entire Redo Pass
80963 +        */
80964 +       if (!dptbl || !dptbl->total)
80965 +               goto do_undo_action;
80967 +       rec_lsn = rlsn;
80969 +       /*
80970 +        * Read the record at the Redo lsn, before falling
80971 +        * into common code to handle each record
80972 +        */
80973 +       err = read_log_rec_lcb(log, rlsn, lcb_ctx_next, &lcb);
80974 +       if (err)
80975 +               goto out;
80977 +       /*
80978 +        * Now loop to read all of our log records forwards,
80979 +        * until we hit the end of the file, cleaning up at the end
80980 +        */
80981 +do_action_next:
80982 +       frh = lcb->lrh;
80984 +       if (LfsClientRecord != frh->record_type)
80985 +               goto read_next_log_do_action;
80987 +       transact_id = le32_to_cpu(frh->transact_id);
80988 +       rec_len = le32_to_cpu(frh->client_data_len);
80989 +       lrh = lcb->log_rec;
80991 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
80992 +               err = -EINVAL;
80993 +               goto out;
80994 +       }
80996 +       /* Ignore log records that do not update pages */
80997 +       if (lrh->lcns_follow)
80998 +               goto find_dirty_page;
81000 +       goto read_next_log_do_action;
81002 +find_dirty_page:
81003 +       t16 = le16_to_cpu(lrh->target_attr);
81004 +       t64 = le64_to_cpu(lrh->target_vcn);
81005 +       dp = find_dp(dptbl, t16, t64);
81007 +       if (!dp)
81008 +               goto read_next_log_do_action;
81010 +       if (rec_lsn < le64_to_cpu(dp->oldest_lsn))
81011 +               goto read_next_log_do_action;
81013 +       t16 = le16_to_cpu(lrh->target_attr);
81014 +       if (t16 >= bytes_per_rt(oatbl)) {
81015 +               err = -EINVAL;
81016 +               goto out;
81017 +       }
81019 +       oe = Add2Ptr(oatbl, t16);
81021 +       if (oe->next != RESTART_ENTRY_ALLOCATED_LE) {
81022 +               err = -EINVAL;
81023 +               goto out;
81024 +       }
81026 +       oa = oe->ptr;
81028 +       if (!oa) {
81029 +               err = -EINVAL;
81030 +               goto out;
81031 +       }
81032 +       attr = oa->attr;
81034 +       vcn = le64_to_cpu(lrh->target_vcn);
81036 +       if (!run_lookup_entry(oa->run1, vcn, &lcn, NULL, NULL) ||
81037 +           lcn == SPARSE_LCN) {
81038 +               goto read_next_log_do_action;
81039 +       }
81041 +       /* Point to the Redo data and get its length */
81042 +       data = Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
81043 +       dlen = le16_to_cpu(lrh->redo_len);
81045 +       /* Shorten length by any Lcns which were deleted */
81046 +       saved_len = dlen;
81048 +       for (i = le16_to_cpu(lrh->lcns_follow); i; i--) {
81049 +               size_t j;
81050 +               u32 alen, voff;
81052 +               voff = le16_to_cpu(lrh->record_off) +
81053 +                      le16_to_cpu(lrh->attr_off);
81054 +               voff += le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
81056 +               /* If the Vcn in question is allocated, we can just get out. */
81057 +               j = le64_to_cpu(lrh->target_vcn) - le64_to_cpu(dp->vcn);
81058 +               if (dp->page_lcns[j + i - 1])
81059 +                       break;
81061 +               if (!saved_len)
81062 +                       saved_len = 1;
81064 +               /*
81065 +                * Calculate the allocated space left relative to the
81066 +                * log record Vcn, after removing this unallocated Vcn
81067 +                */
81068 +               alen = (i - 1) << sbi->cluster_bits;
81070 +               /*
81071 +                * If the update described by this log record goes beyond
81072 +                * the allocated space, then we will have to reduce the length
81073 +                */
81074 +               if (voff >= alen)
81075 +                       dlen = 0;
81076 +               else if (voff + dlen > alen)
81077 +                       dlen = alen - voff;
81078 +       }
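The loop above trims the redo length whenever trailing Vcns of the record were
deallocated; the clamp at its end reduces to a small helper. A hedged sketch
(voff is the update's offset relative to the log record Vcn, alen the
allocated bytes that remain):

#include <stdint.h>

static uint32_t clamp_redo_len(uint32_t voff, uint32_t dlen, uint32_t alen)
{
        if (voff >= alen)
                return 0;           /* update lies wholly beyond allocation */
        if (voff + dlen > alen)
                return alen - voff; /* keep only the allocated prefix */
        return dlen;
}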
81080 +       /* If the resulting dlen from above is now zero, we can skip this log record */
81081 +       if (!dlen && saved_len)
81082 +               goto read_next_log_do_action;
81084 +       t16 = le16_to_cpu(lrh->redo_op);
81085 +       if (can_skip_action(t16))
81086 +               goto read_next_log_do_action;
81088 +       /* Apply the Redo operation in a common routine */
81089 +       err = do_action(log, oe, lrh, t16, data, dlen, rec_len, &rec_lsn);
81090 +       if (err)
81091 +               goto out;
81093 +       /* Keep reading and looping back until end of file */
81094 +read_next_log_do_action:
81095 +       err = read_next_log_rec(log, lcb, &rec_lsn);
81096 +       if (!err && rec_lsn)
81097 +               goto do_action_next;
81099 +       lcb_put(lcb);
81100 +       lcb = NULL;
81102 +do_undo_action:
81103 +       /* Scan Transaction Table */
81104 +       tr = NULL;
81105 +transaction_table_next:
81106 +       tr = enum_rstbl(trtbl, tr);
81107 +       if (!tr)
81108 +               goto undo_action_done;
81110 +       if (TransactionActive != tr->transact_state || !tr->undo_next_lsn) {
81111 +               free_rsttbl_idx(trtbl, PtrOffset(trtbl, tr));
81112 +               goto transaction_table_next;
81113 +       }
81115 +       log->transaction_id = PtrOffset(trtbl, tr);
81116 +       undo_next_lsn = le64_to_cpu(tr->undo_next_lsn);
81118 +       /*
81119 +        * We only have to do anything if the transaction has
81120 +        * something in its undo_next_lsn field.
81121 +        */
81122 +       if (!undo_next_lsn)
81123 +               goto commit_undo;
81125 +       /* Read the first record to be undone by this transaction */
81126 +       err = read_log_rec_lcb(log, undo_next_lsn, lcb_ctx_undo_next, &lcb);
81127 +       if (err)
81128 +               goto out;
81130 +       /*
81131 +        * Now loop to read all of our log records forwards,
81132 +        * until we hit the end of the file, cleaning up at the end
81133 +        */
81134 +undo_action_next:
81136 +       lrh = lcb->log_rec;
81137 +       frh = lcb->lrh;
81138 +       transact_id = le32_to_cpu(frh->transact_id);
81139 +       rec_len = le32_to_cpu(frh->client_data_len);
81141 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
81142 +               err = -EINVAL;
81143 +               goto out;
81144 +       }
81146 +       if (lrh->undo_op == cpu_to_le16(Noop))
81147 +               goto read_next_log_undo_action;
81149 +       oe = Add2Ptr(oatbl, le16_to_cpu(lrh->target_attr));
81150 +       oa = oe->ptr;
81152 +       t16 = le16_to_cpu(lrh->lcns_follow);
81153 +       if (!t16)
81154 +               goto add_allocated_vcns;
81156 +       is_mapped = run_lookup_entry(oa->run1, le64_to_cpu(lrh->target_vcn),
81157 +                                    &lcn, &clen, NULL);
81159 +       /*
81160 +        * If the mapping isn't already in the table or the mapping
81161 +        * corresponds to a hole, we need to make sure there is no
81162 +        * partial page already in memory.
81163 +        */
81164 +       if (is_mapped && lcn != SPARSE_LCN && clen >= t16)
81165 +               goto add_allocated_vcns;
81167 +       vcn = le64_to_cpu(lrh->target_vcn);
81168 +       vcn &= ~(log->clst_per_page - 1);
81170 +add_allocated_vcns:
81171 +       for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
81172 +           size = (vcn + 1) << sbi->cluster_bits;
81173 +            i < t16; i++, vcn += 1, size += sbi->cluster_size) {
81174 +               attr = oa->attr;
81175 +               if (!attr->non_res) {
81176 +                       if (size > le32_to_cpu(attr->res.data_size))
81177 +                               attr->res.data_size = cpu_to_le32(size);
81178 +               } else {
81179 +                       if (size > le64_to_cpu(attr->nres.data_size))
81180 +                               attr->nres.valid_size = attr->nres.data_size =
81181 +                                       attr->nres.alloc_size =
81182 +                                               cpu_to_le64(size);
81183 +               }
81184 +       }
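The loop above grows the attribute sizes so they cover every Vcn the record
touched. For the non-resident case the three size fields move together; a
hedged sketch with plain integers standing in for the little-endian fields of
struct ATTRIB:

#include <stdint.h>

struct nres_sizes {
        uint64_t valid_size;
        uint64_t data_size;
        uint64_t alloc_size;
};

/* Sketch: extend the sizes so they cover byte offset 'size'. */
static void extend_nres(struct nres_sizes *s, uint64_t size)
{
        if (size > s->data_size)
                s->valid_size = s->data_size = s->alloc_size = size;
}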
81186 +       t16 = le16_to_cpu(lrh->undo_op);
81187 +       if (can_skip_action(t16))
81188 +               goto read_next_log_undo_action;
81190 +       /* Point to the Undo data and get its length */
81191 +       data = Add2Ptr(lrh, le16_to_cpu(lrh->undo_off));
81192 +       dlen = le16_to_cpu(lrh->undo_len);
81194 +       /* It is time to apply the undo action. */
81195 +       err = do_action(log, oe, lrh, t16, data, dlen, rec_len, NULL);
81197 +read_next_log_undo_action:
81198 +       /*
81199 +        * Keep reading and looping back until we have read the
81200 +        * last record for this transaction
81201 +        */
81202 +       err = read_next_log_rec(log, lcb, &rec_lsn);
81203 +       if (err)
81204 +               goto out;
81206 +       if (rec_lsn)
81207 +               goto undo_action_next;
81209 +       lcb_put(lcb);
81210 +       lcb = NULL;
81212 +commit_undo:
81213 +       free_rsttbl_idx(trtbl, log->transaction_id);
81215 +       log->transaction_id = 0;
81217 +       goto transaction_table_next;
81219 +undo_action_done:
81221 +       ntfs_update_mftmirr(sbi, 0);
81223 +       sbi->flags &= ~NTFS_FLAGS_NEED_REPLAY;
81225 +end_reply:
81227 +       err = 0;
81228 +       if (is_ro)
81229 +               goto out;
81231 +       rh = ntfs_zalloc(log->page_size);
81232 +       if (!rh) {
81233 +               err = -ENOMEM;
81234 +               goto out;
81235 +       }
81237 +       rh->rhdr.sign = NTFS_RSTR_SIGNATURE;
81238 +       rh->rhdr.fix_off = cpu_to_le16(offsetof(struct RESTART_HDR, fixups));
81239 +       t16 = (log->page_size >> SECTOR_SHIFT) + 1;
81240 +       rh->rhdr.fix_num = cpu_to_le16(t16);
81241 +       rh->sys_page_size = cpu_to_le32(log->page_size);
81242 +       rh->page_size = cpu_to_le32(log->page_size);
81244 +       t16 = QuadAlign(offsetof(struct RESTART_HDR, fixups) +
81245 +                       sizeof(short) * t16);
81246 +       rh->ra_off = cpu_to_le16(t16);
81247 +       rh->minor_ver = cpu_to_le16(1); // 0x1A:
81248 +       rh->major_ver = cpu_to_le16(1); // 0x1C:
81250 +       ra2 = Add2Ptr(rh, t16);
81251 +       memcpy(ra2, ra, sizeof(struct RESTART_AREA));
81253 +       ra2->client_idx[0] = 0;
81254 +       ra2->client_idx[1] = LFS_NO_CLIENT_LE;
81255 +       ra2->flags = cpu_to_le16(2);
81257 +       le32_add_cpu(&ra2->open_log_count, 1);
81259 +       ntfs_fix_pre_write(&rh->rhdr, log->page_size);
81261 +       err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size);
81262 +       if (!err)
81263 +               err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size,
81264 +                                       rh, log->page_size);
81266 +       ntfs_free(rh);
81267 +       if (err)
81268 +               goto out;
81270 +out:
81271 +       ntfs_free(rst);
81272 +       if (lcb)
81273 +               lcb_put(lcb);
81275 +       /* Scan the Open Attribute Table to close all of the open attributes */
81276 +       oe = NULL;
81277 +       while ((oe = enum_rstbl(oatbl, oe))) {
81278 +               rno = ino_get(&oe->ref);
81280 +               if (oe->is_attr_name == 1) {
81281 +                       ntfs_free(oe->ptr);
81282 +                       oe->ptr = NULL;
81283 +                       continue;
81284 +               }
81286 +               if (oe->is_attr_name)
81287 +                       continue;
81289 +               oa = oe->ptr;
81290 +               if (!oa)
81291 +                       continue;
81293 +               run_close(&oa->run0);
81294 +               ntfs_free(oa->attr);
81295 +               if (oa->ni)
81296 +                       iput(&oa->ni->vfs_inode);
81297 +               ntfs_free(oa);
81298 +       }
81300 +       ntfs_free(trtbl);
81301 +       ntfs_free(oatbl);
81302 +       ntfs_free(dptbl);
81303 +       ntfs_free(attr_names);
81304 +       ntfs_free(rst_info.r_page);
81306 +       ntfs_free(ra);
81307 +       ntfs_free(log->one_page_buf);
81309 +       if (err)
81310 +               sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
81312 +       if (err == -EROFS)
81313 +               err = 0;
81314 +       else if (log->set_dirty)
81315 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
81317 +       ntfs_free(log);
81319 +       return err;
81321 diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
81322 new file mode 100644
81323 index 000000000000..327356b08187
81324 --- /dev/null
81325 +++ b/fs/ntfs3/fsntfs.c
81326 @@ -0,0 +1,2542 @@
81327 +// SPDX-License-Identifier: GPL-2.0
81329 + *
81330 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
81331 + *
81332 + */
81334 +#include <linux/blkdev.h>
81335 +#include <linux/buffer_head.h>
81336 +#include <linux/fs.h>
81337 +#include <linux/nls.h>
81339 +#include "debug.h"
81340 +#include "ntfs.h"
81341 +#include "ntfs_fs.h"
81343 +// clang-format off
81344 +const struct cpu_str NAME_MFT = {
81345 +       4, 0, { '$', 'M', 'F', 'T' },
81347 +const struct cpu_str NAME_MIRROR = {
81348 +       8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
81350 +const struct cpu_str NAME_LOGFILE = {
81351 +       8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
81353 +const struct cpu_str NAME_VOLUME = {
81354 +       7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
81356 +const struct cpu_str NAME_ATTRDEF = {
81357 +       8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
81359 +const struct cpu_str NAME_ROOT = {
81360 +       1, 0, { '.' },
81362 +const struct cpu_str NAME_BITMAP = {
81363 +       7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
81365 +const struct cpu_str NAME_BOOT = {
81366 +       5, 0, { '$', 'B', 'o', 'o', 't' },
81368 +const struct cpu_str NAME_BADCLUS = {
81369 +       8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
81371 +const struct cpu_str NAME_QUOTA = {
81372 +       6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
81374 +const struct cpu_str NAME_SECURE = {
81375 +       7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
81377 +const struct cpu_str NAME_UPCASE = {
81378 +       7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
81380 +const struct cpu_str NAME_EXTEND = {
81381 +       7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
81383 +const struct cpu_str NAME_OBJID = {
81384 +       6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
81386 +const struct cpu_str NAME_REPARSE = {
81387 +       8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
81389 +const struct cpu_str NAME_USNJRNL = {
81390 +       8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
81392 +const __le16 BAD_NAME[4] = {
81393 +       cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
81395 +const __le16 I30_NAME[4] = {
81396 +       cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
81398 +const __le16 SII_NAME[4] = {
81399 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
81401 +const __le16 SDH_NAME[4] = {
81402 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
81404 +const __le16 SDS_NAME[4] = {
81405 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
81407 +const __le16 SO_NAME[2] = {
81408 +       cpu_to_le16('$'), cpu_to_le16('O'),
81410 +const __le16 SQ_NAME[2] = {
81411 +       cpu_to_le16('$'), cpu_to_le16('Q'),
81413 +const __le16 SR_NAME[2] = {
81414 +       cpu_to_le16('$'), cpu_to_le16('R'),
81417 +#ifdef CONFIG_NTFS3_LZX_XPRESS
81418 +const __le16 WOF_NAME[17] = {
81419 +       cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
81420 +       cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
81421 +       cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
81422 +       cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
81423 +       cpu_to_le16('a'),
81425 +#endif
81427 +// clang-format on
81428 +
81429 +/*
81430 + * ntfs_fix_pre_write
81431 + *
81432 + * Inserts fixups into 'rhdr' before writing to disk.
81433 + */
81434 +bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
81436 +       u16 *fixup, *ptr;
81437 +       u16 sample;
81438 +       u16 fo = le16_to_cpu(rhdr->fix_off);
81439 +       u16 fn = le16_to_cpu(rhdr->fix_num);
81441 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
81442 +           fn * SECTOR_SIZE > bytes) {
81443 +               return false;
81444 +       }
81446 +       /* Get fixup pointer */
81447 +       fixup = Add2Ptr(rhdr, fo);
81449 +       if (*fixup >= 0x7FFF)
81450 +               *fixup = 1;
81451 +       else
81452 +               *fixup += 1;
81454 +       sample = *fixup;
81456 +       ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
81458 +       while (fn--) {
81459 +               *++fixup = *ptr;
81460 +               *ptr = sample;
81461 +               ptr += SECTOR_SIZE / sizeof(short);
81462 +       }
81463 +       return true;
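81464 +}
81465 +
+/*
+ * Example (illustrative only; 'rhdr' is any record-sized buffer about
+ * to be written): protecting a 1024-byte record that spans two
+ * 512-byte sectors.
+ *
+ *     if (!ntfs_fix_pre_write(rhdr, 1024))
+ *             return -EINVAL;         // malformed fixup table
+ *
+ * The incremented update sequence number now terminates both sectors;
+ * the displaced words are saved in the array at 'fix_off'. A torn
+ * write leaves a sector ending in a stale number, which the read
+ * path below detects.
+ */
+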
81466 +/*
81467 + * ntfs_fix_post_read
81468 + *
81469 + * Removes fixups after reading from disk.
81470 + * Returns < 0 on error, 0 if OK, -E_NTFS_FIXUP if the fixups need updating.
81471 + */
81472 +int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
81473 +                      bool simple)
81475 +       int ret;
81476 +       u16 *fixup, *ptr;
81477 +       u16 sample, fo, fn;
81479 +       fo = le16_to_cpu(rhdr->fix_off);
81480 +       fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
81481 +                   : le16_to_cpu(rhdr->fix_num);
81483 +       /* Check errors */
81484 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
81485 +           fn * SECTOR_SIZE > bytes) {
81486 +               return -EINVAL; /* native chkntfs returns ok! */
81487 +       }
81489 +       /* Get fixup pointer */
81490 +       fixup = Add2Ptr(rhdr, fo);
81491 +       sample = *fixup;
81492 +       ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
81493 +       ret = 0;
81495 +       while (fn--) {
81496 +               /* Test current word */
81497 +               if (*ptr != sample) {
81498 +                       /* Fixup does not match! Is it a serious error? */
81499 +                       ret = -E_NTFS_FIXUP;
81500 +               }
81502 +               /* Replace fixup */
81503 +               *ptr = *++fixup;
81504 +               ptr += SECTOR_SIZE / sizeof(short);
81505 +       }
81507 +       return ret;
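81508 +}
81509 +
+/*
+ * Example (illustrative only): verifying a freshly read record.
+ *
+ *     err = ntfs_fix_post_read(rhdr, bytes, false);
+ *     if (err == -E_NTFS_FIXUP)
+ *             ;               // mismatch: caller decides to heal or discard
+ *     else if (err < 0)
+ *             return err;     // malformed fixup table
+ */
+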
81510 +/*
81511 + * ntfs_extend_init
81512 + *
81513 + * Loads the $Extend directory and looks up its well-known subfiles.
81514 + */
81515 +int ntfs_extend_init(struct ntfs_sb_info *sbi)
81517 +       int err;
81518 +       struct super_block *sb = sbi->sb;
81519 +       struct inode *inode, *inode2;
81520 +       struct MFT_REF ref;
81522 +       if (sbi->volume.major_ver < 3) {
81523 +               ntfs_notice(sb, "Skip $Extend: NTFS version is older than 3.0");
81524 +               return 0;
81525 +       }
81527 +       ref.low = cpu_to_le32(MFT_REC_EXTEND);
81528 +       ref.high = 0;
81529 +       ref.seq = cpu_to_le16(MFT_REC_EXTEND);
81530 +       inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
81531 +       if (IS_ERR(inode)) {
81532 +               err = PTR_ERR(inode);
81533 +               ntfs_err(sb, "Failed to load $Extend.");
81534 +               inode = NULL;
81535 +               goto out;
81536 +       }
81538 +       /* If ntfs_iget5() reads from disk, it never returns a bad inode. */
81539 +       if (!S_ISDIR(inode->i_mode)) {
81540 +               err = -EINVAL;
81541 +               goto out;
81542 +       }
81544 +       /* Try to find $ObjId */
81545 +       inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
81546 +       if (inode2 && !IS_ERR(inode2)) {
81547 +               if (is_bad_inode(inode2)) {
81548 +                       iput(inode2);
81549 +               } else {
81550 +                       sbi->objid.ni = ntfs_i(inode2);
81551 +                       sbi->objid_no = inode2->i_ino;
81552 +               }
81553 +       }
81555 +       /* Try to find $Quota */
81556 +       inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
81557 +       if (inode2 && !IS_ERR(inode2)) {
81558 +               sbi->quota_no = inode2->i_ino;
81559 +               iput(inode2);
81560 +       }
81562 +       /* Try to find $Reparse */
81563 +       inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
81564 +       if (inode2 && !IS_ERR(inode2)) {
81565 +               sbi->reparse.ni = ntfs_i(inode2);
81566 +               sbi->reparse_no = inode2->i_ino;
81567 +       }
81569 +       /* Try to find $UsnJrnl */
81570 +       inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
81571 +       if (inode2 && !IS_ERR(inode2)) {
81572 +               sbi->usn_jrnl_no = inode2->i_ino;
81573 +               iput(inode2);
81574 +       }
81576 +       err = 0;
81577 +out:
81578 +       iput(inode);
81579 +       return err;
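81580 +}
81581 +
+/*
+ * Note (illustrative): an MFT_REF packs a 48-bit record number
+ * (low + high) with a 16-bit sequence count, so a stale reference to
+ * a reused record can be detected:
+ *
+ *     struct MFT_REF ref;
+ *
+ *     ref.low  = cpu_to_le32(MFT_REC_EXTEND); // record no., bits 0-31
+ *     ref.high = 0;                           // record no., bits 32-47
+ *     ref.seq  = cpu_to_le16(MFT_REC_EXTEND); // expected sequence
+ */
+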
81582 +int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
81584 +       int err = 0;
81585 +       struct super_block *sb = sbi->sb;
81586 +       bool initialized = false;
81587 +       struct MFT_REF ref;
81588 +       struct inode *inode;
81590 +       /* Reject a $LogFile of 4GB or larger. */
81591 +       if (ni->vfs_inode.i_size >= 0x100000000ull) {
81592 +               ntfs_err(sb, "\x24LogFile is too big");
81593 +               err = -EINVAL;
81594 +               goto out;
81595 +       }
81597 +       sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
81599 +       ref.low = cpu_to_le32(MFT_REC_MFT);
81600 +       ref.high = 0;
81601 +       ref.seq = cpu_to_le16(1);
81603 +       inode = ntfs_iget5(sb, &ref, NULL);
81605 +       if (IS_ERR(inode))
81606 +               inode = NULL;
81608 +       if (!inode) {
81609 +               /* Try to use mft copy */
81610 +               u64 t64 = sbi->mft.lbo;
81612 +               sbi->mft.lbo = sbi->mft.lbo2;
81613 +               inode = ntfs_iget5(sb, &ref, NULL);
81614 +               sbi->mft.lbo = t64;
81615 +               if (IS_ERR(inode))
81616 +                       inode = NULL;
81617 +       }
81619 +       if (!inode) {
81620 +               err = -EINVAL;
81621 +               ntfs_err(sb, "Failed to load $MFT.");
81622 +               goto out;
81623 +       }
81625 +       sbi->mft.ni = ntfs_i(inode);
81627 +       /* $LogFile should not contain an attribute list. */
81628 +       err = ni_load_all_mi(sbi->mft.ni);
81629 +       if (!err)
81630 +               err = log_replay(ni, &initialized);
81632 +       iput(inode);
81633 +       sbi->mft.ni = NULL;
81635 +       sync_blockdev(sb->s_bdev);
81636 +       invalidate_bdev(sb->s_bdev);
81638 +       if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
81639 +               err = 0;
81640 +               goto out;
81641 +       }
81643 +       if (sb_rdonly(sb) || !initialized)
81644 +               goto out;
81646 +       /* Fill $LogFile with -1 (empty log) if it was initialized. */
81647 +       err = ntfs_bio_fill_1(sbi, &ni->file.run);
81649 +out:
81650 +       sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
81652 +       return err;
81653 +}
81654 +
81655 +/*
81656 + * ntfs_query_def
81657 + *
81658 + * Returns the ATTR_DEF_ENTRY for the given attribute type.
81659 + */
81660 +const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
81661 +                                           enum ATTR_TYPE type)
81663 +       int type_in = le32_to_cpu(type);
81664 +       size_t min_idx = 0;
81665 +       size_t max_idx = sbi->def_entries - 1;
81667 +       while (min_idx <= max_idx) {
81668 +               size_t i = min_idx + ((max_idx - min_idx) >> 1);
81669 +               const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
81670 +               int diff = le32_to_cpu(entry->type) - type_in;
81672 +               if (!diff)
81673 +                       return entry;
81674 +               if (diff < 0)
81675 +                       min_idx = i + 1;
81676 +               else if (i)
81677 +                       max_idx = i - 1;
81678 +               else
81679 +                       return NULL;
81680 +       }
81681 +       return NULL;
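81682 +}
81683 +
+/*
+ * Example (illustrative only): the $AttrDef table is kept sorted by
+ * attribute type, so the lookup above is a plain binary search.
+ *
+ *     const struct ATTR_DEF_ENTRY *e = ntfs_query_def(sbi, ATTR_DATA);
+ *
+ *     if (e)
+ *             check_limits(e);        // hypothetical: min/max size, flags
+ */
+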
81684 +/*
81685 + * ntfs_look_for_free_space
81686 + *
81687 + * Looks for free space in the cluster bitmap.
81688 + */
81689 +int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
81690 +                            CLST *new_lcn, CLST *new_len,
81691 +                            enum ALLOCATE_OPT opt)
81693 +       int err;
81694 +       struct super_block *sb = sbi->sb;
81695 +       size_t a_lcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
81696 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
81698 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
81699 +       if (opt & ALLOCATE_MFT) {
81700 +               CLST alen;
81702 +               zlen = wnd_zone_len(wnd);
81704 +               if (!zlen) {
81705 +                       err = ntfs_refresh_zone(sbi);
81706 +                       if (err)
81707 +                               goto out;
81709 +                       zlen = wnd_zone_len(wnd);
81711 +                       if (!zlen) {
81712 +                               ntfs_err(sbi->sb,
81713 +                                        "no free space to extend mft");
81714 +                               err = -ENOSPC;
81715 +                               goto out;
81716 +                       }
81717 +               }
81719 +               lcn = wnd_zone_bit(wnd);
81720 +               alen = zlen > len ? len : zlen;
81722 +               wnd_zone_set(wnd, lcn + alen, zlen - alen);
81724 +               err = wnd_set_used(wnd, lcn, alen);
81725 +               if (err)
81726 +                       goto out;
81728 +               *new_lcn = lcn;
81729 +               *new_len = alen;
81730 +               goto ok;
81731 +       }
81733 +       /*
81734 +        * Cluster 0 is always in use, so lcn == 0 means the caller has
81735 +        * no hint; use the cached 'next_free_lcn' to improve performance.
81736 +        */
81737 +       if (!lcn)
81738 +               lcn = sbi->used.next_free_lcn;
81740 +       if (lcn >= wnd->nbits)
81741 +               lcn = 0;
81743 +       *new_len = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &a_lcn);
81744 +       if (*new_len) {
81745 +               *new_lcn = a_lcn;
81746 +               goto ok;
81747 +       }
81749 +       /* Try to use clusters from MftZone */
81750 +       zlen = wnd_zone_len(wnd);
81751 +       zeroes = wnd_zeroes(wnd);
81753 +       /* Reject a request that is too big to satisfy. */
81754 +       if (len > zeroes + zlen)
81755 +               goto no_space;
81757 +       if (zlen <= NTFS_MIN_MFT_ZONE)
81758 +               goto no_space;
81760 +       /* How many clusters to cut from the zone. */
81761 +       zlcn = wnd_zone_bit(wnd);
81762 +       zlen2 = zlen >> 1;
81763 +       ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
81764 +       new_zlen = zlen - ztrim;
81766 +       if (new_zlen < NTFS_MIN_MFT_ZONE) {
81767 +               new_zlen = NTFS_MIN_MFT_ZONE;
81768 +               if (new_zlen > zlen)
81769 +                       new_zlen = zlen;
81770 +       }
81772 +       wnd_zone_set(wnd, zlcn, new_zlen);
81774 +       /* Allocate contiguous clusters. */
81775 +       *new_len =
81776 +               wnd_find(wnd, len, 0,
81777 +                        BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &a_lcn);
81778 +       if (*new_len) {
81779 +               *new_lcn = a_lcn;
81780 +               goto ok;
81781 +       }
81783 +no_space:
81784 +       up_write(&wnd->rw_lock);
81786 +       return -ENOSPC;
81788 +ok:
81789 +       err = 0;
81791 +       ntfs_unmap_meta(sb, *new_lcn, *new_len);
81793 +       if (opt & ALLOCATE_MFT)
81794 +               goto out;
81796 +       /* Set hint for next requests */
81797 +       sbi->used.next_free_lcn = *new_lcn + *new_len;
81799 +out:
81800 +       up_write(&wnd->rw_lock);
81801 +       return err;
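81802 +}
81803 +
+/*
+ * Example (illustrative only; ALLOCATE_DEF is assumed to be the
+ * plain, non-MFT option): allocating 16 clusters of ordinary data,
+ * with lcn == 0 selecting the cached 'next_free_lcn' hint.
+ *
+ *     CLST lcn, len;
+ *     int err = ntfs_look_for_free_space(sbi, 0, 16, &lcn, &len,
+ *                                        ALLOCATE_DEF);
+ *
+ * On success, [lcn, lcn + len) is marked used in the bitmap and the
+ * 'next_free_lcn' hint is advanced past it.
+ */
+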
81804 +/*
81805 + * ntfs_extend_mft
81806 + *
81807 + * Allocates additional MFT records.
81808 + * sbi->mft.bitmap is locked for write
81809 + *
81810 + * NOTE: recursive:
81811 + *     ntfs_look_free_mft ->
81812 + *     ntfs_extend_mft ->
81813 + *     attr_set_size ->
81814 + *     ni_insert_nonresident ->
81815 + *     ni_insert_attr ->
81816 + *     ni_ins_attr_ext ->
81817 + *     ntfs_look_free_mft ->
81818 + *     ntfs_extend_mft
81819 + * To avoid recursion, always allocate space for two new MFT records
81820 + * (see attrib.c: "at least two mft to avoid recursive loop").
81821 + */
81822 +static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
81824 +       int err;
81825 +       struct ntfs_inode *ni = sbi->mft.ni;
81826 +       size_t new_mft_total;
81827 +       u64 new_mft_bytes, new_bitmap_bytes;
81828 +       struct ATTRIB *attr;
81829 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
81831 +       new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
81832 +       new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
81834 +       /* Step 1: Resize $MFT::DATA */
81835 +       down_write(&ni->file.run_lock);
81836 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
81837 +                           new_mft_bytes, NULL, false, &attr);
81839 +       if (err) {
81840 +               up_write(&ni->file.run_lock);
81841 +               goto out;
81842 +       }
81844 +       attr->nres.valid_size = attr->nres.data_size;
81845 +       new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
81846 +       ni->mi.dirty = true;
81848 +       /* Step 2: Resize $MFT::BITMAP */
81849 +       new_bitmap_bytes = bitmap_size(new_mft_total);
81851 +       err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
81852 +                           new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
81854 +       /* Refresh Mft Zone if necessary */
81855 +       down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
81857 +       ntfs_refresh_zone(sbi);
81859 +       up_write(&sbi->used.bitmap.rw_lock);
81860 +       up_write(&ni->file.run_lock);
81862 +       if (err)
81863 +               goto out;
81865 +       err = wnd_extend(wnd, new_mft_total);
81867 +       if (err)
81868 +               goto out;
81870 +       ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
81872 +       err = _ni_write_inode(&ni->vfs_inode, 0);
81873 +out:
81874 +       return err;
81875 +}
81876 +
81877 +/*
81878 + * ntfs_look_free_mft
81879 + *
81880 + * Looks for a free MFT record.
81881 + */
81882 +int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
81883 +                      struct ntfs_inode *ni, struct mft_inode **mi)
81885 +       int err = 0;
81886 +       size_t zbit, zlen, from, to, fr;
81887 +       size_t mft_total;
81888 +       struct MFT_REF ref;
81889 +       struct super_block *sb = sbi->sb;
81890 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
81891 +       u32 ir;
81893 +       static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
81894 +                     MFT_REC_FREE - MFT_REC_RESERVED);
81896 +       if (!mft)
81897 +               down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
81899 +       zlen = wnd_zone_len(wnd);
81901 +       /* Always reserve space for MFT */
81902 +       if (zlen) {
81903 +               if (mft) {
81904 +                       zbit = wnd_zone_bit(wnd);
81905 +                       *rno = zbit;
81906 +                       wnd_zone_set(wnd, zbit + 1, zlen - 1);
81907 +               }
81908 +               goto found;
81909 +       }
81911 +       /* No MFT zone. Find the free MFT record nearest to 0. */
81912 +       if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
81913 +               /* Resize MFT */
81914 +               mft_total = wnd->nbits;
81916 +               err = ntfs_extend_mft(sbi);
81917 +               if (!err) {
81918 +                       zbit = mft_total;
81919 +                       goto reserve_mft;
81920 +               }
81922 +               if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
81923 +                       goto out;
81925 +               err = 0;
81927 +               /*
81928 +                * Look for a free record in the reserved area [11-16) ==
81929 +                * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
81930 +                * marks this area as used.
81931 +                */
81932 +               if (!sbi->mft.reserved_bitmap) {
81933 +                       /* Once per session, build a bitmap of the 5 reserved records. */
81934 +                       sbi->mft.reserved_bitmap = 0xFF;
81936 +                       ref.high = 0;
81937 +                       for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
81938 +                               struct inode *i;
81939 +                               struct ntfs_inode *ni;
81940 +                               struct MFT_REC *mrec;
81942 +                               ref.low = cpu_to_le32(ir);
81943 +                               ref.seq = cpu_to_le16(ir);
81945 +                               i = ntfs_iget5(sb, &ref, NULL);
81946 +                               if (IS_ERR(i)) {
81947 +next:
81948 +                                       ntfs_notice(
81949 +                                               sb,
81950 +                                               "Invalid reserved record %x",
81951 +                                               le32_to_cpu(ref.low));
81952 +                                       continue;
81953 +                               }
81954 +                               if (is_bad_inode(i)) {
81955 +                                       iput(i);
81956 +                                       goto next;
81957 +                               }
81959 +                               ni = ntfs_i(i);
81961 +                               mrec = ni->mi.mrec;
81963 +                               if (!is_rec_base(mrec))
81964 +                                       goto next;
81966 +                               if (mrec->hard_links)
81967 +                                       goto next;
81969 +                               if (!ni_std(ni))
81970 +                                       goto next;
81972 +                               if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
81973 +                                                NULL, 0, NULL, NULL))
81974 +                                       goto next;
81976 +                               __clear_bit(ir - MFT_REC_RESERVED,
81977 +                                           &sbi->mft.reserved_bitmap);
81978 +                       }
81979 +               }
81981 +               /* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
81982 +               zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
81983 +                                         MFT_REC_FREE, MFT_REC_RESERVED);
81984 +               if (zbit >= MFT_REC_FREE) {
81985 +                       sbi->mft.next_reserved = MFT_REC_FREE;
81986 +                       goto out;
81987 +               }
81989 +               zlen = 1;
81990 +               sbi->mft.next_reserved = zbit;
81991 +       } else {
81992 +reserve_mft:
81993 +               zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
81994 +               if (zbit + zlen > wnd->nbits)
81995 +                       zlen = wnd->nbits - zbit;
81997 +               while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
81998 +                       zlen -= 1;
82000 +               /* [zbit, zbit + zlen) will be used for the MFT itself. */
82001 +               from = sbi->mft.used;
82002 +               if (from < zbit)
82003 +                       from = zbit;
82004 +               to = zbit + zlen;
82005 +               if (from < to) {
82006 +                       ntfs_clear_mft_tail(sbi, from, to);
82007 +                       sbi->mft.used = to;
82008 +               }
82009 +       }
82011 +       if (mft) {
82012 +               *rno = zbit;
82013 +               zbit += 1;
82014 +               zlen -= 1;
82015 +       }
82017 +       wnd_zone_set(wnd, zbit, zlen);
82019 +found:
82020 +       if (!mft) {
82021 +               /* The request is for a general-purpose record. */
82022 +               if (sbi->mft.next_free < MFT_REC_USER)
82023 +                       sbi->mft.next_free = MFT_REC_USER;
82025 +               for (;;) {
82026 +                       if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
82027 +                       } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
82028 +                               sbi->mft.next_free = sbi->mft.bitmap.nbits;
82029 +                       } else {
82030 +                               *rno = fr;
82031 +                               sbi->mft.next_free = *rno + 1;
82032 +                               break;
82033 +                       }
82035 +                       err = ntfs_extend_mft(sbi);
82036 +                       if (err)
82037 +                               goto out;
82038 +               }
82039 +       }
82041 +       if (ni && !ni_add_subrecord(ni, *rno, mi)) {
82042 +               err = -ENOMEM;
82043 +               goto out;
82044 +       }
82046 +       /* We have found a record that is not reserved for the next MFT. */
82047 +       if (*rno >= MFT_REC_FREE)
82048 +               wnd_set_used(wnd, *rno, 1);
82049 +       else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
82050 +               __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
82052 +out:
82053 +       if (!mft)
82054 +               up_write(&wnd->rw_lock);
82056 +       return err;
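82057 +}
82058 +
+/*
+ * Example (illustrative only): reserving one general-purpose record.
+ *
+ *     CLST rno;
+ *     struct mft_inode *mi;
+ *     int err = ntfs_look_free_mft(sbi, &rno, false, ni, &mi);
+ *
+ * With mft == false the bitmap lock is taken internally and the
+ * returned record is never below MFT_REC_USER; undo with
+ * ntfs_mark_rec_free() on the error path.
+ */
+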
82059 +/*
82060 + * ntfs_mark_rec_free
82061 + *
82062 + * Marks the record as free.
82063 + */
82064 +void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
82066 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
82068 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
82069 +       if (rno >= wnd->nbits)
82070 +               goto out;
82072 +       if (rno >= MFT_REC_FREE) {
82073 +               if (!wnd_is_used(wnd, rno, 1))
82074 +                       ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
82075 +               else
82076 +                       wnd_set_free(wnd, rno, 1);
82077 +       } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
82078 +               __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
82079 +       }
82081 +       if (rno < wnd_zone_bit(wnd))
82082 +               wnd_zone_set(wnd, rno, 1);
82083 +       else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
82084 +               sbi->mft.next_free = rno;
82086 +out:
82087 +       up_write(&wnd->rw_lock);
82088 +}
82089 +
82090 +/*
82091 + * ntfs_clear_mft_tail
82092 + *
82093 + * Formats the empty records in [from, to).
82094 + * sbi->mft.bitmap is locked for write.
82095 + */
82096 +int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
82098 +       int err;
82099 +       u32 rs;
82100 +       u64 vbo;
82101 +       struct runs_tree *run;
82102 +       struct ntfs_inode *ni;
82104 +       if (from >= to)
82105 +               return 0;
82107 +       rs = sbi->record_size;
82108 +       ni = sbi->mft.ni;
82109 +       run = &ni->file.run;
82111 +       down_read(&ni->file.run_lock);
82112 +       vbo = (u64)from * rs;
82113 +       for (; from < to; from++, vbo += rs) {
82114 +               struct ntfs_buffers nb;
82116 +               err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
82117 +               if (err)
82118 +                       goto out;
82120 +               err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
82121 +               nb_put(&nb);
82122 +               if (err)
82123 +                       goto out;
82124 +       }
82126 +out:
82127 +       sbi->mft.used = from;
82128 +       up_read(&ni->file.run_lock);
82129 +       return err;
82130 +}
82131 +
82132 +/*
82133 + * ntfs_refresh_zone
82134 + *
82135 + * Refreshes the MFT zone.
82136 + * sbi->used.bitmap is locked for rw
82137 + * sbi->mft.bitmap is locked for write
82138 + * sbi->mft.ni->file.run_lock for write
82139 + */
82140 +int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
82142 +       CLST zone_limit, zone_max, lcn, vcn, len;
82143 +       size_t lcn_s, zlen;
82144 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
82145 +       struct ntfs_inode *ni = sbi->mft.ni;
82147 +       /* Do not change anything if the MFT zone is already non-empty. */
82148 +       if (wnd_zone_len(wnd))
82149 +               return 0;
82151 +       /*
82152 +        * Compute the MFT zone in two steps.
82153 +        * Ideally we allocate 1/8 of the total clusters for the MFT,
82154 +        * but not more than 512 MB.
82155 +        */
82156 +       zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
82157 +       zone_max = wnd->nbits >> 3;
82158 +       if (zone_max > zone_limit)
82159 +               zone_max = zone_limit;
82161 +       vcn = bytes_to_cluster(sbi,
82162 +                              (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
82164 +       if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
82165 +               lcn = SPARSE_LCN;
82167 +       /* We should always find the last LCN of the MFT. */
82168 +       if (lcn == SPARSE_LCN)
82169 +               return -EINVAL;
82171 +       lcn_s = lcn + 1;
82173 +       /* Try to allocate clusters after last MFT run */
82174 +       zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
82175 +       if (!zlen) {
82176 +               ntfs_notice(sbi->sb, "MftZone: unavailable");
82177 +               return 0;
82178 +       }
82180 +       /* Truncate too large zone */
82181 +       wnd_zone_set(wnd, lcn_s, zlen);
82183 +       return 0;
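82184 +}
82185 +
+/*
+ * Worked example of the sizing above: with 4K clusters on a 1 TiB
+ * volume, wnd->nbits == 0x10000000, so nbits >> 3 would be 128 GiB;
+ * the cap wins and the zone is at most (512 MB >> 12) == 0x20000
+ * clusters == 512 MB.
+ */
+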
82186 +/*
82187 + * ntfs_update_mftmirr
82188 + *
82189 + * Updates the $MFTMirr copy of the MFT.
82190 + */
82191 +int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
82193 +       int err;
82194 +       struct super_block *sb = sbi->sb;
82195 +       u32 blocksize = sb->s_blocksize;
82196 +       sector_t block1, block2;
82197 +       u32 bytes;
82199 +       if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
82200 +               return 0;
82202 +       err = 0;
82203 +       bytes = sbi->mft.recs_mirr << sbi->record_bits;
82204 +       block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
82205 +       block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
82207 +       for (; bytes >= blocksize; bytes -= blocksize) {
82208 +               struct buffer_head *bh1, *bh2;
82210 +               bh1 = sb_bread(sb, block1++);
82211 +               if (!bh1) {
82212 +                       err = -EIO;
82213 +                       goto out;
82214 +               }
82216 +               bh2 = sb_getblk(sb, block2++);
82217 +               if (!bh2) {
82218 +                       put_bh(bh1);
82219 +                       err = -EIO;
82220 +                       goto out;
82221 +               }
82223 +               if (buffer_locked(bh2))
82224 +                       __wait_on_buffer(bh2);
82226 +               lock_buffer(bh2);
82227 +               memcpy(bh2->b_data, bh1->b_data, blocksize);
82228 +               set_buffer_uptodate(bh2);
82229 +               mark_buffer_dirty(bh2);
82230 +               unlock_buffer(bh2);
82232 +               put_bh(bh1);
82233 +               bh1 = NULL;
82235 +               if (wait)
82236 +                       err = sync_dirty_buffer(bh2);
82238 +               put_bh(bh2);
82239 +               if (err)
82240 +                       goto out;
82241 +       }
82243 +       sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
82245 +out:
82246 +       return err;
82247 +}
82248 +
82249 +/*
82250 + * ntfs_set_state
82251 + *
82252 + * mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
82253 + * umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
82254 + * ntfs error: ntfs_set_state(NTFS_DIRTY_ERROR)
82255 + */
82256 +int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
82258 +       int err;
82259 +       struct ATTRIB *attr;
82260 +       struct VOLUME_INFO *info;
82261 +       struct mft_inode *mi;
82262 +       struct ntfs_inode *ni;
82264 +       /*
82265 +        * Do not change the state if the fs was real_dirty.
82266 +        * Do not change the state if the fs is already dirty (or clear).
82267 +        * Do not change anything if mounted read-only.
82268 +        */
82269 +       if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
82270 +               return 0;
82272 +       /* Check cached value */
82273 +       if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
82274 +           (sbi->volume.flags & VOLUME_FLAG_DIRTY))
82275 +               return 0;
82277 +       ni = sbi->volume.ni;
82278 +       if (!ni)
82279 +               return -EINVAL;
82281 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
82283 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
82284 +       if (!attr) {
82285 +               err = -EINVAL;
82286 +               goto out;
82287 +       }
82289 +       info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
82290 +       if (!info) {
82291 +               err = -EINVAL;
82292 +               goto out;
82293 +       }
82295 +       switch (dirty) {
82296 +       case NTFS_DIRTY_ERROR:
82297 +               ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
82298 +               sbi->volume.real_dirty = true;
82299 +               fallthrough;
82300 +       case NTFS_DIRTY_DIRTY:
82301 +               info->flags |= VOLUME_FLAG_DIRTY;
82302 +               break;
82303 +       case NTFS_DIRTY_CLEAR:
82304 +               info->flags &= ~VOLUME_FLAG_DIRTY;
82305 +               break;
82306 +       }
82307 +       /* Cache the current volume flags. */
82308 +       sbi->volume.flags = info->flags;
82309 +       mi->dirty = true;
82310 +       err = 0;
82312 +out:
82313 +       ni_unlock(ni);
82314 +       if (err)
82315 +               return err;
82317 +       mark_inode_dirty(&ni->vfs_inode);
82318 +       /*verify(!ntfs_update_mftmirr()); */
82319 +       err = sync_inode_metadata(&ni->vfs_inode, 1);
82321 +       return err;
82322 +}
82323 +
82324 +/*
82325 + * security_hash
82326 + *
82327 + * Calculates a hash of the security descriptor.
82328 + */
82329 +static inline __le32 security_hash(const void *sd, size_t bytes)
82331 +       u32 hash = 0;
82332 +       const __le32 *ptr = sd;
82334 +       bytes >>= 2;
82335 +       while (bytes--)
82336 +               hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
82337 +       return cpu_to_le32(hash);
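82338 +}
82339 +
+/*
+ * Note: the expression above is a rotate-left by 3 of the running
+ * hash, i.e. an equivalent form using the kernel helper would be
+ *
+ *     hash = rol32(hash, 3) + le32_to_cpu(*ptr++);
+ *
+ * Only whole 32-bit words are hashed ('bytes >>= 2'); any trailing
+ * 1-3 bytes would be ignored.
+ */
+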
82340 +int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
82342 +       struct block_device *bdev = sb->s_bdev;
82343 +       u32 blocksize = sb->s_blocksize;
82344 +       u64 block = lbo >> sb->s_blocksize_bits;
82345 +       u32 off = lbo & (blocksize - 1);
82346 +       u32 op = blocksize - off;
82348 +       for (; bytes; block += 1, off = 0, op = blocksize) {
82349 +               struct buffer_head *bh = __bread(bdev, block, blocksize);
82351 +               if (!bh)
82352 +                       return -EIO;
82354 +               if (op > bytes)
82355 +                       op = bytes;
82357 +               memcpy(buffer, bh->b_data + off, op);
82359 +               put_bh(bh);
82361 +               bytes -= op;
82362 +               buffer = Add2Ptr(buffer, op);
82363 +       }
82365 +       return 0;
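82366 +}
82367 +
+/*
+ * Example (illustrative only): reading 512 bytes from an unaligned
+ * byte offset; the loop above splits the request across blocks.
+ *
+ *     u8 buf[512];
+ *     int err = ntfs_sb_read(sb, 0x1FF, sizeof(buf), buf);
+ */
+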
82368 +int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
82369 +                 const void *buf, int wait)
82371 +       u32 blocksize = sb->s_blocksize;
82372 +       struct block_device *bdev = sb->s_bdev;
82373 +       sector_t block = lbo >> sb->s_blocksize_bits;
82374 +       u32 off = lbo & (blocksize - 1);
82375 +       u32 op = blocksize - off;
82376 +       struct buffer_head *bh;
82378 +       if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
82379 +               wait = 1;
82381 +       for (; bytes; block += 1, off = 0, op = blocksize) {
82382 +               if (op > bytes)
82383 +                       op = bytes;
82385 +               if (op < blocksize) {
82386 +                       bh = __bread(bdev, block, blocksize);
82387 +                       if (!bh) {
82388 +                               ntfs_err(sb, "failed to read block %llx",
82389 +                                        (u64)block);
82390 +                               return -EIO;
82391 +                       }
82392 +               } else {
82393 +                       bh = __getblk(bdev, block, blocksize);
82394 +                       if (!bh)
82395 +                               return -ENOMEM;
82396 +               }
82398 +               if (buffer_locked(bh))
82399 +                       __wait_on_buffer(bh);
82401 +               lock_buffer(bh);
82402 +               if (buf) {
82403 +                       memcpy(bh->b_data + off, buf, op);
82404 +                       buf = Add2Ptr(buf, op);
82405 +               } else {
82406 +                       memset(bh->b_data + off, -1, op);
82407 +               }
82409 +               set_buffer_uptodate(bh);
82410 +               mark_buffer_dirty(bh);
82411 +               unlock_buffer(bh);
82413 +               if (wait) {
82414 +                       int err = sync_dirty_buffer(bh);
82416 +                       if (err) {
82417 +                               ntfs_err(
82418 +                                       sb,
82419 +                                       "failed to sync buffer at block %llx, error %d",
82420 +                                       (u64)block, err);
82421 +                               put_bh(bh);
82422 +                               return err;
82423 +                       }
82424 +               }
82426 +               put_bh(bh);
82428 +               bytes -= op;
82429 +       }
82430 +       return 0;
82433 +int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
82434 +                     u64 vbo, const void *buf, size_t bytes)
82436 +       struct super_block *sb = sbi->sb;
82437 +       u8 cluster_bits = sbi->cluster_bits;
82438 +       u32 off = vbo & sbi->cluster_mask;
82439 +       CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
82440 +       u64 lbo, len;
82441 +       size_t idx;
82443 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
82444 +               return -ENOENT;
82446 +       if (lcn == SPARSE_LCN)
82447 +               return -EINVAL;
82449 +       lbo = ((u64)lcn << cluster_bits) + off;
82450 +       len = ((u64)clen << cluster_bits) - off;
82452 +       for (;;) {
82453 +               u32 op = len < bytes ? len : bytes;
82454 +               int err = ntfs_sb_write(sb, lbo, op, buf, 0);
82456 +               if (err)
82457 +                       return err;
82459 +               bytes -= op;
82460 +               if (!bytes)
82461 +                       break;
82463 +               vcn_next = vcn + clen;
82464 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
82465 +                   vcn != vcn_next)
82466 +                       return -ENOENT;
82468 +               if (lcn == SPARSE_LCN)
82469 +                       return -EINVAL;
82471 +               if (buf)
82472 +                       buf = Add2Ptr(buf, op);
82474 +               lbo = ((u64)lcn << cluster_bits);
82475 +               len = ((u64)clen << cluster_bits);
82476 +       }
82478 +       return 0;
82481 +struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
82482 +                                  const struct runs_tree *run, u64 vbo)
82484 +       struct super_block *sb = sbi->sb;
82485 +       u8 cluster_bits = sbi->cluster_bits;
82486 +       CLST lcn;
82487 +       u64 lbo;
82489 +       if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
82490 +               return ERR_PTR(-ENOENT);
82492 +       lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
82494 +       return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
82497 +int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
82498 +                    u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
82500 +       int err;
82501 +       struct super_block *sb = sbi->sb;
82502 +       u32 blocksize = sb->s_blocksize;
82503 +       u8 cluster_bits = sbi->cluster_bits;
82504 +       u32 off = vbo & sbi->cluster_mask;
82505 +       u32 nbh = 0;
82506 +       CLST vcn_next, vcn = vbo >> cluster_bits;
82507 +       CLST lcn, clen;
82508 +       u64 lbo, len;
82509 +       size_t idx;
82510 +       struct buffer_head *bh;
82512 +       if (!run) {
82513 +               /* The first reads of $Volume, $MFTMirr and $LogFile go here. */
82514 +               if (vbo > MFT_REC_VOL * sbi->record_size) {
82515 +                       err = -ENOENT;
82516 +                       goto out;
82517 +               }
82519 +               /* Use the boot sector's absolute 'MFTCluster' to read the record. */
82520 +               lbo = vbo + sbi->mft.lbo;
82521 +               len = sbi->record_size;
82522 +       } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
82523 +               err = -ENOENT;
82524 +               goto out;
82525 +       } else {
82526 +               if (lcn == SPARSE_LCN) {
82527 +                       err = -EINVAL;
82528 +                       goto out;
82529 +               }
82531 +               lbo = ((u64)lcn << cluster_bits) + off;
82532 +               len = ((u64)clen << cluster_bits) - off;
82533 +       }
82535 +       off = lbo & (blocksize - 1);
82536 +       if (nb) {
82537 +               nb->off = off;
82538 +               nb->bytes = bytes;
82539 +       }
82541 +       for (;;) {
82542 +               u32 len32 = len >= bytes ? bytes : len;
82543 +               sector_t block = lbo >> sb->s_blocksize_bits;
82545 +               do {
82546 +                       u32 op = blocksize - off;
82548 +                       if (op > len32)
82549 +                               op = len32;
82551 +                       bh = ntfs_bread(sb, block);
82552 +                       if (!bh) {
82553 +                               err = -EIO;
82554 +                               goto out;
82555 +                       }
82557 +                       if (buf) {
82558 +                               memcpy(buf, bh->b_data + off, op);
82559 +                               buf = Add2Ptr(buf, op);
82560 +                       }
82562 +                       if (!nb) {
82563 +                               put_bh(bh);
82564 +                       } else if (nbh >= ARRAY_SIZE(nb->bh)) {
82565 +                               err = -EINVAL;
82566 +                               goto out;
82567 +                       } else {
82568 +                               nb->bh[nbh++] = bh;
82569 +                               nb->nbufs = nbh;
82570 +                       }
82572 +                       bytes -= op;
82573 +                       if (!bytes)
82574 +                               return 0;
82575 +                       len32 -= op;
82576 +                       block += 1;
82577 +                       off = 0;
82579 +               } while (len32);
82581 +               vcn_next = vcn + clen;
82582 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
82583 +                   vcn != vcn_next) {
82584 +                       err = -ENOENT;
82585 +                       goto out;
82586 +               }
82588 +               if (lcn == SPARSE_LCN) {
82589 +                       err = -EINVAL;
82590 +                       goto out;
82591 +               }
82593 +               lbo = ((u64)lcn << cluster_bits);
82594 +               len = ((u64)clen << cluster_bits);
82595 +       }
82597 +out:
82598 +       if (!nbh)
82599 +               return err;
82601 +       while (nbh) {
82602 +               put_bh(nb->bh[--nbh]);
82603 +               nb->bh[nbh] = NULL;
82604 +       }
82606 +       nb->nbufs = 0;
82607 +       return err;
82610 +/* Returns < 0 on error, 0 if OK, -E_NTFS_FIXUP if the fixups need updating. */
82611 +int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
82612 +                struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
82613 +                struct ntfs_buffers *nb)
82615 +       int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
82617 +       if (err)
82618 +               return err;
82619 +       return ntfs_fix_post_read(rhdr, nb->bytes, true);
82622 +int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
82623 +               u32 bytes, struct ntfs_buffers *nb)
82625 +       int err = 0;
82626 +       struct super_block *sb = sbi->sb;
82627 +       u32 blocksize = sb->s_blocksize;
82628 +       u8 cluster_bits = sbi->cluster_bits;
82629 +       CLST vcn_next, vcn = vbo >> cluster_bits;
82630 +       u32 off;
82631 +       u32 nbh = 0;
82632 +       CLST lcn, clen;
82633 +       u64 lbo, len;
82634 +       size_t idx;
82636 +       nb->bytes = bytes;
82638 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
82639 +               err = -ENOENT;
82640 +               goto out;
82641 +       }
82643 +       off = vbo & sbi->cluster_mask;
82644 +       lbo = ((u64)lcn << cluster_bits) + off;
82645 +       len = ((u64)clen << cluster_bits) - off;
82647 +       nb->off = off = lbo & (blocksize - 1);
82649 +       for (;;) {
82650 +               u32 len32 = len < bytes ? len : bytes;
82651 +               sector_t block = lbo >> sb->s_blocksize_bits;
82653 +               do {
82654 +                       u32 op;
82655 +                       struct buffer_head *bh;
82657 +                       if (nbh >= ARRAY_SIZE(nb->bh)) {
82658 +                               err = -EINVAL;
82659 +                               goto out;
82660 +                       }
82662 +                       op = blocksize - off;
82663 +                       if (op > len32)
82664 +                               op = len32;
82666 +                       if (op == blocksize) {
82667 +                               bh = sb_getblk(sb, block);
82668 +                               if (!bh) {
82669 +                                       err = -ENOMEM;
82670 +                                       goto out;
82671 +                               }
82672 +                               if (buffer_locked(bh))
82673 +                                       __wait_on_buffer(bh);
82674 +                               set_buffer_uptodate(bh);
82675 +                       } else {
82676 +                               bh = ntfs_bread(sb, block);
82677 +                               if (!bh) {
82678 +                                       err = -EIO;
82679 +                                       goto out;
82680 +                               }
82681 +                       }
82683 +                       nb->bh[nbh++] = bh;
82684 +                       bytes -= op;
82685 +                       if (!bytes) {
82686 +                               nb->nbufs = nbh;
82687 +                               return 0;
82688 +                       }
82690 +                       block += 1;
82691 +                       len32 -= op;
82692 +                       off = 0;
82693 +               } while (len32);
82695 +               vcn_next = vcn + clen;
82696 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
82697 +                   vcn != vcn_next) {
82698 +                       err = -ENOENT;
82699 +                       goto out;
82700 +               }
82702 +               lbo = ((u64)lcn << cluster_bits);
82703 +               len = ((u64)clen << cluster_bits);
82704 +       }
82706 +out:
82707 +       while (nbh) {
82708 +               put_bh(nb->bh[--nbh]);
82709 +               nb->bh[nbh] = NULL;
82710 +       }
82712 +       nb->nbufs = 0;
82714 +       return err;
82717 +int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
82718 +                 struct ntfs_buffers *nb, int sync)
82720 +       int err = 0;
82721 +       struct super_block *sb = sbi->sb;
82722 +       u32 block_size = sb->s_blocksize;
82723 +       u32 bytes = nb->bytes;
82724 +       u32 off = nb->off;
82725 +       u16 fo = le16_to_cpu(rhdr->fix_off);
82726 +       u16 fn = le16_to_cpu(rhdr->fix_num);
82727 +       u32 idx;
82728 +       __le16 *fixup;
82729 +       __le16 sample;
82731 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
82732 +           fn * SECTOR_SIZE > bytes) {
82733 +               return -EINVAL;
82734 +       }
82736 +       for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
82737 +               u32 op = block_size - off;
82738 +               char *bh_data;
82739 +               struct buffer_head *bh = nb->bh[idx];
82740 +               __le16 *ptr, *end_data;
82742 +               if (op > bytes)
82743 +                       op = bytes;
82745 +               if (buffer_locked(bh))
82746 +                       __wait_on_buffer(bh);
82748 +               lock_buffer(nb->bh[idx]);
82750 +               bh_data = bh->b_data + off;
82751 +               end_data = Add2Ptr(bh_data, op);
82752 +               memcpy(bh_data, rhdr, op);
82754 +               if (!idx) {
82755 +                       u16 t16;
82757 +                       fixup = Add2Ptr(bh_data, fo);
82758 +                       sample = *fixup;
82759 +                       t16 = le16_to_cpu(sample);
82760 +                       if (t16 >= 0x7FFF) {
82761 +                               sample = *fixup = cpu_to_le16(1);
82762 +                       } else {
82763 +                               sample = cpu_to_le16(t16 + 1);
82764 +                               *fixup = sample;
82765 +                       }
82767 +                       *(__le16 *)Add2Ptr(rhdr, fo) = sample;
82768 +               }
82770 +               ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
82772 +               do {
82773 +                       *++fixup = *ptr;
82774 +                       *ptr = sample;
82775 +                       ptr += SECTOR_SIZE / sizeof(short);
82776 +               } while (ptr < end_data);
82778 +               set_buffer_uptodate(bh);
82779 +               mark_buffer_dirty(bh);
82780 +               unlock_buffer(bh);
82782 +               if (sync) {
82783 +                       int err2 = sync_dirty_buffer(bh);
82785 +                       if (!err && err2)
82786 +                               err = err2;
82787 +               }
82789 +               bytes -= op;
82790 +               rhdr = Add2Ptr(rhdr, op);
82791 +       }
82793 +       return err;
82796 +static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
82798 +       struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
82800 +       if (!bio && (current->flags & PF_MEMALLOC)) {
82801 +               while (!bio && (nr_vecs /= 2))
82802 +                       bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
82803 +       }
82804 +       return bio;
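82805 +}
82806 +
+/*
+ * Design note: bio_alloc(GFP_NOFS | __GFP_HIGH, ...) may fail under
+ * memory pressure. In a PF_MEMALLOC (reclaim) context the helper
+ * retries with progressively smaller vector counts instead of
+ * failing outright: a small bio still makes forward progress.
+ */
+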
82807 +/* Read/write pages from/to disk. */
82808 +int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
82809 +                  struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
82810 +                  u32 op)
82812 +       int err = 0;
82813 +       struct bio *new, *bio = NULL;
82814 +       struct super_block *sb = sbi->sb;
82815 +       struct block_device *bdev = sb->s_bdev;
82816 +       struct page *page;
82817 +       u8 cluster_bits = sbi->cluster_bits;
82818 +       CLST lcn, clen, vcn, vcn_next;
82819 +       u32 add, off, page_idx;
82820 +       u64 lbo, len;
82821 +       size_t run_idx;
82822 +       struct blk_plug plug;
82824 +       if (!bytes)
82825 +               return 0;
82827 +       blk_start_plug(&plug);
82829 +       /* Align vbo and bytes to 512-byte boundaries. */
82830 +       lbo = (vbo + bytes + 511) & ~511ull;
82831 +       vbo = vbo & ~511ull;
82832 +       bytes = lbo - vbo;
82834 +       vcn = vbo >> cluster_bits;
82835 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
82836 +               err = -ENOENT;
82837 +               goto out;
82838 +       }
82839 +       off = vbo & sbi->cluster_mask;
82840 +       page_idx = 0;
82841 +       page = pages[0];
82843 +       for (;;) {
82844 +               lbo = ((u64)lcn << cluster_bits) + off;
82845 +               len = ((u64)clen << cluster_bits) - off;
82846 +new_bio:
82847 +               new = ntfs_alloc_bio(nr_pages - page_idx);
82848 +               if (!new) {
82849 +                       err = -ENOMEM;
82850 +                       goto out;
82851 +               }
82852 +               if (bio) {
82853 +                       bio_chain(bio, new);
82854 +                       submit_bio(bio);
82855 +               }
82856 +               bio = new;
82857 +               bio_set_dev(bio, bdev);
82858 +               bio->bi_iter.bi_sector = lbo >> 9;
82859 +               bio->bi_opf = op;
82861 +               while (len) {
82862 +                       off = vbo & (PAGE_SIZE - 1);
82863 +                       add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
82865 +                       if (bio_add_page(bio, page, add, off) < add)
82866 +                               goto new_bio;
82868 +                       if (bytes <= add)
82869 +                               goto out;
82870 +                       bytes -= add;
82871 +                       vbo += add;
82873 +                       if (add + off == PAGE_SIZE) {
82874 +                               page_idx += 1;
82875 +                               if (WARN_ON(page_idx >= nr_pages)) {
82876 +                                       err = -EINVAL;
82877 +                                       goto out;
82878 +                               }
82879 +                               page = pages[page_idx];
82880 +                       }
82882 +                       if (len <= add)
82883 +                               break;
82884 +                       len -= add;
82885 +                       lbo += add;
82886 +               }
82888 +               vcn_next = vcn + clen;
82889 +               if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
82890 +                   vcn != vcn_next) {
82891 +                       err = -ENOENT;
82892 +                       goto out;
82893 +               }
82894 +               off = 0;
82895 +       }
82896 +out:
82897 +       if (bio) {
82898 +               if (!err)
82899 +                       err = submit_bio_wait(bio);
82900 +               bio_put(bio);
82901 +       }
82902 +       blk_finish_plug(&plug);
82904 +       return err;
82905 +}
82906 +
82907 +/*
82908 + * Helper for ntfs_loadlog_and_replay:
82909 + * fills the on-disk $LogFile range with -1,
82910 + * which marks the log as empty.
82911 + */
82912 +int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
82914 +       int err = 0;
82915 +       struct super_block *sb = sbi->sb;
82916 +       struct block_device *bdev = sb->s_bdev;
82917 +       u8 cluster_bits = sbi->cluster_bits;
82918 +       struct bio *new, *bio = NULL;
82919 +       CLST lcn, clen;
82920 +       u64 lbo, len;
82921 +       size_t run_idx;
82922 +       struct page *fill;
82923 +       void *kaddr;
82924 +       struct blk_plug plug;
82926 +       fill = alloc_page(GFP_KERNEL);
82927 +       if (!fill)
82928 +               return -ENOMEM;
82930 +       kaddr = kmap_atomic(fill);
82931 +       memset(kaddr, -1, PAGE_SIZE);
82932 +       kunmap_atomic(kaddr);
82933 +       flush_dcache_page(fill);
82934 +       lock_page(fill);
82936 +       if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
82937 +               err = -ENOENT;
82938 +               goto out;
82939 +       }
82941 +       /*
82942 +        * TODO: try blkdev_issue_write_same
82943 +        */
82944 +       blk_start_plug(&plug);
82945 +       do {
82946 +               lbo = (u64)lcn << cluster_bits;
82947 +               len = (u64)clen << cluster_bits;
82948 +new_bio:
82949 +               new = ntfs_alloc_bio(BIO_MAX_VECS);
82950 +               if (!new) {
82951 +                       err = -ENOMEM;
82952 +                       break;
82953 +               }
82954 +               if (bio) {
82955 +                       bio_chain(bio, new);
82956 +                       submit_bio(bio);
82957 +               }
82958 +               bio = new;
82959 +               bio_set_dev(bio, bdev);
82960 +               bio->bi_opf = REQ_OP_WRITE;
82961 +               bio->bi_iter.bi_sector = lbo >> 9;
82963 +               for (;;) {
82964 +                       u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
82966 +                       if (bio_add_page(bio, fill, add, 0) < add)
82967 +                               goto new_bio;
82969 +                       lbo += add;
82970 +                       if (len <= add)
82971 +                               break;
82972 +                       len -= add;
82973 +               }
82974 +       } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
82976 +       if (bio) {
82977 +               if (!err)
82978 +                       err = submit_bio_wait(bio);
82979 +               bio_put(bio);
82980 +       }
82981 +       blk_finish_plug(&plug);
82982 +out:
82983 +       unlock_page(fill);
82984 +       put_page(fill);
82986 +       return err;
82989 +int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
82990 +                   u64 vbo, u64 *lbo, u64 *bytes)
82992 +       u32 off;
82993 +       CLST lcn, len;
82994 +       u8 cluster_bits = sbi->cluster_bits;
82996 +       if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
82997 +               return -ENOENT;
82999 +       off = vbo & sbi->cluster_mask;
83000 +       *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
83001 +       *bytes = ((u64)len << cluster_bits) - off;
83003 +       return 0;
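83004 +}
83005 +
+/*
+ * Worked example (illustrative): with 4K clusters (cluster_bits == 12),
+ * vbo == 0x5123 lies in VCN 5 at offset 0x123. If VCN 5 maps to
+ * LCN 0x80 with a 3-cluster run, then
+ *
+ *     *lbo   == (0x80 << 12) + 0x123 == 0x80123
+ *     *bytes == (3 << 12) - 0x123 == 0x2edd
+ */
+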
83006 +struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
83008 +       int err = 0;
83009 +       struct super_block *sb = sbi->sb;
83010 +       struct inode *inode = new_inode(sb);
83011 +       struct ntfs_inode *ni;
83013 +       if (!inode)
83014 +               return ERR_PTR(-ENOMEM);
83016 +       ni = ntfs_i(inode);
83018 +       err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
83019 +                           false);
83020 +       if (err)
83021 +               goto out;
83023 +       inode->i_ino = rno;
83024 +       if (insert_inode_locked(inode) < 0) {
83025 +               err = -EIO;
83026 +               goto out;
83027 +       }
83029 +out:
83030 +       if (err) {
83031 +               iput(inode);
83032 +               ni = ERR_PTR(err);
83033 +       }
83034 +       return ni;
83035 +}
83036 +
83037 +/*
83038 + * O:BAG:BAD:(A;OICI;FA;;;WD)
83039 + * Owner: S-1-5-32-544 (Administrators)
83040 + * Group: S-1-5-32-544 (Administrators)
83041 + * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
83042 + */
83043 +const u8 s_default_security[] __aligned(8) = {
83044 +       0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
83045 +       0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
83046 +       0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
83047 +       0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
83048 +       0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
83049 +       0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
83050 +       0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
83051 +};
83052 +
83053 +static_assert(sizeof(s_default_security) == 0x50);
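83054 +
+/*
+ * Decoded layout of the blob above (offsets in bytes):
+ *     0x00 Revision 1, Control 0x8004 (SE_SELF_RELATIVE | DACL present)
+ *     0x04 Owner at 0x30, Group at 0x40, Sacl 0 (none), Dacl at 0x14
+ *     0x14 ACL: revision 2, size 0x1C, one ACE
+ *     0x1C ACE: ACCESS_ALLOWED, flags OICI, mask 0x1F01FF, SID S-1-1-0
+ *     0x30 Owner SID S-1-5-32-544; 0x40 Group SID S-1-5-32-544
+ */
+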
83055 +static inline u32 sid_length(const struct SID *sid)
83057 +       return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
83058 +}
83059 +
83060 +/*
83061 + * Thanks to Mark Harmstone for the idea.
83062 + */
83063 +static bool is_acl_valid(const struct ACL *acl, u32 len)
83065 +       const struct ACE_HEADER *ace;
83066 +       u32 i;
83067 +       u16 ace_count, ace_size;
83069 +       if (acl->AclRevision != ACL_REVISION &&
83070 +           acl->AclRevision != ACL_REVISION_DS) {
83071 +               /*
83072 +                * This value should be ACL_REVISION, unless the ACL contains an
83073 +                * object-specific ACE, in which case this value must be ACL_REVISION_DS.
83074 +                * All ACEs in an ACL must be at the same revision level.
83075 +                */
83076 +               return false;
83077 +       }
83079 +       if (acl->Sbz1)
83080 +               return false;
83082 +       if (le16_to_cpu(acl->AclSize) > len)
83083 +               return false;
83085 +       if (acl->Sbz2)
83086 +               return false;
83088 +       len -= sizeof(struct ACL);
83089 +       ace = (struct ACE_HEADER *)&acl[1];
83090 +       ace_count = le16_to_cpu(acl->AceCount);
83092 +       for (i = 0; i < ace_count; i++) {
83093 +               if (len < sizeof(struct ACE_HEADER))
83094 +                       return false;
83096 +               ace_size = le16_to_cpu(ace->AceSize);
83097 +               if (len < ace_size)
83098 +                       return false;
83100 +               len -= ace_size;
83101 +               ace = Add2Ptr(ace, ace_size);
83102 +       }
83104 +       return true;
83107 +bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
83109 +       u32 sd_owner, sd_group, sd_sacl, sd_dacl;
83111 +       if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
83112 +               return false;
83114 +       if (sd->Revision != 1)
83115 +               return false;
83117 +       if (sd->Sbz1)
83118 +               return false;
83120 +       if (!(sd->Control & SE_SELF_RELATIVE))
83121 +               return false;
83123 +       sd_owner = le32_to_cpu(sd->Owner);
83124 +       if (sd_owner) {
83125 +               const struct SID *owner = Add2Ptr(sd, sd_owner);
83127 +               if (sd_owner + offsetof(struct SID, SubAuthority) > len)
83128 +                       return false;
83130 +               if (owner->Revision != 1)
83131 +                       return false;
83133 +               if (sd_owner + sid_length(owner) > len)
83134 +                       return false;
83135 +       }
83137 +       sd_group = le32_to_cpu(sd->Group);
83138 +       if (sd_group) {
83139 +               const struct SID *group = Add2Ptr(sd, sd_group);
83141 +               if (sd_group + offsetof(struct SID, SubAuthority) > len)
83142 +                       return false;
83144 +               if (group->Revision != 1)
83145 +                       return false;
83147 +               if (sd_group + sid_length(group) > len)
83148 +                       return false;
83149 +       }
83151 +       sd_sacl = le32_to_cpu(sd->Sacl);
83152 +       if (sd_sacl) {
83153 +               const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
83155 +               if (sd_sacl + sizeof(struct ACL) > len)
83156 +                       return false;
83158 +               if (!is_acl_valid(sacl, len - sd_sacl))
83159 +                       return false;
83160 +       }
83162 +       sd_dacl = le32_to_cpu(sd->Dacl);
83163 +       if (sd_dacl) {
83164 +               const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
83166 +               if (sd_dacl + sizeof(struct ACL) > len)
83167 +                       return false;
83169 +               if (!is_acl_valid(dacl, len - sd_dacl))
83170 +                       return false;
83171 +       }
83173 +       return true;
83177 + * ntfs_security_init
83178 + *
83179 + * Loads and parses $Secure.
83180 + */
83181 +int ntfs_security_init(struct ntfs_sb_info *sbi)
83183 +       int err;
83184 +       struct super_block *sb = sbi->sb;
83185 +       struct inode *inode;
83186 +       struct ntfs_inode *ni;
83187 +       struct MFT_REF ref;
83188 +       struct ATTRIB *attr;
83189 +       struct ATTR_LIST_ENTRY *le;
83190 +       u64 sds_size;
83191 +       size_t cnt, off;
83192 +       struct NTFS_DE *ne;
83193 +       struct NTFS_DE_SII *sii_e;
83194 +       struct ntfs_fnd *fnd_sii = NULL;
83195 +       const struct INDEX_ROOT *root_sii;
83196 +       const struct INDEX_ROOT *root_sdh;
83197 +       struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
83198 +       struct ntfs_index *indx_sii = &sbi->security.index_sii;
83200 +       ref.low = cpu_to_le32(MFT_REC_SECURE);
83201 +       ref.high = 0;
83202 +       ref.seq = cpu_to_le16(MFT_REC_SECURE);
83204 +       inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
83205 +       if (IS_ERR(inode)) {
83206 +               err = PTR_ERR(inode);
83207 +               ntfs_err(sb, "Failed to load $Secure.");
83208 +               inode = NULL;
83209 +               goto out;
83210 +       }
83212 +       ni = ntfs_i(inode);
83214 +       le = NULL;
83216 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
83217 +                           ARRAY_SIZE(SDH_NAME), NULL, NULL);
83218 +       if (!attr) {
83219 +               err = -EINVAL;
83220 +               goto out;
83221 +       }
83223 +       root_sdh = resident_data(attr);
83224 +       if (root_sdh->type != ATTR_ZERO ||
83225 +           root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
83226 +               err = -EINVAL;
83227 +               goto out;
83228 +       }
83230 +       err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
83231 +       if (err)
83232 +               goto out;
83234 +       attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
83235 +                           ARRAY_SIZE(SII_NAME), NULL, NULL);
83236 +       if (!attr) {
83237 +               err = -EINVAL;
83238 +               goto out;
83239 +       }
83241 +       root_sii = resident_data(attr);
83242 +       if (root_sii->type != ATTR_ZERO ||
83243 +           root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
83244 +               err = -EINVAL;
83245 +               goto out;
83246 +       }
83248 +       err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
83249 +       if (err)
83250 +               goto out;
83252 +       fnd_sii = fnd_get();
83253 +       if (!fnd_sii) {
83254 +               err = -ENOMEM;
83255 +               goto out;
83256 +       }
83258 +       sds_size = inode->i_size;
83260 +       /* Find the last valid ID */
83261 +       sbi->security.next_id = SECURITY_ID_FIRST;
83262 +       /* Always write new security descriptors at the end of the bucket */
83263 +       sbi->security.next_off =
83264 +               Quad2Align(sds_size - SecurityDescriptorsBlockSize);
83266 +       cnt = 0;
83267 +       off = 0;
83268 +       ne = NULL;
83270 +       for (;;) {
83271 +               u32 next_id;
83273 +               err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
83274 +               if (err || !ne)
83275 +                       break;
83277 +               sii_e = (struct NTFS_DE_SII *)ne;
83278 +               if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
83279 +                       continue;
83281 +               next_id = le32_to_cpu(sii_e->sec_id) + 1;
83282 +               if (next_id >= sbi->security.next_id)
83283 +                       sbi->security.next_id = next_id;
83285 +               cnt += 1;
83286 +       }
83288 +       sbi->security.ni = ni;
83289 +       inode = NULL;
83290 +out:
83291 +       iput(inode);
83292 +       fnd_put(fnd_sii);
83294 +       return err;
83298 + * ntfs_get_security_by_id
83299 + *
83300 + * Reads a security descriptor by its ID.
83301 + */
83302 +int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
83303 +                           struct SECURITY_DESCRIPTOR_RELATIVE **sd,
83304 +                           size_t *size)
83306 +       int err;
83307 +       int diff;
83308 +       struct ntfs_inode *ni = sbi->security.ni;
83309 +       struct ntfs_index *indx = &sbi->security.index_sii;
83310 +       void *p = NULL;
83311 +       struct NTFS_DE_SII *sii_e;
83312 +       struct ntfs_fnd *fnd_sii;
83313 +       struct SECURITY_HDR d_security;
83314 +       const struct INDEX_ROOT *root_sii;
83315 +       u32 t32;
83317 +       *sd = NULL;
83319 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
83321 +       fnd_sii = fnd_get();
83322 +       if (!fnd_sii) {
83323 +               err = -ENOMEM;
83324 +               goto out;
83325 +       }
83327 +       root_sii = indx_get_root(indx, ni, NULL, NULL);
83328 +       if (!root_sii) {
83329 +               err = -EINVAL;
83330 +               goto out;
83331 +       }
83333 +       /* Try to find this security descriptor in the SII index */
83334 +       err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
83335 +                       NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
83336 +       if (err)
83337 +               goto out;
83339 +       if (diff)
83340 +               goto out;
83342 +       t32 = le32_to_cpu(sii_e->sec_hdr.size);
83343 +       if (t32 < SIZEOF_SECURITY_HDR) {
83344 +               err = -EINVAL;
83345 +               goto out;
83346 +       }
83348 +       if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
83349 +               /*
83350 +                * The security descriptor looks too large; 0x10000 is an arbitrary upper bound.
83351 +                */
83352 +               err = -EFBIG;
83353 +               goto out;
83354 +       }
83356 +       *size = t32 - SIZEOF_SECURITY_HDR;
83358 +       p = ntfs_malloc(*size);
83359 +       if (!p) {
83360 +               err = -ENOMEM;
83361 +               goto out;
83362 +       }
83364 +       err = ntfs_read_run_nb(sbi, &ni->file.run,
83365 +                              le64_to_cpu(sii_e->sec_hdr.off), &d_security,
83366 +                              sizeof(d_security), NULL);
83367 +       if (err)
83368 +               goto out;
83370 +       if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
83371 +               err = -EINVAL;
83372 +               goto out;
83373 +       }
83375 +       err = ntfs_read_run_nb(sbi, &ni->file.run,
83376 +                              le64_to_cpu(sii_e->sec_hdr.off) +
83377 +                                      SIZEOF_SECURITY_HDR,
83378 +                              p, *size, NULL);
83379 +       if (err)
83380 +               goto out;
83382 +       *sd = p;
83383 +       p = NULL;
83385 +out:
83386 +       ntfs_free(p);
83387 +       fnd_put(fnd_sii);
83388 +       ni_unlock(ni);
83390 +       return err;
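+/*
+ * A hypothetical caller sketch (not part of the patch); the returned
+ * buffer is allocated with ntfs_malloc and owned by the caller:
+ *
+ *	struct SECURITY_DESCRIPTOR_RELATIVE *sd;
+ *	size_t sd_size;
+ *
+ *	if (!ntfs_get_security_by_id(sbi, security_id, &sd, &sd_size)) {
+ *		// e.g. sanity-check with is_sd_valid(sd, sd_size)
+ *		ntfs_free(sd);
+ *	}
+ */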
83394 + * ntfs_insert_security
83395 + *
83396 + * Inserts a security descriptor into $Secure::SDS.
83397 + *
83398 + * The Security Descriptor Stream (SDS) data is organized into chunks of
83399 + * 256K bytes, and it contains a mirror copy of each security descriptor.
83400 + * When writing a security descriptor at location X, another copy is
83401 + * written at location (X + 256K).
83402 + * When a security descriptor would cross the 256K boundary,
83403 + * the write pointer is advanced by 256K to skip
83404 + * over the mirror portion.
83405 + */
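+/*
+ * A worked example of this layout, assuming
+ * SecurityDescriptorsBlockSize == 0x40000 (256K): [0, 256K) and
+ * [512K, 768K) hold data, while [256K, 512K) and [768K, 1M) hold their
+ * mirrors.  If next_off == 0x3ff00 and the new descriptor needs 0x200
+ * bytes, the remaining 0x100 bytes are too small, so next_off advances
+ * to 0x3ff00 + 0x40000 + 0x100 == 0x80000 (the next data block), and
+ * the mirror copy then lands at 0x80000 + 0x40000 == 0xc0000.
+ */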
83406 +int ntfs_insert_security(struct ntfs_sb_info *sbi,
83407 +                        const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
83408 +                        u32 size_sd, __le32 *security_id, bool *inserted)
83410 +       int err, diff;
83411 +       struct ntfs_inode *ni = sbi->security.ni;
83412 +       struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
83413 +       struct ntfs_index *indx_sii = &sbi->security.index_sii;
83414 +       struct NTFS_DE_SDH *e;
83415 +       struct NTFS_DE_SDH sdh_e;
83416 +       struct NTFS_DE_SII sii_e;
83417 +       struct SECURITY_HDR *d_security;
83418 +       u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
83419 +       u32 aligned_sec_size = Quad2Align(new_sec_size);
83420 +       struct SECURITY_KEY hash_key;
83421 +       struct ntfs_fnd *fnd_sdh = NULL;
83422 +       const struct INDEX_ROOT *root_sdh;
83423 +       const struct INDEX_ROOT *root_sii;
83424 +       u64 mirr_off, new_sds_size;
83425 +       u32 next, left;
83427 +       static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
83428 +                     SecurityDescriptorsBlockSize);
83430 +       hash_key.hash = security_hash(sd, size_sd);
83431 +       hash_key.sec_id = SECURITY_ID_INVALID;
83433 +       if (inserted)
83434 +               *inserted = false;
83435 +       *security_id = SECURITY_ID_INVALID;
83437 +       /* Allocate a temporary buffer */
83438 +       d_security = ntfs_zalloc(aligned_sec_size);
83439 +       if (!d_security)
83440 +               return -ENOMEM;
83442 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
83444 +       fnd_sdh = fnd_get();
83445 +       if (!fnd_sdh) {
83446 +               err = -ENOMEM;
83447 +               goto out;
83448 +       }
83450 +       root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
83451 +       if (!root_sdh) {
83452 +               err = -EINVAL;
83453 +               goto out;
83454 +       }
83456 +       root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
83457 +       if (!root_sii) {
83458 +               err = -EINVAL;
83459 +               goto out;
83460 +       }
83462 +       /*
83463 +        * Check whether such a security descriptor already exists:
83464 +        * use the "SDH" index and the hash to get its offset in "SDS".
83465 +        */
83466 +       err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
83467 +                       &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
83468 +                       fnd_sdh);
83469 +       if (err)
83470 +               goto out;
83472 +       while (e) {
83473 +               if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
83474 +                       err = ntfs_read_run_nb(sbi, &ni->file.run,
83475 +                                              le64_to_cpu(e->sec_hdr.off),
83476 +                                              d_security, new_sec_size, NULL);
83477 +                       if (err)
83478 +                               goto out;
83480 +                       if (le32_to_cpu(d_security->size) == new_sec_size &&
83481 +                           d_security->key.hash == hash_key.hash &&
83482 +                           !memcmp(d_security + 1, sd, size_sd)) {
83483 +                               *security_id = d_security->key.sec_id;
83484 +                               /* Such a security descriptor already exists */
83485 +                               err = 0;
83486 +                               goto out;
83487 +                       }
83488 +               }
83490 +               err = indx_find_sort(indx_sdh, ni, root_sdh,
83491 +                                    (struct NTFS_DE **)&e, fnd_sdh);
83492 +               if (err)
83493 +                       goto out;
83495 +               if (!e || e->key.hash != hash_key.hash)
83496 +                       break;
83497 +       }
83499 +       /* Zero unused space */
83500 +       next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
83501 +       left = SecurityDescriptorsBlockSize - next;
83503 +       /* Zero gap until SecurityDescriptorsBlockSize */
83504 +       if (left < new_sec_size) {
83505 +               /* zero "left" bytes from sbi->security.next_off */
83506 +               sbi->security.next_off += SecurityDescriptorsBlockSize + left;
83507 +       }
83509 +       /* Zero tail of previous security */
83510 +       //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
83512 +       /*
83513 +        * Example:
83514 +        * 0x40438 == ni->vfs_inode.i_size
83515 +        * 0x00440 == sbi->security.next_off
83516 +        * need to zero [0x438-0x440)
83517 +        * if (next > used) {
83518 +        *  u32 tozero = next - used;
83519 +        *  zero "tozero" bytes from sbi->security.next_off - tozero
+        * }
83520 +        */
83522 +       /* Format the new security descriptor */
83523 +       d_security->key.hash = hash_key.hash;
83524 +       d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
83525 +       d_security->off = cpu_to_le64(sbi->security.next_off);
83526 +       d_security->size = cpu_to_le32(new_sec_size);
83527 +       memcpy(d_security + 1, sd, size_sd);
83529 +       /* Write main SDS bucket */
83530 +       err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
83531 +                               d_security, aligned_sec_size);
83533 +       if (err)
83534 +               goto out;
83536 +       mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
83537 +       new_sds_size = mirr_off + aligned_sec_size;
83539 +       if (new_sds_size > ni->vfs_inode.i_size) {
83540 +               err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
83541 +                                   ARRAY_SIZE(SDS_NAME), &ni->file.run,
83542 +                                   new_sds_size, &new_sds_size, false, NULL);
83543 +               if (err)
83544 +                       goto out;
83545 +       }
83547 +       /* Write copy SDS bucket */
83548 +       err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
83549 +                               aligned_sec_size);
83550 +       if (err)
83551 +               goto out;
83553 +       /* Fill SII entry */
83554 +       sii_e.de.view.data_off =
83555 +               cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
83556 +       sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
83557 +       sii_e.de.view.res = 0;
83558 +       sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
83559 +       sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
83560 +       sii_e.de.flags = 0;
83561 +       sii_e.de.res = 0;
83562 +       sii_e.sec_id = d_security->key.sec_id;
83563 +       memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
83565 +       err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL);
83566 +       if (err)
83567 +               goto out;
83569 +       /* Fill SDH entry */
83570 +       sdh_e.de.view.data_off =
83571 +               cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
83572 +       sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
83573 +       sdh_e.de.view.res = 0;
83574 +       sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
83575 +       sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
83576 +       sdh_e.de.flags = 0;
83577 +       sdh_e.de.res = 0;
83578 +       sdh_e.key.hash = d_security->key.hash;
83579 +       sdh_e.key.sec_id = d_security->key.sec_id;
83580 +       memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
83581 +       sdh_e.magic[0] = cpu_to_le16('I');
83582 +       sdh_e.magic[1] = cpu_to_le16('I');
83584 +       fnd_clear(fnd_sdh);
83585 +       err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
83586 +                               fnd_sdh);
83587 +       if (err)
83588 +               goto out;
83590 +       *security_id = d_security->key.sec_id;
83591 +       if (inserted)
83592 +               *inserted = true;
83594 +       /* Update the ID and offset for the next descriptor */
83595 +       sbi->security.next_id += 1;
83596 +       sbi->security.next_off += aligned_sec_size;
83598 +out:
83599 +       fnd_put(fnd_sdh);
83600 +       mark_inode_dirty(&ni->vfs_inode);
83601 +       ni_unlock(ni);
83602 +       ntfs_free(d_security);
83604 +       return err;
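+/*
+ * A hypothetical caller sketch (not part of the patch).  Identical
+ * descriptors are deduplicated through the SDH hash lookup above, in
+ * which case 'inserted' comes back false and the existing ID is reused:
+ *
+ *	__le32 sec_id;
+ *	bool inserted;
+ *	int err = ntfs_insert_security(sbi, sd, size_sd, &sec_id, &inserted);
+ *
+ *	if (!err)
+ *		info->security_id = sec_id; // hypothetical $STANDARD_INFORMATION field
+ */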
83608 + * ntfs_reparse_init
83609 + *
83610 + * Loads and parses $Extend/$Reparse.
83611 + */
83612 +int ntfs_reparse_init(struct ntfs_sb_info *sbi)
83614 +       int err;
83615 +       struct ntfs_inode *ni = sbi->reparse.ni;
83616 +       struct ntfs_index *indx = &sbi->reparse.index_r;
83617 +       struct ATTRIB *attr;
83618 +       struct ATTR_LIST_ENTRY *le;
83619 +       const struct INDEX_ROOT *root_r;
83621 +       if (!ni)
83622 +               return 0;
83624 +       le = NULL;
83625 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
83626 +                           ARRAY_SIZE(SR_NAME), NULL, NULL);
83627 +       if (!attr) {
83628 +               err = -EINVAL;
83629 +               goto out;
83630 +       }
83632 +       root_r = resident_data(attr);
83633 +       if (root_r->type != ATTR_ZERO ||
83634 +           root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
83635 +               err = -EINVAL;
83636 +               goto out;
83637 +       }
83639 +       err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
83640 +       if (err)
83641 +               goto out;
83643 +out:
83644 +       return err;
83648 + * ntfs_objid_init
83649 + *
83650 + * Loads and parses $Extend/$ObjId.
83651 + */
83652 +int ntfs_objid_init(struct ntfs_sb_info *sbi)
83654 +       int err;
83655 +       struct ntfs_inode *ni = sbi->objid.ni;
83656 +       struct ntfs_index *indx = &sbi->objid.index_o;
83657 +       struct ATTRIB *attr;
83658 +       struct ATTR_LIST_ENTRY *le;
83659 +       const struct INDEX_ROOT *root;
83661 +       if (!ni)
83662 +               return 0;
83664 +       le = NULL;
83665 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
83666 +                           ARRAY_SIZE(SO_NAME), NULL, NULL);
83667 +       if (!attr) {
83668 +               err = -EINVAL;
83669 +               goto out;
83670 +       }
83672 +       root = resident_data(attr);
83673 +       if (root->type != ATTR_ZERO ||
83674 +           root->rule != NTFS_COLLATION_TYPE_UINTS) {
83675 +               err = -EINVAL;
83676 +               goto out;
83677 +       }
83679 +       err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
83680 +       if (err)
83681 +               goto out;
83683 +out:
83684 +       return err;
83687 +int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
83689 +       int err;
83690 +       struct ntfs_inode *ni = sbi->objid.ni;
83691 +       struct ntfs_index *indx = &sbi->objid.index_o;
83693 +       if (!ni)
83694 +               return -EINVAL;
83696 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
83698 +       err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
83700 +       mark_inode_dirty(&ni->vfs_inode);
83701 +       ni_unlock(ni);
83703 +       return err;
83706 +int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
83707 +                       const struct MFT_REF *ref)
83709 +       int err;
83710 +       struct ntfs_inode *ni = sbi->reparse.ni;
83711 +       struct ntfs_index *indx = &sbi->reparse.index_r;
83712 +       struct NTFS_DE_R re;
83714 +       if (!ni)
83715 +               return -EINVAL;
83717 +       memset(&re, 0, sizeof(re));
83719 +       re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
83720 +       re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
83721 +       re.de.key_size = cpu_to_le16(sizeof(re.key));
83723 +       re.key.ReparseTag = rtag;
83724 +       memcpy(&re.key.ref, ref, sizeof(*ref));
83726 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
83728 +       err = indx_insert_entry(indx, ni, &re.de, NULL, NULL);
83730 +       mark_inode_dirty(&ni->vfs_inode);
83731 +       ni_unlock(ni);
83733 +       return err;
83736 +int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
83737 +                       const struct MFT_REF *ref)
83739 +       int err, diff;
83740 +       struct ntfs_inode *ni = sbi->reparse.ni;
83741 +       struct ntfs_index *indx = &sbi->reparse.index_r;
83742 +       struct ntfs_fnd *fnd = NULL;
83743 +       struct REPARSE_KEY rkey;
83744 +       struct NTFS_DE_R *re;
83745 +       struct INDEX_ROOT *root_r;
83747 +       if (!ni)
83748 +               return -EINVAL;
83750 +       rkey.ReparseTag = rtag;
83751 +       rkey.ref = *ref;
83753 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
83755 +       if (rtag) {
83756 +               err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
83757 +               goto out1;
83758 +       }
83760 +       fnd = fnd_get();
83761 +       if (!fnd) {
83762 +               err = -ENOMEM;
83763 +               goto out1;
83764 +       }
83766 +       root_r = indx_get_root(indx, ni, NULL, NULL);
83767 +       if (!root_r) {
83768 +               err = -EINVAL;
83769 +               goto out;
83770 +       }
83772 +       /* Passing 1 as the context forces cmp_uints to ignore rkey.ReparseTag when comparing keys */
83773 +       err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
83774 +                       (struct NTFS_DE **)&re, fnd);
83775 +       if (err)
83776 +               goto out;
83778 +       if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
83779 +               /* Should be impossible; the volume looks corrupt */
83780 +               goto out;
83781 +       }
83783 +       memcpy(&rkey, &re->key, sizeof(rkey));
83785 +       fnd_put(fnd);
83786 +       fnd = NULL;
83788 +       err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
83789 +       if (err)
83790 +               goto out;
83792 +out:
83793 +       fnd_put(fnd);
83795 +out1:
83796 +       mark_inode_dirty(&ni->vfs_inode);
83797 +       ni_unlock(ni);
83799 +       return err;
83802 +static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
83803 +                                         CLST len)
83805 +       ntfs_unmap_meta(sbi->sb, lcn, len);
83806 +       ntfs_discard(sbi, lcn, len);
83809 +void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
83811 +       CLST end, i;
83812 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
83814 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
83815 +       if (!wnd_is_used(wnd, lcn, len)) {
83816 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
83818 +               end = lcn + len;
83819 +               len = 0;
83820 +               for (i = lcn; i < end; i++) {
83821 +                       if (wnd_is_used(wnd, i, 1)) {
83822 +                               if (!len)
83823 +                                       lcn = i;
83824 +                               len += 1;
83825 +                               continue;
83826 +                       }
83828 +                       if (!len)
83829 +                               continue;
83831 +                       if (trim)
83832 +                               ntfs_unmap_and_discard(sbi, lcn, len);
83834 +                       wnd_set_free(wnd, lcn, len);
83835 +                       len = 0;
83836 +               }
83838 +               if (!len)
83839 +                       goto out;
83840 +       }
83842 +       if (trim)
83843 +               ntfs_unmap_and_discard(sbi, lcn, len);
83844 +       wnd_set_free(wnd, lcn, len);
83846 +out:
83847 +       up_write(&wnd->rw_lock);
83851 + * run_deallocate
83852 + *
83853 + * Deallocates all clusters in the given run.
83854 + */
83855 +int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
83857 +       CLST lcn, len;
83858 +       size_t idx = 0;
83860 +       while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
83861 +               if (lcn == SPARSE_LCN)
83862 +                       continue;
83864 +               mark_as_free_ex(sbi, lcn, len, trim);
83865 +       }
83867 +       return 0;
83869 diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
83870 new file mode 100644
83871 index 000000000000..931a7241ef00
83872 --- /dev/null
83873 +++ b/fs/ntfs3/index.c
83874 @@ -0,0 +1,2641 @@
83875 +// SPDX-License-Identifier: GPL-2.0
83877 + *
83878 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
83879 + *
83880 + */
83882 +#include <linux/blkdev.h>
83883 +#include <linux/buffer_head.h>
83884 +#include <linux/fs.h>
83885 +#include <linux/nls.h>
83887 +#include "debug.h"
83888 +#include "ntfs.h"
83889 +#include "ntfs_fs.h"
83891 +static const struct INDEX_NAMES {
83892 +       const __le16 *name;
83893 +       u8 name_len;
83894 +} s_index_names[INDEX_MUTEX_TOTAL] = {
83895 +       { I30_NAME, ARRAY_SIZE(I30_NAME) }, { SII_NAME, ARRAY_SIZE(SII_NAME) },
83896 +       { SDH_NAME, ARRAY_SIZE(SDH_NAME) }, { SO_NAME, ARRAY_SIZE(SO_NAME) },
83897 +       { SQ_NAME, ARRAY_SIZE(SQ_NAME) },   { SR_NAME, ARRAY_SIZE(SR_NAME) },
83901 + * Compares two names in an index.
83902 + * If l1 != 0,
83903 + *   both names are little-endian on-disk ATTR_FILE_NAME structs;
83904 + * otherwise,
83905 + *   key1 is a cpu_str and key2 is an ATTR_FILE_NAME.
83906 + */
83907 +static int cmp_fnames(const void *key1, size_t l1, const void *key2, size_t l2,
83908 +                     const void *data)
83910 +       const struct ATTR_FILE_NAME *f2 = key2;
83911 +       const struct ntfs_sb_info *sbi = data;
83912 +       const struct ATTR_FILE_NAME *f1;
83913 +       u16 fsize2;
83914 +       bool both_case;
83916 +       if (l2 <= offsetof(struct ATTR_FILE_NAME, name))
83917 +               return -1;
83919 +       fsize2 = fname_full_size(f2);
83920 +       if (l2 < fsize2)
83921 +               return -1;
83923 +       both_case = f2->type != FILE_NAME_DOS /*&& !sbi->options.nocase*/;
83924 +       if (!l1) {
83925 +               const struct le_str *s2 = (struct le_str *)&f2->name_len;
83927 +               /*
83928 +                * If the names are equal (case insensitive),
83929 +                * try to compare them case sensitively.
83930 +                */
83931 +               return ntfs_cmp_names_cpu(key1, s2, sbi->upcase, both_case);
83932 +       }
83934 +       f1 = key1;
83935 +       return ntfs_cmp_names(f1->name, f1->name_len, f2->name, f2->name_len,
83936 +                             sbi->upcase, both_case);
83939 +/* $SII of $Secure and $Q of Quota */
83940 +static int cmp_uint(const void *key1, size_t l1, const void *key2, size_t l2,
83941 +                   const void *data)
83943 +       const u32 *k1 = key1;
83944 +       const u32 *k2 = key2;
83946 +       if (l2 < sizeof(u32))
83947 +               return -1;
83949 +       if (*k1 < *k2)
83950 +               return -1;
83951 +       if (*k1 > *k2)
83952 +               return 1;
83953 +       return 0;
83956 +/* $SDH of $Secure */
83957 +static int cmp_sdh(const void *key1, size_t l1, const void *key2, size_t l2,
83958 +                  const void *data)
83960 +       const struct SECURITY_KEY *k1 = key1;
83961 +       const struct SECURITY_KEY *k2 = key2;
83962 +       u32 t1, t2;
83964 +       if (l2 < sizeof(struct SECURITY_KEY))
83965 +               return -1;
83967 +       t1 = le32_to_cpu(k1->hash);
83968 +       t2 = le32_to_cpu(k2->hash);
83970 +       /* First value is a hash value itself */
83971 +       if (t1 < t2)
83972 +               return -1;
83973 +       if (t1 > t2)
83974 +               return 1;
83976 +       /* Second value is security Id */
83977 +       if (data) {
83978 +               t1 = le32_to_cpu(k1->sec_id);
83979 +               t2 = le32_to_cpu(k2->sec_id);
83980 +               if (t1 < t2)
83981 +                       return -1;
83982 +               if (t1 > t2)
83983 +                       return 1;
83984 +       }
83986 +       return 0;
83989 +/* $O of $ObjId and $R of $Reparse */
83990 +static int cmp_uints(const void *key1, size_t l1, const void *key2, size_t l2,
83991 +                    const void *data)
83993 +       const __le32 *k1 = key1;
83994 +       const __le32 *k2 = key2;
83995 +       size_t count;
83997 +       if ((size_t)data == 1) {
83998 +               /*
83999 +                * ni_delete_all -> ntfs_remove_reparse -> delete all with this reference
84000 +                * k1, k2 - pointers to REPARSE_KEY
84001 +                */
84003 +               k1 += 1; // skip REPARSE_KEY.ReparseTag
84004 +               k2 += 1; // skip REPARSE_KEY.ReparseTag
84005 +               if (l2 <= sizeof(int))
84006 +                       return -1;
84007 +               l2 -= sizeof(int);
84008 +               if (l1 <= sizeof(int))
84009 +                       return 1;
84010 +               l1 -= sizeof(int);
84011 +       }
84013 +       if (l2 < sizeof(int))
84014 +               return -1;
84016 +       for (count = min(l1, l2) >> 2; count > 0; --count, ++k1, ++k2) {
84017 +               u32 t1 = le32_to_cpu(*k1);
84018 +               u32 t2 = le32_to_cpu(*k2);
84020 +               if (t1 > t2)
84021 +                       return 1;
84022 +               if (t1 < t2)
84023 +                       return -1;
84024 +       }
84026 +       if (l1 > l2)
84027 +               return 1;
84028 +       if (l1 < l2)
84029 +               return -1;
84031 +       return 0;
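+/*
+ * An illustration of the (size_t)data == 1 convention above, assuming
+ * struct REPARSE_KEY is { __le32 ReparseTag; struct MFT_REF ref; }:
+ * ntfs_remove_reparse passes (void *)1 through indx_find, so both keys
+ * are REPARSE_KEYs, the leading ReparseTag is skipped, and entries are
+ * matched by MFT reference alone.
+ */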
84034 +static inline NTFS_CMP_FUNC get_cmp_func(const struct INDEX_ROOT *root)
84036 +       switch (root->type) {
84037 +       case ATTR_NAME:
84038 +               if (root->rule == NTFS_COLLATION_TYPE_FILENAME)
84039 +                       return &cmp_fnames;
84040 +               break;
84041 +       case ATTR_ZERO:
84042 +               switch (root->rule) {
84043 +               case NTFS_COLLATION_TYPE_UINT:
84044 +                       return &cmp_uint;
84045 +               case NTFS_COLLATION_TYPE_SECURITY_HASH:
84046 +                       return &cmp_sdh;
84047 +               case NTFS_COLLATION_TYPE_UINTS:
84048 +                       return &cmp_uints;
84049 +               default:
84050 +                       break;
84051 +               }
84052 +       default:
84053 +               break;
84054 +       }
84056 +       return NULL;
84059 +struct bmp_buf {
84060 +       struct ATTRIB *b;
84061 +       struct mft_inode *mi;
84062 +       struct buffer_head *bh;
84063 +       ulong *buf;
84064 +       size_t bit;
84065 +       u32 nbits;
84066 +       u64 new_valid;
84069 +static int bmp_buf_get(struct ntfs_index *indx, struct ntfs_inode *ni,
84070 +                      size_t bit, struct bmp_buf *bbuf)
84072 +       struct ATTRIB *b;
84073 +       size_t data_size, valid_size, vbo, off = bit >> 3;
84074 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
84075 +       CLST vcn = off >> sbi->cluster_bits;
84076 +       struct ATTR_LIST_ENTRY *le = NULL;
84077 +       struct buffer_head *bh;
84078 +       struct super_block *sb;
84079 +       u32 blocksize;
84080 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
84082 +       bbuf->bh = NULL;
84084 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
84085 +                        &vcn, &bbuf->mi);
84086 +       bbuf->b = b;
84087 +       if (!b)
84088 +               return -EINVAL;
84090 +       if (!b->non_res) {
84091 +               data_size = le32_to_cpu(b->res.data_size);
84093 +               if (off >= data_size)
84094 +                       return -EINVAL;
84096 +               bbuf->buf = (ulong *)resident_data(b);
84097 +               bbuf->bit = 0;
84098 +               bbuf->nbits = data_size * 8;
84100 +               return 0;
84101 +       }
84103 +       data_size = le64_to_cpu(b->nres.data_size);
84104 +       if (WARN_ON(off >= data_size)) {
84105 +               /* Looks like a filesystem error */
84106 +               return -EINVAL;
84107 +       }
84109 +       valid_size = le64_to_cpu(b->nres.valid_size);
84111 +       bh = ntfs_bread_run(sbi, &indx->bitmap_run, off);
84112 +       if (!bh)
84113 +               return -EIO;
84115 +       if (IS_ERR(bh))
84116 +               return PTR_ERR(bh);
84118 +       bbuf->bh = bh;
84120 +       if (buffer_locked(bh))
84121 +               __wait_on_buffer(bh);
84123 +       lock_buffer(bh);
84125 +       sb = sbi->sb;
84126 +       blocksize = sb->s_blocksize;
84128 +       vbo = off & ~(size_t)sbi->block_mask;
84130 +       bbuf->new_valid = vbo + blocksize;
84131 +       if (bbuf->new_valid <= valid_size)
84132 +               bbuf->new_valid = 0;
84133 +       else if (bbuf->new_valid > data_size)
84134 +               bbuf->new_valid = data_size;
84136 +       if (vbo >= valid_size) {
84137 +               memset(bh->b_data, 0, blocksize);
84138 +       } else if (vbo + blocksize > valid_size) {
84139 +               u32 voff = valid_size & sbi->block_mask;
84141 +               memset(bh->b_data + voff, 0, blocksize - voff);
84142 +       }
84144 +       bbuf->buf = (ulong *)bh->b_data;
84145 +       bbuf->bit = 8 * (off & ~(size_t)sbi->block_mask);
84146 +       bbuf->nbits = 8 * blocksize;
84148 +       return 0;
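+/*
+ * A worked example of the non-resident math above, assuming a 4K block
+ * size (block_mask == 0xfff): for bit == 100000, off == bit >> 3 == 12500,
+ * so the buffer_head covers bytes [12288, 16384); bbuf->bit == 8 * 12288
+ * == 98304 and bbuf->nbits == 32768, and the caller then flips bit
+ * number bit - bbuf->bit == 1696 inside bbuf->buf.
+ */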
84151 +static void bmp_buf_put(struct bmp_buf *bbuf, bool dirty)
84153 +       struct buffer_head *bh = bbuf->bh;
84154 +       struct ATTRIB *b = bbuf->b;
84156 +       if (!bh) {
84157 +               if (b && !b->non_res && dirty)
84158 +                       bbuf->mi->dirty = true;
84159 +               return;
84160 +       }
84162 +       if (!dirty)
84163 +               goto out;
84165 +       if (bbuf->new_valid) {
84166 +               b->nres.valid_size = cpu_to_le64(bbuf->new_valid);
84167 +               bbuf->mi->dirty = true;
84168 +       }
84170 +       set_buffer_uptodate(bh);
84171 +       mark_buffer_dirty(bh);
84173 +out:
84174 +       unlock_buffer(bh);
84175 +       put_bh(bh);
84179 + * indx_mark_used
84180 + *
84181 + * marks the bit 'bit' as used
84182 + */
84183 +static int indx_mark_used(struct ntfs_index *indx, struct ntfs_inode *ni,
84184 +                         size_t bit)
84186 +       int err;
84187 +       struct bmp_buf bbuf;
84189 +       err = bmp_buf_get(indx, ni, bit, &bbuf);
84190 +       if (err)
84191 +               return err;
84193 +       __set_bit(bit - bbuf.bit, bbuf.buf);
84195 +       bmp_buf_put(&bbuf, true);
84197 +       return 0;
84201 + * indx_mark_free
84202 + *
84203 + * Marks the bit 'bit' as free.
84204 + */
84205 +static int indx_mark_free(struct ntfs_index *indx, struct ntfs_inode *ni,
84206 +                         size_t bit)
84208 +       int err;
84209 +       struct bmp_buf bbuf;
84211 +       err = bmp_buf_get(indx, ni, bit, &bbuf);
84212 +       if (err)
84213 +               return err;
84215 +       __clear_bit(bit - bbuf.bit, bbuf.buf);
84217 +       bmp_buf_put(&bbuf, true);
84219 +       return 0;
84223 + * When ntfs_readdir calls this function (indx_used_bit -> scan_nres_bitmap),
84224 + * the inode is shared-locked and ni_lock is not held,
84225 + * so use the rw_semaphore for read/write access to bitmap_run.
84226 + */
84227 +static int scan_nres_bitmap(struct ntfs_inode *ni, struct ATTRIB *bitmap,
84228 +                           struct ntfs_index *indx, size_t from,
84229 +                           bool (*fn)(const ulong *buf, u32 bit, u32 bits,
84230 +                                      size_t *ret),
84231 +                           size_t *ret)
84233 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
84234 +       struct super_block *sb = sbi->sb;
84235 +       struct runs_tree *run = &indx->bitmap_run;
84236 +       struct rw_semaphore *lock = &indx->run_lock;
84237 +       u32 nbits = sb->s_blocksize * 8;
84238 +       u32 blocksize = sb->s_blocksize;
84239 +       u64 valid_size = le64_to_cpu(bitmap->nres.valid_size);
84240 +       u64 data_size = le64_to_cpu(bitmap->nres.data_size);
84241 +       sector_t eblock = bytes_to_block(sb, data_size);
84242 +       size_t vbo = from >> 3;
84243 +       sector_t blk = (vbo & sbi->cluster_mask) >> sb->s_blocksize_bits;
84244 +       sector_t vblock = vbo >> sb->s_blocksize_bits;
84245 +       sector_t blen, block;
84246 +       CLST lcn, clen, vcn, vcn_next;
84247 +       size_t idx;
84248 +       struct buffer_head *bh;
84249 +       bool ok;
84251 +       *ret = MINUS_ONE_T;
84253 +       if (vblock >= eblock)
84254 +               return 0;
84256 +       from &= nbits - 1;
84257 +       vcn = vbo >> sbi->cluster_bits;
84259 +       down_read(lock);
84260 +       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
84261 +       up_read(lock);
84263 +next_run:
84264 +       if (!ok) {
84265 +               int err;
84266 +               const struct INDEX_NAMES *name = &s_index_names[indx->type];
84268 +               down_write(lock);
84269 +               err = attr_load_runs_vcn(ni, ATTR_BITMAP, name->name,
84270 +                                        name->name_len, run, vcn);
84271 +               up_write(lock);
84272 +               if (err)
84273 +                       return err;
84274 +               down_read(lock);
84275 +               ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
84276 +               up_read(lock);
84277 +               if (!ok)
84278 +                       return -EINVAL;
84279 +       }
84281 +       blen = (sector_t)clen * sbi->blocks_per_cluster;
84282 +       block = (sector_t)lcn * sbi->blocks_per_cluster;
84284 +       for (; blk < blen; blk++, from = 0) {
84285 +               bh = ntfs_bread(sb, block + blk);
84286 +               if (!bh)
84287 +                       return -EIO;
84289 +               vbo = (u64)vblock << sb->s_blocksize_bits;
84290 +               if (vbo >= valid_size) {
84291 +                       memset(bh->b_data, 0, blocksize);
84292 +               } else if (vbo + blocksize > valid_size) {
84293 +                       u32 voff = valid_size & sbi->block_mask;
84295 +                       memset(bh->b_data + voff, 0, blocksize - voff);
84296 +               }
84298 +               if (vbo + blocksize > data_size)
84299 +                       nbits = 8 * (data_size - vbo);
84301 +               ok = nbits > from ? (*fn)((ulong *)bh->b_data, from, nbits, ret)
84302 +                                 : false;
84303 +               put_bh(bh);
84305 +               if (ok) {
84306 +                       *ret += 8 * vbo;
84307 +                       return 0;
84308 +               }
84310 +               if (++vblock >= eblock) {
84311 +                       *ret = MINUS_ONE_T;
84312 +                       return 0;
84313 +               }
84314 +       }
84315 +       blk = 0;
84316 +       vcn_next = vcn + clen;
84317 +       down_read(lock);
84318 +       ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) && vcn == vcn_next;
84319 +       if (!ok)
84320 +               vcn = vcn_next;
84321 +       up_read(lock);
84322 +       goto next_run;
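+/*
+ * A worked example of the bit accounting above, assuming 4K blocks
+ * (nbits == 32768): for from == 70000, vbo == 8750 places the scan in
+ * vblock 2, and from &= nbits - 1 leaves bit 4464 within that block; on
+ * a hit, *ret is rebased with *ret += 8 * vbo, where vbo has been
+ * recomputed as vblock << s_blocksize_bits.
+ */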
84325 +static bool scan_for_free(const ulong *buf, u32 bit, u32 bits, size_t *ret)
84327 +       size_t pos = find_next_zero_bit(buf, bits, bit);
84329 +       if (pos >= bits)
84330 +               return false;
84331 +       *ret = pos;
84332 +       return true;
84336 + * indx_find_free
84337 + *
84338 + * Looks for a free bit.
84339 + * *bit is set to MINUS_ONE_T if there are no free bits.
84340 + */
84341 +static int indx_find_free(struct ntfs_index *indx, struct ntfs_inode *ni,
84342 +                         size_t *bit, struct ATTRIB **bitmap)
84344 +       struct ATTRIB *b;
84345 +       struct ATTR_LIST_ENTRY *le = NULL;
84346 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
84347 +       int err;
84349 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
84350 +                        NULL, NULL);
84352 +       if (!b)
84353 +               return -ENOENT;
84355 +       *bitmap = b;
84356 +       *bit = MINUS_ONE_T;
84358 +       if (!b->non_res) {
84359 +               u32 nbits = 8 * le32_to_cpu(b->res.data_size);
84360 +               size_t pos = find_next_zero_bit(resident_data(b), nbits, 0);
84362 +               if (pos < nbits)
84363 +                       *bit = pos;
84364 +       } else {
84365 +               err = scan_nres_bitmap(ni, b, indx, 0, &scan_for_free, bit);
84367 +               if (err)
84368 +                       return err;
84369 +       }
84371 +       return 0;
84374 +static bool scan_for_used(const ulong *buf, u32 bit, u32 bits, size_t *ret)
84376 +       size_t pos = find_next_bit(buf, bits, bit);
84378 +       if (pos >= bits)
84379 +               return false;
84380 +       *ret = pos;
84381 +       return true;
84385 + * indx_used_bit
84386 + *
84387 + * Looks for a used bit.
84388 + * *bit is set to MINUS_ONE_T if there are no used bits.
84389 + */
84390 +int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit)
84392 +       struct ATTRIB *b;
84393 +       struct ATTR_LIST_ENTRY *le = NULL;
84394 +       size_t from = *bit;
84395 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
84396 +       int err;
84398 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
84399 +                        NULL, NULL);
84401 +       if (!b)
84402 +               return -ENOENT;
84404 +       *bit = MINUS_ONE_T;
84406 +       if (!b->non_res) {
84407 +               u32 nbits = le32_to_cpu(b->res.data_size) * 8;
84408 +               size_t pos = find_next_bit(resident_data(b), nbits, from);
84410 +               if (pos < nbits)
84411 +                       *bit = pos;
84412 +       } else {
84413 +               err = scan_nres_bitmap(ni, b, indx, from, &scan_for_used, bit);
84414 +               if (err)
84415 +                       return err;
84416 +       }
84418 +       return 0;
84422 + * hdr_find_split
84423 + *
84424 + * Finds the point at which the index allocation buffer should
84425 + * be split.
84426 + * NOTE: This function never returns the 'END' entry; it returns NULL on error.
84427 + */
84428 +static const struct NTFS_DE *hdr_find_split(const struct INDEX_HDR *hdr)
84430 +       size_t o;
84431 +       const struct NTFS_DE *e = hdr_first_de(hdr);
84432 +       u32 used_2 = le32_to_cpu(hdr->used) >> 1;
84433 +       u16 esize;
84435 +       if (!e || de_is_last(e))
84436 +               return NULL;
84437 +       esize = le16_to_cpu(e->size); /* Read the size only after the NULL check */
84438 +       for (o = le32_to_cpu(hdr->de_off) + esize; o < used_2; o += esize) {
84439 +               const struct NTFS_DE *p = e;
84441 +               e = Add2Ptr(hdr, o);
84443 +               /* We must not return END entry */
84444 +               if (de_is_last(e))
84445 +                       return p;
84447 +               esize = le16_to_cpu(e->size);
84448 +       }
84450 +       return e;
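+/*
+ * A worked example with illustrative values: with de_off == 0x40,
+ * used == 0x800 (so used_2 == 0x400) and entries of 0x100 bytes each,
+ * the loop visits offsets 0x140, 0x240 and 0x340, stops once o reaches
+ * 0x440 >= used_2, and returns the entry at 0x340, i.e. the entry
+ * nearest the midpoint of the used bytes.
+ */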
84454 + * hdr_insert_head
84455 + *
84456 + * inserts some entries at the beginning of the buffer.
84457 + * It is used to insert entries into a newly-created buffer.
84458 + */
84459 +static const struct NTFS_DE *hdr_insert_head(struct INDEX_HDR *hdr,
84460 +                                            const void *ins, u32 ins_bytes)
84462 +       u32 to_move;
84463 +       struct NTFS_DE *e = hdr_first_de(hdr);
84464 +       u32 used = le32_to_cpu(hdr->used);
84466 +       if (!e)
84467 +               return NULL;
84469 +       /* Now we just make room for the inserted entries and jam it in. */
84470 +       to_move = used - le32_to_cpu(hdr->de_off);
84471 +       memmove(Add2Ptr(e, ins_bytes), e, to_move);
84472 +       memcpy(e, ins, ins_bytes);
84473 +       hdr->used = cpu_to_le32(used + ins_bytes);
84475 +       return e;
84478 +void fnd_clear(struct ntfs_fnd *fnd)
84480 +       int i;
84482 +       for (i = 0; i < fnd->level; i++) {
84483 +               struct indx_node *n = fnd->nodes[i];
84485 +               if (!n)
84486 +                       continue;
84488 +               put_indx_node(n);
84489 +               fnd->nodes[i] = NULL;
84490 +       }
84491 +       fnd->level = 0;
84492 +       fnd->root_de = NULL;
84495 +static int fnd_push(struct ntfs_fnd *fnd, struct indx_node *n,
84496 +                   struct NTFS_DE *e)
84498 +       int i;
84500 +       i = fnd->level;
84501 +       if (i < 0 || i >= ARRAY_SIZE(fnd->nodes))
84502 +               return -EINVAL;
84503 +       fnd->nodes[i] = n;
84504 +       fnd->de[i] = e;
84505 +       fnd->level += 1;
84506 +       return 0;
84509 +static struct indx_node *fnd_pop(struct ntfs_fnd *fnd)
84511 +       struct indx_node *n;
84512 +       int i = fnd->level;
84514 +       i -= 1;
84515 +       n = fnd->nodes[i];
84516 +       fnd->nodes[i] = NULL;
84517 +       fnd->level = i;
84519 +       return n;
84522 +static bool fnd_is_empty(struct ntfs_fnd *fnd)
84524 +       if (!fnd->level)
84525 +               return !fnd->root_de;
84527 +       return !fnd->de[fnd->level - 1];
84531 + * hdr_find_e
84532 + *
84533 + * Locates an entry in the index buffer.
84534 + * If no matching entry is found, it returns the first entry that is greater
84535 + * than the desired entry. If the search key is greater than all the entries in
84536 + * the buffer, it returns the 'end' entry. This function does a binary search of
84537 + * the current index buffer to find the first entry that is <= the search value.
84538 + * Returns NULL on error.
84539 + */
84540 +static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
84541 +                                 const struct INDEX_HDR *hdr, const void *key,
84542 +                                 size_t key_len, const void *ctx, int *diff)
84544 +       struct NTFS_DE *e;
84545 +       NTFS_CMP_FUNC cmp = indx->cmp;
84546 +       u32 e_size, e_key_len;
84547 +       u32 end = le32_to_cpu(hdr->used);
84548 +       u32 off = le32_to_cpu(hdr->de_off);
84550 +#ifdef NTFS3_INDEX_BINARY_SEARCH
84551 +       int max_idx = 0, fnd, min_idx;
84552 +       int nslots = 64;
84553 +       u16 *offs;
84555 +       if (end > 0x10000)
84556 +               goto next;
84558 +       offs = ntfs_malloc(sizeof(u16) * nslots);
84559 +       if (!offs)
84560 +               goto next;
84562 +       /* use binary search algorithm */
84563 +next1:
84564 +       if (off + sizeof(struct NTFS_DE) > end) {
84565 +               e = NULL;
84566 +               goto out1;
84567 +       }
84568 +       e = Add2Ptr(hdr, off);
84569 +       e_size = le16_to_cpu(e->size);
84571 +       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end) {
84572 +               e = NULL;
84573 +               goto out1;
84574 +       }
84576 +       if (max_idx >= nslots) {
84577 +               u16 *ptr;
84578 +               int new_slots = QuadAlign(2 * nslots);
84580 +               ptr = ntfs_malloc(sizeof(u16) * new_slots);
84581 +               if (ptr)
84582 +                       memcpy(ptr, offs, sizeof(u16) * max_idx);
84583 +               ntfs_free(offs);
84584 +               offs = ptr;
84585 +               nslots = new_slots;
84586 +               if (!ptr)
84587 +                       goto next;
84588 +       }
84590 +       /* Remember this entry's offset in the table */
84591 +       offs[max_idx] = off;
84593 +       if (!de_is_last(e)) {
84594 +               off += e_size;
84595 +               max_idx += 1;
84596 +               goto next1;
84597 +       }
84599 +       /*
84600 +        * The table of entry offsets is now built.
84601 +        * Use binary search to find the entry that is <= the search value.
84602 +        */
84603 +       fnd = -1;
84604 +       min_idx = 0;
84606 +       while (min_idx <= max_idx) {
84607 +               int mid_idx = min_idx + ((max_idx - min_idx) >> 1);
84608 +               int diff2;
84610 +               e = Add2Ptr(hdr, offs[mid_idx]);
84612 +               e_key_len = le16_to_cpu(e->key_size);
84614 +               diff2 = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
84616 +               if (!diff2) {
84617 +                       *diff = 0;
84618 +                       goto out1;
84619 +               }
84621 +               if (diff2 < 0) {
84622 +                       max_idx = mid_idx - 1;
84623 +                       fnd = mid_idx;
84624 +                       if (!fnd)
84625 +                               break;
84626 +               } else {
84627 +                       min_idx = mid_idx + 1;
84628 +               }
84629 +       }
84631 +       if (fnd == -1) {
84632 +               e = NULL;
84633 +               goto out1;
84634 +       }
84636 +       *diff = -1;
84637 +       e = Add2Ptr(hdr, offs[fnd]);
84639 +out1:
84640 +       ntfs_free(offs);
84642 +       return e;
84643 +#endif
84645 +next:
84646 +       /*
84647 +        * Index entries are sorted.
84648 +        * Enumerate all entries until we find the entry that is <= the search value.
84649 +        */
84650 +       if (off + sizeof(struct NTFS_DE) > end)
84651 +               return NULL;
84653 +       e = Add2Ptr(hdr, off);
84654 +       e_size = le16_to_cpu(e->size);
84656 +       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
84657 +               return NULL;
84659 +       off += e_size;
84661 +       e_key_len = le16_to_cpu(e->key_size);
84663 +       *diff = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
84664 +       if (!*diff)
84665 +               return e;
84667 +       if (*diff <= 0)
84668 +               return e;
84670 +       if (de_is_last(e)) {
84671 +               *diff = 1;
84672 +               return e;
84673 +       }
84674 +       goto next;
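+/*
+ * An illustration of the return contract, assuming cmp_uint collation
+ * and entries keyed 10, 20, 30 plus the END entry: searching for 20
+ * returns that entry with *diff == 0; searching for 25 returns the
+ * entry keyed 30 with *diff < 0 (the first entry greater than the key);
+ * a key above 30 yields the END entry.
+ */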
84678 + * hdr_insert_de
84679 + *
84680 + * inserts an index entry into the buffer.
84681 + * 'before' should be a pointer previously returned from hdr_find_e
84682 + */
84683 +static struct NTFS_DE *hdr_insert_de(const struct ntfs_index *indx,
84684 +                                    struct INDEX_HDR *hdr,
84685 +                                    const struct NTFS_DE *de,
84686 +                                    struct NTFS_DE *before, const void *ctx)
84688 +       int diff;
84689 +       size_t off = PtrOffset(hdr, before);
84690 +       u32 used = le32_to_cpu(hdr->used);
84691 +       u32 total = le32_to_cpu(hdr->total);
84692 +       u16 de_size = le16_to_cpu(de->size);
84694 +       /* First, check to see if there's enough room */
84695 +       if (used + de_size > total)
84696 +               return NULL;
84698 +       /* We know there's enough space, so we know we'll succeed. */
84699 +       if (before) {
84700 +               /* Check that before is inside Index */
84701 +               if (off >= used || off < le32_to_cpu(hdr->de_off) ||
84702 +                   off + le16_to_cpu(before->size) > total) {
84703 +                       return NULL;
84704 +               }
84705 +               goto ok;
84706 +       }
84707 +       /* No insert point was supplied; find it manually */
84708 +       before = hdr_find_e(indx, hdr, de + 1, le16_to_cpu(de->key_size), ctx,
84709 +                           &diff);
84710 +       if (!before)
84711 +               return NULL;
84712 +       off = PtrOffset(hdr, before);
84714 +ok:
84715 +       /* Now we just make room for the entry and jam it in. */
84716 +       memmove(Add2Ptr(before, de_size), before, used - off);
84718 +       hdr->used = cpu_to_le32(used + de_size);
84719 +       memcpy(before, de, de_size);
84721 +       return before;
84725 + * hdr_delete_de
84726 + *
84727 + * removes an entry from the index buffer
84728 + */
84729 +static inline struct NTFS_DE *hdr_delete_de(struct INDEX_HDR *hdr,
84730 +                                           struct NTFS_DE *re)
84732 +       u32 used = le32_to_cpu(hdr->used);
84733 +       u16 esize = le16_to_cpu(re->size);
84734 +       u32 off = PtrOffset(hdr, re);
84735 +       int bytes = used - (off + esize);
84737 +       if (off >= used || esize < sizeof(struct NTFS_DE) ||
84738 +           bytes < sizeof(struct NTFS_DE))
84739 +               return NULL;
84741 +       hdr->used = cpu_to_le32(used - esize);
84742 +       memmove(re, Add2Ptr(re, esize), bytes);
84744 +       return re;
84747 +void indx_clear(struct ntfs_index *indx)
84749 +       run_close(&indx->alloc_run);
84750 +       run_close(&indx->bitmap_run);
84753 +int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
84754 +             const struct ATTRIB *attr, enum index_mutex_classed type)
84756 +       u32 t32;
84757 +       const struct INDEX_ROOT *root = resident_data(attr);
84759 +       /* Check root fields */
84760 +       if (!root->index_block_clst)
84761 +               return -EINVAL;
84763 +       indx->type = type;
84764 +       indx->idx2vbn_bits = __ffs(root->index_block_clst);
84766 +       t32 = le32_to_cpu(root->index_block_size);
84767 +       indx->index_bits = blksize_bits(t32);
84769 +       /* Check index record size */
84770 +       if (t32 < sbi->cluster_size) {
84771 +               /* The index record is smaller than a cluster; address it in 512-byte blocks */
84772 +               if (t32 != root->index_block_clst * SECTOR_SIZE)
84773 +                       return -EINVAL;
84775 +               /* Check alignment to a cluster */
84776 +               if ((sbi->cluster_size >> SECTOR_SHIFT) &
84777 +                   (root->index_block_clst - 1)) {
84778 +                       return -EINVAL;
84779 +               }
84781 +               indx->vbn2vbo_bits = SECTOR_SHIFT;
84782 +       } else {
84783 +               /* index record must be a multiple of cluster size */
84784 +               if (t32 != root->index_block_clst << sbi->cluster_bits)
84785 +                       return -EINVAL;
84787 +               indx->vbn2vbo_bits = sbi->cluster_bits;
84788 +       }
84790 +       init_rwsem(&indx->run_lock);
84792 +       indx->cmp = get_cmp_func(root);
84793 +       return indx->cmp ? 0 : -EINVAL;
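+/*
+ * Geometry examples for the checks above, with illustrative values:
+ * with 4K clusters and index_block_size == 4096 the record is
+ * cluster-sized, so index_block_clst == 1, idx2vbn_bits == 0 and
+ * vbn2vbo_bits == cluster_bits.  With 64K clusters and a 4K index
+ * record, index_block_clst == 8 (512-byte units), so
+ * vbn2vbo_bits == SECTOR_SHIFT and idx2vbn_bits == __ffs(8) == 3.
+ */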
84796 +static struct indx_node *indx_new(struct ntfs_index *indx,
84797 +                                 struct ntfs_inode *ni, CLST vbn,
84798 +                                 const __le64 *sub_vbn)
84800 +       int err;
84801 +       struct NTFS_DE *e;
84802 +       struct indx_node *r;
84803 +       struct INDEX_HDR *hdr;
84804 +       struct INDEX_BUFFER *index;
84805 +       u64 vbo = (u64)vbn << indx->vbn2vbo_bits;
84806 +       u32 bytes = 1u << indx->index_bits;
84807 +       u16 fn;
84808 +       u32 eo;
84810 +       r = ntfs_zalloc(sizeof(struct indx_node));
84811 +       if (!r)
84812 +               return ERR_PTR(-ENOMEM);
84814 +       index = ntfs_zalloc(bytes);
84815 +       if (!index) {
84816 +               ntfs_free(r);
84817 +               return ERR_PTR(-ENOMEM);
84818 +       }
84820 +       err = ntfs_get_bh(ni->mi.sbi, &indx->alloc_run, vbo, bytes, &r->nb);
84822 +       if (err) {
84823 +               ntfs_free(index);
84824 +               ntfs_free(r);
84825 +               return ERR_PTR(err);
84826 +       }
84828 +       /* Create header */
84829 +       index->rhdr.sign = NTFS_INDX_SIGNATURE;
84830 +       index->rhdr.fix_off = cpu_to_le16(sizeof(struct INDEX_BUFFER)); // 0x28
84831 +       fn = (bytes >> SECTOR_SHIFT) + 1; // 9
84832 +       index->rhdr.fix_num = cpu_to_le16(fn);
84833 +       index->vbn = cpu_to_le64(vbn);
84834 +       hdr = &index->ihdr;
84835 +       eo = QuadAlign(sizeof(struct INDEX_BUFFER) + fn * sizeof(short));
84836 +       hdr->de_off = cpu_to_le32(eo);
84838 +       e = Add2Ptr(hdr, eo);
84840 +       if (sub_vbn) {
84841 +               e->flags = NTFS_IE_LAST | NTFS_IE_HAS_SUBNODES;
84842 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64));
84843 +               hdr->used =
84844 +                       cpu_to_le32(eo + sizeof(struct NTFS_DE) + sizeof(u64));
84845 +               de_set_vbn_le(e, *sub_vbn);
84846 +               hdr->flags = 1;
84847 +       } else {
84848 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
84849 +               hdr->used = cpu_to_le32(eo + sizeof(struct NTFS_DE));
84850 +               e->flags = NTFS_IE_LAST;
84851 +       }
84853 +       hdr->total = cpu_to_le32(bytes - offsetof(struct INDEX_BUFFER, ihdr));
84855 +       r->index = index;
84856 +       return r;
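+/*
+ * Layout sketch for a 4K buffer built by indx_new() above (assuming
+ * QuadAlign() rounds up to a multiple of 8): fn = (4096 >> 9) + 1 = 9
+ * update-sequence entries, matching the "// 9" note, and the first
+ * entry lands at eo = QuadAlign(0x28 + 9 * 2) = QuadAlign(0x3a) = 0x40
+ * from the index header.
+ */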
84859 +struct INDEX_ROOT *indx_get_root(struct ntfs_index *indx, struct ntfs_inode *ni,
84860 +                                struct ATTRIB **attr, struct mft_inode **mi)
84862 +       struct ATTR_LIST_ENTRY *le = NULL;
84863 +       struct ATTRIB *a;
84864 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
84866 +       a = ni_find_attr(ni, NULL, &le, ATTR_ROOT, in->name, in->name_len, NULL,
84867 +                        mi);
84868 +       if (!a)
84869 +               return NULL;
84871 +       if (attr)
84872 +               *attr = a;
84874 +       return resident_data_ex(a, sizeof(struct INDEX_ROOT));
84877 +static int indx_write(struct ntfs_index *indx, struct ntfs_inode *ni,
84878 +                     struct indx_node *node, int sync)
84880 +       struct INDEX_BUFFER *ib = node->index;
84882 +       return ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &node->nb, sync);
84886 + * If ntfs_readdir calls this function, the inode is shared-locked
84887 + * and ni_lock is not held; use the rw_semaphore to serialize
84888 + * read/write access to alloc_run.
84889 + */
84890 +int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
84891 +             struct indx_node **node)
84893 +       int err;
84894 +       struct INDEX_BUFFER *ib;
84895 +       struct runs_tree *run = &indx->alloc_run;
84896 +       struct rw_semaphore *lock = &indx->run_lock;
84897 +       u64 vbo = (u64)vbn << indx->vbn2vbo_bits;
84898 +       u32 bytes = 1u << indx->index_bits;
84899 +       struct indx_node *in = *node;
84900 +       const struct INDEX_NAMES *name;
84902 +       if (!in) {
84903 +               in = ntfs_zalloc(sizeof(struct indx_node));
84904 +               if (!in)
84905 +                       return -ENOMEM;
84906 +       } else {
84907 +               nb_put(&in->nb);
84908 +       }
84910 +       ib = in->index;
84911 +       if (!ib) {
84912 +               ib = ntfs_malloc(bytes);
84913 +               if (!ib) {
84914 +                       err = -ENOMEM;
84915 +                       goto out;
84916 +               }
84917 +       }
84919 +       down_read(lock);
84920 +       err = ntfs_read_bh(ni->mi.sbi, run, vbo, &ib->rhdr, bytes, &in->nb);
84921 +       up_read(lock);
84922 +       if (!err)
84923 +               goto ok;
84925 +       if (err == -E_NTFS_FIXUP)
84926 +               goto ok;
84928 +       if (err != -ENOENT)
84929 +               goto out;
84931 +       name = &s_index_names[indx->type];
84932 +       down_write(lock);
84933 +       err = attr_load_runs_range(ni, ATTR_ALLOC, name->name, name->name_len,
84934 +                                  run, vbo, vbo + bytes);
84935 +       up_write(lock);
84936 +       if (err)
84937 +               goto out;
84939 +       down_read(lock);
84940 +       err = ntfs_read_bh(ni->mi.sbi, run, vbo, &ib->rhdr, bytes, &in->nb);
84941 +       up_read(lock);
84942 +       if (err == -E_NTFS_FIXUP)
84943 +               goto ok;
84945 +       if (err)
84946 +               goto out;
84948 +ok:
84949 +       if (err == -E_NTFS_FIXUP) {
84950 +               ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &in->nb, 0);
84951 +               err = 0;
84952 +       }
84954 +       in->index = ib;
84955 +       *node = in;
84957 +out:
84958 +       if (ib != in->index)
84959 +               ntfs_free(ib);
84961 +       if (*node != in) {
84962 +               nb_put(&in->nb);
84963 +               ntfs_free(in);
84964 +       }
84966 +       return err;
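+/*
+ * Note on the -E_NTFS_FIXUP handling above: indx_read() treats this
+ * pseudo-error from ntfs_read_bh() as recoverable; it immediately writes
+ * the buffer back via ntfs_write_bh() (presumably to heal the on-disk
+ * update-sequence fixups) and clears the error.
+ */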
84970 + * indx_find
84971 + *
84972 + * Scans the NTFS directory for the given entry.
84973 + */
84974 +int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
84975 +             const struct INDEX_ROOT *root, const void *key, size_t key_len,
84976 +             const void *ctx, int *diff, struct NTFS_DE **entry,
84977 +             struct ntfs_fnd *fnd)
84979 +       int err;
84980 +       struct NTFS_DE *e;
84981 +       const struct INDEX_HDR *hdr;
84982 +       struct indx_node *node;
84984 +       if (!root)
84985 +               root = indx_get_root(&ni->dir, ni, NULL, NULL);
84987 +       if (!root) {
84988 +               err = -EINVAL;
84989 +               goto out;
84990 +       }
84992 +       hdr = &root->ihdr;
84994 +       /* Check cache */
84995 +       e = fnd->level ? fnd->de[fnd->level - 1] : fnd->root_de;
84996 +       if (e && !de_is_last(e) &&
84997 +           !(*indx->cmp)(key, key_len, e + 1, le16_to_cpu(e->key_size), ctx)) {
84998 +               *entry = e;
84999 +               *diff = 0;
85000 +               return 0;
85001 +       }
85003 +       /* Soft finder reset */
85004 +       fnd_clear(fnd);
85006 +       /* Look up the entry that is <= the search value */
85007 +       e = hdr_find_e(indx, hdr, key, key_len, ctx, diff);
85008 +       if (!e)
85009 +               return -EINVAL;
85011 +       if (fnd)
85012 +               fnd->root_de = e;
85014 +       err = 0;
85016 +       for (;;) {
85017 +               node = NULL;
85018 +               if (*diff >= 0 || !de_has_vcn_ex(e)) {
85019 +                       *entry = e;
85020 +                       goto out;
85021 +               }
85023 +               /* Read next level. */
85024 +               err = indx_read(indx, ni, de_get_vbn(e), &node);
85025 +               if (err)
85026 +                       goto out;
85028 +               /* Look up the entry that is <= the search value */
85029 +               e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx,
85030 +                              diff);
85031 +               if (!e) {
85032 +                       err = -EINVAL;
85033 +                       put_indx_node(node);
85034 +                       goto out;
85035 +               }
85037 +               fnd_push(fnd, node, e);
85038 +       }
85040 +out:
85041 +       return err;
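+/*
+ * Minimal call sketch for indx_find() (hypothetical caller, not part of
+ * this patch; key/key_len/ctx are whatever this index's cmp function
+ * expects):
+ *
+ *     struct ntfs_fnd *fnd = fnd_get();
+ *     struct NTFS_DE *e;
+ *     int diff;
+ *
+ *     if (fnd &&
+ *         !indx_find(indx, ni, NULL, key, key_len, ctx, &diff, &e, fnd) &&
+ *         !diff) {
+ *             // 'e' is the matching entry; fnd holds the b-tree trail
+ *     }
+ *     fnd_put(fnd);
+ */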
85044 +int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,
85045 +                  const struct INDEX_ROOT *root, struct NTFS_DE **entry,
85046 +                  struct ntfs_fnd *fnd)
85048 +       int err;
85049 +       struct indx_node *n = NULL;
85050 +       struct NTFS_DE *e;
85051 +       size_t iter = 0;
85052 +       int level = fnd->level;
85054 +       if (!*entry) {
85055 +               /* Start find */
85056 +               e = hdr_first_de(&root->ihdr);
85057 +               if (!e)
85058 +                       return 0;
85059 +               fnd_clear(fnd);
85060 +               fnd->root_de = e;
85061 +       } else if (!level) {
85062 +               if (de_is_last(fnd->root_de)) {
85063 +                       *entry = NULL;
85064 +                       return 0;
85065 +               }
85067 +               e = hdr_next_de(&root->ihdr, fnd->root_de);
85068 +               if (!e)
85069 +                       return -EINVAL;
85070 +               fnd->root_de = e;
85071 +       } else {
85072 +               n = fnd->nodes[level - 1];
85073 +               e = fnd->de[level - 1];
85075 +               if (de_is_last(e))
85076 +                       goto pop_level;
85078 +               e = hdr_next_de(&n->index->ihdr, e);
85079 +               if (!e)
85080 +                       return -EINVAL;
85082 +               fnd->de[level - 1] = e;
85083 +       }
85085 +       /* Iteration cap, just to avoid a cycle in the tree */
85086 +next_iter:
85087 +       if (iter++ >= 1000)
85088 +               return -EINVAL;
85090 +       while (de_has_vcn_ex(e)) {
85091 +               if (le16_to_cpu(e->size) <
85092 +                   sizeof(struct NTFS_DE) + sizeof(u64)) {
85093 +                       if (n) {
85094 +                               fnd_pop(fnd);
85095 +                               ntfs_free(n);
85096 +                       }
85097 +                       return -EINVAL;
85098 +               }
85100 +               /* Read next level */
85101 +               err = indx_read(indx, ni, de_get_vbn(e), &n);
85102 +               if (err)
85103 +                       return err;
85105 +               /* Try next level */
85106 +               e = hdr_first_de(&n->index->ihdr);
85107 +               if (!e) {
85108 +                       ntfs_free(n);
85109 +                       return -EINVAL;
85110 +               }
85112 +               fnd_push(fnd, n, e);
85113 +       }
85115 +       if (le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) {
85116 +               *entry = e;
85117 +               return 0;
85118 +       }
85120 +pop_level:
85121 +       for (;;) {
85122 +               if (!de_is_last(e))
85123 +                       goto next_iter;
85125 +               /* Pop one level */
85126 +               if (n) {
85127 +                       fnd_pop(fnd);
85128 +                       ntfs_free(n);
85129 +               }
85131 +               level = fnd->level;
85133 +               if (level) {
85134 +                       n = fnd->nodes[level - 1];
85135 +                       e = fnd->de[level - 1];
85136 +               } else if (fnd->root_de) {
85137 +                       n = NULL;
85138 +                       e = fnd->root_de;
85139 +                       fnd->root_de = NULL;
85140 +               } else {
85141 +                       *entry = NULL;
85142 +                       return 0;
85143 +               }
85145 +               if (le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) {
85146 +                       *entry = e;
85147 +                       if (!fnd->root_de)
85148 +                               fnd->root_de = e;
85149 +                       return 0;
85150 +               }
85151 +       }
85154 +int indx_find_raw(struct ntfs_index *indx, struct ntfs_inode *ni,
85155 +                 const struct INDEX_ROOT *root, struct NTFS_DE **entry,
85156 +                 size_t *off, struct ntfs_fnd *fnd)
85158 +       int err;
85159 +       struct indx_node *n = NULL;
85160 +       struct NTFS_DE *e = NULL;
85161 +       struct NTFS_DE *e2;
85162 +       size_t bit;
85163 +       CLST next_used_vbn;
85164 +       CLST next_vbn;
85165 +       u32 record_size = ni->mi.sbi->record_size;
85167 +       /* Use the non-sorted (raw) enumeration algorithm */
85168 +       if (!*entry) {
85169 +               /* This is the first call */
85170 +               e = hdr_first_de(&root->ihdr);
85171 +               if (!e)
85172 +                       return 0;
85173 +               fnd_clear(fnd);
85174 +               fnd->root_de = e;
85176 +               /* First call with an initial offset to resume from */
85177 +               if (*off >= record_size) {
85178 +                       next_vbn = (((*off - record_size) >> indx->index_bits))
85179 +                                  << indx->idx2vbn_bits;
85180 +                       /* Jump into the 'for' loop below */
85181 +                       goto next;
85182 +               }
85184 +               /* Start enumeration from root */
85185 +               *off = 0;
85186 +       } else if (!fnd->root_de)
85187 +               return -EINVAL;
85189 +       for (;;) {
85190 +               /* Check if current entry can be used */
85191 +               if (e && le16_to_cpu(e->size) > sizeof(struct NTFS_DE))
85192 +                       goto ok;
85194 +               if (!fnd->level) {
85195 +                       /* Continue to enumerate root */
85196 +                       if (!de_is_last(fnd->root_de)) {
85197 +                               e = hdr_next_de(&root->ihdr, fnd->root_de);
85198 +                               if (!e)
85199 +                                       return -EINVAL;
85200 +                               fnd->root_de = e;
85201 +                               continue;
85202 +                       }
85204 +                       /* Start to enumerate indexes from 0 */
85205 +                       next_vbn = 0;
85206 +               } else {
85207 +                       /* Continue to enumerate indexes */
85208 +                       e2 = fnd->de[fnd->level - 1];
85210 +                       n = fnd->nodes[fnd->level - 1];
85212 +                       if (!de_is_last(e2)) {
85213 +                               e = hdr_next_de(&n->index->ihdr, e2);
85214 +                               if (!e)
85215 +                                       return -EINVAL;
85216 +                               fnd->de[fnd->level - 1] = e;
85217 +                               continue;
85218 +                       }
85220 +                       /* Continue with next index */
85221 +                       next_vbn = le64_to_cpu(n->index->vbn) +
85222 +                                  root->index_block_clst;
85223 +               }
85225 +next:
85226 +               /* Release current index */
85227 +               if (n) {
85228 +                       fnd_pop(fnd);
85229 +                       put_indx_node(n);
85230 +                       n = NULL;
85231 +               }
85233 +               /* Skip all free indexes */
85234 +               bit = next_vbn >> indx->idx2vbn_bits;
85235 +               err = indx_used_bit(indx, ni, &bit);
85236 +               if (err == -ENOENT || bit == MINUS_ONE_T) {
85237 +                       /* No used indexes */
85238 +                       *entry = NULL;
85239 +                       return 0;
85240 +               }
85242 +               next_used_vbn = bit << indx->idx2vbn_bits;
85244 +               /* Read buffer into memory */
85245 +               err = indx_read(indx, ni, next_used_vbn, &n);
85246 +               if (err)
85247 +                       return err;
85249 +               e = hdr_first_de(&n->index->ihdr);
85250 +               fnd_push(fnd, n, e);
85251 +               if (!e)
85252 +                       return -EINVAL;
85253 +       }
85255 +ok:
85256 +       /* Return the offset so the enumerator can be restored if necessary */
85257 +       if (!n) {
85258 +               /* 'e' points into the root */
85259 +               *off = PtrOffset(&root->ihdr, e);
85260 +       } else {
85261 +               /* 'e' points into an index buffer */
85262 +               *off = (le64_to_cpu(n->index->vbn) << indx->vbn2vbo_bits) +
85263 +                      record_size + PtrOffset(&n->index->ihdr, e);
85264 +       }
85266 +       *entry = e;
85267 +       return 0;
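+/*
+ * Offset encoding used by indx_find_raw() above, with illustrative
+ * numbers: an *off below record_size denotes a position inside the
+ * root, anything else encodes
+ * (vbn << vbn2vbo_bits) + record_size + offset-within-ihdr. E.g. with
+ * record_size = 1024, 4K index blocks and idx2vbn_bits = 0, an entry at
+ * ihdr offset 0x80 of block vbn = 3 gives
+ * *off = 3 * 4096 + 1024 + 0x80 = 13440, and the resume path above
+ * recovers next_vbn = ((13440 - 1024) >> 12) << 0 = 3.
+ */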
85271 + * indx_create_allocate
85272 + *
85273 + * create "Allocation + Bitmap" attributes
85274 + */
85275 +static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
85276 +                               CLST *vbn)
85278 +       int err = -ENOMEM;
85279 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
85280 +       struct ATTRIB *bitmap;
85281 +       struct ATTRIB *alloc;
85282 +       u32 data_size = 1u << indx->index_bits;
85283 +       u32 alloc_size = ntfs_up_cluster(sbi, data_size);
85284 +       CLST len = alloc_size >> sbi->cluster_bits;
85285 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
85286 +       CLST alen;
85287 +       struct runs_tree run;
85289 +       run_init(&run);
85291 +       err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, 0, &alen, 0,
85292 +                                    NULL);
85293 +       if (err)
85294 +               goto out;
85296 +       err = ni_insert_nonresident(ni, ATTR_ALLOC, in->name, in->name_len,
85297 +                                   &run, 0, len, 0, &alloc, NULL);
85298 +       if (err)
85299 +               goto out1;
85301 +       alloc->nres.valid_size = alloc->nres.data_size = cpu_to_le64(data_size);
85303 +       err = ni_insert_resident(ni, bitmap_size(1), ATTR_BITMAP, in->name,
85304 +                                in->name_len, &bitmap, NULL);
85305 +       if (err)
85306 +               goto out2;
85308 +       if (in->name == I30_NAME) {
85309 +               ni->vfs_inode.i_size = data_size;
85310 +               inode_set_bytes(&ni->vfs_inode, alloc_size);
85311 +       }
85313 +       memcpy(&indx->alloc_run, &run, sizeof(run));
85315 +       *vbn = 0;
85317 +       return 0;
85319 +out2:
85320 +       mi_remove_attr(&ni->mi, alloc);
85322 +out1:
85323 +       run_deallocate(sbi, &run, false);
85325 +out:
85326 +       return err;
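+/*
+ * Size note for indx_create_allocate() above (illustrative, assuming
+ * ntfs_up_cluster() rounds up to whole clusters): data_size is one index
+ * record (1 << index_bits), so 4K records on a 64K-cluster volume
+ * allocate one 64K cluster (len = 1) while only data_size bytes are
+ * valid. The initial bitmap only has to cover one bit, hence
+ * bitmap_size(1).
+ */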
85330 + * indx_add_allocate
85331 + *
85332 + * add clusters to index
85333 + */
85334 +static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
85335 +                            CLST *vbn)
85337 +       int err;
85338 +       size_t bit;
85339 +       u64 data_size;
85340 +       u64 bmp_size, bmp_size_v;
85341 +       struct ATTRIB *bmp, *alloc;
85342 +       struct mft_inode *mi;
85343 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
85345 +       err = indx_find_free(indx, ni, &bit, &bmp);
85346 +       if (err)
85347 +               goto out1;
85349 +       if (bit != MINUS_ONE_T) {
85350 +               bmp = NULL;
85351 +       } else {
85352 +               if (bmp->non_res) {
85353 +                       bmp_size = le64_to_cpu(bmp->nres.data_size);
85354 +                       bmp_size_v = le64_to_cpu(bmp->nres.valid_size);
85355 +               } else {
85356 +                       bmp_size = bmp_size_v = le32_to_cpu(bmp->res.data_size);
85357 +               }
85359 +               bit = bmp_size << 3;
85360 +       }
85362 +       data_size = (u64)(bit + 1) << indx->index_bits;
85364 +       if (bmp) {
85365 +               /* Increase bitmap */
85366 +               err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
85367 +                                   &indx->bitmap_run, bitmap_size(bit + 1),
85368 +                                   NULL, true, NULL);
85369 +               if (err)
85370 +                       goto out1;
85371 +       }
85373 +       alloc = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, in->name, in->name_len,
85374 +                            NULL, &mi);
85375 +       if (!alloc) {
85376 +               if (bmp)
85377 +                       goto out2;
85378 +               goto out1;
85379 +       }
85381 +       /* Increase allocation */
85382 +       err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
85383 +                           &indx->alloc_run, data_size, &data_size, true,
85384 +                           NULL);
85385 +       if (err) {
85386 +               if (bmp)
85387 +                       goto out2;
85388 +               goto out1;
85389 +       }
85391 +       *vbn = bit << indx->idx2vbn_bits;
85393 +       return 0;
85395 +out2:
85396 +       /* Oops (no space?) - restore the bitmap size */
85397 +       attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
85398 +                     &indx->bitmap_run, bmp_size, &bmp_size_v, false, NULL);
85400 +out1:
85401 +       return err;
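+/*
+ * Bit/vbn correspondence in indx_add_allocate() above: bitmap bit b
+ * stands for index block b, so the new block's address is
+ * *vbn = b << idx2vbn_bits. E.g. with idx2vbn_bits = 3 (4K records on
+ * a 64K-cluster volume) the fourth block, b = 3, lives at vbn 24.
+ */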
85405 + * indx_insert_into_root
85406 + *
85407 + * attempts to insert an entry into the index root.
85408 + * If necessary, it will twiddle the index b-tree.
85409 + */
85410 +static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
85411 +                                const struct NTFS_DE *new_de,
85412 +                                struct NTFS_DE *root_de, const void *ctx,
85413 +                                struct ntfs_fnd *fnd)
85415 +       int err = 0;
85416 +       struct NTFS_DE *e, *e0, *re;
85417 +       struct mft_inode *mi;
85418 +       struct ATTRIB *attr;
85419 +       struct MFT_REC *rec;
85420 +       struct INDEX_HDR *hdr;
85421 +       struct indx_node *n;
85422 +       CLST new_vbn;
85423 +       __le64 *sub_vbn, t_vbn;
85424 +       u16 new_de_size;
85425 +       u32 hdr_used, hdr_total, asize, used, to_move;
85426 +       u32 root_size, new_root_size;
85427 +       struct ntfs_sb_info *sbi;
85428 +       int ds_root;
85429 +       struct INDEX_ROOT *root, *a_root = NULL;
85431 +       /* Get the record this root is placed in */
85432 +       root = indx_get_root(indx, ni, &attr, &mi);
85433 +       if (!root)
85434 +               goto out;
85436 +       /*
85437 +        * Try easy case:
85438 +        * hdr_insert_de will succeed if there's room in the root for the new entry.
85439 +        */
85440 +       hdr = &root->ihdr;
85441 +       sbi = ni->mi.sbi;
85442 +       rec = mi->mrec;
85443 +       used = le32_to_cpu(rec->used);
85444 +       new_de_size = le16_to_cpu(new_de->size);
85445 +       hdr_used = le32_to_cpu(hdr->used);
85446 +       hdr_total = le32_to_cpu(hdr->total);
85447 +       asize = le32_to_cpu(attr->size);
85448 +       root_size = le32_to_cpu(attr->res.data_size);
85450 +       ds_root = new_de_size + hdr_used - hdr_total;
85452 +       if (used + ds_root < sbi->max_bytes_per_attr) {
85453 +               /* Make room for the new elements */
85454 +               mi_resize_attr(mi, attr, ds_root);
85455 +               hdr->total = cpu_to_le32(hdr_total + ds_root);
85456 +               e = hdr_insert_de(indx, hdr, new_de, root_de, ctx);
85457 +               WARN_ON(!e);
85458 +               fnd_clear(fnd);
85459 +               fnd->root_de = e;
85461 +               return 0;
85462 +       }
85464 +       /* Make a copy of the root attribute so it can be restored on error */
85465 +       a_root = ntfs_memdup(attr, asize);
85466 +       if (!a_root) {
85467 +               err = -ENOMEM;
85468 +               goto out;
85469 +       }
85471 +       /* Copy all the non-end entries from the index root to the new buffer. */
85472 +       to_move = 0;
85473 +       e0 = hdr_first_de(hdr);
85475 +       /* Calculate the size to copy */
85476 +       for (e = e0;; e = hdr_next_de(hdr, e)) {
85477 +               if (!e) {
85478 +                       err = -EINVAL;
85479 +                       goto out;
85480 +               }
85482 +               if (de_is_last(e))
85483 +                       break;
85484 +               to_move += le16_to_cpu(e->size);
85485 +       }
85487 +       n = NULL;
85488 +       if (!to_move) {
85489 +               re = NULL;
85490 +       } else {
85491 +               re = ntfs_memdup(e0, to_move);
85492 +               if (!re) {
85493 +                       err = -ENOMEM;
85494 +                       goto out;
85495 +               }
85496 +       }
85498 +       sub_vbn = NULL;
85499 +       if (de_has_vcn(e)) {
85500 +               t_vbn = de_get_vbn_le(e);
85501 +               sub_vbn = &t_vbn;
85502 +       }
85504 +       new_root_size = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE) +
85505 +                       sizeof(u64);
85506 +       ds_root = new_root_size - root_size;
85508 +       if (ds_root > 0 && used + ds_root > sbi->max_bytes_per_attr) {
85509 +               /* make root external */
85510 +               err = -EOPNOTSUPP;
85511 +               goto out;
85512 +       }
85514 +       if (ds_root)
85515 +               mi_resize_attr(mi, attr, ds_root);
85517 +       /* Fill first entry (vcn will be set later) */
85518 +       e = (struct NTFS_DE *)(root + 1);
85519 +       memset(e, 0, sizeof(struct NTFS_DE));
85520 +       e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64));
85521 +       e->flags = NTFS_IE_HAS_SUBNODES | NTFS_IE_LAST;
85523 +       hdr->flags = 1;
85524 +       hdr->used = hdr->total =
85525 +               cpu_to_le32(new_root_size - offsetof(struct INDEX_ROOT, ihdr));
85527 +       fnd->root_de = hdr_first_de(hdr);
85528 +       mi->dirty = true;
85530 +       /* Create alloc and bitmap attributes (if they do not exist yet) */
85531 +       err = run_is_empty(&indx->alloc_run)
85532 +                     ? indx_create_allocate(indx, ni, &new_vbn)
85533 +                     : indx_add_allocate(indx, ni, &new_vbn);
85535 +       /* The record layout may have changed, so rescan the root */
85536 +       root = indx_get_root(indx, ni, &attr, &mi);
85537 +       if (!root) {
85538 +               /* bug? */
85539 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
85540 +               err = -EINVAL;
85541 +               goto out1;
85542 +       }
85544 +       if (err) {
85545 +               /* restore root */
85546 +               if (mi_resize_attr(mi, attr, -ds_root))
85547 +                       memcpy(attr, a_root, asize);
85548 +               else {
85549 +                       /* bug? */
85550 +                       ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
85551 +               }
85552 +               goto out1;
85553 +       }
85555 +       e = (struct NTFS_DE *)(root + 1);
85556 +       *(__le64 *)(e + 1) = cpu_to_le64(new_vbn);
85557 +       mi->dirty = true;
85559 +       /* Now we can create/format the new buffer and copy the entries into it */
85560 +       n = indx_new(indx, ni, new_vbn, sub_vbn);
85561 +       if (IS_ERR(n)) {
85562 +               err = PTR_ERR(n);
85563 +               goto out1;
85564 +       }
85566 +       hdr = &n->index->ihdr;
85567 +       hdr_used = le32_to_cpu(hdr->used);
85568 +       hdr_total = le32_to_cpu(hdr->total);
85570 +       /* Copy root entries into new buffer */
85571 +       hdr_insert_head(hdr, re, to_move);
85573 +       /* Update bitmap attribute */
85574 +       indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits);
85576 +       /* Check if we can insert the new entry into the new index buffer */
85577 +       if (hdr_used + new_de_size > hdr_total) {
85578 +               /*
85579 +                * This occurs if the mft record is the same size as or bigger
85580 +                * than the index buffer. We moved all the root entries into the
85581 +                * new index and still have no room for the new entry. In the
85582 +                * classic case (1K mft record, 4K index buffer) this should not occur.
85583 +                */
85584 +               ntfs_free(re);
85585 +               indx_write(indx, ni, n, 0);
85587 +               put_indx_node(n);
85588 +               fnd_clear(fnd);
85589 +               err = indx_insert_entry(indx, ni, new_de, ctx, fnd);
85590 +               goto out;
85591 +       }
85593 +       /*
85594 +        * Now the root is a parent for the new index buffer.
85595 +        * Insert NewEntry into the new buffer.
85596 +        */
85597 +       e = hdr_insert_de(indx, hdr, new_de, NULL, ctx);
85598 +       if (!e) {
85599 +               err = -EINVAL;
85600 +               goto out1;
85601 +       }
85602 +       fnd_push(fnd, n, e);
85604 +       /* Just write the updated index to disk */
85605 +       indx_write(indx, ni, n, 0);
85607 +       n = NULL;
85609 +out1:
85610 +       ntfs_free(re);
85611 +       if (n)
85612 +               put_indx_node(n);
85614 +out:
85615 +       ntfs_free(a_root);
85616 +       return err;
85620 + * indx_insert_into_buffer
85621 + *
85622 + * attempts to insert an entry into an Index Allocation Buffer.
85623 + * If necessary, it will split the buffer.
85624 + */
85625 +static int
85626 +indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
85627 +                       struct INDEX_ROOT *root, const struct NTFS_DE *new_de,
85628 +                       const void *ctx, int level, struct ntfs_fnd *fnd)
85630 +       int err;
85631 +       const struct NTFS_DE *sp;
85632 +       struct NTFS_DE *e, *de_t, *up_e = NULL;
85633 +       struct indx_node *n2 = NULL;
85634 +       struct indx_node *n1 = fnd->nodes[level];
85635 +       struct INDEX_HDR *hdr1 = &n1->index->ihdr;
85636 +       struct INDEX_HDR *hdr2;
85637 +       u32 to_copy, used;
85638 +       CLST new_vbn;
85639 +       __le64 t_vbn, *sub_vbn;
85640 +       u16 sp_size;
85642 +       /* Try the easiest case */
85643 +       e = fnd->level - 1 == level ? fnd->de[level] : NULL;
85644 +       e = hdr_insert_de(indx, hdr1, new_de, e, ctx);
85645 +       fnd->de[level] = e;
85646 +       if (e) {
85647 +               /* Just write the updated index to disk */
85648 +               indx_write(indx, ni, n1, 0);
85649 +               return 0;
85650 +       }
85652 +       /*
85653 +        * No space to insert into buffer. Split it.
85654 +        * To split we:
85655 +        *  - Save the split point (since the index buffers will be changed)
85656 +        *  - Allocate NewBuffer and copy all entries <= sp into the new buffer
85657 +        *  - Remove all entries (sp included) from TargetBuffer
85658 +        *  - Insert NewEntry into the left or right buffer (depending on
85659 +        *    sp <=> NewEntry)
85660 +        *  - Insert sp into the parent buffer (or root)
85661 +        *  - Make sp a parent for the new buffer
85662 +        */
85663 +       sp = hdr_find_split(hdr1);
85664 +       if (!sp)
85665 +               return -EINVAL;
85667 +       sp_size = le16_to_cpu(sp->size);
85668 +       up_e = ntfs_malloc(sp_size + sizeof(u64));
85669 +       if (!up_e)
85670 +               return -ENOMEM;
85671 +       memcpy(up_e, sp, sp_size);
85673 +       if (!hdr1->flags) {
85674 +               up_e->flags |= NTFS_IE_HAS_SUBNODES;
85675 +               up_e->size = cpu_to_le16(sp_size + sizeof(u64));
85676 +               sub_vbn = NULL;
85677 +       } else {
85678 +               t_vbn = de_get_vbn_le(up_e);
85679 +               sub_vbn = &t_vbn;
85680 +       }
85682 +       /* Allocate on disk a new index allocation buffer. */
85683 +       err = indx_add_allocate(indx, ni, &new_vbn);
85684 +       if (err)
85685 +               goto out;
85687 +       /* Allocate and format a new index buffer in memory */
85688 +       n2 = indx_new(indx, ni, new_vbn, sub_vbn);
85689 +       if (IS_ERR(n2)) {
85690 +               err = PTR_ERR(n2);
85691 +               goto out;
85692 +       }
85694 +       hdr2 = &n2->index->ihdr;
85696 +       /* Make sp a parent for new buffer */
85697 +       de_set_vbn(up_e, new_vbn);
85699 +       /* copy all the entries <= sp into the new buffer. */
85700 +       de_t = hdr_first_de(hdr1);
85701 +       to_copy = PtrOffset(de_t, sp);
85702 +       hdr_insert_head(hdr2, de_t, to_copy);
85704 +       /* Remove all entries (sp included) from hdr1 */
85705 +       used = le32_to_cpu(hdr1->used) - to_copy - sp_size;
85706 +       memmove(de_t, Add2Ptr(sp, sp_size), used - le32_to_cpu(hdr1->de_off));
85707 +       hdr1->used = cpu_to_le32(used);
85709 +       /* Insert new entry into left or right buffer (depending on sp <=> new_de) */
85710 +       hdr_insert_de(indx,
85711 +                     (*indx->cmp)(new_de + 1, le16_to_cpu(new_de->key_size),
85712 +                                  up_e + 1, le16_to_cpu(up_e->key_size),
85713 +                                  ctx) < 0
85714 +                             ? hdr2
85715 +                             : hdr1,
85716 +                     new_de, NULL, ctx);
85718 +       indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits);
85720 +       indx_write(indx, ni, n1, 0);
85721 +       indx_write(indx, ni, n2, 0);
85723 +       put_indx_node(n2);
85725 +       /*
85726 +        * We've finished splitting everybody, so we are ready to
85727 +        * insert the promoted entry into the parent.
85728 +        */
85729 +       if (!level) {
85730 +               /* Insert in root */
85731 +               err = indx_insert_into_root(indx, ni, up_e, NULL, ctx, fnd);
85732 +               if (err)
85733 +                       goto out;
85734 +       } else {
85735 +               /*
85736 +                * The target buffer's parent is another index buffer
85737 +                * TODO: Remove recursion
85738 +                */
85739 +               err = indx_insert_into_buffer(indx, ni, root, up_e, ctx,
85740 +                                             level - 1, fnd);
85741 +               if (err)
85742 +                       goto out;
85743 +       }
85745 +out:
85746 +       ntfs_free(up_e);
85748 +       return err;
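+/*
+ * Split arithmetic above, with illustrative numbers: if hdr1 has
+ * de_off = 0x28, used = 0x100, its first entry at offset 0x28 and the
+ * split point sp at offset 0x80 with sp_size = 0x18, then
+ * to_copy = 0x80 - 0x28 = 0x58 bytes move to the new buffer, the new
+ * used is 0x100 - 0x58 - 0x18 = 0x90, and memmove() slides the
+ * 0x90 - 0x28 = 0x68 bytes that followed sp down to the first-entry
+ * slot.
+ */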
85752 + * indx_insert_entry
85753 + *
85754 + * Inserts a new entry into the index.
85755 + */
85756 +int indx_insert_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
85757 +                     const struct NTFS_DE *new_de, const void *ctx,
85758 +                     struct ntfs_fnd *fnd)
85760 +       int err;
85761 +       int diff;
85762 +       struct NTFS_DE *e;
85763 +       struct ntfs_fnd *fnd_a = NULL;
85764 +       struct INDEX_ROOT *root;
85766 +       if (!fnd) {
85767 +               fnd_a = fnd_get();
85768 +               if (!fnd_a) {
85769 +                       err = -ENOMEM;
85770 +                       goto out1;
85771 +               }
85772 +               fnd = fnd_a;
85773 +       }
85775 +       root = indx_get_root(indx, ni, NULL, NULL);
85776 +       if (!root) {
85777 +               err = -EINVAL;
85778 +               goto out;
85779 +       }
85781 +       if (fnd_is_empty(fnd)) {
85782 +               /* Find the spot in the tree where we want to insert the new entry. */
85783 +               err = indx_find(indx, ni, root, new_de + 1,
85784 +                               le16_to_cpu(new_de->key_size), ctx, &diff, &e,
85785 +                               fnd);
85786 +               if (err)
85787 +                       goto out;
85789 +               if (!diff) {
85790 +                       err = -EEXIST;
85791 +                       goto out;
85792 +               }
85793 +       }
85795 +       if (!fnd->level) {
85796 +               /* The root is also a leaf, so we'll insert the new entry into it. */
85797 +               err = indx_insert_into_root(indx, ni, new_de, fnd->root_de, ctx,
85798 +                                           fnd);
85799 +               if (err)
85800 +                       goto out;
85801 +       } else {
85802 +               /* Found a leaf buffer, so we'll insert the new entry into it. */
85803 +               err = indx_insert_into_buffer(indx, ni, root, new_de, ctx,
85804 +                                             fnd->level - 1, fnd);
85805 +               if (err)
85806 +                       goto out;
85807 +       }
85809 +out:
85810 +       fnd_put(fnd_a);
85811 +out1:
85812 +       return err;
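+/*
+ * Minimal call sketch for indx_insert_entry() (hypothetical caller, not
+ * part of this patch). Passing fnd == NULL is allowed; a temporary
+ * finder is allocated internally:
+ *
+ *     err = indx_insert_entry(indx, ni, new_de, ctx, NULL);
+ *     if (err == -EEXIST)
+ *             ; // an entry with the same key already exists
+ */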
85816 + * indx_find_buffer
85817 + *
85818 + * Locates a buffer in the tree.
85819 + */
85820 +static struct indx_node *indx_find_buffer(struct ntfs_index *indx,
85821 +                                         struct ntfs_inode *ni,
85822 +                                         const struct INDEX_ROOT *root,
85823 +                                         __le64 vbn, struct indx_node *n)
85825 +       int err;
85826 +       const struct NTFS_DE *e;
85827 +       struct indx_node *r;
85828 +       const struct INDEX_HDR *hdr = n ? &n->index->ihdr : &root->ihdr;
85830 +       /* Step 1: Scan one level */
85831 +       for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) {
85832 +               if (!e)
85833 +                       return ERR_PTR(-EINVAL);
85835 +               if (de_has_vcn(e) && vbn == de_get_vbn_le(e))
85836 +                       return n;
85838 +               if (de_is_last(e))
85839 +                       break;
85840 +       }
85842 +       /* Step 2: Do recursion */
85843 +       e = Add2Ptr(hdr, le32_to_cpu(hdr->de_off));
85844 +       for (;;) {
85845 +               if (de_has_vcn_ex(e)) {
85846 +                       err = indx_read(indx, ni, de_get_vbn(e), &n);
85847 +                       if (err)
85848 +                               return ERR_PTR(err);
85850 +                       r = indx_find_buffer(indx, ni, root, vbn, n);
85851 +                       if (r)
85852 +                               return r;
85853 +               }
85855 +               if (de_is_last(e))
85856 +                       break;
85858 +               e = Add2Ptr(e, le16_to_cpu(e->size));
85859 +       }
85861 +       return NULL;
85865 + * indx_shrink
85866 + *
85867 + * deallocates unused tail indexes
85868 + */
85869 +static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
85870 +                      size_t bit)
85872 +       int err = 0;
85873 +       u64 bpb, new_data;
85874 +       size_t nbits;
85875 +       struct ATTRIB *b;
85876 +       struct ATTR_LIST_ENTRY *le = NULL;
85877 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
85879 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
85880 +                        NULL, NULL);
85882 +       if (!b)
85883 +               return -ENOENT;
85885 +       if (!b->non_res) {
85886 +               unsigned long pos;
85887 +               const unsigned long *bm = resident_data(b);
85889 +               nbits = le32_to_cpu(b->res.data_size) * 8;
85891 +               if (bit >= nbits)
85892 +                       return 0;
85894 +               pos = find_next_bit(bm, nbits, bit);
85895 +               if (pos < nbits)
85896 +                       return 0;
85897 +       } else {
85898 +               size_t used = MINUS_ONE_T;
85900 +               nbits = le64_to_cpu(b->nres.data_size) * 8;
85902 +               if (bit >= nbits)
85903 +                       return 0;
85905 +               err = scan_nres_bitmap(ni, b, indx, bit, &scan_for_used, &used);
85906 +               if (err)
85907 +                       return err;
85909 +               if (used != MINUS_ONE_T)
85910 +                       return 0;
85911 +       }
85913 +       new_data = (u64)bit << indx->index_bits;
85915 +       err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
85916 +                           &indx->alloc_run, new_data, &new_data, false, NULL);
85917 +       if (err)
85918 +               return err;
85920 +       bpb = bitmap_size(bit);
85921 +       if (bpb * 8 == nbits)
85922 +               return 0;
85924 +       err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
85925 +                           &indx->bitmap_run, bpb, &bpb, false, NULL);
85927 +       return err;
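+/*
+ * Example for the final check in indx_shrink() above (assuming
+ * bitmap_size() rounds the byte count up to an 8-byte boundary):
+ * truncating to bit = 100 gives bpb = bitmap_size(100) = 16 bytes, i.e.
+ * 128 bits; if the old bitmap already held nbits = 128 bits, only the
+ * allocation attribute shrinks and the bitmap keeps its size.
+ */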
85930 +static int indx_free_children(struct ntfs_index *indx, struct ntfs_inode *ni,
85931 +                             const struct NTFS_DE *e, bool trim)
85933 +       int err;
85934 +       struct indx_node *n;
85935 +       struct INDEX_HDR *hdr;
85936 +       CLST vbn = de_get_vbn(e);
85937 +       size_t i;
85939 +       err = indx_read(indx, ni, vbn, &n);
85940 +       if (err)
85941 +               return err;
85943 +       hdr = &n->index->ihdr;
85944 +       /* First, recurse into the children, if any. */
85945 +       if (hdr_has_subnode(hdr)) {
85946 +               for (e = hdr_first_de(hdr); e; e = hdr_next_de(hdr, e)) {
85947 +                       indx_free_children(indx, ni, e, false);
85948 +                       if (de_is_last(e))
85949 +                               break;
85950 +               }
85951 +       }
85953 +       put_indx_node(n);
85955 +       i = vbn >> indx->idx2vbn_bits;
85956 +       /* We've gotten rid of the children; add this buffer to the free list. */
85957 +       indx_mark_free(indx, ni, i);
85959 +       if (!trim)
85960 +               return 0;
85962 +       /*
85963 +        * If there are no used indexes after the current freed index,
85964 +        * then we can truncate the allocation and bitmap attributes.
85965 +        * Use the bitmap to check for this case.
85966 +        */
85967 +       indx_shrink(indx, ni, i + 1);
85968 +       return 0;
85972 + * indx_get_entry_to_replace
85973 + *
85974 + * Finds a replacement entry for a deleted entry.
85975 + * Always returns a node entry:
85976 + * NTFS_IE_HAS_SUBNODES is set in the flags and the size includes the sub_vcn.
85977 + */
85978 +static int indx_get_entry_to_replace(struct ntfs_index *indx,
85979 +                                    struct ntfs_inode *ni,
85980 +                                    const struct NTFS_DE *de_next,
85981 +                                    struct NTFS_DE **de_to_replace,
85982 +                                    struct ntfs_fnd *fnd)
85984 +       int err;
85985 +       int level = -1;
85986 +       CLST vbn;
85987 +       struct NTFS_DE *e, *te, *re;
85988 +       struct indx_node *n;
85989 +       struct INDEX_BUFFER *ib;
85991 +       *de_to_replace = NULL;
85993 +       /* Find first leaf entry down from de_next */
85994 +       vbn = de_get_vbn(de_next);
85995 +       for (;;) {
85996 +               n = NULL;
85997 +               err = indx_read(indx, ni, vbn, &n);
85998 +               if (err)
85999 +                       goto out;
86001 +               e = hdr_first_de(&n->index->ihdr);
86002 +               fnd_push(fnd, n, e);
86004 +               if (!de_is_last(e)) {
86005 +                       /*
86006 +                        * This buffer is non-empty, so its first entry could be used as the
86007 +                        * replacement entry.
86008 +                        */
86009 +                       level = fnd->level - 1;
86010 +               }
86012 +               if (!de_has_vcn(e))
86013 +                       break;
86015 +               /* This buffer is a node. Continue to go down */
86016 +               vbn = de_get_vbn(e);
86017 +       }
86019 +       if (level == -1)
86020 +               goto out;
86022 +       n = fnd->nodes[level];
86023 +       te = hdr_first_de(&n->index->ihdr);
86024 +       /* Copy the candidate entry into the replacement entry buffer. */
86025 +       re = ntfs_malloc(le16_to_cpu(te->size) + sizeof(u64));
86026 +       if (!re) {
86027 +               err = -ENOMEM;
86028 +               goto out;
86029 +       }
86031 +       *de_to_replace = re;
86032 +       memcpy(re, te, le16_to_cpu(te->size));
86034 +       if (!de_has_vcn(re)) {
86035 +               /*
86036 +                * The replacement entry we found doesn't have a sub_vcn.
86037 +                * Increase its size to hold one.
86038 +                */
86039 +               le16_add_cpu(&re->size, sizeof(u64));
86040 +               re->flags |= NTFS_IE_HAS_SUBNODES;
86041 +       } else {
86042 +               /*
86043 +                * The replacement entry we found was a node entry, which means that all
86044 +                * its child buffers are empty. Return them to the free pool.
86045 +                */
86046 +               indx_free_children(indx, ni, te, true);
86047 +       }
86049 +       /*
86050 +        * Expunge the replacement entry from its former location,
86051 +        * and then write that buffer.
86052 +        */
86053 +       ib = n->index;
86054 +       e = hdr_delete_de(&ib->ihdr, te);
86056 +       fnd->de[level] = e;
86057 +       indx_write(indx, ni, n, 0);
86059 +       /* Check to see if this action created an empty leaf. */
86060 +       if (ib_is_leaf(ib) && ib_is_empty(ib))
86061 +               return 0;
86063 +out:
86064 +       fnd_clear(fnd);
86065 +       return err;
86069 + * indx_delete_entry
86070 + *
86071 + * deletes an entry from the index.
86072 + */
86073 +int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
86074 +                     const void *key, u32 key_len, const void *ctx)
86076 +       int err, diff;
86077 +       struct INDEX_ROOT *root;
86078 +       struct INDEX_HDR *hdr;
86079 +       struct ntfs_fnd *fnd, *fnd2;
86080 +       struct INDEX_BUFFER *ib;
86081 +       struct NTFS_DE *e, *re, *next, *prev, *me;
86082 +       struct indx_node *n, *n2d = NULL;
86083 +       __le64 sub_vbn;
86084 +       int level, level2;
86085 +       struct ATTRIB *attr;
86086 +       struct mft_inode *mi;
86087 +       u32 e_size, root_size, new_root_size;
86088 +       size_t trim_bit;
86089 +       const struct INDEX_NAMES *in;
86091 +       fnd = fnd_get();
86092 +       if (!fnd) {
86093 +               err = -ENOMEM;
86094 +               goto out2;
86095 +       }
86097 +       fnd2 = fnd_get();
86098 +       if (!fnd2) {
86099 +               err = -ENOMEM;
86100 +               goto out1;
86101 +       }
86103 +       root = indx_get_root(indx, ni, &attr, &mi);
86104 +       if (!root) {
86105 +               err = -EINVAL;
86106 +               goto out;
86107 +       }
86109 +       /* Locate the entry to remove. */
86110 +       err = indx_find(indx, ni, root, key, key_len, ctx, &diff, &e, fnd);
86111 +       if (err)
86112 +               goto out;
86114 +       if (!e || diff) {
86115 +               err = -ENOENT;
86116 +               goto out;
86117 +       }
86119 +       level = fnd->level;
86121 +       if (level) {
86122 +               n = fnd->nodes[level - 1];
86123 +               e = fnd->de[level - 1];
86124 +               ib = n->index;
86125 +               hdr = &ib->ihdr;
86126 +       } else {
86127 +               hdr = &root->ihdr;
86128 +               e = fnd->root_de;
86129 +               n = NULL;
86130 +       }
86132 +       e_size = le16_to_cpu(e->size);
86134 +       if (!de_has_vcn_ex(e)) {
86135 +               /* The entry to delete is a leaf, so we can just rip it out */
86136 +               hdr_delete_de(hdr, e);
86138 +               if (!level) {
86139 +                       hdr->total = hdr->used;
86141 +                       /* Shrink resident root attribute */
86142 +                       mi_resize_attr(mi, attr, 0 - e_size);
86143 +                       goto out;
86144 +               }
86146 +               indx_write(indx, ni, n, 0);
86148 +               /*
86149 +                * Check to see if removing that entry made
86150 +                * the leaf empty.
86151 +                */
86152 +               if (ib_is_leaf(ib) && ib_is_empty(ib)) {
86153 +                       fnd_pop(fnd);
86154 +                       fnd_push(fnd2, n, e);
86155 +               }
86156 +       } else {
86157 +               /*
86158 +                * The entry we wish to delete is a node buffer, so we
86159 +                * have to find a replacement for it.
86160 +                */
86161 +               next = de_get_next(e);
86163 +               err = indx_get_entry_to_replace(indx, ni, next, &re, fnd2);
86164 +               if (err)
86165 +                       goto out;
86167 +               if (re) {
86168 +                       de_set_vbn_le(re, de_get_vbn_le(e));
86169 +                       hdr_delete_de(hdr, e);
86171 +                       err = level ? indx_insert_into_buffer(indx, ni, root,
86172 +                                                             re, ctx,
86173 +                                                             fnd->level - 1,
86174 +                                                             fnd)
86175 +                                   : indx_insert_into_root(indx, ni, re, e,
86176 +                                                           ctx, fnd);
86177 +                       ntfs_free(re);
86179 +                       if (err)
86180 +                               goto out;
86181 +               } else {
86182 +                       /*
86183 +                        * There is no replacement for the current entry.
86184 +                        * This means that the subtree rooted at its node is empty,
86185 +                        * and can be deleted, which in turn means that the node
86186 +                        * can just inherit the deleted entry's sub_vcn.
86187 +                        */
86188 +                       indx_free_children(indx, ni, next, true);
86190 +                       de_set_vbn_le(next, de_get_vbn_le(e));
86191 +                       hdr_delete_de(hdr, e);
86192 +                       if (level) {
86193 +                               indx_write(indx, ni, n, 0);
86194 +                       } else {
86195 +                               hdr->total = hdr->used;
86197 +                               /* Shrink resident root attribute */
86198 +                               mi_resize_attr(mi, attr, 0 - e_size);
86199 +                       }
86200 +               }
86201 +       }
86203 +       /* Delete a branch of the tree */
86204 +       if (!fnd2 || !fnd2->level)
86205 +               goto out;
86207 +       /* Re-read the root, since it may have changed */
86208 +       root = indx_get_root(indx, ni, &attr, &mi);
86209 +       if (!root) {
86210 +               err = -EINVAL;
86211 +               goto out;
86212 +       }
86214 +       n2d = NULL;
86215 +       sub_vbn = fnd2->nodes[0]->index->vbn;
86216 +       level2 = 0;
86217 +       level = fnd->level;
86219 +       hdr = level ? &fnd->nodes[level - 1]->index->ihdr : &root->ihdr;
86221 +       /* Scan current level */
86222 +       for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) {
86223 +               if (!e) {
86224 +                       err = -EINVAL;
86225 +                       goto out;
86226 +               }
86228 +               if (de_has_vcn(e) && sub_vbn == de_get_vbn_le(e))
86229 +                       break;
86231 +               if (de_is_last(e)) {
86232 +                       e = NULL;
86233 +                       break;
86234 +               }
86235 +       }
86237 +       if (!e) {
86238 +               /* Do slow search from root */
86239 +               struct indx_node *in;
86241 +               fnd_clear(fnd);
86243 +               in = indx_find_buffer(indx, ni, root, sub_vbn, NULL);
86244 +               if (IS_ERR(in)) {
86245 +                       err = PTR_ERR(in);
86246 +                       goto out;
86247 +               }
86249 +               if (in)
86250 +                       fnd_push(fnd, in, NULL);
86251 +       }
86253 +       /* Merge fnd2 -> fnd */
86254 +       for (level = 0; level < fnd2->level; level++) {
86255 +               fnd_push(fnd, fnd2->nodes[level], fnd2->de[level]);
86256 +               fnd2->nodes[level] = NULL;
86257 +       }
86258 +       fnd2->level = 0;
86260 +       hdr = NULL;
86261 +       for (level = fnd->level; level; level--) {
86262 +               struct indx_node *in = fnd->nodes[level - 1];
86264 +               ib = in->index;
86265 +               if (ib_is_empty(ib)) {
86266 +                       sub_vbn = ib->vbn;
86267 +               } else {
86268 +                       hdr = &ib->ihdr;
86269 +                       n2d = in;
86270 +                       level2 = level;
86271 +                       break;
86272 +               }
86273 +       }
86275 +       if (!hdr)
86276 +               hdr = &root->ihdr;
86278 +       e = hdr_first_de(hdr);
86279 +       if (!e) {
86280 +               err = -EINVAL;
86281 +               goto out;
86282 +       }
86284 +       if (hdr != &root->ihdr || !de_is_last(e)) {
86285 +               prev = NULL;
86286 +               while (!de_is_last(e)) {
86287 +                       if (de_has_vcn(e) && sub_vbn == de_get_vbn_le(e))
86288 +                               break;
86289 +                       prev = e;
86290 +                       e = hdr_next_de(hdr, e);
86291 +                       if (!e) {
86292 +                               err = -EINVAL;
86293 +                               goto out;
86294 +                       }
86295 +               }
86297 +               if (sub_vbn != de_get_vbn_le(e)) {
86298 +                       /*
86299 +                        * Didn't find the parent entry, although this buffer is on the parent trail.
86300 +                        * Something is corrupt.
86301 +                        */
86302 +                       err = -EINVAL;
86303 +                       goto out;
86304 +               }
86306 +               if (de_is_last(e)) {
86307 +                       /*
86308 +                        * Since we can't remove the end entry, we'll remove its
86309 +                        * predecessor instead. This means we have to transfer the
86310 +                        * predecessor's sub_vcn to the end entry.
86311 +                        * Note that this index block is not empty, so the
86312 +                        * predecessor must exist.
86313 +                        */
86314 +                       if (!prev) {
86315 +                               err = -EINVAL;
86316 +                               goto out;
86317 +                       }
86319 +                       if (de_has_vcn(prev)) {
86320 +                               de_set_vbn_le(e, de_get_vbn_le(prev));
86321 +                       } else if (de_has_vcn(e)) {
86322 +                               le16_sub_cpu(&e->size, sizeof(u64));
86323 +                               e->flags &= ~NTFS_IE_HAS_SUBNODES;
86324 +                               le32_sub_cpu(&hdr->used, sizeof(u64));
86325 +                       }
86326 +                       e = prev;
86327 +               }
86329 +               /*
86330 +                * Copy the current entry into a temporary buffer (stripping off its
86331 +                * down-pointer, if any) and delete it from the current buffer or root,
86332 +                * as appropriate.
86333 +                */
86334 +               e_size = le16_to_cpu(e->size);
86335 +               me = ntfs_memdup(e, e_size);
86336 +               if (!me) {
86337 +                       err = -ENOMEM;
86338 +                       goto out;
86339 +               }
86341 +               if (de_has_vcn(me)) {
86342 +                       me->flags &= ~NTFS_IE_HAS_SUBNODES;
86343 +                       le16_sub_cpu(&me->size, sizeof(u64));
86344 +               }
86346 +               hdr_delete_de(hdr, e);
86348 +               if (hdr == &root->ihdr) {
86349 +                       level = 0;
86350 +                       hdr->total = hdr->used;
86352 +                       /* Shrink resident root attribute */
86353 +                       mi_resize_attr(mi, attr, 0 - e_size);
86354 +               } else {
86355 +                       indx_write(indx, ni, n2d, 0);
86356 +                       level = level2;
86357 +               }
86359 +               /* Mark unused buffers as free */
86360 +               trim_bit = -1;
86361 +               for (; level < fnd->level; level++) {
86362 +                       ib = fnd->nodes[level]->index;
86363 +                       if (ib_is_empty(ib)) {
86364 +                               size_t k = le64_to_cpu(ib->vbn) >>
86365 +                                          indx->idx2vbn_bits;
86367 +                               indx_mark_free(indx, ni, k);
86368 +                               if (k < trim_bit)
86369 +                                       trim_bit = k;
86370 +                       }
86371 +               }
86373 +               fnd_clear(fnd);
86374 +               /*fnd->root_de = NULL;*/
86376 +               /*
86377 +                * Re-insert the entry into the tree.
86378 +                * Find the spot in the tree where we want to insert the new entry.
86379 +                */
86380 +               err = indx_insert_entry(indx, ni, me, ctx, fnd);
86381 +               ntfs_free(me);
86382 +               if (err)
86383 +                       goto out;
86385 +               if (trim_bit != -1)
86386 +                       indx_shrink(indx, ni, trim_bit);
86387 +       } else {
86388 +               /*
86389 +                * This tree needs to be collapsed down to an empty root.
86390 +                * Recreate the index root as an empty leaf and free all the bits
86391 +                * in the index allocation bitmap.
86392 +                */
86393 +               fnd_clear(fnd);
86394 +               fnd_clear(fnd2);
86396 +               in = &s_index_names[indx->type];
86398 +               err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
86399 +                                   &indx->alloc_run, 0, NULL, false, NULL);
86400 +               err = ni_remove_attr(ni, ATTR_ALLOC, in->name, in->name_len,
86401 +                                    false, NULL);
86402 +               run_close(&indx->alloc_run);
86404 +               err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
86405 +                                   &indx->bitmap_run, 0, NULL, false, NULL);
86406 +               err = ni_remove_attr(ni, ATTR_BITMAP, in->name, in->name_len,
86407 +                                    false, NULL);
86408 +               run_close(&indx->bitmap_run);
86410 +               root = indx_get_root(indx, ni, &attr, &mi);
86411 +               if (!root) {
86412 +                       err = -EINVAL;
86413 +                       goto out;
86414 +               }
86416 +               root_size = le32_to_cpu(attr->res.data_size);
86417 +               new_root_size =
86418 +                       sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
86420 +               if (new_root_size != root_size &&
86421 +                   !mi_resize_attr(mi, attr, new_root_size - root_size)) {
86422 +                       err = -EINVAL;
86423 +                       goto out;
86424 +               }
86426 +               /* Fill first entry */
86427 +               e = (struct NTFS_DE *)(root + 1);
86428 +               e->ref.low = 0;
86429 +               e->ref.high = 0;
86430 +               e->ref.seq = 0;
86431 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
86432 +               e->flags = NTFS_IE_LAST; // 0x02
86433 +               e->key_size = 0;
86434 +               e->res = 0;
86436 +               hdr = &root->ihdr;
86437 +               hdr->flags = 0;
86438 +               hdr->used = hdr->total = cpu_to_le32(
86439 +                       new_root_size - offsetof(struct INDEX_ROOT, ihdr));
86440 +               mi->dirty = true;
86441 +       }
86443 +out:
86444 +       fnd_put(fnd2);
86445 +out1:
86446 +       fnd_put(fnd);
86447 +out2:
86448 +       return err;
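A minimal user-space sketch of the size arithmetic in the collapse branch above, using simplified stand-in layouts for INDEX_ROOT, INDEX_HDR and NTFS_DE (the real definitions in fs/ntfs3/ntfs.h carry more fields, so the printed numbers are only illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct INDEX_HDR {            /* header shared by the root and index buffers */
	uint32_t de_off;      /* offset of the first NTFS_DE from this header */
	uint32_t used;        /* bytes in use */
	uint32_t total;       /* bytes available */
	uint8_t  flags;
	uint8_t  res[3];
};

struct INDEX_ROOT {
	uint32_t type;        /* collated attribute type, e.g. ATTR_NAME */
	uint32_t rule;        /* collation rule */
	uint32_t index_block_size;
	uint8_t  index_block_clst;
	uint8_t  res[3];
	struct INDEX_HDR ihdr;
};

struct NTFS_DE {              /* minimal directory entry: the end marker */
	uint64_t ref;
	uint16_t size;
	uint16_t key_size;
	uint16_t flags;       /* NTFS_IE_LAST = 0x02 */
	uint16_t res;
};

int main(void)
{
	/* Same arithmetic as the collapse path: an empty root is the fixed
	 * INDEX_ROOT header plus one NTFS_IE_LAST end-marker entry. */
	size_t new_root_size = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);

	printf("new_root_size     = %zu\n", new_root_size);
	printf("ihdr.used = total = %zu\n",
	       new_root_size - offsetof(struct INDEX_ROOT, ihdr));
	return 0;
}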
86451 +int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
86452 +                   const struct ATTR_FILE_NAME *fname,
86453 +                   const struct NTFS_DUP_INFO *dup, int sync)
86455 +       int err, diff;
86456 +       struct NTFS_DE *e = NULL;
86457 +       struct ATTR_FILE_NAME *e_fname;
86458 +       struct ntfs_fnd *fnd;
86459 +       struct INDEX_ROOT *root;
86460 +       struct mft_inode *mi;
86461 +       struct ntfs_index *indx = &ni->dir;
86463 +       fnd = fnd_get();
86464 +       if (!fnd) {
86465 +               err = -ENOMEM;
86466 +               goto out1;
86467 +       }
86469 +       root = indx_get_root(indx, ni, NULL, &mi);
86470 +       if (!root) {
86471 +               err = -EINVAL;
86472 +               goto out;
86473 +       }
86475 +       /* Find the entry in the tree and on disk */
86476 +       err = indx_find(indx, ni, root, fname, fname_full_size(fname), sbi,
86477 +                       &diff, &e, fnd);
86478 +       if (err)
86479 +               goto out;
86481 +       if (!e) {
86482 +               err = -EINVAL;
86483 +               goto out;
86484 +       }
86486 +       if (diff) {
86487 +               err = -EINVAL;
86488 +               goto out;
86489 +       }
86491 +       e_fname = (struct ATTR_FILE_NAME *)(e + 1);
86493 +       if (!memcmp(&e_fname->dup, dup, sizeof(*dup))) {
86494 +               /* Nothing to update in the index! Try to avoid this call */
86495 +               goto out;
86496 +       }
86498 +       memcpy(&e_fname->dup, dup, sizeof(*dup));
86500 +       if (fnd->level) {
86501 +               err = indx_write(indx, ni, fnd->nodes[fnd->level - 1], sync);
86502 +       } else if (sync) {
86503 +               mi->dirty = true;
86504 +               err = mi_write(mi, 1);
86505 +       } else {
86506 +               mi->dirty = true;
86507 +               mark_inode_dirty(&ni->vfs_inode);
86508 +       }
86510 +out:
86511 +       fnd_put(fnd);
86513 +out1:
86514 +       return err;
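indx_update_dup() above is a find, a memcmp against the cached duplicated info, and a conditional write-back. A minimal sketch of that compare-before-write pattern, with dup_info standing in for struct NTFS_DUP_INFO:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct NTFS_DUP_INFO (times, sizes, attributes). */
struct dup_info { unsigned long long m_time, data_size, fa; };

/* Copy only when something changed; the return value tells the caller
 * whether the index block has to be written back at all. */
static bool update_dup(struct dup_info *cached, const struct dup_info *fresh)
{
	if (!memcmp(cached, fresh, sizeof(*fresh)))
		return false;    /* nothing to update: avoid dirtying the block */
	memcpy(cached, fresh, sizeof(*fresh));
	return true;
}

int main(void)
{
	struct dup_info cached = { 1, 100, 0 }, fresh = { 2, 100, 0 };

	printf("dirty=%d\n", update_dup(&cached, &fresh)); /* dirty=1 */
	printf("dirty=%d\n", update_dup(&cached, &fresh)); /* dirty=0 */
	return 0;
}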
86516 diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
86517 new file mode 100644
86518 index 000000000000..9e836c192ddf
86519 --- /dev/null
86520 +++ b/fs/ntfs3/inode.c
86521 @@ -0,0 +1,2033 @@
86522 +// SPDX-License-Identifier: GPL-2.0
86523 +/*
86524 + *
86525 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
86526 + *
86527 + */
86529 +#include <linux/blkdev.h>
86530 +#include <linux/buffer_head.h>
86531 +#include <linux/fs.h>
86532 +#include <linux/iversion.h>
86533 +#include <linux/mpage.h>
86534 +#include <linux/namei.h>
86535 +#include <linux/nls.h>
86536 +#include <linux/uio.h>
86537 +#include <linux/version.h>
86538 +#include <linux/writeback.h>
86540 +#include "debug.h"
86541 +#include "ntfs.h"
86542 +#include "ntfs_fs.h"
86544 +/*
86545 + * ntfs_read_mft
86546 + *
86547 + * Read the MFT record for this inode and parse its attributes
86548 + */
86549 +static struct inode *ntfs_read_mft(struct inode *inode,
86550 +                                  const struct cpu_str *name,
86551 +                                  const struct MFT_REF *ref)
86553 +       int err = 0;
86554 +       struct ntfs_inode *ni = ntfs_i(inode);
86555 +       struct super_block *sb = inode->i_sb;
86556 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
86557 +       mode_t mode = 0;
86558 +       struct ATTR_STD_INFO5 *std5 = NULL;
86559 +       struct ATTR_LIST_ENTRY *le;
86560 +       struct ATTRIB *attr;
86561 +       bool is_match = false;
86562 +       bool is_root = false;
86563 +       bool is_dir;
86564 +       unsigned long ino = inode->i_ino;
86565 +       u32 rp_fa = 0, asize, t32;
86566 +       u16 roff, rsize, names = 0;
86567 +       const struct ATTR_FILE_NAME *fname = NULL;
86568 +       const struct INDEX_ROOT *root;
86569 +       struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
86570 +       u64 t64;
86571 +       struct MFT_REC *rec;
86572 +       struct runs_tree *run;
86574 +       inode->i_op = NULL;
86576 +       err = mi_init(&ni->mi, sbi, ino);
86577 +       if (err)
86578 +               goto out;
86580 +       if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
86581 +               t64 = sbi->mft.lbo >> sbi->cluster_bits;
86582 +               t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
86583 +               sbi->mft.ni = ni;
86584 +               init_rwsem(&ni->file.run_lock);
86586 +               if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
86587 +                       err = -ENOMEM;
86588 +                       goto out;
86589 +               }
86590 +       }
86592 +       err = mi_read(&ni->mi, ino == MFT_REC_MFT);
86594 +       if (err)
86595 +               goto out;
86597 +       rec = ni->mi.mrec;
86599 +       if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
86600 +               ;
86601 +       } else if (ref->seq != rec->seq) {
86602 +               err = -EINVAL;
86603 +               ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
86604 +                        le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
86605 +               goto out;
86606 +       } else if (!is_rec_inuse(rec)) {
86607 +               err = -EINVAL;
86608 +               ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
86609 +               goto out;
86610 +       }
86612 +       if (le32_to_cpu(rec->total) != sbi->record_size) {
86613 +               // bad inode?
86614 +               err = -EINVAL;
86615 +               goto out;
86616 +       }
86618 +       if (!is_rec_base(rec))
86619 +               goto Ok;
86621 +       /* record should contain $I30 root */
86622 +       is_dir = rec->flags & RECORD_FLAG_DIR;
86624 +       inode->i_generation = le16_to_cpu(rec->seq);
86626 +       /* Enumerate all attributes of the MFT record */
86627 +       le = NULL;
86628 +       attr = NULL;
86630 +       /*
86631 +        * To reduce indentation, use goto instead of
86632 +        * while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL)))
86633 +        */
86634 +next_attr:
86635 +       run = NULL;
86636 +       err = -EINVAL;
86637 +       attr = ni_enum_attr_ex(ni, attr, &le, NULL);
86638 +       if (!attr)
86639 +               goto end_enum;
86641 +       if (le && le->vcn) {
86642 +               /* This is a non-primary attribute segment. Ignore it unless this is the MFT */
86643 +               if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
86644 +                       goto next_attr;
86646 +               run = &ni->file.run;
86647 +               asize = le32_to_cpu(attr->size);
86648 +               goto attr_unpack_run;
86649 +       }
86651 +       roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
86652 +       rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
86653 +       asize = le32_to_cpu(attr->size);
86655 +       switch (attr->type) {
86656 +       case ATTR_STD:
86657 +               if (attr->non_res ||
86658 +                   asize < sizeof(struct ATTR_STD_INFO) + roff ||
86659 +                   rsize < sizeof(struct ATTR_STD_INFO))
86660 +                       goto out;
86662 +               if (std5)
86663 +                       goto next_attr;
86665 +               std5 = Add2Ptr(attr, roff);
86667 +#ifdef STATX_BTIME
86668 +               nt2kernel(std5->cr_time, &ni->i_crtime);
86669 +#endif
86670 +               nt2kernel(std5->a_time, &inode->i_atime);
86671 +               nt2kernel(std5->c_time, &inode->i_ctime);
86672 +               nt2kernel(std5->m_time, &inode->i_mtime);
86674 +               ni->std_fa = std5->fa;
86676 +               if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
86677 +                   rsize >= sizeof(struct ATTR_STD_INFO5))
86678 +                       ni->std_security_id = std5->security_id;
86679 +               goto next_attr;
86681 +       case ATTR_LIST:
86682 +               if (attr->name_len || le || ino == MFT_REC_LOG)
86683 +                       goto out;
86685 +               err = ntfs_load_attr_list(ni, attr);
86686 +               if (err)
86687 +                       goto out;
86689 +               le = NULL;
86690 +               attr = NULL;
86691 +               goto next_attr;
86693 +       case ATTR_NAME:
86694 +               if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
86695 +                   rsize < SIZEOF_ATTRIBUTE_FILENAME)
86696 +                       goto out;
86698 +               fname = Add2Ptr(attr, roff);
86699 +               if (fname->type == FILE_NAME_DOS)
86700 +                       goto next_attr;
86702 +               names += 1;
86703 +               if (name && name->len == fname->name_len &&
86704 +                   !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
86705 +                                       NULL, false))
86706 +                       is_match = true;
86708 +               goto next_attr;
86710 +       case ATTR_DATA:
86711 +               if (is_dir) {
86712 +                       /* Ignore the data attribute in a directory record */
86713 +                       goto next_attr;
86714 +               }
86716 +               if (ino == MFT_REC_BADCLUST && !attr->non_res)
86717 +                       goto next_attr;
86719 +               if (attr->name_len &&
86720 +                   ((ino != MFT_REC_BADCLUST || !attr->non_res ||
86721 +                     attr->name_len != ARRAY_SIZE(BAD_NAME) ||
86722 +                     memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
86723 +                    (ino != MFT_REC_SECURE || !attr->non_res ||
86724 +                     attr->name_len != ARRAY_SIZE(SDS_NAME) ||
86725 +                     memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
86726 +                       /* The file contains a named stream attribute. Ignore it */
86727 +                       goto next_attr;
86728 +               }
86730 +               if (is_attr_sparsed(attr))
86731 +                       ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
86732 +               else
86733 +                       ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
86735 +               if (is_attr_compressed(attr))
86736 +                       ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
86737 +               else
86738 +                       ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;
86740 +               if (is_attr_encrypted(attr))
86741 +                       ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
86742 +               else
86743 +                       ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;
86745 +               if (!attr->non_res) {
86746 +                       ni->i_valid = inode->i_size = rsize;
86747 +                       inode_set_bytes(inode, rsize);
86748 +                       t32 = asize;
86749 +               } else {
86750 +                       t32 = le16_to_cpu(attr->nres.run_off);
86751 +               }
86753 +               mode = S_IFREG | (0777 & sbi->options.fs_fmask_inv);
86755 +               if (!attr->non_res) {
86756 +                       ni->ni_flags |= NI_FLAG_RESIDENT;
86757 +                       goto next_attr;
86758 +               }
86760 +               inode_set_bytes(inode, attr_ondisk_size(attr));
86762 +               ni->i_valid = le64_to_cpu(attr->nres.valid_size);
86763 +               inode->i_size = le64_to_cpu(attr->nres.data_size);
86764 +               if (!attr->nres.alloc_size)
86765 +                       goto next_attr;
86767 +               run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run
86768 +                                           : &ni->file.run;
86769 +               break;
86771 +       case ATTR_ROOT:
86772 +               if (attr->non_res)
86773 +                       goto out;
86775 +               root = Add2Ptr(attr, roff);
86776 +               is_root = true;
86778 +               if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
86779 +                   memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
86780 +                       goto next_attr;
86782 +               if (root->type != ATTR_NAME ||
86783 +                   root->rule != NTFS_COLLATION_TYPE_FILENAME)
86784 +                       goto out;
86786 +               if (!is_dir)
86787 +                       goto next_attr;
86789 +               ni->ni_flags |= NI_FLAG_DIR;
86791 +               err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
86792 +               if (err)
86793 +                       goto out;
86795 +               mode = sb->s_root
86796 +                              ? (S_IFDIR | (0777 & sbi->options.fs_dmask_inv))
86797 +                              : (S_IFDIR | 0777);
86798 +               goto next_attr;
86800 +       case ATTR_ALLOC:
86801 +               if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
86802 +                   memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
86803 +                       goto next_attr;
86805 +               inode->i_size = le64_to_cpu(attr->nres.data_size);
86806 +               ni->i_valid = le64_to_cpu(attr->nres.valid_size);
86807 +               inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));
86809 +               run = &ni->dir.alloc_run;
86810 +               break;
86812 +       case ATTR_BITMAP:
86813 +               if (ino == MFT_REC_MFT) {
86814 +                       if (!attr->non_res)
86815 +                               goto out;
86816 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
86817 +                       /* 0x20000000 = 2^32 / 8 */
86818 +                       if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
86819 +                               goto out;
86820 +#endif
86821 +                       run = &sbi->mft.bitmap.run;
86822 +                       break;
86823 +               } else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
86824 +                          !memcmp(attr_name(attr), I30_NAME,
86825 +                                  sizeof(I30_NAME)) &&
86826 +                          attr->non_res) {
86827 +                       run = &ni->dir.bitmap_run;
86828 +                       break;
86829 +               }
86830 +               goto next_attr;
86832 +       case ATTR_REPARSE:
86833 +               if (attr->name_len)
86834 +                       goto next_attr;
86836 +               rp_fa = ni_parse_reparse(ni, attr, &rp);
86837 +               switch (rp_fa) {
86838 +               case REPARSE_LINK:
86839 +                       if (!attr->non_res) {
86840 +                               inode->i_size = rsize;
86841 +                               inode_set_bytes(inode, rsize);
86842 +                               t32 = asize;
86843 +                       } else {
86844 +                               inode->i_size =
86845 +                                       le64_to_cpu(attr->nres.data_size);
86846 +                               t32 = le16_to_cpu(attr->nres.run_off);
86847 +                       }
86849 +                       /* Looks like normal symlink */
86850 +                       ni->i_valid = inode->i_size;
86852 +                       /* Clear directory bit */
86853 +                       if (ni->ni_flags & NI_FLAG_DIR) {
86854 +                               indx_clear(&ni->dir);
86855 +                               memset(&ni->dir, 0, sizeof(ni->dir));
86856 +                               ni->ni_flags &= ~NI_FLAG_DIR;
86857 +                       } else {
86858 +                               run_close(&ni->file.run);
86859 +                       }
86860 +                       mode = S_IFLNK | 0777;
86861 +                       is_dir = false;
86862 +                       if (attr->non_res) {
86863 +                               run = &ni->file.run;
86864 +                               goto attr_unpack_run; // double break
86865 +                       }
86866 +                       break;
86868 +               case REPARSE_COMPRESSED:
86869 +                       break;
86871 +               case REPARSE_DEDUPLICATED:
86872 +                       break;
86873 +               }
86874 +               goto next_attr;
86876 +       case ATTR_EA_INFO:
86877 +               if (!attr->name_len &&
86878 +                   resident_data_ex(attr, sizeof(struct EA_INFO)))
86879 +                       ni->ni_flags |= NI_FLAG_EA;
86880 +               goto next_attr;
86882 +       default:
86883 +               goto next_attr;
86884 +       }
86886 +attr_unpack_run:
86887 +       roff = le16_to_cpu(attr->nres.run_off);
86889 +       t64 = le64_to_cpu(attr->nres.svcn);
86890 +       err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
86891 +                           t64, Add2Ptr(attr, roff), asize - roff);
86892 +       if (err < 0)
86893 +               goto out;
86894 +       err = 0;
86895 +       goto next_attr;
86897 +end_enum:
86899 +       if (!std5)
86900 +               goto out;
86902 +       if (!is_match && name) {
86903 +               /* reuse rec as buffer for ascii name */
86904 +               err = -ENOENT;
86905 +               goto out;
86906 +       }
86908 +       if (std5->fa & FILE_ATTRIBUTE_READONLY)
86909 +               mode &= ~0222;
86911 +       /* Setup 'uid' and 'gid' */
86912 +       inode->i_uid = sbi->options.fs_uid;
86913 +       inode->i_gid = sbi->options.fs_gid;
86915 +       if (!names) {
86916 +               err = -EINVAL;
86917 +               goto out;
86918 +       }
86920 +       if (S_ISDIR(mode)) {
86921 +               ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
86923 +               /*
86924 +                * dot and dot-dot should be included in the count but were not
86925 +                * included in the enumeration.
86926 +                * Usually hard links to directories are disabled
86927 +                */
86928 +               set_nlink(inode, 1);
86929 +               inode->i_op = &ntfs_dir_inode_operations;
86930 +               inode->i_fop = &ntfs_dir_operations;
86931 +               ni->i_valid = 0;
86932 +       } else if (S_ISLNK(mode)) {
86933 +               ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
86934 +               inode->i_op = &ntfs_link_inode_operations;
86935 +               inode->i_fop = NULL;
86936 +               inode_nohighmem(inode); // ??
86937 +               set_nlink(inode, names);
86938 +       } else if (S_ISREG(mode)) {
86939 +               ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
86941 +               set_nlink(inode, names);
86943 +               inode->i_op = &ntfs_file_inode_operations;
86944 +               inode->i_fop = &ntfs_file_operations;
86945 +               inode->i_mapping->a_ops =
86946 +                       is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
86948 +               if (ino != MFT_REC_MFT)
86949 +                       init_rwsem(&ni->file.run_lock);
86950 +       } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
86951 +                  fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
86952 +               /* Records in $Extend are not files or general directories */
86953 +       } else {
86954 +               err = -EINVAL;
86955 +               goto out;
86956 +       }
86958 +       if ((sbi->options.sys_immutable &&
86959 +            (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
86960 +           !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
86961 +               inode->i_flags |= S_IMMUTABLE;
86962 +       } else {
86963 +               inode->i_flags &= ~S_IMMUTABLE;
86964 +       }
86966 +       inode->i_mode = mode;
86967 +       if (!(ni->ni_flags & NI_FLAG_EA)) {
86968 +               /* if no xattr then no security (stored in xattr) */
86969 +               inode->i_flags |= S_NOSEC;
86970 +       }
86972 +Ok:
86973 +       if (ino == MFT_REC_MFT && !sb->s_root)
86974 +               sbi->mft.ni = NULL;
86976 +       unlock_new_inode(inode);
86978 +       return inode;
86980 +out:
86981 +       if (ino == MFT_REC_MFT && !sb->s_root)
86982 +               sbi->mft.ni = NULL;
86984 +       iget_failed(inode);
86985 +       return ERR_PTR(err);
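ntfs_read_mft() above derives the POSIX mode from the mount masks and then strips write bits for FILE_ATTRIBUTE_READONLY. A small sketch of that derivation, assuming fs_fmask_inv is simply the complement of the mount's fmask (which is how the option name reads; this hunk itself does not show it):

#include <stdio.h>
#include <sys/stat.h>

#define FILE_ATTRIBUTE_READONLY 0x0001   /* standard Windows attribute bit */

static mode_t file_mode(unsigned fmask, unsigned fa)
{
	/* mode = S_IFREG | (0777 & sbi->options.fs_fmask_inv) in the patch */
	mode_t mode = S_IFREG | (0777 & ~fmask);

	if (fa & FILE_ATTRIBUTE_READONLY)
		mode &= ~0222;               /* read-only: drop all write bits */
	return mode;
}

int main(void)
{
	/* fmask = 022 gives 0755; the read-only attribute reduces it to 0555 */
	printf("%o\n", file_mode(022, FILE_ATTRIBUTE_READONLY) & 07777);
	return 0;
}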
86988 +/* returns 1 if match */
86989 +static int ntfs_test_inode(struct inode *inode, void *data)
86991 +       struct MFT_REF *ref = data;
86993 +       return ino_get(ref) == inode->i_ino;
86996 +static int ntfs_set_inode(struct inode *inode, void *data)
86998 +       const struct MFT_REF *ref = data;
87000 +       inode->i_ino = ino_get(ref);
87001 +       return 0;
87004 +struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
87005 +                        const struct cpu_str *name)
87007 +       struct inode *inode;
87009 +       inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
87010 +                            (void *)ref);
87011 +       if (unlikely(!inode))
87012 +               return ERR_PTR(-ENOMEM);
87014 +       /* If this is a freshly allocated inode, need to read it now. */
87015 +       if (inode->i_state & I_NEW)
87016 +               inode = ntfs_read_mft(inode, name, ref);
87017 +       else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
87018 +               /* inode overlaps? */
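+               /* The MFT record was reused under a new sequence number, so the cached inode is stale */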
87019 +               make_bad_inode(inode);
87020 +       }
87022 +       return inode;
87025 +enum get_block_ctx {
87026 +       GET_BLOCK_GENERAL = 0,
87027 +       GET_BLOCK_WRITE_BEGIN = 1,
87028 +       GET_BLOCK_DIRECT_IO_R = 2,
87029 +       GET_BLOCK_DIRECT_IO_W = 3,
87030 +       GET_BLOCK_BMAP = 4,
87031 +};
87033 +static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
87034 +                                      struct buffer_head *bh, int create,
87035 +                                      enum get_block_ctx ctx)
87037 +       struct super_block *sb = inode->i_sb;
87038 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
87039 +       struct ntfs_inode *ni = ntfs_i(inode);
87040 +       struct page *page = bh->b_page;
87041 +       u8 cluster_bits = sbi->cluster_bits;
87042 +       u32 block_size = sb->s_blocksize;
87043 +       u64 bytes, lbo, valid;
87044 +       u32 off;
87045 +       int err;
87046 +       CLST vcn, lcn, len;
87047 +       bool new;
87049 +       /* Clear previous state */
87050 +       clear_buffer_new(bh);
87051 +       clear_buffer_uptodate(bh);
87053 +       /* Direct write uses 'create=0' */
87054 +       if (!create && vbo >= ni->i_valid) {
87055 +               /* out of valid */
87056 +               return 0;
87057 +       }
87059 +       if (vbo >= inode->i_size) {
87060 +               /* out of size */
87061 +               return 0;
87062 +       }
87064 +       if (is_resident(ni)) {
87065 +               ni_lock(ni);
87066 +               err = attr_data_read_resident(ni, page);
87067 +               ni_unlock(ni);
87069 +               if (!err)
87070 +                       set_buffer_uptodate(bh);
87071 +               bh->b_size = block_size;
87072 +               return err;
87073 +       }
87075 +       vcn = vbo >> cluster_bits;
87076 +       off = vbo & sbi->cluster_mask;
87077 +       new = false;
87079 +       err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
87080 +       if (err)
87081 +               goto out;
87083 +       if (!len)
87084 +               return 0;
87086 +       bytes = ((u64)len << cluster_bits) - off;
87088 +       if (lcn == SPARSE_LCN) {
87089 +               if (!create) {
87090 +                       if (bh->b_size > bytes)
87091 +                               bh->b_size = bytes;
87093 +                       return 0;
87094 +               }
87095 +               WARN_ON(1);
87096 +       }
87098 +       if (new) {
87099 +               set_buffer_new(bh);
87100 +               if ((len << cluster_bits) > block_size)
87101 +                       ntfs_sparse_cluster(inode, page, vcn, len);
87102 +       }
87104 +       lbo = ((u64)lcn << cluster_bits) + off;
87106 +       set_buffer_mapped(bh);
87107 +       bh->b_bdev = sb->s_bdev;
87108 +       bh->b_blocknr = lbo >> sb->s_blocksize_bits;
87110 +       valid = ni->i_valid;
87112 +       if (ctx == GET_BLOCK_DIRECT_IO_W) {
87113 +               /* ntfs_direct_IO will update ni->i_valid */
87114 +               if (vbo >= valid)
87115 +                       set_buffer_new(bh);
87116 +       } else if (create) {
87117 +               /* Normal write */
87118 +               if (vbo >= valid) {
87119 +                       set_buffer_new(bh);
87120 +                       if (bytes > bh->b_size)
87121 +                               bytes = bh->b_size;
87122 +                       ni->i_valid = vbo + bytes;
87123 +                       mark_inode_dirty(inode);
87124 +               }
87125 +       } else if (valid >= inode->i_size) {
87126 +               /* Normal read of a normal file */
87127 +       } else if (vbo >= valid) {
87128 +               /* Read out of valid data */
87129 +               /* Should never get here, because this was already checked above */
87130 +               clear_buffer_mapped(bh);
87131 +       } else if (vbo + bytes <= valid) {
87132 +               /* Normal read */
87133 +       } else if (vbo + block_size <= valid) {
87134 +               /* Normal short read */
87135 +               bytes = block_size;
87136 +       } else {
87137 +               /*
87138 +                * Read across the valid size: vbo < valid && valid < vbo + block_size
87139 +                */
87140 +               u32 voff = valid - vbo;
87142 +               bh->b_size = bytes = block_size;
87143 +               off = vbo & (PAGE_SIZE - 1);
87144 +               set_bh_page(bh, page, off);
87145 +               ll_rw_block(REQ_OP_READ, 0, 1, &bh);
87146 +               wait_on_buffer(bh);
87147 +               /* Uhhuh. Read error. Complain and punt. */
87148 +               if (!buffer_uptodate(bh)) {
87149 +                       err = -EIO;
87150 +                       goto out;
87151 +               }
87152 +               zero_user_segment(page, off + voff, off + block_size);
87153 +       }
87155 +       if (bh->b_size > bytes)
87156 +               bh->b_size = bytes;
87158 +#ifndef __LP64__
87159 +       if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
87160 +               static_assert(sizeof(size_t) < sizeof(loff_t));
87161 +               if (bytes > 0x40000000u)
87162 +                       bh->b_size = 0x40000000u;
87163 +       }
87164 +#endif
87166 +       return 0;
87168 +out:
87169 +       return err;
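The last branch of the ladder above reads a block that straddles ni->i_valid and then zeroes the tail, so stale on-disk bytes past the valid size never reach user space. A worked example, assuming 4 KiB blocks and pages:

#include <stdio.h>

int main(void)
{
	unsigned long long valid = 10000;  /* ni->i_valid */
	unsigned long long vbo = 8192;     /* block-aligned file offset */
	unsigned block_size = 4096;

	unsigned voff = valid - vbo;       /* 1808 valid bytes in this block */
	unsigned off = vbo & (4096 - 1);   /* 0: block offset inside its page */

	/* After the synchronous read, the tail past the valid size is zeroed */
	printf("zero_user_segment(page, %u, %u)\n", off + voff, off + block_size);
	return 0;
}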
87172 +int ntfs_get_block(struct inode *inode, sector_t vbn,
87173 +                  struct buffer_head *bh_result, int create)
87175 +       return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
87176 +                                 bh_result, create, GET_BLOCK_GENERAL);
87179 +static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
87180 +                              struct buffer_head *bh_result, int create)
87182 +       return ntfs_get_block_vbo(inode,
87183 +                                 (u64)vsn << inode->i_sb->s_blocksize_bits,
87184 +                                 bh_result, create, GET_BLOCK_BMAP);
87187 +static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
87189 +       return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
87192 +static int ntfs_readpage(struct file *file, struct page *page)
87194 +       int err;
87195 +       struct address_space *mapping = page->mapping;
87196 +       struct inode *inode = mapping->host;
87197 +       struct ntfs_inode *ni = ntfs_i(inode);
87199 +       if (is_resident(ni)) {
87200 +               ni_lock(ni);
87201 +               err = attr_data_read_resident(ni, page);
87202 +               ni_unlock(ni);
87203 +               if (err != E_NTFS_NONRESIDENT) {
87204 +                       unlock_page(page);
87205 +                       return err;
87206 +               }
87207 +       }
87209 +       if (is_compressed(ni)) {
87210 +               ni_lock(ni);
87211 +               err = ni_readpage_cmpr(ni, page);
87212 +               ni_unlock(ni);
87213 +               return err;
87214 +       }
87216 +       /* normal + sparse files */
87217 +       return mpage_readpage(page, ntfs_get_block);
87220 +static void ntfs_readahead(struct readahead_control *rac)
87222 +       struct address_space *mapping = rac->mapping;
87223 +       struct inode *inode = mapping->host;
87224 +       struct ntfs_inode *ni = ntfs_i(inode);
87225 +       u64 valid;
87226 +       loff_t pos;
87228 +       if (is_resident(ni)) {
87229 +               /* no readahead for resident */
87230 +               return;
87231 +       }
87233 +       if (is_compressed(ni)) {
87234 +               /* no readahead for compressed */
87235 +               return;
87236 +       }
87238 +       valid = ni->i_valid;
87239 +       pos = readahead_pos(rac);
87241 +       if (valid < i_size_read(inode) && pos <= valid &&
87242 +           valid < pos + readahead_length(rac)) {
87243 +               /* Range crosses 'valid'. Read it page by page */
87244 +               return;
87245 +       }
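+       /* Reaching this point means the window does not cross i_valid:
+        * e.g. valid=10000, pos=0, len=8192 fails the 'valid < pos + length'
+        * test above, so it is safe to build large bios over the whole range. */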
87247 +       mpage_readahead(rac, ntfs_get_block);
87250 +static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
87251 +                                     struct buffer_head *bh_result, int create)
87253 +       return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
87254 +                                 bh_result, create, GET_BLOCK_DIRECT_IO_R);
87257 +static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
87258 +                                     struct buffer_head *bh_result, int create)
87260 +       return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
87261 +                                 bh_result, create, GET_BLOCK_DIRECT_IO_W);
87264 +static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
87266 +       struct file *file = iocb->ki_filp;
87267 +       struct address_space *mapping = file->f_mapping;
87268 +       struct inode *inode = mapping->host;
87269 +       struct ntfs_inode *ni = ntfs_i(inode);
87270 +       size_t count = iov_iter_count(iter);
87271 +       loff_t vbo = iocb->ki_pos;
87272 +       loff_t end = vbo + count;
87273 +       int wr = iov_iter_rw(iter) & WRITE;
87274 +       const struct iovec *iov = iter->iov;
87275 +       unsigned long nr_segs = iter->nr_segs;
87276 +       loff_t valid;
87277 +       ssize_t ret;
87279 +       if (is_resident(ni)) {
87280 +               /* Switch to buffered write */
87281 +               ret = 0;
87282 +               goto out;
87283 +       }
87285 +       ret = blockdev_direct_IO(iocb, inode, iter,
87286 +                                wr ? ntfs_get_block_direct_IO_W
87287 +                                   : ntfs_get_block_direct_IO_R);
87288 +       valid = ni->i_valid;
87289 +       if (wr) {
87290 +               if (ret <= 0)
87291 +                       goto out;
87293 +               vbo += ret;
87294 +               if (vbo > valid && !S_ISBLK(inode->i_mode)) {
87295 +                       ni->i_valid = vbo;
87296 +                       mark_inode_dirty(inode);
87297 +               }
87298 +       } else if (vbo < valid && valid < end) {
87299 +               /* fix page */
87300 +               unsigned long uaddr = ~0ul;
87301 +               struct page *page;
87302 +               long i, npages;
87303 +               size_t dvbo = valid - vbo;
87304 +               size_t off = 0;
87306 +               /* Find the user address */
87307 +               for (i = 0; i < nr_segs; i++) {
87308 +                       if (off <= dvbo && dvbo < off + iov[i].iov_len) {
87309 +                               uaddr = (unsigned long)iov[i].iov_base + dvbo -
87310 +                                       off;
87311 +                               break;
87312 +                       }
87313 +                       off += iov[i].iov_len;
87314 +               }
87316 +               if (uaddr == ~0ul)
87317 +                       goto fix_error;
87319 +               npages = get_user_pages_unlocked(uaddr, 1, &page, FOLL_WRITE);
87321 +               if (npages <= 0)
87322 +                       goto fix_error;
87324 +               zero_user_segment(page, valid & (PAGE_SIZE - 1), PAGE_SIZE);
87325 +               put_page(page);
87326 +       }
87328 +out:
87329 +       return ret;
87330 +fix_error:
87331 +       ntfs_inode_warn(inode, "file garbage at 0x%llx", valid);
87332 +       goto out;
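The iovec walk above locates which user segment holds byte 'dvbo' of the transfer, so the page that straddles the valid size can be fixed up. The same walk as a self-contained user-space sketch:

#include <stdio.h>
#include <sys/uio.h>

static char *addr_at(const struct iovec *iov, unsigned long nr_segs, size_t dvbo)
{
	size_t off = 0;
	unsigned long i;

	for (i = 0; i < nr_segs; i++) {
		if (off <= dvbo && dvbo < off + iov[i].iov_len)
			return (char *)iov[i].iov_base + (dvbo - off);
		off += iov[i].iov_len;
	}
	return NULL; /* offset lies past the end of the transfer */
}

int main(void)
{
	char a[100], b[100];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

	/* byte 130 of the transfer lands 30 bytes into the second segment */
	printf("%td\n", addr_at(iov, 2, 130) - b);
	return 0;
}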
87335 +int ntfs_set_size(struct inode *inode, u64 new_size)
87337 +       struct super_block *sb = inode->i_sb;
87338 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
87339 +       struct ntfs_inode *ni = ntfs_i(inode);
87340 +       int err;
87342 +       /* Check for maximum file size */
87343 +       if (is_sparsed(ni) || is_compressed(ni)) {
87344 +               if (new_size > sbi->maxbytes_sparse) {
87345 +                       err = -EFBIG;
87346 +                       goto out;
87347 +               }
87348 +       } else if (new_size > sbi->maxbytes) {
87349 +               err = -EFBIG;
87350 +               goto out;
87351 +       }
87353 +       ni_lock(ni);
87354 +       down_write(&ni->file.run_lock);
87356 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
87357 +                           &ni->i_valid, true, NULL);
87359 +       up_write(&ni->file.run_lock);
87360 +       ni_unlock(ni);
87362 +       mark_inode_dirty(inode);
87364 +out:
87365 +       return err;
87368 +static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
87370 +       struct address_space *mapping = page->mapping;
87371 +       struct inode *inode = mapping->host;
87372 +       struct ntfs_inode *ni = ntfs_i(inode);
87373 +       int err;
87375 +       if (is_resident(ni)) {
87376 +               ni_lock(ni);
87377 +               err = attr_data_write_resident(ni, page);
87378 +               ni_unlock(ni);
87379 +               if (err != E_NTFS_NONRESIDENT) {
87380 +                       unlock_page(page);
87381 +                       return err;
87382 +               }
87383 +       }
87385 +       return block_write_full_page(page, ntfs_get_block, wbc);
87388 +static int ntfs_writepages(struct address_space *mapping,
87389 +                          struct writeback_control *wbc)
87391 +       struct inode *inode = mapping->host;
87392 +       struct ntfs_inode *ni = ntfs_i(inode);
87393 +       /* Redirect the call to 'ntfs_writepage' for resident files */
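+       /* A NULL get_block makes mpage_writepages() fall back to generic_writepages(), i.e. ->writepage */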
87394 +       get_block_t *get_block = is_resident(ni) ? NULL : &ntfs_get_block;
87396 +       return mpage_writepages(mapping, wbc, get_block);
87399 +static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
87400 +                                     struct buffer_head *bh_result, int create)
87402 +       return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
87403 +                                 bh_result, create, GET_BLOCK_WRITE_BEGIN);
87406 +static int ntfs_write_begin(struct file *file, struct address_space *mapping,
87407 +                           loff_t pos, u32 len, u32 flags, struct page **pagep,
87408 +                           void **fsdata)
87410 +       int err;
87411 +       struct inode *inode = mapping->host;
87412 +       struct ntfs_inode *ni = ntfs_i(inode);
87414 +       *pagep = NULL;
87415 +       if (is_resident(ni)) {
87416 +               struct page *page = grab_cache_page_write_begin(
87417 +                       mapping, pos >> PAGE_SHIFT, flags);
87419 +               if (!page) {
87420 +                       err = -ENOMEM;
87421 +                       goto out;
87422 +               }
87424 +               ni_lock(ni);
87425 +               err = attr_data_read_resident(ni, page);
87426 +               ni_unlock(ni);
87428 +               if (!err) {
87429 +                       *pagep = page;
87430 +                       goto out;
87431 +               }
87432 +               unlock_page(page);
87433 +               put_page(page);
87435 +               if (err != E_NTFS_NONRESIDENT)
87436 +                       goto out;
87437 +       }
87439 +       err = block_write_begin(mapping, pos, len, flags, pagep,
87440 +                               ntfs_get_block_write_begin);
87442 +out:
87443 +       return err;
87446 +/* address_space_operations::write_end */
87447 +static int ntfs_write_end(struct file *file, struct address_space *mapping,
87448 +                         loff_t pos, u32 len, u32 copied, struct page *page,
87449 +                         void *fsdata)
87452 +       struct inode *inode = mapping->host;
87453 +       struct ntfs_inode *ni = ntfs_i(inode);
87454 +       u64 valid = ni->i_valid;
87455 +       bool dirty = false;
87456 +       int err;
87458 +       if (is_resident(ni)) {
87459 +               ni_lock(ni);
87460 +               err = attr_data_write_resident(ni, page);
87461 +               ni_unlock(ni);
87462 +               if (!err) {
87463 +                       dirty = true;
87464 +                       /* Clear any buffers in the page */
87465 +                       if (page_has_buffers(page)) {
87466 +                               struct buffer_head *head, *bh;
87468 +                               bh = head = page_buffers(page);
87469 +                               do {
87470 +                                       clear_buffer_dirty(bh);
87471 +                                       clear_buffer_mapped(bh);
87472 +                                       set_buffer_uptodate(bh);
87473 +                               } while (head != (bh = bh->b_this_page));
87474 +                       }
87475 +                       SetPageUptodate(page);
87476 +                       err = copied;
87477 +               }
87478 +               unlock_page(page);
87479 +               put_page(page);
87480 +       } else {
87481 +               err = generic_write_end(file, mapping, pos, len, copied, page,
87482 +                                       fsdata);
87483 +       }
87485 +       if (err >= 0) {
87486 +               if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
87487 +                       inode->i_ctime = inode->i_mtime = current_time(inode);
87488 +                       ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
87489 +                       dirty = true;
87490 +               }
87492 +               if (valid != ni->i_valid) {
87493 +                       /* ni->i_valid is changed in ntfs_get_block_vbo */
87494 +                       dirty = true;
87495 +               }
87497 +               if (dirty)
87498 +                       mark_inode_dirty(inode);
87499 +       }
87501 +       return err;
87504 +int reset_log_file(struct inode *inode)
87506 +       int err;
87507 +       loff_t pos = 0;
87508 +       u32 log_size = inode->i_size;
87509 +       struct address_space *mapping = inode->i_mapping;
87511 +       for (;;) {
87512 +               u32 len;
87513 +               void *kaddr;
87514 +               struct page *page;
87516 +               len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
87518 +               err = block_write_begin(mapping, pos, len, 0, &page,
87519 +                                       ntfs_get_block_write_begin);
87520 +               if (err)
87521 +                       goto out;
87523 +               kaddr = kmap_atomic(page);
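+               /* memset with -1 fills the buffer with 0xff; a reset $LogFile is all ones */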
87524 +               memset(kaddr, -1, len);
87525 +               kunmap_atomic(kaddr);
87526 +               flush_dcache_page(page);
87528 +               err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
87529 +               if (err < 0)
87530 +                       goto out;
87531 +               pos += len;
87533 +               if (pos >= log_size)
87534 +                       break;
87535 +               balance_dirty_pages_ratelimited(mapping);
87536 +       }
87537 +out:
87538 +       mark_inode_dirty_sync(inode);
87540 +       return err;
87543 +int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
87545 +       return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
87548 +int ntfs_sync_inode(struct inode *inode)
87550 +       return _ni_write_inode(inode, 1);
87553 +/*
87554 + * Helper function for ntfs_flush_inodes.  This writes both the inode
87555 + * and the file data blocks, waiting for in-flight data blocks before
87556 + * the start of the call.  It does not wait for any I/O started
87557 + * during the call
87558 + */
87559 +static int writeback_inode(struct inode *inode)
87561 +       int ret = sync_inode_metadata(inode, 0);
87563 +       if (!ret)
87564 +               ret = filemap_fdatawrite(inode->i_mapping);
87565 +       return ret;
87568 +/*
87569 + * Write data and metadata corresponding to i1 and i2.  The I/O is
87570 + * started but we do not wait for any of it to finish.
87571 + *
87572 + * filemap_flush is used for the block device, so if there is a dirty
87573 + * page for a block already in flight, we will not wait and start the
87574 + * I/O over again
87575 + */
87576 +int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
87577 +                     struct inode *i2)
87579 +       int ret = 0;
87581 +       if (i1)
87582 +               ret = writeback_inode(i1);
87583 +       if (!ret && i2)
87584 +               ret = writeback_inode(i2);
87585 +       if (!ret)
87586 +               ret = filemap_flush(sb->s_bdev->bd_inode->i_mapping);
87587 +       return ret;
87590 +int inode_write_data(struct inode *inode, const void *data, size_t bytes)
87592 +       pgoff_t idx;
87594 +       /* Write non resident data */
87595 +       for (idx = 0; bytes; idx++) {
87596 +               size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
87597 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
87599 +               if (IS_ERR(page))
87600 +                       return PTR_ERR(page);
87602 +               lock_page(page);
87603 +               WARN_ON(!PageUptodate(page));
87604 +               ClearPageUptodate(page);
87606 +               memcpy(page_address(page), data, op);
87608 +               flush_dcache_page(page);
87609 +               SetPageUptodate(page);
87610 +               unlock_page(page);
87612 +               ntfs_unmap_page(page);
87614 +               bytes -= op;
87615 +               data = Add2Ptr(data, PAGE_SIZE);
87616 +       }
87617 +       return 0;
87620 +/*
87621 + * Number of bytes needed for a REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
87622 + * for a unicode string of 'uni_len' length
87623 + */
87624 +static inline u32 ntfs_reparse_bytes(u32 uni_len)
87626 +       /* header + unicode string + decorated unicode string */
87627 +       return sizeof(short) * (2 * uni_len + 4) +
87628 +              offsetof(struct REPARSE_DATA_BUFFER,
87629 +                       SymbolicLinkReparseBuffer.PathBuffer);
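A worked example of ntfs_reparse_bytes(): the buffer holds two UTF-16 copies of the target (PrintName, plus a SubstituteName decorated with the four-unit '\??\' prefix) after the reparse header. The value 20 below assumes the standard Windows REPARSE_DATA_BUFFER layout, where PathBuffer starts 8 header bytes plus 12 symlink-field bytes into the structure:

#include <stdio.h>

int main(void)
{
	unsigned uni_len = 11;         /* UTF-16 units in the target name */
	unsigned path_buffer_off = 20; /* offsetof(..., PathBuffer), assumed */
	unsigned bytes = sizeof(short) * (2 * uni_len + 4) + path_buffer_off;

	printf("%u\n", bytes);         /* 2*(22+4) + 20 = 72 */
	return 0;
}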
87632 +static struct REPARSE_DATA_BUFFER *
87633 +ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
87634 +                          u32 size, u16 *nsize)
87636 +       int i, err;
87637 +       struct REPARSE_DATA_BUFFER *rp;
87638 +       __le16 *rp_name;
87639 +       typeof(rp->SymbolicLinkReparseBuffer) *rs;
87641 +       rp = ntfs_zalloc(ntfs_reparse_bytes(2 * size + 2));
87642 +       if (!rp)
87643 +               return ERR_PTR(-ENOMEM);
87645 +       rs = &rp->SymbolicLinkReparseBuffer;
87646 +       rp_name = rs->PathBuffer;
87648 +       /* Convert link name to utf16 */
87649 +       err = ntfs_nls_to_utf16(sbi, symname, size,
87650 +                               (struct cpu_str *)(rp_name - 1), 2 * size,
87651 +                               UTF16_LITTLE_ENDIAN);
87652 +       if (err < 0)
87653 +               goto out;
87655 +       /* err = the length of the unicode name of the symlink */
87656 +       *nsize = ntfs_reparse_bytes(err);
87658 +       if (*nsize > sbi->reparse.max_size) {
87659 +               err = -EFBIG;
87660 +               goto out;
87661 +       }
87663 +       /* Translate Linux '/' into Windows '\' */
87664 +       for (i = 0; i < err; i++) {
87665 +               if (rp_name[i] == cpu_to_le16('/'))
87666 +                       rp_name[i] = cpu_to_le16('\\');
87667 +       }
87669 +       rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
87670 +       rp->ReparseDataLength =
87671 +               cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
87672 +                                             SymbolicLinkReparseBuffer));
87674 +       /* PrintName + SubstituteName */
87675 +       rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
87676 +       rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
87677 +       rs->PrintNameLength = rs->SubstituteNameOffset;
87679 +       /*
87680 +        * TODO: use a relative path if possible, to allow windows to parse this path
87681 +        * 0 - absolute path, 1 - relative path (SYMLINK_FLAG_RELATIVE)
87682 +        */
87683 +       rs->Flags = 0;
87685 +       memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
87687 +       /* decorate SubstituteName */
87688 +       rp_name += err;
87689 +       rp_name[0] = cpu_to_le16('\\');
87690 +       rp_name[1] = cpu_to_le16('?');
87691 +       rp_name[2] = cpu_to_le16('?');
87692 +       rp_name[3] = cpu_to_le16('\\');
87694 +       return rp;
87695 +out:
87696 +       ntfs_free(rp);
87697 +       return ERR_PTR(err);
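The decoration above operates on UTF-16 units; a byte-string analogue, shown here only to make the transformation visible, translates every Linux '/' to '\' and prefixes the SubstituteName copy with the NT-native '\??\' (the leading '/' of an absolute target also becomes '\', hence a doubled backslash):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *symname = "/mnt/data/target";
	char sub[64] = "\\??\\";                /* NT-native namespace prefix */
	size_t i, n = strlen(symname);

	for (i = 0; i < n; i++)
		sub[4 + i] = symname[i] == '/' ? '\\' : symname[i];
	sub[4 + n] = '\0';

	puts(sub);                              /* \??\\mnt\data\target */
	return 0;
}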
87700 +struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
87701 +                               struct inode *dir, struct dentry *dentry,
87702 +                               const struct cpu_str *uni, umode_t mode,
87703 +                               dev_t dev, const char *symname, u32 size,
87704 +                               int excl, struct ntfs_fnd *fnd)
87706 +       int err;
87707 +       struct super_block *sb = dir->i_sb;
87708 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
87709 +       const struct qstr *name = &dentry->d_name;
87710 +       CLST ino = 0;
87711 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
87712 +       struct ntfs_inode *ni = NULL;
87713 +       struct inode *inode = NULL;
87714 +       struct ATTRIB *attr;
87715 +       struct ATTR_STD_INFO5 *std5;
87716 +       struct ATTR_FILE_NAME *fname;
87717 +       struct MFT_REC *rec;
87718 +       u32 asize, dsize, sd_size;
87719 +       enum FILE_ATTRIBUTE fa;
87720 +       __le32 security_id = SECURITY_ID_INVALID;
87721 +       CLST vcn;
87722 +       const void *sd;
87723 +       u16 t16, nsize = 0, aid = 0;
87724 +       struct INDEX_ROOT *root, *dir_root;
87725 +       struct NTFS_DE *e, *new_de = NULL;
87726 +       struct REPARSE_DATA_BUFFER *rp = NULL;
87727 +       bool is_dir = S_ISDIR(mode);
87728 +       bool is_link = S_ISLNK(mode);
87729 +       bool rp_inserted = false;
87730 +       bool is_sp = S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
87731 +                    S_ISSOCK(mode);
87733 +       if (is_sp)
87734 +               return ERR_PTR(-EOPNOTSUPP);
87736 +       dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
87737 +       if (!dir_root)
87738 +               return ERR_PTR(-EINVAL);
87740 +       if (is_dir) {
87741 +               /* use parent's directory attributes */
87742 +               fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
87743 +                    FILE_ATTRIBUTE_ARCHIVE;
87744 +               /*
87745 +                * By default a child directory inherits its parent's attributes;
87746 +                * the root directory is hidden + system.
87747 +                * Make an exception for children of the root
87748 +                */
87749 +               if (dir->i_ino == MFT_REC_ROOT)
87750 +                       fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
87751 +       } else if (is_link) {
87752 +               /* It is a good idea for a link to be the same type (file/dir) as its target */
87753 +               fa = FILE_ATTRIBUTE_REPARSE_POINT;
87755 +               /*
87756 +                * linux: there are dirs, files, symlinks and so on
87757 +                * NTFS: symlinks are "dir + reparse" or "file + reparse"
87758 +                * It is a good idea to create:
87759 +                * dir + reparse if 'symname' points to a directory
87760 +                * or
87761 +                * file + reparse if 'symname' points to a file
87762 +                * Unfortunately kern_path hangs if 'symname' contains 'dir'
87763 +                */
87765 +               /*
87766 +                *      struct path path;
87767 +                *
87768 +                *      if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
87769 +                *              struct inode *target = d_inode(path.dentry);
87770 +                *
87771 +                *              if (S_ISDIR(target->i_mode))
87772 +                *                      fa |= FILE_ATTRIBUTE_DIRECTORY;
87773 +                *              // if ( target->i_sb == sb ){
87774 +                *              //      use relative path?
87775 +                *              // }
87776 +                *              path_put(&path);
87777 +                *      }
87778 +                */
87779 +       } else if (sbi->options.sparse) {
87780 +               /* Sparse regular file, because of the mount option 'sparse' */
87781 +               fa = FILE_ATTRIBUTE_SPARSE_FILE | FILE_ATTRIBUTE_ARCHIVE;
87782 +       } else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
87783 +               /* compressed regular file, if parent is compressed */
87784 +               fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
87785 +       } else {
87786 +               /* regular file, default attributes */
87787 +               fa = FILE_ATTRIBUTE_ARCHIVE;
87788 +       }
87790 +       if (!(mode & 0222))
87791 +               fa |= FILE_ATTRIBUTE_READONLY;
87793 +       /* allocate PATH_MAX bytes */
87794 +       new_de = __getname();
87795 +       if (!new_de) {
87796 +               err = -ENOMEM;
87797 +               goto out1;
87798 +       }
87800 +       /* Mark rw NTFS as dirty. It will be cleared at umount */
87801 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
87803 +       /* Step 1: allocate and fill new mft record */
87804 +       err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
87805 +       if (err)
87806 +               goto out2;
87808 +       ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
87809 +       if (IS_ERR(ni)) {
87810 +               err = PTR_ERR(ni);
87811 +               ni = NULL;
87812 +               goto out3;
87813 +       }
87814 +       inode = &ni->vfs_inode;
87816 +       inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime =
87817 +               current_time(inode);
87819 +       rec = ni->mi.mrec;
87820 +       rec->hard_links = cpu_to_le16(1);
87821 +       attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
87823 +       /* Get default security id */
87824 +       sd = s_default_security;
87825 +       sd_size = sizeof(s_default_security);
87827 +       if (is_ntfs3(sbi)) {
87828 +               security_id = dir_ni->std_security_id;
87829 +               if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
87830 +                       security_id = sbi->security.def_security_id;
87832 +                       if (security_id == SECURITY_ID_INVALID &&
87833 +                           !ntfs_insert_security(sbi, sd, sd_size,
87834 +                                                 &security_id, NULL))
87835 +                               sbi->security.def_security_id = security_id;
87836 +               }
87837 +       }
87839 +       /* Insert standard info */
87840 +       std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
87842 +       if (security_id == SECURITY_ID_INVALID) {
87843 +               dsize = sizeof(struct ATTR_STD_INFO);
87844 +       } else {
87845 +               dsize = sizeof(struct ATTR_STD_INFO5);
87846 +               std5->security_id = security_id;
87847 +               ni->std_security_id = security_id;
87848 +       }
87849 +       asize = SIZEOF_RESIDENT + dsize;
87851 +       attr->type = ATTR_STD;
87852 +       attr->size = cpu_to_le32(asize);
87853 +       attr->id = cpu_to_le16(aid++);
87854 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
87855 +       attr->res.data_size = cpu_to_le32(dsize);
87857 +       std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
87858 +               kernel2nt(&inode->i_atime);
87860 +       ni->std_fa = fa;
87861 +       std5->fa = fa;
87863 +       attr = Add2Ptr(attr, asize);
87865 +       /* Insert file name */
87866 +       err = fill_name_de(sbi, new_de, name, uni);
87867 +       if (err)
87868 +               goto out4;
87870 +       mi_get_ref(&ni->mi, &new_de->ref);
87872 +       fname = (struct ATTR_FILE_NAME *)(new_de + 1);
87873 +       mi_get_ref(&dir_ni->mi, &fname->home);
87874 +       fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
87875 +               fname->dup.a_time = std5->cr_time;
87876 +       fname->dup.alloc_size = fname->dup.data_size = 0;
87877 +       fname->dup.fa = std5->fa;
87878 +       fname->dup.ea_size = fname->dup.reparse = 0;
87880 +       dsize = le16_to_cpu(new_de->key_size);
87881 +       asize = QuadAlign(SIZEOF_RESIDENT + dsize);
87883 +       attr->type = ATTR_NAME;
87884 +       attr->size = cpu_to_le32(asize);
87885 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
87886 +       attr->res.flags = RESIDENT_FLAG_INDEXED;
87887 +       attr->id = cpu_to_le16(aid++);
87888 +       attr->res.data_size = cpu_to_le32(dsize);
87889 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);
87891 +       attr = Add2Ptr(attr, asize);
87893 +       if (security_id == SECURITY_ID_INVALID) {
87894 +               /* Insert security attribute */
87895 +               asize = SIZEOF_RESIDENT + QuadAlign(sd_size);
87897 +               attr->type = ATTR_SECURE;
87898 +               attr->size = cpu_to_le32(asize);
87899 +               attr->id = cpu_to_le16(aid++);
87900 +               attr->res.data_off = SIZEOF_RESIDENT_LE;
87901 +               attr->res.data_size = cpu_to_le32(sd_size);
87902 +               memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);
87904 +               attr = Add2Ptr(attr, asize);
87905 +       }
87907 +       if (fa & FILE_ATTRIBUTE_DIRECTORY) {
87908 +               /*
87909 +                * regular directory or symlink to directory
87910 +                * Create root attribute
87911 +                */
87912 +               dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
87913 +               asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
87915 +               attr->type = ATTR_ROOT;
87916 +               attr->size = cpu_to_le32(asize);
87917 +               attr->id = cpu_to_le16(aid++);
87919 +               attr->name_len = ARRAY_SIZE(I30_NAME);
87920 +               attr->name_off = SIZEOF_RESIDENT_LE;
87921 +               attr->res.data_off =
87922 +                       cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
87923 +               attr->res.data_size = cpu_to_le32(dsize);
87924 +               memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
87925 +                      sizeof(I30_NAME));
87927 +               root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
87928 +               memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
87929 +               root->ihdr.de_off =
87930 +                       cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
87931 +               root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
87932 +                                             sizeof(struct NTFS_DE));
87933 +               root->ihdr.total = root->ihdr.used;
87935 +               e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
87936 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
87937 +               e->flags = NTFS_IE_LAST;
87938 +       } else if (is_link) {
87939 +               /*
87940 +                * symlink to file
87941 +                * Create empty resident data attribute
87942 +                */
87943 +               asize = SIZEOF_RESIDENT;
87945 +               /* insert empty ATTR_DATA */
87946 +               attr->type = ATTR_DATA;
87947 +               attr->size = cpu_to_le32(SIZEOF_RESIDENT);
87948 +               attr->id = cpu_to_le16(aid++);
87949 +               attr->name_off = SIZEOF_RESIDENT_LE;
87950 +               attr->res.data_off = SIZEOF_RESIDENT_LE;
87951 +       } else {
87952 +               /*
87953 +                * regular file
87954 +                */
87955 +               attr->type = ATTR_DATA;
87956 +               attr->id = cpu_to_le16(aid++);
87957 +               /* Create empty non-resident data attribute */
87958 +               attr->non_res = 1;
87959 +               attr->nres.evcn = cpu_to_le64(-1ll);
87960 +               if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
87961 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
87962 +                       attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
87963 +                       attr->flags = ATTR_FLAG_SPARSED;
87964 +                       asize = SIZEOF_NONRESIDENT_EX + 8;
87965 +               } else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
87966 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
87967 +                       attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
87968 +                       attr->flags = ATTR_FLAG_COMPRESSED;
87969 +                       attr->nres.c_unit = COMPRESSION_UNIT;
87970 +                       asize = SIZEOF_NONRESIDENT_EX + 8;
87971 +               } else {
87972 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
87973 +                       attr->name_off = SIZEOF_NONRESIDENT_LE;
87974 +                       asize = SIZEOF_NONRESIDENT + 8;
87975 +               }
87976 +               attr->nres.run_off = attr->name_off;
87977 +       }
87979 +       if (is_dir) {
87980 +               ni->ni_flags |= NI_FLAG_DIR;
87981 +               err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
87982 +               if (err)
87983 +                       goto out4;
87984 +       } else if (is_link) {
87985 +               rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);
87987 +               if (IS_ERR(rp)) {
87988 +                       err = PTR_ERR(rp);
87989 +                       rp = NULL;
87990 +                       goto out4;
87991 +               }
87993 +               /*
87994 +                * Insert ATTR_REPARSE
87995 +                */
87996 +               attr = Add2Ptr(attr, asize);
87997 +               attr->type = ATTR_REPARSE;
87998 +               attr->id = cpu_to_le16(aid++);
88000 +               /* Resident or non-resident? */
88001 +               asize = QuadAlign(SIZEOF_RESIDENT + nsize);
88002 +               t16 = PtrOffset(rec, attr);
88004 +               if (asize + t16 + 8 > sbi->record_size) {
88005 +                       CLST alen;
88006 +                       CLST clst = bytes_to_cluster(sbi, nsize);
88008 +                       /* Bytes available for packed runs */
88009 +                       t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
88011 +                       attr->non_res = 1;
88012 +                       attr->nres.evcn = cpu_to_le64(clst - 1);
88013 +                       attr->name_off = SIZEOF_NONRESIDENT_LE;
88014 +                       attr->nres.run_off = attr->name_off;
88015 +                       attr->nres.data_size = cpu_to_le64(nsize);
88016 +                       attr->nres.valid_size = attr->nres.data_size;
88017 +                       attr->nres.alloc_size =
88018 +                               cpu_to_le64(ntfs_up_cluster(sbi, nsize));
88020 +                       err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
88021 +                                                    clst, NULL, 0, &alen, 0,
88022 +                                                    NULL);
88023 +                       if (err)
88024 +                               goto out5;
88026 +                       err = run_pack(&ni->file.run, 0, clst,
88027 +                                      Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
88028 +                                      &vcn);
88029 +                       if (err < 0)
88030 +                               goto out5;
88032 +                       if (vcn != clst) {
88033 +                               err = -EINVAL;
88034 +                               goto out5;
88035 +                       }
88037 +                       asize = SIZEOF_NONRESIDENT + QuadAlign(err);
88038 +                       inode->i_size = nsize;
88039 +               } else {
88040 +                       attr->res.data_off = SIZEOF_RESIDENT_LE;
88041 +                       attr->res.data_size = cpu_to_le32(nsize);
88042 +                       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
88043 +                       inode->i_size = nsize;
88044 +                       nsize = 0;
88045 +               }
88047 +               attr->size = cpu_to_le32(asize);
88049 +               err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
88050 +                                         &new_de->ref);
88051 +               if (err)
88052 +                       goto out5;
88054 +               rp_inserted = true;
88055 +       }
88057 +       attr = Add2Ptr(attr, asize);
88058 +       attr->type = ATTR_END;
88060 +       rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
88061 +       rec->next_attr_id = cpu_to_le16(aid);
88063 +       /* Step 2: Add new name in index */
88064 +       err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd);
88065 +       if (err)
88066 +               goto out6;
88068 +       /* Update current directory record */
88069 +       mark_inode_dirty(dir);
88071 +       /* Fill VFS inode fields */
88072 +       inode->i_uid = sbi->options.uid ? sbi->options.fs_uid : current_fsuid();
88073 +       inode->i_gid = sbi->options.gid          ? sbi->options.fs_gid
88074 +                      : (dir->i_mode & S_ISGID) ? dir->i_gid
88075 +                                                : current_fsgid();
88076 +       inode->i_generation = le16_to_cpu(rec->seq);
88078 +       dir->i_mtime = dir->i_ctime = inode->i_atime;
88080 +       if (is_dir) {
88081 +               if (dir->i_mode & S_ISGID)
88082 +                       mode |= S_ISGID;
88083 +               inode->i_op = &ntfs_dir_inode_operations;
88084 +               inode->i_fop = &ntfs_dir_operations;
88085 +       } else if (is_link) {
88086 +               inode->i_op = &ntfs_link_inode_operations;
88087 +               inode->i_fop = NULL;
88088 +               inode->i_mapping->a_ops = &ntfs_aops;
88089 +       } else {
88090 +               inode->i_op = &ntfs_file_inode_operations;
88091 +               inode->i_fop = &ntfs_file_operations;
88092 +               inode->i_mapping->a_ops =
88093 +                       is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
88094 +               init_rwsem(&ni->file.run_lock);
88095 +       }
88097 +       inode->i_mode = mode;
88099 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
88100 +       if (!is_link && (sb->s_flags & SB_POSIXACL)) {
88101 +               err = ntfs_init_acl(mnt_userns, inode, dir);
88102 +               if (err)
88103 +                       goto out6;
88104 +       } else
88105 +#endif
88106 +       {
88107 +               inode->i_flags |= S_NOSEC;
88108 +       }
88110 +       /* Write non-resident data */
88111 +       if (nsize) {
88112 +               err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize);
88113 +               if (err)
88114 +                       goto out7;
88115 +       }
88117 +       /* call 'd_instantiate' after inode->i_op is set but before finish_open */
88118 +       d_instantiate(dentry, inode);
88120 +       mark_inode_dirty(inode);
88121 +       mark_inode_dirty(dir);
88123 +       /* normal exit */
88124 +       goto out2;
88126 +out7:
88128 +       /* undo 'indx_insert_entry' */
88129 +       indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
88130 +                         le16_to_cpu(new_de->key_size), sbi);
88131 +out6:
88132 +       if (rp_inserted)
88133 +               ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
88135 +out5:
88136 +       if (is_dir || run_is_empty(&ni->file.run))
88137 +               goto out4;
88139 +       run_deallocate(sbi, &ni->file.run, false);
88141 +out4:
88142 +       clear_rec_inuse(rec);
88143 +       clear_nlink(inode);
88144 +       ni->mi.dirty = false;
88145 +       discard_new_inode(inode);
88146 +out3:
88147 +       ntfs_mark_rec_free(sbi, ino);
88149 +out2:
88150 +       __putname(new_de);
88151 +       ntfs_free(rp);
88153 +out1:
88154 +       if (err)
88155 +               return ERR_PTR(err);
88157 +       unlock_new_inode(inode);
88159 +       return inode;
88162 +int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
88164 +       int err;
88165 +       struct inode *dir = d_inode(dentry->d_parent);
88166 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
88167 +       struct ntfs_inode *ni = ntfs_i(inode);
88168 +       struct super_block *sb = inode->i_sb;
88169 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
88170 +       const struct qstr *name = &dentry->d_name;
88171 +       struct NTFS_DE *new_de = NULL;
88172 +       struct ATTR_FILE_NAME *fname;
88173 +       struct ATTRIB *attr;
88174 +       u16 key_size;
88175 +       struct INDEX_ROOT *dir_root;
88177 +       dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
88178 +       if (!dir_root)
88179 +               return -EINVAL;
88181 +       /* allocate PATH_MAX bytes */
88182 +       new_de = __getname();
88183 +       if (!new_de)
88184 +               return -ENOMEM;
88186 +       /* Mark rw NTFS volume as dirty. It will be cleared at umount. */
88187 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);
88189 +       /* Insert file name */
88190 +       err = fill_name_de(sbi, new_de, name, NULL);
88191 +       if (err)
88192 +               goto out;
88194 +       key_size = le16_to_cpu(new_de->key_size);
88195 +       err = ni_insert_resident(ni, key_size, ATTR_NAME, NULL, 0, &attr, NULL);
88196 +       if (err)
88197 +               goto out;
88199 +       mi_get_ref(&ni->mi, &new_de->ref);
88201 +       fname = (struct ATTR_FILE_NAME *)(new_de + 1);
88202 +       mi_get_ref(&dir_ni->mi, &fname->home);
88203 +       fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
88204 +               fname->dup.a_time = kernel2nt(&inode->i_ctime);
88205 +       fname->dup.alloc_size = fname->dup.data_size = 0;
88206 +       fname->dup.fa = ni->std_fa;
88207 +       fname->dup.ea_size = fname->dup.reparse = 0;
88209 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, key_size);
88211 +       err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, NULL);
88212 +       if (err)
88213 +               goto out;
88215 +       le16_add_cpu(&ni->mi.mrec->hard_links, 1);
88216 +       ni->mi.dirty = true;
88218 +out:
88219 +       __putname(new_de);
88220 +       return err;
88224 + * ntfs_unlink_inode
88225 + *
88226 + * inode_operations::unlink
88227 + * inode_operations::rmdir
88228 + */
88229 +int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
88231 +       int err;
88232 +       struct super_block *sb = dir->i_sb;
88233 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
88234 +       struct inode *inode = d_inode(dentry);
88235 +       struct ntfs_inode *ni = ntfs_i(inode);
88236 +       const struct qstr *name = &dentry->d_name;
88237 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
88238 +       struct ntfs_index *indx = &dir_ni->dir;
88239 +       struct cpu_str *uni = NULL;
88240 +       struct ATTR_FILE_NAME *fname;
88241 +       u8 name_type;
88242 +       struct ATTR_LIST_ENTRY *le;
88243 +       struct MFT_REF ref;
88244 +       bool is_dir = S_ISDIR(inode->i_mode);
88245 +       struct INDEX_ROOT *dir_root;
88247 +       dir_root = indx_get_root(indx, dir_ni, NULL, NULL);
88248 +       if (!dir_root)
88249 +               return -EINVAL;
88251 +       ni_lock(ni);
88253 +       if (is_dir && !dir_is_empty(inode)) {
88254 +               err = -ENOTEMPTY;
88255 +               goto out1;
88256 +       }
88258 +       if (ntfs_is_meta_file(sbi, inode->i_ino)) {
88259 +               err = -EINVAL;
88260 +               goto out1;
88261 +       }
88263 +       /* allocate PATH_MAX bytes */
88264 +       uni = __getname();
88265 +       if (!uni) {
88266 +               err = -ENOMEM;
88267 +               goto out1;
88268 +       }
88270 +       /* Convert input string to Unicode */
88271 +       err = ntfs_nls_to_utf16(sbi, name->name, name->len, uni, NTFS_NAME_LEN,
88272 +                               UTF16_HOST_ENDIAN);
88273 +       if (err < 0)
88274 +               goto out2;
88276 +       /* Mark rw NTFS volume as dirty. It will be cleared at umount. */
88277 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
88279 +       /* find name in record */
88280 +       mi_get_ref(&dir_ni->mi, &ref);
88282 +       le = NULL;
88283 +       fname = ni_fname_name(ni, uni, &ref, &le);
88284 +       if (!fname) {
88285 +               err = -ENOENT;
88286 +               goto out3;
88287 +       }
88289 +       name_type = paired_name(fname->type);
88291 +       err = indx_delete_entry(indx, dir_ni, fname, fname_full_size(fname),
88292 +                               sbi);
88293 +       if (err)
88294 +               goto out3;
88296 +       /* Then remove name from MFT */
88297 +       ni_remove_attr_le(ni, attr_from_name(fname), le);
88299 +       le16_add_cpu(&ni->mi.mrec->hard_links, -1);
88300 +       ni->mi.dirty = true;
88302 +       if (name_type != FILE_NAME_POSIX) {
88303 +               /* Now we should delete name by type */
88304 +               fname = ni_fname_type(ni, name_type, &le);
88305 +               if (fname) {
88306 +                       err = indx_delete_entry(indx, dir_ni, fname,
88307 +                                               fname_full_size(fname), sbi);
88308 +                       if (err)
88309 +                               goto out3;
88311 +                       ni_remove_attr_le(ni, attr_from_name(fname), le);
88313 +                       le16_add_cpu(&ni->mi.mrec->hard_links, -1);
88314 +               }
88315 +       }
88316 +out3:
88317 +       switch (err) {
88318 +       case 0:
88319 +               drop_nlink(inode);
+               fallthrough;
88320 +       case -ENOTEMPTY:
88321 +       case -ENOSPC:
88322 +       case -EROFS:
88323 +               break;
88324 +       default:
88325 +               make_bad_inode(inode);
88326 +       }
88328 +       dir->i_mtime = dir->i_ctime = current_time(dir);
88329 +       mark_inode_dirty(dir);
88330 +       inode->i_ctime = dir->i_ctime;
88331 +       if (inode->i_nlink)
88332 +               mark_inode_dirty(inode);
88334 +out2:
88335 +       __putname(uni);
88336 +out1:
88337 +       ni_unlock(ni);
88338 +       return err;
88341 +void ntfs_evict_inode(struct inode *inode)
88343 +       truncate_inode_pages_final(&inode->i_data);
88345 +       if (inode->i_nlink)
88346 +               _ni_write_inode(inode, inode_needs_sync(inode));
88348 +       invalidate_inode_buffers(inode);
88349 +       clear_inode(inode);
88351 +       ni_clear(ntfs_i(inode));
88354 +static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
88355 +                                     int buflen)
88357 +       int i, err = 0;
88358 +       struct ntfs_inode *ni = ntfs_i(inode);
88359 +       struct super_block *sb = inode->i_sb;
88360 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
88361 +       u64 i_size = inode->i_size;
88362 +       u16 nlen = 0;
88363 +       void *to_free = NULL;
88364 +       struct REPARSE_DATA_BUFFER *rp;
88365 +       struct le_str *uni;
88366 +       struct ATTRIB *attr;
88368 +       /* Reparse data present. Try to parse it */
88369 +       static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
88370 +       static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
88372 +       *buffer = 0;
88374 +       /* Read into temporary buffer */
88375 +       if (i_size > sbi->reparse.max_size || i_size <= sizeof(u32)) {
88376 +               err = -EINVAL;
88377 +               goto out;
88378 +       }
88380 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
88381 +       if (!attr) {
88382 +               err = -EINVAL;
88383 +               goto out;
88384 +       }
88386 +       if (!attr->non_res) {
88387 +               rp = resident_data_ex(attr, i_size);
88388 +               if (!rp) {
88389 +                       err = -EINVAL;
88390 +                       goto out;
88391 +               }
88392 +       } else {
88393 +               rp = ntfs_malloc(i_size);
88394 +               if (!rp) {
88395 +                       err = -ENOMEM;
88396 +                       goto out;
88397 +               }
88398 +               to_free = rp;
88399 +               err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, i_size, NULL);
88400 +               if (err)
88401 +                       goto out;
88402 +       }
88404 +       err = -EINVAL;
88406 +       /* Microsoft Tag */
88407 +       switch (rp->ReparseTag) {
88408 +       case IO_REPARSE_TAG_MOUNT_POINT:
88409 +               /* Mount points and junctions */
88410 +               /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
88411 +               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
88412 +                                      MountPointReparseBuffer.PathBuffer))
88413 +                       goto out;
88414 +               uni = Add2Ptr(rp,
88415 +                             offsetof(struct REPARSE_DATA_BUFFER,
88416 +                                      MountPointReparseBuffer.PathBuffer) +
88417 +                                     le16_to_cpu(rp->MountPointReparseBuffer
88418 +                                                         .PrintNameOffset) -
88419 +                                     2);
88420 +               nlen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
88421 +               break;
88423 +       case IO_REPARSE_TAG_SYMLINK:
88424 +               /* FolderSymbolicLink */
88425 +               /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
88426 +               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
88427 +                                      SymbolicLinkReparseBuffer.PathBuffer))
88428 +                       goto out;
88429 +               uni = Add2Ptr(rp,
88430 +                             offsetof(struct REPARSE_DATA_BUFFER,
88431 +                                      SymbolicLinkReparseBuffer.PathBuffer) +
88432 +                                     le16_to_cpu(rp->SymbolicLinkReparseBuffer
88433 +                                                         .PrintNameOffset) -
88434 +                                     2);
88435 +               nlen = le16_to_cpu(
88436 +                       rp->SymbolicLinkReparseBuffer.PrintNameLength);
88437 +               break;
88439 +       case IO_REPARSE_TAG_CLOUD:
88440 +       case IO_REPARSE_TAG_CLOUD_1:
88441 +       case IO_REPARSE_TAG_CLOUD_2:
88442 +       case IO_REPARSE_TAG_CLOUD_3:
88443 +       case IO_REPARSE_TAG_CLOUD_4:
88444 +       case IO_REPARSE_TAG_CLOUD_5:
88445 +       case IO_REPARSE_TAG_CLOUD_6:
88446 +       case IO_REPARSE_TAG_CLOUD_7:
88447 +       case IO_REPARSE_TAG_CLOUD_8:
88448 +       case IO_REPARSE_TAG_CLOUD_9:
88449 +       case IO_REPARSE_TAG_CLOUD_A:
88450 +       case IO_REPARSE_TAG_CLOUD_B:
88451 +       case IO_REPARSE_TAG_CLOUD_C:
88452 +       case IO_REPARSE_TAG_CLOUD_D:
88453 +       case IO_REPARSE_TAG_CLOUD_E:
88454 +       case IO_REPARSE_TAG_CLOUD_F:
88455 +               err = sizeof("OneDrive") - 1;
88456 +               if (err > buflen)
88457 +                       err = buflen;
88458 +               memcpy(buffer, "OneDrive", err);
88459 +               goto out;
88461 +       default:
88462 +               if (IsReparseTagMicrosoft(rp->ReparseTag)) {
88463 +                       /* unknown Microsoft Tag */
88464 +                       goto out;
88465 +               }
88466 +               if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
88467 +                   i_size <= sizeof(struct REPARSE_POINT)) {
88468 +                       goto out;
88469 +               }
88471 +               /* User-defined tag */
88472 +               uni = Add2Ptr(rp, sizeof(struct REPARSE_POINT) - 2);
88473 +               nlen = le16_to_cpu(rp->ReparseDataLength) -
88474 +                      sizeof(struct REPARSE_POINT);
88475 +       }
88477 +       /* Convert nlen from bytes to Unicode chars */
88478 +       nlen >>= 1;
88480 +       /* Check that name is available */
88481 +       if (!nlen || &uni->name[nlen] > (__le16 *)Add2Ptr(rp, i_size))
88482 +               goto out;
88484 +       /* If name is already zero-terminated then truncate it now */
88485 +       if (!uni->name[nlen - 1])
88486 +               nlen -= 1;
88487 +       uni->len = nlen;
88489 +       err = ntfs_utf16_to_nls(sbi, uni, buffer, buflen);
88491 +       if (err < 0)
88492 +               goto out;
88494 +       /* Translate Windows '\' into Linux '/' */
88495 +       for (i = 0; i < err; i++) {
88496 +               if (buffer[i] == '\\')
88497 +                       buffer[i] = '/';
88498 +       }
88500 +       /* Always set last zero */
88501 +       buffer[err] = 0;
88502 +out:
88503 +       ntfs_free(to_free);
88504 +       return err;
88507 +static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
88508 +                                struct delayed_call *done)
88510 +       int err;
88511 +       char *ret;
88513 +       if (!de)
88514 +               return ERR_PTR(-ECHILD);
88516 +       ret = kmalloc(PAGE_SIZE, GFP_NOFS);
88517 +       if (!ret)
88518 +               return ERR_PTR(-ENOMEM);
88520 +       err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE);
88521 +       if (err < 0) {
88522 +               kfree(ret);
88523 +               return ERR_PTR(err);
88524 +       }
88526 +       set_delayed_call(done, kfree_link, ret);
88528 +       return ret;
88531 +const struct inode_operations ntfs_link_inode_operations = {
88532 +       .get_link = ntfs_get_link,
88533 +       .setattr = ntfs3_setattr,
88534 +       .listxattr = ntfs_listxattr,
88535 +       .permission = ntfs_permission,
88536 +       .get_acl = ntfs_get_acl,
88537 +       .set_acl = ntfs_set_acl,
88540 +const struct address_space_operations ntfs_aops = {
88541 +       .readpage = ntfs_readpage,
88542 +       .readahead = ntfs_readahead,
88543 +       .writepage = ntfs_writepage,
88544 +       .writepages = ntfs_writepages,
88545 +       .write_begin = ntfs_write_begin,
88546 +       .write_end = ntfs_write_end,
88547 +       .direct_IO = ntfs_direct_IO,
88548 +       .bmap = ntfs_bmap,
88551 +const struct address_space_operations ntfs_aops_cmpr = {
88552 +       .readpage = ntfs_readpage,
88553 +       .readahead = ntfs_readahead,
88555 diff --git a/fs/ntfs3/lib/decompress_common.c b/fs/ntfs3/lib/decompress_common.c
88556 new file mode 100644
88557 index 000000000000..83c9e93aea77
88558 --- /dev/null
88559 +++ b/fs/ntfs3/lib/decompress_common.c
88560 @@ -0,0 +1,332 @@
88561 +// SPDX-License-Identifier: GPL-2.0-or-later
88563 + * decompress_common.c - Code shared by the XPRESS and LZX decompressors
88564 + *
88565 + * Copyright (C) 2015 Eric Biggers
88566 + *
88567 + * This program is free software: you can redistribute it and/or modify it under
88568 + * the terms of the GNU General Public License as published by the Free Software
88569 + * Foundation, either version 2 of the License, or (at your option) any later
88570 + * version.
88571 + *
88572 + * This program is distributed in the hope that it will be useful, but WITHOUT
88573 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
88574 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
88575 + * details.
88576 + *
88577 + * You should have received a copy of the GNU General Public License along with
88578 + * this program.  If not, see <http://www.gnu.org/licenses/>.
88579 + */
88581 +#include "decompress_common.h"
88584 + * make_huffman_decode_table() - Build a decoding table for a canonical
88586 + * prefix code, or "Huffman code".
88587 + *
88588 + * This is an internal function, not part of the library API!
88589 + *
88590 + * This takes as input the length of the codeword for each symbol in the
88591 + * alphabet and produces as output a table that can be used for fast
88592 + * decoding of prefix-encoded symbols using read_huffsym().
88593 + *
88594 + * Strictly speaking, a canonical prefix code might not be a Huffman
88595 + * code.  But this algorithm will work either way; and in fact, since
88596 + * Huffman codes are defined in terms of symbol frequencies, there is no
88597 + * way for the decompressor to know whether the code is a true Huffman
88598 + * code or not until all symbols have been decoded.
88599 + *
88600 + * Because the prefix code is assumed to be "canonical", it can be
88601 + * reconstructed directly from the codeword lengths.  A prefix code is
88602 + * canonical if and only if a longer codeword never lexicographically
88603 + * precedes a shorter codeword, and the lexicographic ordering of
88604 + * codewords of the same length is the same as the lexicographic ordering
88605 + * of the corresponding symbols.  Consequently, we can sort the symbols
88606 + * primarily by codeword length and secondarily by symbol value, then
88607 + * reconstruct the prefix code by generating codewords lexicographically
88608 + * in that order.
88609 + *
88610 + * This function does not, however, generate the prefix code explicitly.
88611 + * Instead, it directly builds a table for decoding symbols using the
88612 + * code.  The basic idea is this: given the next 'max_codeword_len' bits
88613 + * in the input, we can look up the decoded symbol by indexing a table
88614 + * containing 2**max_codeword_len entries.  A codeword with length
88615 + * 'max_codeword_len' will have exactly one entry in this table, whereas
88616 + * a codeword shorter than 'max_codeword_len' will have multiple entries
88617 + * in this table.  Precisely, a codeword of length n will be represented
88618 + * by 2**(max_codeword_len - n) entries in this table.  The 0-based index
88619 + * of each such entry will contain the corresponding codeword as a prefix
88620 + * when zero-padded on the left to 'max_codeword_len' binary digits.
88621 + *
88622 + * That's the basic idea, but we implement two optimizations regarding
88623 + * the format of the decode table itself:
88624 + *
88625 + * - For many compression formats, the maximum codeword length is too
88626 + *   long for it to be efficient to build the full decoding table
88627 + *   whenever a new prefix code is used.  Instead, we can build the table
88628 + *   using only 2**table_bits entries, where 'table_bits' is some number
88629 + *   less than or equal to 'max_codeword_len'.  Then, only codewords of
88630 + *   length 'table_bits' and shorter can be directly looked up.  For
88631 + *   longer codewords, the direct lookup instead produces the root of a
88632 + *   binary tree.  Using this tree, the decoder can do traditional
88633 + *   bit-by-bit decoding of the remainder of the codeword.  Child nodes
88634 + *   are allocated in extra entries at the end of the table; leaf nodes
88635 + *   contain symbols.  Note that the long-codeword case is, in general,
88636 + *   not performance critical, since in Huffman codes the most frequently
88637 + *   used symbols are assigned the shortest codeword lengths.
88638 + *
88639 + * - When we decode a symbol using a direct lookup of the table, we still
88640 + *   need to know its length so that the bitstream can be advanced by the
88641 + *   appropriate number of bits.  The simple solution is to simply retain
88642 + *   the 'lens' array and use the decoded symbol as an index into it.
88643 + *   However, this requires two separate array accesses in the fast path.
88644 + *   The optimization is to store the length directly in the decode
88645 + *   table.  We use the bottom 11 bits for the symbol and the top 5 bits
88646 + *   for the length.  In addition, to combine this optimization with the
88647 + *   previous one, we introduce a special case where the top 2 bits of
88648 + *   the length are both set if the entry is actually the root of a
88649 + *   binary tree.
88650 + *
88651 + * @decode_table:
88652 + *     The array in which to create the decoding table.  This must have
88653 + *     a length of at least ((2**table_bits) + 2 * num_syms) entries.
88654 + *
88655 + * @num_syms:
88656 + *     The number of symbols in the alphabet; also, the length of the
88657 + *     'lens' array.  Must be less than or equal to 2048.
88658 + *
88659 + * @table_bits:
88660 + *     The order of the decode table size, as explained above.  Must be
88661 + *     less than or equal to 13.
88662 + *
88663 + * @lens:
88664 + *     An array of length @num_syms, indexable by symbol, that gives the
88665 + *     length of the codeword, in bits, for that symbol.  The length can
88666 + *     be 0, which means that the symbol does not have a codeword
88667 + *     assigned.
88668 + *
88669 + * @max_codeword_len:
88670 + *     The longest codeword length allowed in the compression format.
88671 + *     All entries in 'lens' must be less than or equal to this value.
88672 + *     This must be less than or equal to 23.
88673 + *
88674 + * @working_space:
88675 + *     A temporary array of length '2 * (max_codeword_len + 1) +
88676 + *     num_syms'.
88677 + *
88678 + * Returns 0 on success, or -1 if the lengths do not form a valid prefix
88679 + * code.
88680 + */
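+/*
+ * Worked example (hypothetical 4-symbol alphabet, not taken from the
+ * LZX or XPRESS formats): lens = {1, 2, 3, 3} with table_bits =
+ * max_codeword_len = 3 gives the canonical codewords 0, 10, 110, 111,
+ * and the direct lookup table is filled as:
+ *
+ *     index 000..011 -> 0x0800   ((1 << 11) | 0: length 1, symbol 0)
+ *     index 100..101 -> 0x1001   ((2 << 11) | 1: length 2, symbol 1)
+ *     index 110      -> 0x1802   ((3 << 11) | 2: length 3, symbol 2)
+ *     index 111      -> 0x1803   ((3 << 11) | 3: length 3, symbol 3)
+ *
+ * Every entry is below 0xC000, so no binary-tree slots are needed here.
+ */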
88681 +int make_huffman_decode_table(u16 decode_table[], const u32 num_syms,
88682 +                             const u32 table_bits, const u8 lens[],
88683 +                             const u32 max_codeword_len,
88684 +                             u16 working_space[])
88686 +       const u32 table_num_entries = 1 << table_bits;
88687 +       u16 * const len_counts = &working_space[0];
88688 +       u16 * const offsets = &working_space[1 * (max_codeword_len + 1)];
88689 +       u16 * const sorted_syms = &working_space[2 * (max_codeword_len + 1)];
88690 +       int left;
88691 +       void *decode_table_ptr;
88692 +       u32 sym_idx;
88693 +       u32 codeword_len;
88694 +       u32 stores_per_loop;
88695 +       u32 decode_table_pos;
88696 +       u32 len;
88697 +       u32 sym;
88699 +       /* Count how many symbols have each possible codeword length.
88700 +        * Note that a length of 0 indicates the corresponding symbol is not
88701 +        * used in the code and therefore does not have a codeword.
88702 +        */
88703 +       for (len = 0; len <= max_codeword_len; len++)
88704 +               len_counts[len] = 0;
88705 +       for (sym = 0; sym < num_syms; sym++)
88706 +               len_counts[lens[sym]]++;
88708 +       /* We can assume all lengths are <= max_codeword_len, but we
88709 +        * cannot assume they form a valid prefix code.  A codeword of
88710 +        * length n should require a proportion of the codespace equaling
88711 +        * (1/2)^n.  The code is valid if and only if the codespace is
88712 +        * exactly filled by the lengths, by this measure.
88713 +        */
88714 +       left = 1;
88715 +       for (len = 1; len <= max_codeword_len; len++) {
88716 +               left <<= 1;
88717 +               left -= len_counts[len];
88718 +               if (left < 0) {
88719 +                       /* The lengths overflow the codespace; that is, the code
88720 +                        * is over-subscribed.
88721 +                        */
88722 +                       return -1;
88723 +               }
88724 +       }
88726 +       if (left) {
88727 +               /* The lengths do not fill the codespace; that is, they form an
88728 +                * incomplete set.
88729 +                */
88730 +               if (left == (1 << max_codeword_len)) {
88731 +                       /* The code is completely empty.  This is arguably
88732 +                        * invalid, but in fact it is valid in LZX and XPRESS,
88733 +                        * so we must allow it.  By definition, no symbols can
88734 +                        * be decoded with an empty code.  Consequently, we
88735 +                        * technically don't even need to fill in the decode
88736 +                        * table.  However, to avoid accessing uninitialized
88737 +                        * memory if the algorithm nevertheless attempts to
88738 +                        * decode symbols using such a code, we zero out the
88739 +                        * decode table.
88740 +                        */
88741 +                       memset(decode_table, 0,
88742 +                              table_num_entries * sizeof(decode_table[0]));
88743 +                       return 0;
88744 +               }
88745 +               return -1;
88746 +       }
88748 +       /* Sort the symbols primarily by length and secondarily by symbol order.
88749 +        */
88751 +       /* Initialize 'offsets' so that offsets[len] for 1 <= len <=
88752 +        * max_codeword_len is the number of codewords shorter than 'len' bits.
88753 +        */
88754 +       offsets[1] = 0;
88755 +       for (len = 1; len < max_codeword_len; len++)
88756 +               offsets[len + 1] = offsets[len] + len_counts[len];
88758 +       /* Use the 'offsets' array to sort the symbols.  Note that we do not
88759 +        * include symbols that are not used in the code.  Consequently, fewer
88760 +        * than 'num_syms' entries in 'sorted_syms' may be filled.
88761 +        */
88762 +       for (sym = 0; sym < num_syms; sym++)
88763 +               if (lens[sym])
88764 +                       sorted_syms[offsets[lens[sym]]++] = sym;
88766 +       /* Fill entries for codewords with length <= table_bits
88767 +        * --- that is, those short enough for a direct mapping.
88768 +        *
88769 +        * The table will start with entries for the shortest codeword(s), which
88770 +        * have the most entries.  From there, the number of entries per
88771 +        * codeword will decrease.
88772 +        */
88773 +       decode_table_ptr = decode_table;
88774 +       sym_idx = 0;
88775 +       codeword_len = 1;
88776 +       stores_per_loop = (1 << (table_bits - codeword_len));
88777 +       for (; stores_per_loop != 0; codeword_len++, stores_per_loop >>= 1) {
88778 +               u32 end_sym_idx = sym_idx + len_counts[codeword_len];
88780 +               for (; sym_idx < end_sym_idx; sym_idx++) {
88781 +                       u16 entry;
88782 +                       u16 *p;
88783 +                       u32 n;
88785 +                       entry = ((u32)codeword_len << 11) | sorted_syms[sym_idx];
88786 +                       p = (u16 *)decode_table_ptr;
88787 +                       n = stores_per_loop;
88789 +                       do {
88790 +                               *p++ = entry;
88791 +                       } while (--n);
88793 +                       decode_table_ptr = p;
88794 +               }
88795 +       }
88797 +       /* If we've filled in the entire table, we are done.  Otherwise,
88798 +        * there are codewords longer than table_bits for which we must
88799 +        * generate binary trees.
88800 +        */
88801 +       decode_table_pos = (u16 *)decode_table_ptr - decode_table;
88802 +       if (decode_table_pos != table_num_entries) {
88803 +               u32 j;
88804 +               u32 next_free_tree_slot;
88805 +               u32 cur_codeword;
88807 +               /* First, zero out the remaining entries.  This is
88808 +                * necessary so that these entries appear as
88809 +                * "unallocated" in the next part.  Each of these entries
88810 +                * will eventually be filled with the representation of
88811 +                * the root node of a binary tree.
88812 +                */
88813 +               j = decode_table_pos;
88814 +               do {
88815 +                       decode_table[j] = 0;
88816 +               } while (++j != table_num_entries);
88818 +               /* We allocate child nodes starting at the end of the
88819 +                * direct lookup table.  Note that there should be
88820 +                * 2*num_syms extra entries for this purpose, although
88821 +                * fewer than this may actually be needed.
88822 +                */
88823 +               next_free_tree_slot = table_num_entries;
88825 +               /* Iterate through each codeword with length greater than
88826 +                * 'table_bits', primarily in order of codeword length
88827 +                * and secondarily in order of symbol.
88828 +                */
88829 +               for (cur_codeword = decode_table_pos << 1;
88830 +                    codeword_len <= max_codeword_len;
88831 +                    codeword_len++, cur_codeword <<= 1) {
88832 +                       u32 end_sym_idx = sym_idx + len_counts[codeword_len];
88834 +                       for (; sym_idx < end_sym_idx; sym_idx++, cur_codeword++) {
88835 +                               /* 'sorted_sym' is the symbol represented by the
88836 +                                * codeword.
88837 +                                */
88838 +                               u32 sorted_sym = sorted_syms[sym_idx];
88839 +                               u32 extra_bits = codeword_len - table_bits;
88840 +                               u32 node_idx = cur_codeword >> extra_bits;
88842 +                               /* Go through each bit of the current codeword
88843 +                                * beyond the prefix of length @table_bits and
88844 +                                * walk the appropriate binary tree, allocating
88845 +                                * any slots that have not yet been allocated.
88846 +                                *
88847 +                                * Note that the 'pointer' entry to the binary
88848 +                                * tree, which is stored in the direct lookup
88849 +                                * portion of the table, is represented
88850 +                                * identically to other internal (non-leaf)
88851 +                                * nodes of the binary tree; it can be thought
88852 +                                * of as simply the root of the tree.  The
88853 +                                * representation of these internal nodes is
88854 +                                * simply the index of the left child combined
88855 +                                * with the special bits 0xC000 to distinguish
88856 +                                * the entry from direct mapping and leaf node
88857 +                                * entries.
88858 +                                */
88859 +                               do {
88860 +                                       /* At least one bit remains in the
88861 +                                        * codeword, but the current node is an
88862 +                                        * unallocated leaf.  Change it to an
88863 +                                        * internal node.
88864 +                                        */
88865 +                                       if (decode_table[node_idx] == 0) {
88866 +                                               decode_table[node_idx] =
88867 +                                                       next_free_tree_slot | 0xC000;
88868 +                                               decode_table[next_free_tree_slot++] = 0;
88869 +                                               decode_table[next_free_tree_slot++] = 0;
88870 +                                       }
88872 +                                       /* Go to the left child if the next bit
88873 +                                        * in the codeword is 0; otherwise go to
88874 +                                        * the right child.
88875 +                                        */
88876 +                                       node_idx = decode_table[node_idx] & 0x3FFF;
88877 +                                       --extra_bits;
88878 +                                       node_idx += (cur_codeword >> extra_bits) & 1;
88879 +                               } while (extra_bits != 0);
88881 +                               /* We've traversed the tree using the entire
88882 +                                * codeword, and we're now at the entry where
88883 +                                * the actual symbol will be stored.  This is
88884 +                                * distinguished from internal nodes by not
88885 +                                * having its high two bits set.
88886 +                                */
88887 +                               decode_table[node_idx] = sorted_sym;
88888 +                       }
88889 +               }
88890 +       }
88891 +       return 0;
88893 diff --git a/fs/ntfs3/lib/decompress_common.h b/fs/ntfs3/lib/decompress_common.h
88894 new file mode 100644
88895 index 000000000000..66297f398403
88896 --- /dev/null
88897 +++ b/fs/ntfs3/lib/decompress_common.h
88898 @@ -0,0 +1,352 @@
88899 +/* SPDX-License-Identifier: GPL-2.0-or-later */
88902 + * decompress_common.h - Code shared by the XPRESS and LZX decompressors
88903 + *
88904 + * Copyright (C) 2015 Eric Biggers
88905 + *
88906 + * This program is free software: you can redistribute it and/or modify it under
88907 + * the terms of the GNU General Public License as published by the Free Software
88908 + * Foundation, either version 2 of the License, or (at your option) any later
88909 + * version.
88910 + *
88911 + * This program is distributed in the hope that it will be useful, but WITHOUT
88912 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
88913 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
88914 + * details.
88915 + *
88916 + * You should have received a copy of the GNU General Public License along with
88917 + * this program.  If not, see <http://www.gnu.org/licenses/>.
88918 + */
88920 +#include <linux/string.h>
88921 +#include <linux/compiler.h>
88922 +#include <linux/types.h>
88923 +#include <linux/slab.h>
88924 +#include <asm/unaligned.h>
88927 +/* "Force inline" macro (not required, but helpful for performance)  */
88928 +#define forceinline __always_inline
88930 +/* Enable whole-word match copying on selected architectures  */
88931 +#if defined(__i386__) || defined(__x86_64__) || defined(__ARM_FEATURE_UNALIGNED)
88932 +#  define FAST_UNALIGNED_ACCESS
88933 +#endif
88935 +/* Size of a machine word  */
88936 +#define WORDBYTES (sizeof(size_t))
88938 +static forceinline void
88939 +copy_unaligned_word(const void *src, void *dst)
88941 +       put_unaligned(get_unaligned((const size_t *)src), (size_t *)dst);
88945 +/* Generate a "word" with platform-dependent size whose bytes all contain the
88946 + * value 'b'.
88947 + */
88948 +static forceinline size_t repeat_byte(u8 b)
88950 +       size_t v;
88952 +       v = b;
88953 +       v |= v << 8;
88954 +       v |= v << 16;
88955 +       v |= v << ((WORDBYTES == 8) ? 32 : 0);
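+       /* e.g. for b == 0xAB on a 64-bit machine, v == 0xABABABABABABABAB */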
88956 +       return v;
88959 +/* Structure that encapsulates a block of in-memory data being interpreted as a
88960 + * stream of bits, optionally with interwoven literal bytes.  Bits are assumed
88961 + * to be stored in little endian 16-bit coding units, with the bits ordered high
88962 + * to low.
88963 + */
88964 +struct input_bitstream {
88966 +       /* Bits that have been read from the input buffer.  The bits are
88967 +        * left-justified; the next bit is always bit 31.
88968 +        */
88969 +       u32 bitbuf;
88971 +       /* Number of bits currently held in @bitbuf.  */
88972 +       u32 bitsleft;
88974 +       /* Pointer to the next byte to be retrieved from the input buffer.  */
88975 +       const u8 *next;
88977 +       /* Pointer to just past the end of the input buffer.  */
88978 +       const u8 *end;
88981 +/* Initialize a bitstream to read from the specified input buffer.  */
88982 +static forceinline void init_input_bitstream(struct input_bitstream *is,
88983 +                                            const void *buffer, u32 size)
88985 +       is->bitbuf = 0;
88986 +       is->bitsleft = 0;
88987 +       is->next = buffer;
88988 +       is->end = is->next + size;
88991 +/* Ensure the bit buffer variable for the bitstream contains at least @num_bits
88992 + * bits.  Following this, bitstream_peek_bits() and/or bitstream_remove_bits()
88993 + * may be called on the bitstream to peek or remove up to @num_bits bits.  Note
88994 + * that @num_bits must be <= 16.
88995 + */
88996 +static forceinline void bitstream_ensure_bits(struct input_bitstream *is,
88997 +                                             u32 num_bits)
88999 +       if (is->bitsleft < num_bits) {
89000 +               if (is->end - is->next >= 2) {
89001 +                       is->bitbuf |= (u32)get_unaligned_le16(is->next)
89002 +                                       << (16 - is->bitsleft);
89003 +                       is->next += 2;
89004 +               }
89005 +               is->bitsleft += 16;
89006 +       }
89009 +/* Return the next @num_bits bits from the bitstream, without removing them.
89010 + * There must be at least @num_bits remaining in the buffer variable, from a
89011 + * previous call to bitstream_ensure_bits().
89012 + */
89013 +static forceinline u32
89014 +bitstream_peek_bits(const struct input_bitstream *is, const u32 num_bits)
89016 +       return (is->bitbuf >> 1) >> (sizeof(is->bitbuf) * 8 - num_bits - 1);
89019 +/* Remove @num_bits from the bitstream.  There must be at least @num_bits
89020 + * remaining in the buffer variable, from a previous call to
89021 + * bitstream_ensure_bits().
89022 + */
89023 +static forceinline void
89024 +bitstream_remove_bits(struct input_bitstream *is, u32 num_bits)
89026 +       is->bitbuf <<= num_bits;
89027 +       is->bitsleft -= num_bits;
89030 +/* Remove and return @num_bits bits from the bitstream.  There must be at least
89031 + * @num_bits remaining in the buffer variable, from a previous call to
89032 + * bitstream_ensure_bits().
89033 + */
89034 +static forceinline u32
89035 +bitstream_pop_bits(struct input_bitstream *is, u32 num_bits)
89037 +       u32 bits = bitstream_peek_bits(is, num_bits);
89039 +       bitstream_remove_bits(is, num_bits);
89040 +       return bits;
89043 +/* Read and return the next @num_bits bits from the bitstream.  */
89044 +static forceinline u32
89045 +bitstream_read_bits(struct input_bitstream *is, u32 num_bits)
89047 +       bitstream_ensure_bits(is, num_bits);
89048 +       return bitstream_pop_bits(is, num_bits);
89051 +/* Read and return the next literal byte embedded in the bitstream.  */
89052 +static forceinline u8
89053 +bitstream_read_byte(struct input_bitstream *is)
89055 +       if (unlikely(is->end == is->next))
89056 +               return 0;
89057 +       return *is->next++;
89060 +/* Read and return the next 16-bit integer embedded in the bitstream.  */
89061 +static forceinline u16
89062 +bitstream_read_u16(struct input_bitstream *is)
89064 +       u16 v;
89066 +       if (unlikely(is->end - is->next < 2))
89067 +               return 0;
89068 +       v = get_unaligned_le16(is->next);
89069 +       is->next += 2;
89070 +       return v;
89073 +/* Read and return the next 32-bit integer embedded in the bitstream.  */
89074 +static forceinline u32
89075 +bitstream_read_u32(struct input_bitstream *is)
89077 +       u32 v;
89079 +       if (unlikely(is->end - is->next < 4))
89080 +               return 0;
89081 +       v = get_unaligned_le32(is->next);
89082 +       is->next += 4;
89083 +       return v;
89086 +/* Read into @dst_buffer an array of literal bytes embedded in the bitstream.
89087 + * Return either a pointer to the byte past the last written, or NULL if the
89088 + * read overflows the input buffer.
89089 + */
89090 +static forceinline void *bitstream_read_bytes(struct input_bitstream *is,
89091 +                                             void *dst_buffer, size_t count)
89093 +       if ((size_t)(is->end - is->next) < count)
89094 +               return NULL;
89095 +       memcpy(dst_buffer, is->next, count);
89096 +       is->next += count;
89097 +       return (u8 *)dst_buffer + count;
89100 +/* Align the input bitstream on a coding-unit boundary.  */
89101 +static forceinline void bitstream_align(struct input_bitstream *is)
89103 +       is->bitsleft = 0;
89104 +       is->bitbuf = 0;
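+/*
+ * Minimal usage sketch (hypothetical input bytes, assuming only the
+ * helpers above):
+ *
+ *     const u8 buf[4] = { 0x34, 0x12, 0x78, 0x56 };
+ *     struct input_bitstream is;
+ *
+ *     init_input_bitstream(&is, buf, sizeof(buf));
+ *     bitstream_read_bits(&is, 4);    // 0x1: high bits of LE unit 0x1234
+ *     bitstream_read_bits(&is, 8);    // 0x23
+ *     bitstream_align(&is);           // discard the remaining 4 bits
+ */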
89107 +extern int make_huffman_decode_table(u16 decode_table[], const u32 num_syms,
89108 +                                    const u32 num_bits, const u8 lens[],
89109 +                                    const u32 max_codeword_len,
89110 +                                    u16 working_space[]);
89113 +/* Read and return the next Huffman-encoded symbol from a bitstream.  If the
89114 + * input data is exhausted, the Huffman symbol is decoded as if the missing bits
89115 + * are all zeroes.
89116 + */
89117 +static forceinline u32 read_huffsym(struct input_bitstream *istream,
89118 +                                        const u16 decode_table[],
89119 +                                        u32 table_bits,
89120 +                                        u32 max_codeword_len)
89122 +       u32 entry;
89123 +       u32 key_bits;
89125 +       bitstream_ensure_bits(istream, max_codeword_len);
89127 +       /* Index the decode table by the next table_bits bits of the input.  */
89128 +       key_bits = bitstream_peek_bits(istream, table_bits);
89129 +       entry = decode_table[key_bits];
89130 +       if (entry < 0xC000) {
89131 +               /* Fast case: The decode table directly provided the
89132 +                * symbol and codeword length.  The low 11 bits are the
89133 +                * symbol, and the high 5 bits are the codeword length.
89134 +                */
89135 +               bitstream_remove_bits(istream, entry >> 11);
89136 +               return entry & 0x7FF;
89137 +       }
89138 +       /* Slow case: The codeword for the symbol is longer than
89139 +        * table_bits, so the symbol does not have an entry
89140 +        * directly in the first (1 << table_bits) entries of the
89141 +        * decode table.  Traverse the appropriate binary tree
89142 +        * bit-by-bit to decode the symbol.
89143 +        */
89144 +       bitstream_remove_bits(istream, table_bits);
89145 +       do {
89146 +               key_bits = (entry & 0x3FFF) + bitstream_pop_bits(istream, 1);
89147 +       } while ((entry = decode_table[key_bits]) >= 0xC000);
89148 +       return entry;
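+/*
+ * Example (hypothetical entry values): peeking table_bits of input and
+ * finding entry 0x1001 takes the fast path: 0x1001 < 0xC000, so the
+ * codeword length is 0x1001 >> 11 = 2 and the symbol is 0x1001 & 0x7FF
+ * = 1.  An entry such as 0xC008 is a tree root instead: the loop walks
+ * from node index 0x0008, consuming one bit per level, until it reads
+ * an entry below 0xC000, which is the decoded symbol.
+ */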
89152 + * Copy an LZ77 match at (dst - offset) to dst.
89153 + *
89154 + * The length and offset must be already validated --- that is, (dst - offset)
89155 + * can't underrun the output buffer, and (dst + length) can't overrun the output
89156 + * buffer.  Also, the length cannot be 0.
89157 + *
89158 + * @bufend points to the byte past the end of the output buffer.  This function
89159 + * won't write any data beyond this position.
89160 + *
89161 + * Returns dst + length.
89162 + */
89163 +static forceinline u8 *lz_copy(u8 *dst, u32 length, u32 offset, const u8 *bufend,
89164 +                              u32 min_length)
89166 +       const u8 *src = dst - offset;
89168 +       /*
89169 +        * Try to copy one machine word at a time.  On i386 and x86_64 this is
89170 +        * faster than copying one byte at a time, unless the data is
89171 +        * near-random and all the matches have very short lengths.  Note that
89172 +        * since this requires unaligned memory accesses, it won't necessarily
89173 +        * be faster on every architecture.
89174 +        *
89175 +        * Also note that we might copy more than the length of the match.  For
89176 +        * example, if a word is 8 bytes and the match is of length 5, then
89177 +        * we'll simply copy 8 bytes.  This is okay as long as we don't write
89178 +        * beyond the end of the output buffer, hence the check for (bufend -
89179 +        * end >= WORDBYTES - 1).
89180 +        */
89181 +#ifdef FAST_UNALIGNED_ACCESS
89182 +       u8 * const end = dst + length;
89184 +       if (bufend - end >= (ptrdiff_t)(WORDBYTES - 1)) {
89186 +               if (offset >= WORDBYTES) {
89187 +                       /* The source and destination words don't overlap.  */
89189 +                       /* To improve branch prediction, one iteration of this
89190 +                        * loop is unrolled.  Most matches are short and will
89191 +                        * fail the first check.  But if that check passes, then
89192 +                        * it becomes increasing likely that the match is long
89193 +                        * it becomes increasingly likely that the match is long
89194 +                        */
89196 +                       copy_unaligned_word(src, dst);
89197 +                       src += WORDBYTES;
89198 +                       dst += WORDBYTES;
89200 +                       if (dst < end) {
89201 +                               do {
89202 +                                       copy_unaligned_word(src, dst);
89203 +                                       src += WORDBYTES;
89204 +                                       dst += WORDBYTES;
89205 +                               } while (dst < end);
89206 +                       }
89207 +                       return end;
89208 +               } else if (offset == 1) {
89210 +                       /* Offset 1 matches are equivalent to run-length
89211 +                        * encoding of the previous byte.  This case is common
89212 +                        * if the data contains many repeated bytes.
89213 +                        */
89214 +                       size_t v = repeat_byte(*(dst - 1));
89216 +                       do {
89217 +                               put_unaligned(v, (size_t *)dst);
89218 +                               src += WORDBYTES;
89219 +                               dst += WORDBYTES;
89220 +                       } while (dst < end);
89221 +                       return end;
89222 +               }
89223 +               /*
89224 +                * We don't bother with special cases for other 'offset <
89225 +                * WORDBYTES', which are usually rarer than 'offset == 1'.  Extra
89226 +                * checks will just slow things down.  Actually, it's possible
89227 +                * to handle all the 'offset < WORDBYTES' cases using the same
89228 +                * code, but it becomes more complicated and doesn't seem any
89229 +                * faster overall; it definitely slows down the more common
89230 +                * 'offset == 1' case.
89231 +                */
89232 +       }
89233 +#endif /* FAST_UNALIGNED_ACCESS */
89235 +       /* Fall back to a bytewise copy.  */
89237 +       if (min_length >= 2) {
89238 +               *dst++ = *src++;
89239 +               length--;
89240 +       }
89241 +       if (min_length >= 3) {
89242 +               *dst++ = *src++;
89243 +               length--;
89244 +       }
89245 +       do {
89246 +               *dst++ = *src++;
89247 +       } while (--length);
89249 +       return dst;
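+/*
+ * Illustration (hypothetical buffer contents): if the byte just before
+ * dst is 'a', lz_copy(dst, 5, 1, bufend, 2) writes "aaaaa" -- an
+ * offset-1 match is run-length encoding of the previous byte.  With
+ * offset >= length, e.g. lz_copy(dst, 3, 8, bufend, 2), the three bytes
+ * at dst - 8 are copied without any overlap.
+ */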
89251 diff --git a/fs/ntfs3/lib/lib.h b/fs/ntfs3/lib/lib.h
89252 new file mode 100644
89253 index 000000000000..f508fbad2e71
89254 --- /dev/null
89255 +++ b/fs/ntfs3/lib/lib.h
89256 @@ -0,0 +1,26 @@
89257 +/* SPDX-License-Identifier: GPL-2.0-or-later */
89259 + * Adapted for linux kernel by Alexander Mamaev:
89260 + * - remove implementations of get_unaligned_
89261 + * - assume GCC is always defined
89262 + * - ISO C90
89263 + * - linux kernel code style
89264 + */
89267 +/* globals from xpress_decompress.c */
89268 +struct xpress_decompressor *xpress_allocate_decompressor(void);
89269 +void xpress_free_decompressor(struct xpress_decompressor *d);
89270 +int xpress_decompress(struct xpress_decompressor *__restrict d,
89271 +                     const void *__restrict compressed_data,
89272 +                     size_t compressed_size,
89273 +                     void *__restrict uncompressed_data,
89274 +                     size_t uncompressed_size);
89276 +/* globals from lzx_decompress.c */
89277 +struct lzx_decompressor *lzx_allocate_decompressor(void);
89278 +void lzx_free_decompressor(struct lzx_decompressor *d);
89279 +int lzx_decompress(struct lzx_decompressor *__restrict d,
89280 +                  const void *__restrict compressed_data,
89281 +                  size_t compressed_size, void *__restrict uncompressed_data,
89282 +                  size_t uncompressed_size);
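For orientation, a caller might drive these entry points roughly as below. decompress_one() is a hypothetical wrapper, not part of the patch, and it assumes the caller already knows the uncompressed size (System Compression stores it out of band).

#include <linux/errno.h>
#include <linux/types.h>
#include "lib.h"

static int decompress_one(const void *cbuf, size_t csize,
			  void *ubuf, size_t usize)
{
	struct xpress_decompressor *d = xpress_allocate_decompressor();
	int err = 0;

	if (!d)
		return -ENOMEM;
	if (xpress_decompress(d, cbuf, csize, ubuf, usize))
		err = -EINVAL;	/* stream was invalid */
	xpress_free_decompressor(d);
	return err;
}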
89283 diff --git a/fs/ntfs3/lib/lzx_decompress.c b/fs/ntfs3/lib/lzx_decompress.c
89284 new file mode 100644
89285 index 000000000000..77a381a693d1
89286 --- /dev/null
89287 +++ b/fs/ntfs3/lib/lzx_decompress.c
89288 @@ -0,0 +1,683 @@
89289 +// SPDX-License-Identifier: GPL-2.0-or-later
89291 + * lzx_decompress.c - A decompressor for the LZX compression format, which can
89292 + * be used in "System Compressed" files.  This is based on the code from wimlib.
89293 + * This code only supports a window size (dictionary size) of 32768 bytes, since
89294 + * this is the only size used in System Compression.
89295 + *
89296 + * Copyright (C) 2015 Eric Biggers
89297 + *
89298 + * This program is free software: you can redistribute it and/or modify it under
89299 + * the terms of the GNU General Public License as published by the Free Software
89300 + * Foundation, either version 2 of the License, or (at your option) any later
89301 + * version.
89302 + *
89303 + * This program is distributed in the hope that it will be useful, but WITHOUT
89304 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
89305 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
89306 + * details.
89307 + *
89308 + * You should have received a copy of the GNU General Public License along with
89309 + * this program.  If not, see <http://www.gnu.org/licenses/>.
89310 + */
89312 +#include "decompress_common.h"
89313 +#include "lib.h"
89315 +/* Number of literal byte values  */
89316 +#define LZX_NUM_CHARS                  256
89318 +/* The smallest and largest allowed match lengths  */
89319 +#define LZX_MIN_MATCH_LEN              2
89320 +#define LZX_MAX_MATCH_LEN              257
89322 +/* Number of distinct match lengths that can be represented  */
89323 +#define LZX_NUM_LENS                   (LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1)
89325 +/* Number of match lengths for which no length symbol is required  */
89326 +#define LZX_NUM_PRIMARY_LENS           7
89327 +#define LZX_NUM_LEN_HEADERS            (LZX_NUM_PRIMARY_LENS + 1)
89329 +/* Valid values of the 3-bit block type field  */
89330 +#define LZX_BLOCKTYPE_VERBATIM         1
89331 +#define LZX_BLOCKTYPE_ALIGNED          2
89332 +#define LZX_BLOCKTYPE_UNCOMPRESSED     3
89334 +/* Number of offset slots for a window size of 32768  */
89335 +#define LZX_NUM_OFFSET_SLOTS           30
89337 +/* Number of symbols in the main code for a window size of 32768  */
89338 +#define LZX_MAINCODE_NUM_SYMBOLS       \
89339 +       (LZX_NUM_CHARS + (LZX_NUM_OFFSET_SLOTS * LZX_NUM_LEN_HEADERS))
89341 +/* Number of symbols in the length code  */
89342 +#define LZX_LENCODE_NUM_SYMBOLS                (LZX_NUM_LENS - LZX_NUM_PRIMARY_LENS)
89344 +/* Number of symbols in the precode  */
89345 +#define LZX_PRECODE_NUM_SYMBOLS                20
89347 +/* Number of bits in which each precode codeword length is represented  */
89348 +#define LZX_PRECODE_ELEMENT_SIZE       4
89350 +/* Number of low-order bits of each match offset that are entropy-encoded in
89351 + * aligned offset blocks
89352 + */
89353 +#define LZX_NUM_ALIGNED_OFFSET_BITS    3
89355 +/* Number of symbols in the aligned offset code  */
89356 +#define LZX_ALIGNEDCODE_NUM_SYMBOLS    (1 << LZX_NUM_ALIGNED_OFFSET_BITS)
89358 +/* Mask for the match offset bits that are entropy-encoded in aligned offset
89359 + * blocks
89360 + */
89361 +#define LZX_ALIGNED_OFFSET_BITMASK     ((1 << LZX_NUM_ALIGNED_OFFSET_BITS) - 1)
89363 +/* Number of bits in which each aligned offset codeword length is represented  */
89364 +#define LZX_ALIGNEDCODE_ELEMENT_SIZE   3
89366 +/* Maximum lengths (in bits) of the codewords in each Huffman code  */
89367 +#define LZX_MAX_MAIN_CODEWORD_LEN      16
89368 +#define LZX_MAX_LEN_CODEWORD_LEN       16
89369 +#define LZX_MAX_PRE_CODEWORD_LEN       ((1 << LZX_PRECODE_ELEMENT_SIZE) - 1)
89370 +#define LZX_MAX_ALIGNED_CODEWORD_LEN   ((1 << LZX_ALIGNEDCODE_ELEMENT_SIZE) - 1)
89372 +/* The default "filesize" value used in pre/post-processing.  In the LZX format
89373 + * used in cabinet files this value must be given to the decompressor, whereas
89374 + * in the LZX format used in WIM files and system-compressed files this value is
89375 + * fixed at 12000000.
89376 + */
89377 +#define LZX_DEFAULT_FILESIZE           12000000
89379 +/* Assumed block size when the encoded block size begins with a 1 bit.  */
89380 +#define LZX_DEFAULT_BLOCK_SIZE         32768
89382 +/* Number of offsets in the recent (or "repeat") offsets queue.  */
89383 +#define LZX_NUM_RECENT_OFFSETS         3
89385 +/* These values are chosen for fast decompression.  */
89386 +#define LZX_MAINCODE_TABLEBITS         11
89387 +#define LZX_LENCODE_TABLEBITS          10
89388 +#define LZX_PRECODE_TABLEBITS          6
89389 +#define LZX_ALIGNEDCODE_TABLEBITS      7
89391 +#define LZX_READ_LENS_MAX_OVERRUN      50
89393 +/* Mapping: offset slot => first match offset that uses that offset slot.
89394 + */
89395 +static const u32 lzx_offset_slot_base[LZX_NUM_OFFSET_SLOTS + 1] = {
89396 +       0,      1,      2,      3,      4,      /* 0  --- 4  */
89397 +       6,      8,      12,     16,     24,     /* 5  --- 9  */
89398 +       32,     48,     64,     96,     128,    /* 10 --- 14 */
89399 +       192,    256,    384,    512,    768,    /* 15 --- 19 */
89400 +       1024,   1536,   2048,   3072,   4096,   /* 20 --- 24 */
89401 +       6144,   8192,   12288,  16384,  24576,  /* 25 --- 29 */
89402 +       32768,                                  /* extra     */
89405 +/* Mapping: offset slot => how many extra bits must be read and added to the
89406 + * corresponding offset slot base to decode the match offset.
89407 + */
89408 +static const u8 lzx_extra_offset_bits[LZX_NUM_OFFSET_SLOTS] = {
89409 +       0,      0,      0,      0,      1,
89410 +       1,      2,      2,      3,      3,
89411 +       4,      4,      5,      5,      6,
89412 +       6,      7,      7,      8,      8,
89413 +       9,      9,      10,     10,     11,
89414 +       11,     12,     12,     13,     13,
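Together the two tables partition the offset range: slot s covers raw offsets base[s] .. base[s] + 2^extra_bits[s] - 1 (slot 10, for example, covers 32..47). A self-check of that invariant, written as a userspace sketch that assumes both arrays and LZX_NUM_OFFSET_SLOTS are in scope:

#include <assert.h>

static void check_slot_tables(void)
{
	int slot;

	for (slot = 0; slot < LZX_NUM_OFFSET_SLOTS; slot++)
		assert(lzx_offset_slot_base[slot] +
		       (1u << lzx_extra_offset_bits[slot]) ==
		       lzx_offset_slot_base[slot + 1]);
}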
89417 +/* Reusable heap-allocated memory for LZX decompression  */
89418 +struct lzx_decompressor {
89420 +       /* Huffman decoding tables, and arrays that map symbols to codeword
89421 +        * lengths
89422 +        */
89424 +       u16 maincode_decode_table[(1 << LZX_MAINCODE_TABLEBITS) +
89425 +                                       (LZX_MAINCODE_NUM_SYMBOLS * 2)];
89426 +       u8 maincode_lens[LZX_MAINCODE_NUM_SYMBOLS + LZX_READ_LENS_MAX_OVERRUN];
89429 +       u16 lencode_decode_table[(1 << LZX_LENCODE_TABLEBITS) +
89430 +                                       (LZX_LENCODE_NUM_SYMBOLS * 2)];
89431 +       u8 lencode_lens[LZX_LENCODE_NUM_SYMBOLS + LZX_READ_LENS_MAX_OVERRUN];
89434 +       u16 alignedcode_decode_table[(1 << LZX_ALIGNEDCODE_TABLEBITS) +
89435 +                                       (LZX_ALIGNEDCODE_NUM_SYMBOLS * 2)];
89436 +       u8 alignedcode_lens[LZX_ALIGNEDCODE_NUM_SYMBOLS];
89438 +       u16 precode_decode_table[(1 << LZX_PRECODE_TABLEBITS) +
89439 +                                (LZX_PRECODE_NUM_SYMBOLS * 2)];
89440 +       u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
89442 +       /* Temporary space for make_huffman_decode_table()  */
89443 +       u16 working_space[2 * (1 + LZX_MAX_MAIN_CODEWORD_LEN) +
89444 +                         LZX_MAINCODE_NUM_SYMBOLS];
89447 +static void undo_e8_translation(void *target, s32 input_pos)
89449 +       s32 abs_offset, rel_offset;
89451 +       abs_offset = get_unaligned_le32(target);
89452 +       if (abs_offset >= 0) {
89453 +               if (abs_offset < LZX_DEFAULT_FILESIZE) {
89454 +                       /* "good translation" */
89455 +                       rel_offset = abs_offset - input_pos;
89456 +                       put_unaligned_le32(rel_offset, target);
89457 +               }
89458 +       } else {
89459 +               if (abs_offset >= -input_pos) {
89460 +                       /* "compensating translation" */
89461 +                       rel_offset = abs_offset + LZX_DEFAULT_FILESIZE;
89462 +                       put_unaligned_le32(rel_offset, target);
89463 +               }
89464 +       }
89468 + * Undo the 'E8' preprocessing used in LZX.  Before compression, the
89469 + * uncompressed data was preprocessed by changing the targets of suspected x86
89470 + * CALL instructions from relative offsets to absolute offsets.  After
89471 + * match/literal decoding, the decompressor must undo the translation.
89472 + */
89473 +static void lzx_postprocess(u8 *data, u32 size)
89475 +       /*
89476 +        * A worthwhile optimization is to push the end-of-buffer check into the
89477 +        * relatively rare E8 case.  This is possible if we replace the last six
89478 +        * bytes of data with E8 bytes; then we are guaranteed to hit an E8 byte
89479 +        * before reaching end-of-buffer.  In addition, this scheme guarantees
89480 +        * that no translation can begin following an E8 byte in the last 10
89481 +        * bytes because a 4-byte offset containing E8 as its high byte is a
89482 +        * large negative number that is not valid for translation.  That is
89483 +        * exactly what we need.
89484 +        */
89485 +       u8 *tail;
89486 +       u8 saved_bytes[6];
89487 +       u8 *p;
89489 +       if (size <= 10)
89490 +               return;
89492 +       tail = &data[size - 6];
89493 +       memcpy(saved_bytes, tail, 6);
89494 +       memset(tail, 0xE8, 6);
89495 +       p = data;
89496 +       for (;;) {
89497 +               while (*p != 0xE8)
89498 +                       p++;
89499 +               if (p >= tail)
89500 +                       break;
89501 +               undo_e8_translation(p + 1, p - data);
89502 +               p += 5;
89503 +       }
89504 +       memcpy(tail, saved_bytes, 6);
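A small standalone round trip of the translation rules, assuming a little-endian host and using memcpy in place of the unaligned accessors:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LZX_DEFAULT_FILESIZE 12000000

int main(void)
{
	/* The compressor turned a CALL at position 100 with relative
	 * target +400 into the absolute target 500; undo it. */
	int32_t stored = 500, input_pos = 100, rel = 0;
	uint8_t buf[4];

	memcpy(buf, &stored, 4);
	memcpy(&stored, buf, 4);
	if (stored >= 0 && stored < LZX_DEFAULT_FILESIZE)
		rel = stored - input_pos;	/* "good translation" */
	printf("rel = %d\n", rel);		/* rel = 400 */
	return 0;
}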
89507 +/* Read a Huffman-encoded symbol using the precode.  */
89508 +static forceinline u32 read_presym(const struct lzx_decompressor *d,
89509 +                                       struct input_bitstream *is)
89511 +       return read_huffsym(is, d->precode_decode_table,
89512 +                           LZX_PRECODE_TABLEBITS, LZX_MAX_PRE_CODEWORD_LEN);
89515 +/* Read a Huffman-encoded symbol using the main code.  */
89516 +static forceinline u32 read_mainsym(const struct lzx_decompressor *d,
89517 +                                        struct input_bitstream *is)
89519 +       return read_huffsym(is, d->maincode_decode_table,
89520 +                           LZX_MAINCODE_TABLEBITS, LZX_MAX_MAIN_CODEWORD_LEN);
89523 +/* Read a Huffman-encoded symbol using the length code.  */
89524 +static forceinline u32 read_lensym(const struct lzx_decompressor *d,
89525 +                                       struct input_bitstream *is)
89527 +       return read_huffsym(is, d->lencode_decode_table,
89528 +                           LZX_LENCODE_TABLEBITS, LZX_MAX_LEN_CODEWORD_LEN);
89531 +/* Read a Huffman-encoded symbol using the aligned offset code.  */
89532 +static forceinline u32 read_alignedsym(const struct lzx_decompressor *d,
89533 +                                           struct input_bitstream *is)
89535 +       return read_huffsym(is, d->alignedcode_decode_table,
89536 +                           LZX_ALIGNEDCODE_TABLEBITS,
89537 +                           LZX_MAX_ALIGNED_CODEWORD_LEN);
89541 + * Read the precode from the compressed input bitstream, then use it to decode
89542 + * @num_lens codeword length values.
89543 + *
89544 + * @is:                The input bitstream.
89545 + *
89546 + * @lens:      An array that contains the length values from the previous time
89547 + *             the codeword lengths for this Huffman code were read, or all 0's
89548 + *             if this is the first time.  This array must have at least
89549 + *             (@num_lens + LZX_READ_LENS_MAX_OVERRUN) entries.
89550 + *
89551 + * @num_lens:  Number of length values to decode.
89552 + *
89553 + * Returns 0 on success, or -1 if the data was invalid.
89554 + */
89555 +static int lzx_read_codeword_lens(struct lzx_decompressor *d,
89556 +                                 struct input_bitstream *is,
89557 +                                 u8 *lens, u32 num_lens)
89559 +       u8 *len_ptr = lens;
89560 +       u8 *lens_end = lens + num_lens;
89561 +       int i;
89563 +       /* Read the lengths of the precode codewords.  These are given
89564 +        * explicitly.
89565 +        */
89566 +       for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++) {
89567 +               d->precode_lens[i] =
89568 +                       bitstream_read_bits(is, LZX_PRECODE_ELEMENT_SIZE);
89569 +       }
89571 +       /* Make the decoding table for the precode.  */
89572 +       if (make_huffman_decode_table(d->precode_decode_table,
89573 +                                     LZX_PRECODE_NUM_SYMBOLS,
89574 +                                     LZX_PRECODE_TABLEBITS,
89575 +                                     d->precode_lens,
89576 +                                     LZX_MAX_PRE_CODEWORD_LEN,
89577 +                                     d->working_space))
89578 +               return -1;
89580 +       /* Decode the codeword lengths.  */
89581 +       do {
89582 +               u32 presym;
89583 +               u8 len;
89585 +               /* Read the next precode symbol.  */
89586 +               presym = read_presym(d, is);
89587 +               if (presym < 17) {
89588 +                       /* Difference from old length  */
89589 +                       len = *len_ptr - presym;
89590 +                       if ((s8)len < 0)
89591 +                               len += 17;
89592 +                       *len_ptr++ = len;
89593 +               } else {
89594 +                       /* Special RLE values  */
89596 +                       u32 run_len;
89598 +                       if (presym == 17) {
89599 +                               /* Run of 0's  */
89600 +                               run_len = 4 + bitstream_read_bits(is, 4);
89601 +                               len = 0;
89602 +                       } else if (presym == 18) {
89603 +                               /* Longer run of 0's  */
89604 +                               run_len = 20 + bitstream_read_bits(is, 5);
89605 +                               len = 0;
89606 +                       } else {
89607 +                               /* Run of identical lengths  */
89608 +                               run_len = 4 + bitstream_read_bits(is, 1);
89609 +                               presym = read_presym(d, is);
89610 +                               if (presym > 17)
89611 +                                       return -1;
89612 +                               len = *len_ptr - presym;
89613 +                               if ((s8)len < 0)
89614 +                                       len += 17;
89615 +                       }
89617 +                       do {
89618 +                               *len_ptr++ = len;
89619 +                       } while (--run_len);
89620 +                       /* Worst case overrun is when presym == 18,
89621 +                        * run_len == 20 + 31, and only 1 length was remaining.
89622 +                        * So LZX_READ_LENS_MAX_OVERRUN == 50.
89623 +                        *
89624 +                        * Overrun while reading the first half of maincode_lens
89625 +                        * can corrupt the previous values in the second half.
89626 +                        * This doesn't really matter because the resulting
89627 +                        * lengths will still be in range, and data that
89628 +                        * generates overruns is invalid anyway.
89629 +                        */
89630 +               }
89631 +       } while (len_ptr < lens_end);
89633 +       return 0;
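The presym < 17 branch updates a length as a difference modulo 17; isolated into a standalone helper, the rule looks like this (illustration only):

#include <stdint.h>

static uint8_t update_len(uint8_t old_len, uint32_t presym)
{
	uint8_t len = old_len - presym;

	if ((int8_t)len < 0)
		len += 17;
	return len;	/* e.g. old_len 5, presym 7 -> 15, i.e. (5 - 7) mod 17 */
}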
89637 + * Read the header of an LZX block and save the block type and (uncompressed)
89638 + * size in *block_type_ret and *block_size_ret, respectively.
89639 + *
89640 + * If the block is compressed, also update the Huffman decode @tables with the
89641 + * new Huffman codes.  If the block is uncompressed, also update the match
89642 + * offset @queue with the new match offsets.
89643 + *
89644 + * Return 0 on success, or -1 if the data was invalid.
89645 + */
89646 +static int lzx_read_block_header(struct lzx_decompressor *d,
89647 +                                struct input_bitstream *is,
89648 +                                int *block_type_ret,
89649 +                                u32 *block_size_ret,
89650 +                                u32 recent_offsets[])
89652 +       int block_type;
89653 +       u32 block_size;
89654 +       int i;
89656 +       bitstream_ensure_bits(is, 4);
89658 +       /* The first three bits tell us what kind of block it is, and should be
89659 +        * one of the LZX_BLOCKTYPE_* values.
89660 +        */
89661 +       block_type = bitstream_pop_bits(is, 3);
89663 +       /* Read the block size.  */
89664 +       if (bitstream_pop_bits(is, 1)) {
89665 +               block_size = LZX_DEFAULT_BLOCK_SIZE;
89666 +       } else {
89667 +               block_size = 0;
89668 +               block_size |= bitstream_read_bits(is, 8);
89669 +               block_size <<= 8;
89670 +               block_size |= bitstream_read_bits(is, 8);
89671 +       }
89673 +       switch (block_type) {
89675 +       case LZX_BLOCKTYPE_ALIGNED:
89677 +               /* Read the aligned offset code and prepare its decode table.
89678 +                */
89680 +               for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
89681 +                       d->alignedcode_lens[i] =
89682 +                               bitstream_read_bits(is,
89683 +                                                   LZX_ALIGNEDCODE_ELEMENT_SIZE);
89684 +               }
89686 +               if (make_huffman_decode_table(d->alignedcode_decode_table,
89687 +                                             LZX_ALIGNEDCODE_NUM_SYMBOLS,
89688 +                                             LZX_ALIGNEDCODE_TABLEBITS,
89689 +                                             d->alignedcode_lens,
89690 +                                             LZX_MAX_ALIGNED_CODEWORD_LEN,
89691 +                                             d->working_space))
89692 +                       return -1;
89694 +               /* Fall through, since the rest of the header for aligned offset
89695 +                * blocks is the same as that for verbatim blocks.
89696 +                */
89697 +               fallthrough;
89699 +       case LZX_BLOCKTYPE_VERBATIM:
89701 +               /* Read the main code and prepare its decode table.
89702 +                *
89703 +                * Note that the codeword lengths in the main code are encoded
89704 +                * in two parts: one part for literal symbols, and one part for
89705 +                * match symbols.
89706 +                */
89708 +               if (lzx_read_codeword_lens(d, is, d->maincode_lens,
89709 +                                          LZX_NUM_CHARS))
89710 +                       return -1;
89712 +               if (lzx_read_codeword_lens(d, is,
89713 +                                          d->maincode_lens + LZX_NUM_CHARS,
89714 +                                          LZX_MAINCODE_NUM_SYMBOLS - LZX_NUM_CHARS))
89715 +                       return -1;
89717 +               if (make_huffman_decode_table(d->maincode_decode_table,
89718 +                                             LZX_MAINCODE_NUM_SYMBOLS,
89719 +                                             LZX_MAINCODE_TABLEBITS,
89720 +                                             d->maincode_lens,
89721 +                                             LZX_MAX_MAIN_CODEWORD_LEN,
89722 +                                             d->working_space))
89723 +                       return -1;
89725 +               /* Read the length code and prepare its decode table.  */
89727 +               if (lzx_read_codeword_lens(d, is, d->lencode_lens,
89728 +                                          LZX_LENCODE_NUM_SYMBOLS))
89729 +                       return -1;
89731 +               if (make_huffman_decode_table(d->lencode_decode_table,
89732 +                                             LZX_LENCODE_NUM_SYMBOLS,
89733 +                                             LZX_LENCODE_TABLEBITS,
89734 +                                             d->lencode_lens,
89735 +                                             LZX_MAX_LEN_CODEWORD_LEN,
89736 +                                             d->working_space))
89737 +                       return -1;
89739 +               break;
89741 +       case LZX_BLOCKTYPE_UNCOMPRESSED:
89743 +               /* Before reading the three recent offsets from the uncompressed
89744 +                * block header, the stream must be aligned on a 16-bit
89745 +                * boundary.  But if the stream is *already* aligned, then the
89746 +                * next 16 bits must be discarded.
89747 +                */
89748 +               bitstream_ensure_bits(is, 1);
89749 +               bitstream_align(is);
89751 +               recent_offsets[0] = bitstream_read_u32(is);
89752 +               recent_offsets[1] = bitstream_read_u32(is);
89753 +               recent_offsets[2] = bitstream_read_u32(is);
89755 +               /* Offsets of 0 are invalid.  */
89756 +               if (recent_offsets[0] == 0 || recent_offsets[1] == 0 ||
89757 +                   recent_offsets[2] == 0)
89758 +                       return -1;
89759 +               break;
89761 +       default:
89762 +               /* Unrecognized block type.  */
89763 +               return -1;
89764 +       }
89766 +       *block_type_ret = block_type;
89767 +       *block_size_ret = block_size;
89768 +       return 0;
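The size field read above is either a single set flag bit (default 32768-byte block) or two 8-bit reads forming a 16-bit size, high byte first; as a tiny hypothetical helper for illustration:

#include <stdint.h>

static uint32_t decode_block_size(int default_flag, uint8_t hi, uint8_t lo)
{
	return default_flag ? 32768 : ((uint32_t)hi << 8) | lo;
}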
89771 +/* Decompress a block of LZX-compressed data.  */
89772 +static int lzx_decompress_block(const struct lzx_decompressor *d,
89773 +                               struct input_bitstream *is,
89774 +                               int block_type, u32 block_size,
89775 +                               u8 * const out_begin, u8 *out_next,
89776 +                               u32 recent_offsets[])
89778 +       u8 * const block_end = out_next + block_size;
89779 +       u32 ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
89781 +       do {
89782 +               u32 mainsym;
89783 +               u32 match_len;
89784 +               u32 match_offset;
89785 +               u32 offset_slot;
89786 +               u32 num_extra_bits;
89788 +               mainsym = read_mainsym(d, is);
89789 +               if (mainsym < LZX_NUM_CHARS) {
89790 +                       /* Literal  */
89791 +                       *out_next++ = mainsym;
89792 +                       continue;
89793 +               }
89795 +               /* Match  */
89797 +               /* Decode the length header and offset slot.  */
89798 +               mainsym -= LZX_NUM_CHARS;
89799 +               match_len = mainsym % LZX_NUM_LEN_HEADERS;
89800 +               offset_slot = mainsym / LZX_NUM_LEN_HEADERS;
89802 +               /* If needed, read a length symbol to decode the full length. */
89803 +               if (match_len == LZX_NUM_PRIMARY_LENS)
89804 +                       match_len += read_lensym(d, is);
89805 +               match_len += LZX_MIN_MATCH_LEN;
89807 +               if (offset_slot < LZX_NUM_RECENT_OFFSETS) {
89808 +                       /* Repeat offset  */
89810 +                       /* Note: This isn't a real LRU queue, since using the R2
89811 +                        * offset doesn't bump the R1 offset down to R2.  This
89812 +                        * quirk allows all 3 recent offsets to be handled by
89813 +                        * the same code.  (For R0, the swap is a no-op.)
89814 +                        */
89815 +                       match_offset = recent_offsets[offset_slot];
89816 +                       recent_offsets[offset_slot] = recent_offsets[0];
89817 +                       recent_offsets[0] = match_offset;
89818 +               } else {
89819 +                       /* Explicit offset  */
89821 +                       /* Look up the number of extra bits that need to be read
89822 +                        * to decode offsets with this offset slot.
89823 +                        */
89824 +                       num_extra_bits = lzx_extra_offset_bits[offset_slot];
89826 +                       /* Start with the offset slot base value.  */
89827 +                       match_offset = lzx_offset_slot_base[offset_slot];
89829 +                       /* In aligned offset blocks, the low-order 3 bits of
89830 +                        * each offset are encoded using the aligned offset
89831 +                        * code.  Otherwise, all the extra bits are literal.
89832 +                        */
89834 +                       if ((num_extra_bits & ones_if_aligned) >= LZX_NUM_ALIGNED_OFFSET_BITS) {
89835 +                               match_offset +=
89836 +                                       bitstream_read_bits(is, num_extra_bits -
89837 +                                                               LZX_NUM_ALIGNED_OFFSET_BITS)
89838 +                                                       << LZX_NUM_ALIGNED_OFFSET_BITS;
89839 +                               match_offset += read_alignedsym(d, is);
89840 +                       } else {
89841 +                               match_offset += bitstream_read_bits(is, num_extra_bits);
89842 +                       }
89844 +                       /* Adjust the offset.  */
89845 +                       match_offset -= (LZX_NUM_RECENT_OFFSETS - 1);
89847 +                       /* Update the recent offsets.  */
89848 +                       recent_offsets[2] = recent_offsets[1];
89849 +                       recent_offsets[1] = recent_offsets[0];
89850 +                       recent_offsets[0] = match_offset;
89851 +               }
89853 +               /* Validate the match, then copy it to the current position.  */
89855 +               if (match_len > (size_t)(block_end - out_next))
89856 +                       return -1;
89858 +               if (match_offset > (size_t)(out_next - out_begin))
89859 +                       return -1;
89861 +               out_next = lz_copy(out_next, match_len, match_offset,
89862 +                                  block_end, LZX_MIN_MATCH_LEN);
89864 +       } while (out_next != block_end);
89866 +       return 0;
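The repeat-offset handling above is a swap, not a rotation. Extracted into a standalone sketch, the behavior is:

#include <assert.h>
#include <stdint.h>

static uint32_t use_repeat_offset(uint32_t recent[3], unsigned int slot)
{
	uint32_t off = recent[slot];

	recent[slot] = recent[0];
	recent[0] = off;
	return off;
}

int main(void)
{
	uint32_t recent[3] = { 10, 20, 30 };

	/* Using R2 swaps it with R0: {10,20,30} -> {30,20,10}.
	 * R1 is not bumped down, matching the quirk noted above. */
	assert(use_repeat_offset(recent, 2) == 30);
	assert(recent[0] == 30 && recent[1] == 20 && recent[2] == 10);
	return 0;
}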
89870 + * lzx_allocate_decompressor - Allocate an LZX decompressor
89871 + *
89872 + * Return the pointer to the decompressor on success, or return NULL and set
89873 + * errno on failure.
89874 + */
89875 +struct lzx_decompressor *lzx_allocate_decompressor(void)
89877 +       return kmalloc(sizeof(struct lzx_decompressor), GFP_NOFS);
89881 + * lzx_decompress - Decompress a buffer of LZX-compressed data
89882 + *
89883 + * @decompressor:      A decompressor allocated with lzx_allocate_decompressor()
89884 + * @compressed_data:   The buffer of data to decompress
89885 + * @compressed_size:   Number of bytes of compressed data
89886 + * @uncompressed_data: The buffer in which to store the decompressed data
89887 + * @uncompressed_size: The number of bytes the data decompresses into
89888 + *
89889 + * Return 0 on success, or return -1 and set errno on failure.
89890 + */
89891 +int lzx_decompress(struct lzx_decompressor *decompressor,
89892 +                  const void *compressed_data, size_t compressed_size,
89893 +                  void *uncompressed_data, size_t uncompressed_size)
89895 +       struct lzx_decompressor *d = decompressor;
89896 +       u8 * const out_begin = uncompressed_data;
89897 +       u8 *out_next = out_begin;
89898 +       u8 * const out_end = out_begin + uncompressed_size;
89899 +       struct input_bitstream is;
89900 +       u32 recent_offsets[LZX_NUM_RECENT_OFFSETS] = {1, 1, 1};
89901 +       int e8_status = 0;
89903 +       init_input_bitstream(&is, compressed_data, compressed_size);
89905 +       /* Codeword lengths begin as all 0's for delta encoding purposes.  */
89906 +       memset(d->maincode_lens, 0, LZX_MAINCODE_NUM_SYMBOLS);
89907 +       memset(d->lencode_lens, 0, LZX_LENCODE_NUM_SYMBOLS);
89909 +       /* Decompress blocks until we have all the uncompressed data.  */
89911 +       while (out_next != out_end) {
89912 +               int block_type;
89913 +               u32 block_size;
89915 +               if (lzx_read_block_header(d, &is, &block_type, &block_size,
89916 +                                         recent_offsets))
89917 +                       goto invalid;
89919 +               if (block_size < 1 || block_size > (size_t)(out_end - out_next))
89920 +                       goto invalid;
89922 +               if (block_type != LZX_BLOCKTYPE_UNCOMPRESSED) {
89924 +                       /* Compressed block  */
89926 +                       if (lzx_decompress_block(d,
89927 +                                                &is,
89928 +                                                block_type,
89929 +                                                block_size,
89930 +                                                out_begin,
89931 +                                                out_next,
89932 +                                                recent_offsets))
89933 +                               goto invalid;
89935 +                       e8_status |= d->maincode_lens[0xe8];
89936 +                       out_next += block_size;
89937 +               } else {
89938 +                       /* Uncompressed block  */
89940 +                       out_next = bitstream_read_bytes(&is, out_next,
89941 +                                                       block_size);
89942 +                       if (!out_next)
89943 +                               goto invalid;
89945 +                       if (block_size & 1)
89946 +                               bitstream_read_byte(&is);
89948 +                       e8_status = 1;
89949 +               }
89950 +       }
89952 +       /* Postprocess the data unless it cannot possibly contain 0xe8 bytes. */
89953 +       if (e8_status)
89954 +               lzx_postprocess(uncompressed_data, uncompressed_size);
89956 +       return 0;
89958 +invalid:
89959 +       return -1;
89963 + * lzx_free_decompressor - Free an LZX decompressor
89964 + *
89965 + * @decompressor:       A decompressor that was allocated with
89966 + *                     lzx_allocate_decompressor(), or NULL.
89967 + */
89968 +void lzx_free_decompressor(struct lzx_decompressor *decompressor)
89970 +       kfree(decompressor);
89972 diff --git a/fs/ntfs3/lib/xpress_decompress.c b/fs/ntfs3/lib/xpress_decompress.c
89973 new file mode 100644
89974 index 000000000000..3d98f36a981e
89975 --- /dev/null
89976 +++ b/fs/ntfs3/lib/xpress_decompress.c
89977 @@ -0,0 +1,155 @@
89978 +// SPDX-License-Identifier: GPL-2.0-or-later
89980 + * xpress_decompress.c - A decompressor for the XPRESS compression format
89981 + * (Huffman variant), which can be used in "System Compressed" files.  This is
89982 + * based on the code from wimlib.
89983 + *
89984 + * Copyright (C) 2015 Eric Biggers
89985 + *
89986 + * This program is free software: you can redistribute it and/or modify it under
89987 + * the terms of the GNU General Public License as published by the Free Software
89988 + * Foundation, either version 2 of the License, or (at your option) any later
89989 + * version.
89990 + *
89991 + * This program is distributed in the hope that it will be useful, but WITHOUT
89992 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
89993 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
89994 + * details.
89995 + *
89996 + * You should have received a copy of the GNU General Public License along with
89997 + * this program.  If not, see <http://www.gnu.org/licenses/>.
89998 + */
90000 +#include "decompress_common.h"
90001 +#include "lib.h"
90003 +#define XPRESS_NUM_SYMBOLS     512
90004 +#define XPRESS_MAX_CODEWORD_LEN        15
90005 +#define XPRESS_MIN_MATCH_LEN   3
90007 +/* This value is chosen for fast decompression.  */
90008 +#define XPRESS_TABLEBITS 12
90010 +/* Reusable heap-allocated memory for XPRESS decompression  */
90011 +struct xpress_decompressor {
90013 +       /* The Huffman decoding table  */
90014 +       u16 decode_table[(1 << XPRESS_TABLEBITS) + 2 * XPRESS_NUM_SYMBOLS];
90016 +       /* An array that maps symbols to codeword lengths  */
90017 +       u8 lens[XPRESS_NUM_SYMBOLS];
90019 +       /* Temporary space for make_huffman_decode_table()  */
90020 +       u16 working_space[2 * (1 + XPRESS_MAX_CODEWORD_LEN) +
90021 +                         XPRESS_NUM_SYMBOLS];
90025 + * xpress_allocate_decompressor - Allocate an XPRESS decompressor
90026 + *
90027 + * Return the pointer to the decompressor on success, or return NULL and set
90028 + * errno on failure.
90029 + */
90030 +struct xpress_decompressor *xpress_allocate_decompressor(void)
90032 +       return kmalloc(sizeof(struct xpress_decompressor), GFP_NOFS);
90036 + * xpress_decompress - Decompress a buffer of XPRESS-compressed data
90037 + *
90038 + * @decompressor:       A decompressor that was allocated with
90039 + *                     xpress_allocate_decompressor()
90040 + * @compressed_data:   The buffer of data to decompress
90041 + * @compressed_size:   Number of bytes of compressed data
90042 + * @uncompressed_data: The buffer in which to store the decompressed data
90043 + * @uncompressed_size: The number of bytes the data decompresses into
90044 + *
90045 + * Return 0 on success, or return -1 and set errno on failure.
90046 + */
90047 +int xpress_decompress(struct xpress_decompressor *decompressor,
90048 +                     const void *compressed_data, size_t compressed_size,
90049 +                     void *uncompressed_data, size_t uncompressed_size)
90051 +       struct xpress_decompressor *d = decompressor;
90052 +       const u8 * const in_begin = compressed_data;
90053 +       u8 * const out_begin = uncompressed_data;
90054 +       u8 *out_next = out_begin;
90055 +       u8 * const out_end = out_begin + uncompressed_size;
90056 +       struct input_bitstream is;
90057 +       u32 i;
90059 +       /* Read the Huffman codeword lengths.  */
90060 +       if (compressed_size < XPRESS_NUM_SYMBOLS / 2)
90061 +               goto invalid;
90062 +       for (i = 0; i < XPRESS_NUM_SYMBOLS / 2; i++) {
90063 +               d->lens[i*2 + 0] = in_begin[i] & 0xF;
90064 +               d->lens[i*2 + 1] = in_begin[i] >> 4;
90065 +       }
90067 +       /* Build a decoding table for the Huffman code.  */
90068 +       if (make_huffman_decode_table(d->decode_table, XPRESS_NUM_SYMBOLS,
90069 +                                     XPRESS_TABLEBITS, d->lens,
90070 +                                     XPRESS_MAX_CODEWORD_LEN,
90071 +                                     d->working_space))
90072 +               goto invalid;
90074 +       /* Decode the matches and literals.  */
90076 +       init_input_bitstream(&is, in_begin + XPRESS_NUM_SYMBOLS / 2,
90077 +                            compressed_size - XPRESS_NUM_SYMBOLS / 2);
90079 +       while (out_next != out_end) {
90080 +               u32 sym;
90081 +               u32 log2_offset;
90082 +               u32 length;
90083 +               u32 offset;
90085 +               sym = read_huffsym(&is, d->decode_table,
90086 +                                  XPRESS_TABLEBITS, XPRESS_MAX_CODEWORD_LEN);
90087 +               if (sym < 256) {
90088 +                       /* Literal  */
90089 +                       *out_next++ = sym;
90090 +               } else {
90091 +                       /* Match  */
90092 +                       length = sym & 0xf;
90093 +                       log2_offset = (sym >> 4) & 0xf;
90095 +                       bitstream_ensure_bits(&is, 16);
90097 +                       offset = ((u32)1 << log2_offset) |
90098 +                                bitstream_pop_bits(&is, log2_offset);
90100 +                       if (length == 0xf) {
90101 +                               length += bitstream_read_byte(&is);
90102 +                               if (length == 0xf + 0xff)
90103 +                                       length = bitstream_read_u16(&is);
90104 +                       }
90105 +                       length += XPRESS_MIN_MATCH_LEN;
90107 +                       if (offset > (size_t)(out_next - out_begin))
90108 +                               goto invalid;
90110 +                       if (length > (size_t)(out_end - out_next))
90111 +                               goto invalid;
90113 +                       out_next = lz_copy(out_next, length, offset, out_end,
90114 +                                          XPRESS_MIN_MATCH_LEN);
90115 +               }
90116 +       }
90117 +       return 0;
90119 +invalid:
90120 +       return -1;
90124 + * xpress_free_decompressor - Free an XPRESS decompressor
90125 + *
90126 + * @decompressor:       A decompressor that was allocated with
90127 + *                     xpress_allocate_decompressor(), or NULL.
90128 + */
90129 +void xpress_free_decompressor(struct xpress_decompressor *decompressor)
90131 +       kfree(decompressor);
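For reference, the match-symbol layout decoded above, worked through for one value (illustration): the low nibble is a length header and the next nibble is log2 of the offset.

#include <assert.h>
#include <stdint.h>

#define XPRESS_MIN_MATCH_LEN 3

int main(void)
{
	uint32_t sym = 0x152;			 /* some match symbol >= 256 */
	uint32_t len_hdr = sym & 0xf;		 /* 2 */
	uint32_t log2_offset = (sym >> 4) & 0xf; /* 5 */

	/* Offset lies in [32, 63]; 5 extra bits pick the exact value.
	 * A length header below 0xf needs no extension bytes. */
	assert(len_hdr + XPRESS_MIN_MATCH_LEN == 5);
	assert((1u << log2_offset) == 32);
	return 0;
}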
90133 diff --git a/fs/ntfs3/lznt.c b/fs/ntfs3/lznt.c
90134 new file mode 100644
90135 index 000000000000..ead9ab7d69b3
90136 --- /dev/null
90137 +++ b/fs/ntfs3/lznt.c
90138 @@ -0,0 +1,452 @@
90139 +// SPDX-License-Identifier: GPL-2.0
90141 + *
90142 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
90143 + *
90144 + */
90145 +#include <linux/blkdev.h>
90146 +#include <linux/buffer_head.h>
90147 +#include <linux/fs.h>
90148 +#include <linux/nls.h>
90150 +#include "debug.h"
90151 +#include "ntfs.h"
90152 +#include "ntfs_fs.h"
90154 +// clang-format off
90155 +/* src buffer is zero */
90156 +#define LZNT_ERROR_ALL_ZEROS   1
90157 +#define LZNT_CHUNK_SIZE                0x1000
90158 +// clang-format on
90160 +struct lznt_hash {
90161 +       const u8 *p1;
90162 +       const u8 *p2;
90165 +struct lznt {
90166 +       const u8 *unc;
90167 +       const u8 *unc_end;
90168 +       const u8 *best_match;
90169 +       size_t max_len;
90170 +       bool std;
90172 +       struct lznt_hash hash[LZNT_CHUNK_SIZE];
90175 +static inline size_t get_match_len(const u8 *ptr, const u8 *end, const u8 *prev,
90176 +                                  size_t max_len)
90178 +       size_t len = 0;
90180 +       while (ptr + len < end && ptr[len] == prev[len] && ++len < max_len)
90181 +               ;
90182 +       return len;
90185 +static size_t longest_match_std(const u8 *src, struct lznt *ctx)
90187 +       size_t hash_index;
90188 +       size_t len1 = 0, len2 = 0;
90189 +       const u8 **hash;
90191 +       hash_index =
90192 +               ((40543U * ((((src[0] << 4) ^ src[1]) << 4) ^ src[2])) >> 4) &
90193 +               (LZNT_CHUNK_SIZE - 1);
90195 +       hash = &(ctx->hash[hash_index].p1);
90197 +       if (hash[0] >= ctx->unc && hash[0] < src && hash[0][0] == src[0] &&
90198 +           hash[0][1] == src[1] && hash[0][2] == src[2]) {
90199 +               len1 = 3;
90200 +               if (ctx->max_len > 3)
90201 +                       len1 += get_match_len(src + 3, ctx->unc_end,
90202 +                                             hash[0] + 3, ctx->max_len - 3);
90203 +       }
90205 +       if (hash[1] >= ctx->unc && hash[1] < src && hash[1][0] == src[0] &&
90206 +           hash[1][1] == src[1] && hash[1][2] == src[2]) {
90207 +               len2 = 3;
90208 +               if (ctx->max_len > 3)
90209 +                       len2 += get_match_len(src + 3, ctx->unc_end,
90210 +                                             hash[1] + 3, ctx->max_len - 3);
90211 +       }
90213 +       /* Compare two matches and select the best one */
90214 +       if (len1 < len2) {
90215 +               ctx->best_match = hash[1];
90216 +               len1 = len2;
90217 +       } else {
90218 +               ctx->best_match = hash[0];
90219 +       }
90221 +       hash[1] = hash[0];
90222 +       hash[0] = src;
90223 +       return len1;
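The hash above, pulled out as a helper for readability (same arithmetic; the multiplier 40543 comes straight from the source):

#include <stddef.h>
#include <stdint.h>

#define LZNT_CHUNK_SIZE 0x1000

static inline size_t lznt_hash3(const uint8_t *src)
{
	/* Mix three bytes, then fold into the 4096-entry table. */
	return ((40543U * ((((src[0] << 4) ^ src[1]) << 4) ^ src[2])) >> 4) &
	       (LZNT_CHUNK_SIZE - 1);
}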
90226 +static size_t longest_match_best(const u8 *src, struct lznt *ctx)
90228 +       size_t max_len;
90229 +       const u8 *ptr;
90231 +       if (ctx->unc >= src || !ctx->max_len)
90232 +               return 0;
90234 +       max_len = 0;
90235 +       for (ptr = ctx->unc; ptr < src; ++ptr) {
90236 +               size_t len =
90237 +                       get_match_len(src, ctx->unc_end, ptr, ctx->max_len);
90238 +               if (len >= max_len) {
90239 +                       max_len = len;
90240 +                       ctx->best_match = ptr;
90241 +               }
90242 +       }
90244 +       return max_len >= 3 ? max_len : 0;
90247 +static const size_t s_max_len[] = {
90248 +       0x1002, 0x802, 0x402, 0x202, 0x102, 0x82, 0x42, 0x22, 0x12,
90251 +static const size_t s_max_off[] = {
90252 +       0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
90255 +static inline u16 make_pair(size_t offset, size_t len, size_t index)
90257 +       return ((offset - 1) << (12 - index)) |
90258 +              ((len - 3) & (((1 << (12 - index)) - 1)));
90261 +static inline size_t parse_pair(u16 pair, size_t *offset, size_t index)
90263 +       *offset = 1 + (pair >> (12 - index));
90264 +       return 3 + (pair & ((1 << (12 - index)) - 1));
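A userspace round trip of the pair encoding above, at index 0 (4 offset bits : 12 length bits):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t make_pair(size_t offset, size_t len, size_t index)
{
	return ((offset - 1) << (12 - index)) |
	       ((len - 3) & ((1 << (12 - index)) - 1));
}

static size_t parse_pair(uint16_t pair, size_t *offset, size_t index)
{
	*offset = 1 + (pair >> (12 - index));
	return 3 + (pair & ((1 << (12 - index)) - 1));
}

int main(void)
{
	size_t off;
	uint16_t pair = make_pair(5, 10, 0);	/* 0x4007 */
	size_t len = parse_pair(pair, &off, 0);

	assert(off == 5 && len == 10);
	return 0;
}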
90268 + * compress_chunk
90269 + *
90270 + * returns one of three values:
90271 + * 0 - ok, 'cmpr' contains 'cmpr_chunk_size' bytes of compressed data
90272 + * 1 - input buffer is all zeros
90273 + * -2 - the compressed buffer is too small to hold the compressed data
90274 + */
90275 +static inline int compress_chunk(size_t (*match)(const u8 *, struct lznt *),
90276 +                                const u8 *unc, const u8 *unc_end, u8 *cmpr,
90277 +                                u8 *cmpr_end, size_t *cmpr_chunk_size,
90278 +                                struct lznt *ctx)
90280 +       size_t cnt = 0;
90281 +       size_t idx = 0;
90282 +       const u8 *up = unc;
90283 +       u8 *cp = cmpr + 3;
90284 +       u8 *cp2 = cmpr + 2;
90285 +       u8 not_zero = 0;
90286 +       /* Control byte of 8-bit values: ( 0 - means byte as is, 1 - short pair ) */
90287 +       u8 ohdr = 0;
90288 +       u8 *last;
90289 +       u16 t16;
90291 +       if (unc + LZNT_CHUNK_SIZE < unc_end)
90292 +               unc_end = unc + LZNT_CHUNK_SIZE;
90294 +       last = min(cmpr + LZNT_CHUNK_SIZE + sizeof(short), cmpr_end);
90296 +       ctx->unc = unc;
90297 +       ctx->unc_end = unc_end;
90298 +       ctx->max_len = s_max_len[0];
90300 +       while (up < unc_end) {
90301 +               size_t max_len;
90303 +               while (unc + s_max_off[idx] < up)
90304 +                       ctx->max_len = s_max_len[++idx];
90306 +               // Find match
90307 +               max_len = up + 3 <= unc_end ? (*match)(up, ctx) : 0;
90309 +               if (!max_len) {
90310 +                       if (cp >= last)
90311 +                               goto NotCompressed;
90312 +                       not_zero |= *cp++ = *up++;
90313 +               } else if (cp + 1 >= last) {
90314 +                       goto NotCompressed;
90315 +               } else {
90316 +                       t16 = make_pair(up - ctx->best_match, max_len, idx);
90317 +                       *cp++ = t16;
90318 +                       *cp++ = t16 >> 8;
90320 +                       ohdr |= 1 << cnt;
90321 +                       up += max_len;
90322 +               }
90324 +               cnt = (cnt + 1) & 7;
90325 +               if (!cnt) {
90326 +                       *cp2 = ohdr;
90327 +                       ohdr = 0;
90328 +                       cp2 = cp;
90329 +                       cp += 1;
90330 +               }
90331 +       }
90333 +       if (cp2 < last)
90334 +               *cp2 = ohdr;
90335 +       else
90336 +               cp -= 1;
90338 +       *cmpr_chunk_size = cp - cmpr;
90340 +       t16 = (*cmpr_chunk_size - 3) | 0xB000;
90341 +       cmpr[0] = t16;
90342 +       cmpr[1] = t16 >> 8;
90344 +       return not_zero ? 0 : LZNT_ERROR_ALL_ZEROS;
90346 +NotCompressed:
90348 +       if ((cmpr + LZNT_CHUNK_SIZE + sizeof(short)) > last)
90349 +               return -2;
90351 +       /*
90352 +        * Copy non-compressed data
90353 +        * 0x3FFF == ((LZNT_CHUNK_SIZE + 2 - 3) | 0x3000)
90354 +        */
90355 +       cmpr[0] = 0xff;
90356 +       cmpr[1] = 0x3f;
90358 +       memcpy(cmpr + sizeof(short), unc, LZNT_CHUNK_SIZE);
90359 +       *cmpr_chunk_size = LZNT_CHUNK_SIZE + sizeof(short);
90361 +       return 0;
90364 +static inline ssize_t decompress_chunk(u8 *unc, u8 *unc_end, const u8 *cmpr,
90365 +                                      const u8 *cmpr_end)
90367 +       u8 *up = unc;
90368 +       u8 ch = *cmpr++;
90369 +       size_t bit = 0;
90370 +       size_t index = 0;
90371 +       u16 pair;
90372 +       size_t offset, length;
90374 +       /* Decompress while both pointers remain inside their buffers */
90375 +       while (up < unc_end && cmpr < cmpr_end) {
90376 +               /* Correct index */
90377 +               while (unc + s_max_off[index] < up)
90378 +                       index += 1;
90380 +               /* Check the current flag for zero */
90381 +               if (!(ch & (1 << bit))) {
90382 +                       /* Just copy byte */
90383 +                       *up++ = *cmpr++;
90384 +                       goto next;
90385 +               }
90387 +               /* Check for boundary */
90388 +               if (cmpr + 1 >= cmpr_end)
90389 +                       return -EINVAL;
90391 +               /* Read a short from little endian stream */
90392 +               pair = cmpr[1];
90393 +               pair <<= 8;
90394 +               pair |= cmpr[0];
90396 +               cmpr += 2;
90398 +               /* Translate packed information into offset and length */
90399 +               length = parse_pair(pair, &offset, index);
90401 +               /* Check offset for boundary */
90402 +               if (unc + offset > up)
90403 +                       return -EINVAL;
90405 +               /* Truncate the length if necessary */
90406 +               if (up + length >= unc_end)
90407 +                       length = unc_end - up;
90409 +               /* Now we copy bytes. This is the heart of the LZ algorithm. */
90410 +               for (; length > 0; length--, up++)
90411 +                       *up = *(up - offset);
90413 +next:
90414 +               /* Advance flag bit value */
90415 +               bit = (bit + 1) & 7;
90417 +               if (!bit) {
90418 +                       if (cmpr >= cmpr_end)
90419 +                               break;
90421 +                       ch = *cmpr++;
90422 +               }
90423 +       }
90425 +       /* return the size of uncompressed data */
90426 +       return up - unc;
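The bytewise copy above deliberately reads bytes it has just written, which is what makes overlapping matches work; with offset 1 and length 4 a single "A" expands into "AAAAA". A minimal userspace illustration:

#include <stdio.h>

int main(void)
{
	char buf[8] = "A";
	char *up = buf + 1;
	size_t offset = 1, length = 4;

	for (; length > 0; length--, up++)
		*up = *(up - offset);
	printf("%s\n", buf);	/* prints AAAAA */
	return 0;
}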
90430 + * 0 - standard compression
90431 + * !0 - best compression, requires a lot of CPU
90432 + */
90433 +struct lznt *get_lznt_ctx(int level)
90435 +       struct lznt *r = ntfs_zalloc(level ? offsetof(struct lznt, hash)
90436 +                                          : sizeof(struct lznt));
90438 +       if (r)
90439 +               r->std = !level;
90440 +       return r;
90444 + * compress_lznt
90445 + *
90446 + * Compresses "unc" into "cmpr"
90447 + * +x - ok, 'cmpr' contains 'final_compressed_size' bytes of compressed data
90448 + * 0 - input buffer is all zeros
90449 + */
90450 +size_t compress_lznt(const void *unc, size_t unc_size, void *cmpr,
90451 +                    size_t cmpr_size, struct lznt *ctx)
90453 +       int err;
90454 +       size_t (*match)(const u8 *src, struct lznt *ctx);
90455 +       u8 *p = cmpr;
90456 +       u8 *end = p + cmpr_size;
90457 +       const u8 *unc_chunk = unc;
90458 +       const u8 *unc_end = unc_chunk + unc_size;
90459 +       bool is_zero = true;
90461 +       if (ctx->std) {
90462 +               match = &longest_match_std;
90463 +               memset(ctx->hash, 0, sizeof(ctx->hash));
90464 +       } else {
90465 +               match = &longest_match_best;
90466 +       }
90468 +       /* compression cycle */
90469 +       for (; unc_chunk < unc_end; unc_chunk += LZNT_CHUNK_SIZE) {
90470 +               cmpr_size = 0;
90471 +               err = compress_chunk(match, unc_chunk, unc_end, p, end,
90472 +                                    &cmpr_size, ctx);
90473 +               if (err < 0)
90474 +                       return unc_size;
90476 +               if (is_zero && err != LZNT_ERROR_ALL_ZEROS)
90477 +                       is_zero = false;
90479 +               p += cmpr_size;
90480 +       }
90482 +       if (p <= end - 2)
90483 +               p[0] = p[1] = 0;
90485 +       return is_zero ? 0 : PtrOffset(cmpr, p);
90489 + * decompress_lznt
90490 + *
90491 + * decompresses "cmpr" into "unc"
90492 + */
90493 +ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
90494 +                       size_t unc_size)
90496 +       const u8 *cmpr_chunk = cmpr;
90497 +       const u8 *cmpr_end = cmpr_chunk + cmpr_size;
90498 +       u8 *unc_chunk = unc;
90499 +       u8 *unc_end = unc_chunk + unc_size;
90500 +       u16 chunk_hdr;
90502 +       if (cmpr_size < sizeof(short))
90503 +               return -EINVAL;
90505 +       /* read chunk header */
90506 +       chunk_hdr = cmpr_chunk[1];
90507 +       chunk_hdr <<= 8;
90508 +       chunk_hdr |= cmpr_chunk[0];
90510 +       /* loop through decompressing chunks */
90511 +       for (;;) {
90512 +               size_t chunk_size_saved;
90513 +               size_t unc_use;
90514 +               size_t cmpr_use = 3 + (chunk_hdr & (LZNT_CHUNK_SIZE - 1));
90516 +               /* Check that the chunk actually fits the supplied buffer */
90517 +               if (cmpr_chunk + cmpr_use > cmpr_end)
90518 +                       return -EINVAL;
90520 +               /* First make sure the chunk contains compressed data */
90521 +               if (chunk_hdr & 0x8000) {
90522 +                       /* Decompress a chunk and return if we get an error */
90523 +                       ssize_t err =
90524 +                               decompress_chunk(unc_chunk, unc_end,
90525 +                                                cmpr_chunk + sizeof(chunk_hdr),
90526 +                                                cmpr_chunk + cmpr_use);
90527 +                       if (err < 0)
90528 +                               return err;
90529 +                       unc_use = err;
90530 +               } else {
90531 +                       /* This chunk does not contain compressed data */
90532 +                       unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end
90533 +                                         ? unc_end - unc_chunk
90534 +                                         : LZNT_CHUNK_SIZE;
90536 +                       if (cmpr_chunk + sizeof(chunk_hdr) + unc_use >
90537 +                           cmpr_end) {
90538 +                               return -EINVAL;
90539 +                       }
90541 +                       memcpy(unc_chunk, cmpr_chunk + sizeof(chunk_hdr),
90542 +                              unc_use);
90543 +               }
90545 +               /* Advance pointers */
90546 +               cmpr_chunk += cmpr_use;
90547 +               unc_chunk += unc_use;
90549 +               /* Check for the end of unc buffer */
90550 +               if (unc_chunk >= unc_end)
90551 +                       break;
90553 +               /* Proceed to the next chunk */
90554 +               if (cmpr_chunk > cmpr_end - 2)
90555 +                       break;
90557 +               chunk_size_saved = LZNT_CHUNK_SIZE;
90559 +               /* read chunk header */
90560 +               chunk_hdr = cmpr_chunk[1];
90561 +               chunk_hdr <<= 8;
90562 +               chunk_hdr |= cmpr_chunk[0];
90564 +               if (!chunk_hdr)
90565 +                       break;
90567 +               /* Check the size of unc buffer */
90568 +               if (unc_use < chunk_size_saved) {
90569 +                       size_t t1 = chunk_size_saved - unc_use;
90570 +                       u8 *t2 = unc_chunk + t1;
90572 +                       /* 'Zero' memory */
90573 +                       if (t2 >= unc_end)
90574 +                               break;
90576 +                       memset(unc_chunk, 0, t1);
90577 +                       unc_chunk = t2;
90578 +               }
90579 +       }
90581 +       /* Check compression boundary */
90582 +       if (cmpr_chunk > cmpr_end)
90583 +               return -EINVAL;
90585 +       /*
90586 +        * The unc size is just the difference between the current
90587 +        * pointer and the original one
90588 +        */
90589 +       return PtrOffset(unc, unc_chunk);
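Putting the header bits together: bit 15 marks a compressed chunk, and the low 12 bits hold the total chunk size minus 3 (the size includes the two header bytes). A sketch of the two accessors implied by the loop above:

#include <stddef.h>
#include <stdint.h>

static inline int chunk_is_compressed(uint16_t hdr)
{
	return hdr & 0x8000;
}

static inline size_t chunk_total_size(uint16_t hdr)
{
	/* compress_chunk() stores 0xB000 | (size - 3) for compressed
	 * chunks and 0x3FFF for stored 4096-byte chunks. */
	return 3 + (hdr & 0xFFF);
}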
90591 diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
90592 new file mode 100644
90593 index 000000000000..f5db12cd3b20
90594 --- /dev/null
90595 +++ b/fs/ntfs3/namei.c
90596 @@ -0,0 +1,578 @@
90597 +// SPDX-License-Identifier: GPL-2.0
90599 + *
90600 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
90601 + *
90602 + */
90604 +#include <linux/blkdev.h>
90605 +#include <linux/buffer_head.h>
90606 +#include <linux/fs.h>
90607 +#include <linux/iversion.h>
90608 +#include <linux/namei.h>
90609 +#include <linux/nls.h>
90611 +#include "debug.h"
90612 +#include "ntfs.h"
90613 +#include "ntfs_fs.h"
90616 + * fill_name_de
90617 + *
90618 + * formats NTFS_DE in 'buf'
90619 + */
90620 +int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
90621 +                const struct cpu_str *uni)
90623 +       int err;
90624 +       struct NTFS_DE *e = buf;
90625 +       u16 data_size;
90626 +       struct ATTR_FILE_NAME *fname = (struct ATTR_FILE_NAME *)(e + 1);
90628 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
90629 +       e->ref.high = fname->home.high = 0;
90630 +#endif
90631 +       if (uni) {
90632 +#ifdef __BIG_ENDIAN
90633 +               int ulen = uni->len;
90634 +               __le16 *uname = fname->name;
90635 +               const u16 *name_cpu = uni->name;
90637 +               while (ulen--)
90638 +                       *uname++ = cpu_to_le16(*name_cpu++);
90639 +#else
90640 +               memcpy(fname->name, uni->name, uni->len * sizeof(u16));
90641 +#endif
90642 +               fname->name_len = uni->len;
90644 +       } else {
90645 +               /* Convert input string to unicode */
90646 +               err = ntfs_nls_to_utf16(sbi, name->name, name->len,
90647 +                                       (struct cpu_str *)&fname->name_len,
90648 +                                       NTFS_NAME_LEN, UTF16_LITTLE_ENDIAN);
90649 +               if (err < 0)
90650 +                       return err;
90651 +       }
90653 +       fname->type = FILE_NAME_POSIX;
90654 +       data_size = fname_full_size(fname);
90656 +       e->size = cpu_to_le16(QuadAlign(data_size) + sizeof(struct NTFS_DE));
90657 +       e->key_size = cpu_to_le16(data_size);
90658 +       e->flags = 0;
90659 +       e->res = 0;
90661 +       return 0;
90665 + * ntfs_lookup
90666 + *
90667 + * inode_operations::lookup
90668 + */
90669 +static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry,
90670 +                                 u32 flags)
90672 +       struct ntfs_inode *ni = ntfs_i(dir);
90673 +       struct cpu_str *uni = __getname();
90674 +       struct inode *inode;
90675 +       int err;
90677 +       if (!uni)
90678 +               inode = ERR_PTR(-ENOMEM);
90679 +       else {
90680 +               err = ntfs_nls_to_utf16(ni->mi.sbi, dentry->d_name.name,
90681 +                                       dentry->d_name.len, uni, NTFS_NAME_LEN,
90682 +                                       UTF16_HOST_ENDIAN);
90683 +               if (err < 0)
90684 +                       inode = ERR_PTR(err);
90685 +               else {
90686 +                       ni_lock(ni);
90687 +                       inode = dir_search_u(dir, uni, NULL);
90688 +                       ni_unlock(ni);
90689 +               }
90690 +               __putname(uni);
90691 +       }
90693 +       return d_splice_alias(inode, dentry);
90697 + * ntfs_create
90698 + *
90699 + * inode_operations::create
90700 + */
90701 +static int ntfs_create(struct user_namespace *mnt_userns, struct inode *dir,
90702 +                      struct dentry *dentry, umode_t mode, bool excl)
90704 +       struct ntfs_inode *ni = ntfs_i(dir);
90705 +       struct inode *inode;
90707 +       ni_lock_dir(ni);
90709 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFREG | mode,
90710 +                                 0, NULL, 0, excl, NULL);
90712 +       ni_unlock(ni);
90714 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
90718 + * ntfs_link
90719 + *
90720 + * inode_operations::link
90721 + */
90722 +static int ntfs_link(struct dentry *ode, struct inode *dir, struct dentry *de)
90724 +       int err;
90725 +       struct inode *inode = d_inode(ode);
90726 +       struct ntfs_inode *ni = ntfs_i(inode);
90728 +       if (S_ISDIR(inode->i_mode))
90729 +               return -EPERM;
90731 +       if (inode->i_nlink >= NTFS_LINK_MAX)
90732 +               return -EMLINK;
90734 +       ni_lock_dir(ntfs_i(dir));
90735 +       if (inode != dir)
90736 +               ni_lock(ni);
90738 +       dir->i_ctime = dir->i_mtime = inode->i_ctime = current_time(inode);
90739 +       inc_nlink(inode);
90740 +       ihold(inode);
90742 +       err = ntfs_link_inode(inode, de);
90743 +       if (!err) {
90744 +               mark_inode_dirty(inode);
90745 +               mark_inode_dirty(dir);
90746 +               d_instantiate(de, inode);
90747 +       } else {
90748 +               drop_nlink(inode);
90749 +               iput(inode);
90750 +       }
90752 +       if (inode != dir)
90753 +               ni_unlock(ni);
90754 +       ni_unlock(ntfs_i(dir));
90756 +       return err;
90760 + * ntfs_unlink
90761 + *
90762 + * inode_operations::unlink
90763 + */
90764 +static int ntfs_unlink(struct inode *dir, struct dentry *dentry)
90766 +       struct ntfs_inode *ni = ntfs_i(dir);
90767 +       int err;
90769 +       ni_lock_dir(ni);
90771 +       err = ntfs_unlink_inode(dir, dentry);
90773 +       ni_unlock(ni);
90775 +       return err;
90779 + * ntfs_symlink
90780 + *
90781 + * inode_operations::symlink
90782 + */
90783 +static int ntfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
90784 +                       struct dentry *dentry, const char *symname)
90786 +       u32 size = strlen(symname);
90787 +       struct inode *inode;
90788 +       struct ntfs_inode *ni = ntfs_i(dir);
90790 +       ni_lock_dir(ni);
90792 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFLNK | 0777,
90793 +                                 0, symname, size, 0, NULL);
90795 +       ni_unlock(ni);
90797 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
90801 + * ntfs_mkdir
90802 + *
90803 + * inode_operations::mkdir
90804 + */
90805 +static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
90806 +                     struct dentry *dentry, umode_t mode)
90808 +       struct inode *inode;
90809 +       struct ntfs_inode *ni = ntfs_i(dir);
90811 +       ni_lock_dir(ni);
90813 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFDIR | mode,
90814 +                                 0, NULL, -1, 0, NULL);
90816 +       ni_unlock(ni);
90818 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
90822 + * ntfs_rmdir
90823 + *
90824 + * inode_operations::rmdir
90825 + */
90826 +static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
90828 +       struct ntfs_inode *ni = ntfs_i(dir);
90829 +       int err;
90831 +       ni_lock_dir(ni);
90833 +       err = ntfs_unlink_inode(dir, dentry);
90835 +       ni_unlock(ni);
90837 +       return err;
90841 + * ntfs_rename
90842 + *
90843 + * inode_operations::rename
90844 + */
90845 +static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
90846 +                      struct dentry *old_dentry, struct inode *new_dir,
90847 +                      struct dentry *new_dentry, u32 flags)
90849 +       int err;
90850 +       struct super_block *sb = old_dir->i_sb;
90851 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
90852 +       struct ntfs_inode *old_dir_ni = ntfs_i(old_dir);
90853 +       struct ntfs_inode *new_dir_ni = ntfs_i(new_dir);
90854 +       struct ntfs_inode *old_ni;
90855 +       struct ATTR_FILE_NAME *old_name, *new_name, *fname;
90856 +       u8 name_type;
90857 +       bool is_same;
90858 +       struct inode *old_inode, *new_inode;
90859 +       struct NTFS_DE *old_de, *new_de;
90860 +       struct ATTRIB *attr;
90861 +       struct ATTR_LIST_ENTRY *le;
90862 +       u16 new_de_key_size;
90864 +       static_assert(SIZEOF_ATTRIBUTE_FILENAME_MAX + SIZEOF_RESIDENT < 1024);
90865 +       static_assert(SIZEOF_ATTRIBUTE_FILENAME_MAX + sizeof(struct NTFS_DE) <
90866 +                     1024);
90867 +       static_assert(PATH_MAX >= 4 * 1024);
90869 +       if (flags & ~RENAME_NOREPLACE)
90870 +               return -EINVAL;
90872 +       old_inode = d_inode(old_dentry);
90873 +       new_inode = d_inode(new_dentry);
90875 +       old_ni = ntfs_i(old_inode);
90877 +       is_same = old_dentry->d_name.len == new_dentry->d_name.len &&
90878 +                 !memcmp(old_dentry->d_name.name, new_dentry->d_name.name,
90879 +                         old_dentry->d_name.len);
90881 +       if (is_same && old_dir == new_dir) {
90882 +               /* Nothing to do */
90883 +               err = 0;
90884 +               goto out;
90885 +       }
90887 +       if (ntfs_is_meta_file(sbi, old_inode->i_ino)) {
90888 +               err = -EINVAL;
90889 +               goto out;
90890 +       }
90892 +       if (new_inode) {
90893 +               /* Target name exists. Unlink it. */
90894 +               dget(new_dentry);
90895 +               ni_lock_dir(new_dir_ni);
90896 +               err = ntfs_unlink_inode(new_dir, new_dentry);
90897 +               ni_unlock(new_dir_ni);
90898 +               dput(new_dentry);
90899 +               if (err)
90900 +                       goto out;
90901 +       }
90903 +       /* allocate PATH_MAX bytes */
90904 +       old_de = __getname();
90905 +       if (!old_de) {
90906 +               err = -ENOMEM;
90907 +               goto out;
90908 +       }
90910 +       err = fill_name_de(sbi, old_de, &old_dentry->d_name, NULL);
90911 +       if (err < 0)
90912 +               goto out1;
90914 +       old_name = (struct ATTR_FILE_NAME *)(old_de + 1);
90916 +       if (is_same) {
90917 +               new_de = old_de;
90918 +       } else {
90919 +               new_de = Add2Ptr(old_de, 1024);
90920 +               err = fill_name_de(sbi, new_de, &new_dentry->d_name, NULL);
90921 +               if (err < 0)
90922 +                       goto out1;
90923 +       }
90925 +       ni_lock_dir(old_dir_ni);
90926 +       ni_lock(old_ni);
90928 +       mi_get_ref(&old_dir_ni->mi, &old_name->home);
90930 +       /* Get a pointer to the file_name attribute in the MFT record */
90931 +       fname = ni_fname_name(old_ni, (struct cpu_str *)&old_name->name_len,
90932 +                             &old_name->home, &le);
90933 +       if (!fname) {
90934 +               err = -EINVAL;
90935 +               goto out2;
90936 +       }
90938 +       /* Copy fname info from record into new fname */
90939 +       new_name = (struct ATTR_FILE_NAME *)(new_de + 1);
90940 +       memcpy(&new_name->dup, &fname->dup, sizeof(fname->dup));
90942 +       name_type = paired_name(fname->type);
90944 +       /* remove first name from directory */
90945 +       err = indx_delete_entry(&old_dir_ni->dir, old_dir_ni, old_de + 1,
90946 +                               le16_to_cpu(old_de->key_size), sbi);
90947 +       if (err)
90948 +               goto out3;
90950 +       /* remove first name from mft */
90951 +       err = ni_remove_attr_le(old_ni, attr_from_name(fname), le);
90952 +       if (err)
90953 +               goto out4;
90955 +       le16_add_cpu(&old_ni->mi.mrec->hard_links, -1);
90956 +       old_ni->mi.dirty = true;
90958 +       if (name_type != FILE_NAME_POSIX) {
90959 +               /* get paired name */
90960 +               fname = ni_fname_type(old_ni, name_type, &le);
90961 +               if (fname) {
90962 +                       /* remove second name from directory */
90963 +                       err = indx_delete_entry(&old_dir_ni->dir, old_dir_ni,
90964 +                                               fname, fname_full_size(fname),
90965 +                                               sbi);
90966 +                       if (err)
90967 +                               goto out5;
90969 +                       /* remove second name from mft */
90970 +                       err = ni_remove_attr_le(old_ni, attr_from_name(fname),
90971 +                                               le);
90972 +                       if (err)
90973 +                               goto out6;
90975 +                       le16_add_cpu(&old_ni->mi.mrec->hard_links, -1);
90976 +                       old_ni->mi.dirty = true;
90977 +               }
90978 +       }
90980 +       /* Add new name */
90981 +       mi_get_ref(&old_ni->mi, &new_de->ref);
90982 +       mi_get_ref(&ntfs_i(new_dir)->mi, &new_name->home);
90984 +       new_de_key_size = le16_to_cpu(new_de->key_size);
90986 +       /* insert new name in mft */
90987 +       err = ni_insert_resident(old_ni, new_de_key_size, ATTR_NAME, NULL, 0,
90988 +                                &attr, NULL);
90989 +       if (err)
90990 +               goto out7;
90992 +       attr->res.flags = RESIDENT_FLAG_INDEXED;
90994 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), new_name, new_de_key_size);
90996 +       le16_add_cpu(&old_ni->mi.mrec->hard_links, 1);
90997 +       old_ni->mi.dirty = true;
90999 +       /* insert new name in directory */
91000 +       err = indx_insert_entry(&new_dir_ni->dir, new_dir_ni, new_de, sbi,
91001 +                               NULL);
91002 +       if (err)
91003 +               goto out8;
91005 +       if (IS_DIRSYNC(new_dir))
91006 +               err = ntfs_sync_inode(old_inode);
91007 +       else
91008 +               mark_inode_dirty(old_inode);
91010 +       old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
91011 +       if (IS_DIRSYNC(old_dir))
91012 +               (void)ntfs_sync_inode(old_dir);
91013 +       else
91014 +               mark_inode_dirty(old_dir);
91016 +       if (old_dir != new_dir) {
91017 +               new_dir->i_mtime = new_dir->i_ctime = old_dir->i_ctime;
91018 +               mark_inode_dirty(new_dir);
91019 +       }
91021 +       if (old_inode) {
91022 +               old_inode->i_ctime = old_dir->i_ctime;
91023 +               mark_inode_dirty(old_inode);
91024 +       }
91026 +       err = 0;
91027 +       /* normal way */
91028 +       goto out2;
91030 +out8:
91031 +       /* undo
91032 +        * ni_insert_resident(old_ni, new_de_key_size, ATTR_NAME, NULL, 0,
91033 +        *                       &attr, NULL);
91034 +        */
91035 +       mi_remove_attr(&old_ni->mi, attr);
91036 +out7:
91037 +       /* undo
91038 +        * ni_remove_attr_le(old_ni, attr_from_name(fname), le);
91039 +        */
91040 +out6:
91041 +       /* undo
91042 +        * indx_delete_entry(&old_dir_ni->dir, old_dir_ni,
91043 +        *                                      fname, fname_full_size(fname),
91044 +        *                                      sbi);
91045 +        */
91046 +out5:
91047 +       /* undo
91048 +        * ni_remove_attr_le(old_ni, attr_from_name(fname), le);
91049 +        */
91050 +out4:
91051 +       /* undo:
91052 +        * indx_delete_entry(&old_dir_ni->dir, old_dir_ni, old_de + 1,
91053 +        *                      le16_to_cpu(old_de->key_size), sbi);
91054 +        */
91055 +out3:
91056 +out2:
91057 +       ni_unlock(old_ni);
91058 +       ni_unlock(old_dir_ni);
91059 +out1:
91060 +       __putname(old_de);
91061 +out:
91062 +       return err;
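The three static_asserts at the top of ntfs_rename() are what make the single __getname() buffer safe: PATH_MAX is at least 4 KiB and a worst-case entry (NTFS_DE plus maximal ATTR_FILE_NAME) is under 1 KiB, so two entries fit at fixed offsets. A sketch of that carving, with hypothetical names:

#include <assert.h>

#define RENAME_BUF_SIZE 4096u   /* static_assert(PATH_MAX >= 4 * 1024) */
#define ENTRY_SLOT      1024u   /* new_de = Add2Ptr(old_de, 1024) */

/* Carve two worst-case directory entries out of one PATH_MAX buffer. */
static void carve_rename_entries(void *buf, void **old_de, void **new_de)
{
        assert(2 * ENTRY_SLOT <= RENAME_BUF_SIZE);
        *old_de = buf;
        *new_de = (char *)buf + ENTRY_SLOT;
}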
91066 + * ntfs_atomic_open
91067 + *
91068 + * inode_operations::atomic_open
91069 + */
91070 +static int ntfs_atomic_open(struct inode *dir, struct dentry *dentry,
91071 +                           struct file *file, u32 flags, umode_t mode)
91073 +       int err;
91074 +       bool excl = !!(flags & O_EXCL);
91075 +       struct inode *inode;
91076 +       struct ntfs_fnd *fnd = NULL;
91077 +       struct ntfs_inode *ni = ntfs_i(dir);
91078 +       struct dentry *d = NULL;
91079 +       struct cpu_str *uni = __getname();
91081 +       if (!uni)
91082 +               return -ENOMEM;
91084 +       err = ntfs_nls_to_utf16(ni->mi.sbi, dentry->d_name.name,
91085 +                               dentry->d_name.len, uni, NTFS_NAME_LEN,
91086 +                               UTF16_HOST_ENDIAN);
91087 +       if (err < 0)
91088 +               goto out;
91090 +       ni_lock_dir(ni);
91092 +       if (d_in_lookup(dentry)) {
91093 +               fnd = fnd_get();
91094 +               if (!fnd) {
91095 +                       err = -ENOMEM;
91096 +                       goto out1;
91097 +               }
91099 +               d = d_splice_alias(dir_search_u(dir, uni, fnd), dentry);
91100 +               if (IS_ERR(d)) {
91101 +                       err = PTR_ERR(d);
91102 +                       d = NULL;
91103 +                       goto out2;
91104 +               }
91106 +               if (d)
91107 +                       dentry = d;
91108 +       }
91110 +       if (!(flags & O_CREAT) || d_really_is_positive(dentry)) {
91111 +               err = finish_no_open(file, d);
91112 +               goto out2;
91113 +       }
91115 +       file->f_mode |= FMODE_CREATED;
91117 +       /* 'fnd' contains the tree path where the new entry is to be inserted */
91118 +       /* TODO: init_user_ns? */
91119 +       inode = ntfs_create_inode(&init_user_ns, dir, dentry, uni, mode, 0,
91120 +                                 NULL, 0, excl, fnd);
91121 +       err = IS_ERR(inode) ? PTR_ERR(inode)
91122 +                           : finish_open(file, dentry, ntfs_file_open);
91123 +       dput(d);
91125 +out2:
91126 +       fnd_put(fnd);
91127 +out1:
91128 +       ni_unlock(ni);
91129 +out:
91130 +       __putname(uni);
91132 +       return err;
91135 +struct dentry *ntfs3_get_parent(struct dentry *child)
91137 +       struct inode *inode = d_inode(child);
91138 +       struct ntfs_inode *ni = ntfs_i(inode);
91140 +       struct ATTR_LIST_ENTRY *le = NULL;
91141 +       struct ATTRIB *attr = NULL;
91142 +       struct ATTR_FILE_NAME *fname;
91144 +       while ((attr = ni_find_attr(ni, attr, &le, ATTR_NAME, NULL, 0, NULL,
91145 +                                   NULL))) {
91146 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
91147 +               if (!fname)
91148 +                       continue;
91150 +               return d_obtain_alias(
91151 +                       ntfs_iget5(inode->i_sb, &fname->home, NULL));
91152 +       }
91154 +       return ERR_PTR(-ENOENT);
91157 +const struct inode_operations ntfs_dir_inode_operations = {
91158 +       .lookup = ntfs_lookup,
91159 +       .create = ntfs_create,
91160 +       .link = ntfs_link,
91161 +       .unlink = ntfs_unlink,
91162 +       .symlink = ntfs_symlink,
91163 +       .mkdir = ntfs_mkdir,
91164 +       .rmdir = ntfs_rmdir,
91165 +       .rename = ntfs_rename,
91166 +       .permission = ntfs_permission,
91167 +       .get_acl = ntfs_get_acl,
91168 +       .set_acl = ntfs_set_acl,
91169 +       .setattr = ntfs3_setattr,
91170 +       .getattr = ntfs_getattr,
91171 +       .listxattr = ntfs_listxattr,
91172 +       .atomic_open = ntfs_atomic_open,
91173 +       .fiemap = ntfs_fiemap,
91175 diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
91176 new file mode 100644
91177 index 000000000000..40398e6c39c9
91178 --- /dev/null
91179 +++ b/fs/ntfs3/ntfs.h
91180 @@ -0,0 +1,1238 @@
91181 +/* SPDX-License-Identifier: GPL-2.0 */
91183 + *
91184 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
91185 + *
91186 + * on-disk ntfs structs
91187 + */
91189 +// clang-format off
91191 +/* TODO:
91192 + * - Check 4K mft record and 512 bytes cluster
91193 + */
91196 + * Activate this define to use binary search in indexes
91197 + */
91198 +#define NTFS3_INDEX_BINARY_SEARCH
91201 + * Check each run for marked clusters
91202 + */
91203 +#define NTFS3_CHECK_FREE_CLST
91205 +#define NTFS_NAME_LEN 255
91208 + * ntfs.sys uses a maximum of 500 links, while the
91209 + * on-disk struct allows up to 0xffff.
91210 + */
91211 +#define NTFS_LINK_MAX 0x400
91212 +//#define NTFS_LINK_MAX 0xffff
91215 + * Activate to use 64-bit clusters instead of the 32-bit ones ntfs.sys uses.
91216 + * CLST, the logical/virtual cluster number type defined below,
91217 + * may be redefined to a 64-bit value if needed.
91218 + */
91219 +//#define CONFIG_NTFS3_64BIT_CLUSTER
91221 +#define NTFS_LZNT_MAX_CLUSTER  4096
91222 +#define NTFS_LZNT_CUNIT                4
91223 +#define NTFS_LZNT_CLUSTERS     (1u<<NTFS_LZNT_CUNIT)
91225 +struct GUID {
91226 +       __le32 Data1;
91227 +       __le16 Data2;
91228 +       __le16 Data3;
91229 +       u8 Data4[8];
91233 + * This struct repeats the layout of ATTR_FILE_NAME
91234 + * at offset 0x40.
91235 + * It is used to store the global constants NAME_MFT/NAME_MIRROR...
91236 + * Most of these constant names fit in 10 characters.
91237 + */
91238 +struct cpu_str {
91239 +       u8 len;
91240 +       u8 unused;
91241 +       u16 name[10];
91244 +struct le_str {
91245 +       u8 len;
91246 +       u8 unused;
91247 +       __le16 name[];
91250 +static_assert(SECTOR_SHIFT == 9);
91252 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
91253 +typedef u64 CLST;
91254 +static_assert(sizeof(size_t) == 8);
91255 +#else
91256 +typedef u32 CLST;
91257 +#endif
91259 +#define SPARSE_LCN64   ((u64)-1)
91260 +#define SPARSE_LCN     ((CLST)-1)
91261 +#define RESIDENT_LCN   ((CLST)-2)
91262 +#define COMPRESSED_LCN ((CLST)-3)
91264 +#define COMPRESSION_UNIT     4
91265 +#define COMPRESS_MAX_CLUSTER 0x1000
91266 +#define MFT_INCREASE_CHUNK   1024
91268 +enum RECORD_NUM {
91269 +       MFT_REC_MFT             = 0,
91270 +       MFT_REC_MIRR            = 1,
91271 +       MFT_REC_LOG             = 2,
91272 +       MFT_REC_VOL             = 3,
91273 +       MFT_REC_ATTR            = 4,
91274 +       MFT_REC_ROOT            = 5,
91275 +       MFT_REC_BITMAP          = 6,
91276 +       MFT_REC_BOOT            = 7,
91277 +       MFT_REC_BADCLUST        = 8,
91278 +       //MFT_REC_QUOTA         = 9,
91279 +       MFT_REC_SECURE          = 9, // NTFS 3.0
91280 +       MFT_REC_UPCASE          = 10,
91281 +       MFT_REC_EXTEND          = 11, // NTFS 3.0
91282 +       MFT_REC_RESERVED        = 11,
91283 +       MFT_REC_FREE            = 16,
91284 +       MFT_REC_USER            = 24,
91287 +enum ATTR_TYPE {
91288 +       ATTR_ZERO               = cpu_to_le32(0x00),
91289 +       ATTR_STD                = cpu_to_le32(0x10),
91290 +       ATTR_LIST               = cpu_to_le32(0x20),
91291 +       ATTR_NAME               = cpu_to_le32(0x30),
91292 +       // ATTR_VOLUME_VERSION on Nt4
91293 +       ATTR_ID                 = cpu_to_le32(0x40),
91294 +       ATTR_SECURE             = cpu_to_le32(0x50),
91295 +       ATTR_LABEL              = cpu_to_le32(0x60),
91296 +       ATTR_VOL_INFO           = cpu_to_le32(0x70),
91297 +       ATTR_DATA               = cpu_to_le32(0x80),
91298 +       ATTR_ROOT               = cpu_to_le32(0x90),
91299 +       ATTR_ALLOC              = cpu_to_le32(0xA0),
91300 +       ATTR_BITMAP             = cpu_to_le32(0xB0),
91301 +       // ATTR_SYMLINK on Nt4
91302 +       ATTR_REPARSE            = cpu_to_le32(0xC0),
91303 +       ATTR_EA_INFO            = cpu_to_le32(0xD0),
91304 +       ATTR_EA                 = cpu_to_le32(0xE0),
91305 +       ATTR_PROPERTYSET        = cpu_to_le32(0xF0),
91306 +       ATTR_LOGGED_UTILITY_STREAM = cpu_to_le32(0x100),
91307 +       ATTR_END                = cpu_to_le32(0xFFFFFFFF)
91310 +static_assert(sizeof(enum ATTR_TYPE) == 4);
91312 +enum FILE_ATTRIBUTE {
91313 +       FILE_ATTRIBUTE_READONLY         = cpu_to_le32(0x00000001),
91314 +       FILE_ATTRIBUTE_HIDDEN           = cpu_to_le32(0x00000002),
91315 +       FILE_ATTRIBUTE_SYSTEM           = cpu_to_le32(0x00000004),
91316 +       FILE_ATTRIBUTE_ARCHIVE          = cpu_to_le32(0x00000020),
91317 +       FILE_ATTRIBUTE_DEVICE           = cpu_to_le32(0x00000040),
91318 +       FILE_ATTRIBUTE_TEMPORARY        = cpu_to_le32(0x00000100),
91319 +       FILE_ATTRIBUTE_SPARSE_FILE      = cpu_to_le32(0x00000200),
91320 +       FILE_ATTRIBUTE_REPARSE_POINT    = cpu_to_le32(0x00000400),
91321 +       FILE_ATTRIBUTE_COMPRESSED       = cpu_to_le32(0x00000800),
91322 +       FILE_ATTRIBUTE_OFFLINE          = cpu_to_le32(0x00001000),
91323 +       FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = cpu_to_le32(0x00002000),
91324 +       FILE_ATTRIBUTE_ENCRYPTED        = cpu_to_le32(0x00004000),
91325 +       FILE_ATTRIBUTE_VALID_FLAGS      = cpu_to_le32(0x00007fb7),
91326 +       FILE_ATTRIBUTE_DIRECTORY        = cpu_to_le32(0x10000000),
91329 +static_assert(sizeof(enum FILE_ATTRIBUTE) == 4);
91331 +extern const struct cpu_str NAME_MFT;
91332 +extern const struct cpu_str NAME_MIRROR;
91333 +extern const struct cpu_str NAME_LOGFILE;
91334 +extern const struct cpu_str NAME_VOLUME;
91335 +extern const struct cpu_str NAME_ATTRDEF;
91336 +extern const struct cpu_str NAME_ROOT;
91337 +extern const struct cpu_str NAME_BITMAP;
91338 +extern const struct cpu_str NAME_BOOT;
91339 +extern const struct cpu_str NAME_BADCLUS;
91340 +extern const struct cpu_str NAME_QUOTA;
91341 +extern const struct cpu_str NAME_SECURE;
91342 +extern const struct cpu_str NAME_UPCASE;
91343 +extern const struct cpu_str NAME_EXTEND;
91344 +extern const struct cpu_str NAME_OBJID;
91345 +extern const struct cpu_str NAME_REPARSE;
91346 +extern const struct cpu_str NAME_USNJRNL;
91348 +extern const __le16 I30_NAME[4];
91349 +extern const __le16 SII_NAME[4];
91350 +extern const __le16 SDH_NAME[4];
91351 +extern const __le16 SO_NAME[2];
91352 +extern const __le16 SQ_NAME[2];
91353 +extern const __le16 SR_NAME[2];
91355 +extern const __le16 BAD_NAME[4];
91356 +extern const __le16 SDS_NAME[4];
91357 +extern const __le16 WOF_NAME[17];      /* WofCompressedData */
91359 +/* MFT record number structure */
91360 +struct MFT_REF {
91361 +       __le32 low;     // The low part of the number
91362 +       __le16 high;    // The high part of the number
91363 +       __le16 seq;     // The sequence number of MFT record
91366 +static_assert(sizeof(__le64) == sizeof(struct MFT_REF));
91368 +static inline CLST ino_get(const struct MFT_REF *ref)
91370 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
91371 +       return le32_to_cpu(ref->low) | ((u64)le16_to_cpu(ref->high) << 32);
91372 +#else
91373 +       return le32_to_cpu(ref->low);
91374 +#endif
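ino_get() above unpacks the 48-bit record number from a reference; the inverse (packing, which the driver itself does with mi_get_ref() elsewhere in the patch) can be sketched with the same little-endian helpers. A hypothetical helper, not from the patch:

/* Hypothetical packing helper, the mirror image of ino_get(). */
static void ref_set(struct MFT_REF *ref, CLST rno, u16 seq)
{
        ref->low = cpu_to_le32((u32)rno);
#ifdef CONFIG_NTFS3_64BIT_CLUSTER
        ref->high = cpu_to_le16((u16)((u64)rno >> 32));
#else
        ref->high = 0;
#endif
        ref->seq = cpu_to_le16(seq);
}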
91377 +struct NTFS_BOOT {
91378 +       u8 jump_code[3];        // 0x00: Jump to boot code
91379 +       u8 system_id[8];        // 0x03: System ID, equals "NTFS    "
91381 +       // NOTE: this member is not aligned(!)
91382 +       // The sector size is bytes_per_sector[1] * 256;
91383 +       // bytes_per_sector[0] must be 0.
91384 +       u8 bytes_per_sector[2]; // 0x0B: Bytes per sector
91386 +       u8 sectors_per_clusters;// 0x0D: Sectors per cluster
91387 +       u8 unused1[7];
91388 +       u8 media_type;          // 0x15: Media type (0xF8 - harddisk)
91389 +       u8 unused2[2];
91390 +       __le16 sct_per_track;   // 0x18: number of sectors per track
91391 +       __le16 heads;           // 0x1A: number of heads per cylinder
91392 +       __le32 hidden_sectors;  // 0x1C: number of 'hidden' sectors
91393 +       u8 unused3[4];
91394 +       u8 bios_drive_num;      // 0x24: BIOS drive number =0x80
91395 +       u8 unused4;
91396 +       u8 signature_ex;        // 0x26: Extended BOOT signature =0x80
91397 +       u8 unused5;
91398 +       __le64 sectors_per_volume;// 0x28: size of volume in sectors
91399 +       __le64 mft_clst;        // 0x30: first cluster of $MFT
91400 +       __le64 mft2_clst;       // 0x38: first cluster of $MFTMirr
91401 +       s8 record_size;         // 0x40: size of MFT record in clusters (sectors)
91402 +       u8 unused6[3];
91403 +       s8 index_size;          // 0x44: size of INDX record in clusters (sectors)
91404 +       u8 unused7[3];
91405 +       __le64 serial_num;      // 0x48: Volume serial number
91406 +       __le32 check_sum;       // 0x50: Simple additive checksum of all
91407 +                               // of the u32's which precede the 'check_sum'
91409 +       u8 boot_code[0x200 - 0x50 - 2 - 4]; // 0x54:
91410 +       u8 boot_magic[2];       // 0x1FE: Boot signature =0x55 + 0xAA
91413 +static_assert(sizeof(struct NTFS_BOOT) == 0x200);
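Given the note on the unaligned bytes_per_sector[] field above, a small sketch of how a boot-sector reader would recover and validate the sector size:

/* Decode the unaligned two-byte sector size; 0 means invalid. */
static u32 boot_bytes_per_sector(const struct NTFS_BOOT *boot)
{
        if (boot->bytes_per_sector[0])
                return 0;                               /* must be 0 */
        return (u32)boot->bytes_per_sector[1] << 8;     /* i.e. * 256 */
}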
91415 +enum NTFS_SIGNATURE {
91416 +       NTFS_FILE_SIGNATURE = cpu_to_le32(0x454C4946), // 'FILE'
91417 +       NTFS_INDX_SIGNATURE = cpu_to_le32(0x58444E49), // 'INDX'
91418 +       NTFS_CHKD_SIGNATURE = cpu_to_le32(0x444B4843), // 'CHKD'
91419 +       NTFS_RSTR_SIGNATURE = cpu_to_le32(0x52545352), // 'RSTR'
91420 +       NTFS_RCRD_SIGNATURE = cpu_to_le32(0x44524352), // 'RCRD'
91421 +       NTFS_BAAD_SIGNATURE = cpu_to_le32(0x44414142), // 'BAAD'
91422 +       NTFS_HOLE_SIGNATURE = cpu_to_le32(0x454C4F48), // 'HOLE'
91423 +       NTFS_FFFF_SIGNATURE = cpu_to_le32(0xffffffff),
91426 +static_assert(sizeof(enum NTFS_SIGNATURE) == 4);
91428 +/* MFT Record header structure */
91429 +struct NTFS_RECORD_HEADER {
91430 +       /* Record magic number, equals 'FILE'/'INDX'/'RSTR'/'RCRD' */
91431 +       enum NTFS_SIGNATURE sign; // 0x00:
91432 +       __le16 fix_off;         // 0x04:
91433 +       __le16 fix_num;         // 0x06:
91434 +       __le64 lsn;             // 0x08: Log file sequence number
91437 +static_assert(sizeof(struct NTFS_RECORD_HEADER) == 0x10);
91439 +static inline int is_baad(const struct NTFS_RECORD_HEADER *hdr)
91441 +       return hdr->sign == NTFS_BAAD_SIGNATURE;
91444 +/* Possible bits in struct MFT_REC.flags */
91445 +enum RECORD_FLAG {
91446 +       RECORD_FLAG_IN_USE      = cpu_to_le16(0x0001),
91447 +       RECORD_FLAG_DIR         = cpu_to_le16(0x0002),
91448 +       RECORD_FLAG_SYSTEM      = cpu_to_le16(0x0004),
91449 +       RECORD_FLAG_UNKNOWN     = cpu_to_le16(0x0008),
91452 +/* MFT Record structure */
91453 +struct MFT_REC {
91454 +       struct NTFS_RECORD_HEADER rhdr; // 'FILE'
91456 +       __le16 seq;             // 0x10: Sequence number for this record
91457 +       __le16 hard_links;      // 0x12: The number of hard links to record
91458 +       __le16 attr_off;        // 0x14: Offset to attributes
91459 +       __le16 flags;           // 0x16: See RECORD_FLAG
91460 +       __le32 used;            // 0x18: The size of used part
91461 +       __le32 total;           // 0x1C: Total record size
91463 +       struct MFT_REF parent_ref; // 0x20: Parent MFT record
91464 +       __le16 next_attr_id;    // 0x28: The next attribute Id
91466 +       __le16 res;             // 0x2A: High part of mft record?
91467 +       __le32 mft_record;      // 0x2C: Current mft record number
91468 +       __le16 fixups[];        // 0x30:
91471 +#define MFTRECORD_FIXUP_OFFSET_1 offsetof(struct MFT_REC, res)
91472 +#define MFTRECORD_FIXUP_OFFSET_3 offsetof(struct MFT_REC, fixups)
91474 +static_assert(MFTRECORD_FIXUP_OFFSET_1 == 0x2A);
91475 +static_assert(MFTRECORD_FIXUP_OFFSET_3 == 0x30);
91477 +static inline bool is_rec_base(const struct MFT_REC *rec)
91479 +       const struct MFT_REF *r = &rec->parent_ref;
91481 +       return !r->low && !r->high && !r->seq;
91484 +static inline bool is_mft_rec5(const struct MFT_REC *rec)
91486 +       return le16_to_cpu(rec->rhdr.fix_off) >=
91487 +              offsetof(struct MFT_REC, fixups);
91490 +static inline bool is_rec_inuse(const struct MFT_REC *rec)
91492 +       return rec->flags & RECORD_FLAG_IN_USE;
91495 +static inline bool clear_rec_inuse(struct MFT_REC *rec)
91497 +       return rec->flags &= ~RECORD_FLAG_IN_USE;
91500 +/* Possible values of ATTR_RESIDENT.flags */
91501 +#define RESIDENT_FLAG_INDEXED 0x01
91503 +struct ATTR_RESIDENT {
91504 +       __le32 data_size;       // 0x10: The size of data
91505 +       __le16 data_off;        // 0x14: Offset to data
91506 +       u8 flags;               // 0x16: resident flags ( 1 - indexed )
91507 +       u8 res;                 // 0x17:
91508 +}; // sizeof() = 0x18
91510 +struct ATTR_NONRESIDENT {
91511 +       __le64 svcn;            // 0x10: Starting VCN of this segment
91512 +       __le64 evcn;            // 0x18: End VCN of this segment
91513 +       __le16 run_off;         // 0x20: Offset to packed runs
91514 +       //  Unit of Compression size for this stream, expressed
91515 +       //  as a log of the cluster size.
91516 +       //
91517 +       //      0 means file is not compressed
91518 +       //      1, 2, 3, and 4 are potentially legal values if the
91519 +       //          stream is compressed, however the implementation
91520 +       //          may only choose to use 4, or possibly 3.  Note
91521 +       //          that 4 means cluster size times 16.  If convenient
91522 +       //          the implementation may wish to accept a
91523 +       //          reasonable range of legal values here (1-5?),
91524 +       //          even if the implementation only generates
91525 +       //          a smaller set of values itself.
91526 +       u8 c_unit;              // 0x22
91527 +       u8 res1[5];             // 0x23:
91528 +       __le64 alloc_size;      // 0x28: The allocated size of attribute in bytes
91529 +                               // (multiple of cluster size)
91530 +       __le64 data_size;       // 0x30: The size of attribute  in bytes <= alloc_size
91531 +       __le64 valid_size;      // 0x38: The size of valid part in bytes <= data_size
91532 +       __le64 total_size;      // 0x40: The sum of the allocated clusters for a file
91533 +                               // (present only for the first segment (0 == vcn)
91534 +                               // of compressed attribute)
91536 +}; // sizeof()=0x40 or 0x48 (if compressed)
91538 +/* Possible values of ATTRIB.flags: */
91539 +#define ATTR_FLAG_COMPRESSED     cpu_to_le16(0x0001)
91540 +#define ATTR_FLAG_COMPRESSED_MASK cpu_to_le16(0x00FF)
91541 +#define ATTR_FLAG_ENCRYPTED      cpu_to_le16(0x4000)
91542 +#define ATTR_FLAG_SPARSED        cpu_to_le16(0x8000)
91544 +struct ATTRIB {
91545 +       enum ATTR_TYPE type;    // 0x00: The type of this attribute
91546 +       __le32 size;            // 0x04: The size of this attribute
91547 +       u8 non_res;             // 0x08: Is this attribute non-resident ?
91548 +       u8 name_len;            // 0x09: This attribute name length
91549 +       __le16 name_off;        // 0x0A: Offset to the attribute name
91550 +       __le16 flags;           // 0x0C: See ATTR_FLAG_XXX
91551 +       __le16 id;              // 0x0E: unique id (per record)
91553 +       union {
91554 +               struct ATTR_RESIDENT res;     // 0x10
91555 +               struct ATTR_NONRESIDENT nres; // 0x10
91556 +       };
91559 +/* Define attribute sizes */
91560 +#define SIZEOF_RESIDENT                        0x18
91561 +#define SIZEOF_NONRESIDENT_EX          0x48
91562 +#define SIZEOF_NONRESIDENT             0x40
91564 +#define SIZEOF_RESIDENT_LE             cpu_to_le16(0x18)
91565 +#define SIZEOF_NONRESIDENT_EX_LE       cpu_to_le16(0x48)
91566 +#define SIZEOF_NONRESIDENT_LE          cpu_to_le16(0x40)
91568 +static inline u64 attr_ondisk_size(const struct ATTRIB *attr)
91570 +       return attr->non_res ? ((attr->flags &
91571 +                                (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ?
91572 +                                       le64_to_cpu(attr->nres.total_size) :
91573 +                                       le64_to_cpu(attr->nres.alloc_size)) :
91574 +                              QuadAlign(le32_to_cpu(attr->res.data_size));
91577 +static inline u64 attr_size(const struct ATTRIB *attr)
91579 +       return attr->non_res ? le64_to_cpu(attr->nres.data_size) :
91580 +                              le32_to_cpu(attr->res.data_size);
91583 +static inline bool is_attr_encrypted(const struct ATTRIB *attr)
91585 +       return attr->flags & ATTR_FLAG_ENCRYPTED;
91588 +static inline bool is_attr_sparsed(const struct ATTRIB *attr)
91590 +       return attr->flags & ATTR_FLAG_SPARSED;
91593 +static inline bool is_attr_compressed(const struct ATTRIB *attr)
91595 +       return attr->flags & ATTR_FLAG_COMPRESSED;
91598 +static inline bool is_attr_ext(const struct ATTRIB *attr)
91600 +       return attr->flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED);
91603 +static inline bool is_attr_indexed(const struct ATTRIB *attr)
91605 +       return !attr->non_res && (attr->res.flags & RESIDENT_FLAG_INDEXED);
91608 +static inline __le16 const *attr_name(const struct ATTRIB *attr)
91610 +       return Add2Ptr(attr, le16_to_cpu(attr->name_off));
91613 +static inline u64 attr_svcn(const struct ATTRIB *attr)
91615 +       return attr->non_res ? le64_to_cpu(attr->nres.svcn) : 0;
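attr_size() and attr_ondisk_size() above give the logical and physical views of the same attribute; for compressed or sparse data the two diverge. A small usage sketch with a hypothetical helper name:

/* Bytes saved by compression/sparseness (0 for plain attributes). */
static u64 attr_saved_bytes(const struct ATTRIB *attr)
{
        u64 logical = attr_size(attr);          /* data_size */
        u64 ondisk = attr_ondisk_size(attr);    /* total/alloc size */

        return logical > ondisk ? logical - ondisk : 0;
}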
91618 +/* the size of resident attribute by its resident size */
91619 +#define BYTES_PER_RESIDENT(b) (0x18 + (b))
91621 +static_assert(sizeof(struct ATTRIB) == 0x48);
91622 +static_assert(sizeof(((struct ATTRIB *)NULL)->res) == 0x08);
91623 +static_assert(sizeof(((struct ATTRIB *)NULL)->nres) == 0x38);
91625 +static inline void *resident_data_ex(const struct ATTRIB *attr, u32 datasize)
91627 +       u32 asize, rsize;
91628 +       u16 off;
91630 +       if (attr->non_res)
91631 +               return NULL;
91633 +       asize = le32_to_cpu(attr->size);
91634 +       off = le16_to_cpu(attr->res.data_off);
91636 +       if (asize < datasize + off)
91637 +               return NULL;
91639 +       rsize = le32_to_cpu(attr->res.data_size);
91640 +       if (rsize < datasize)
91641 +               return NULL;
91643 +       return Add2Ptr(attr, off);
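resident_data_ex() is the bounds-checked accessor: it returns NULL unless 'datasize' bytes fit inside both the attribute record and its declared data_size. A usage sketch with a hypothetical caller (struct ATTR_STD_INFO is defined a little further below):

/* Safely view a resident attribute's payload as standard info. */
static const struct ATTR_STD_INFO *std_info_of(const struct ATTRIB *attr)
{
        return resident_data_ex(attr, sizeof(struct ATTR_STD_INFO));
}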
91646 +static inline void *resident_data(const struct ATTRIB *attr)
91648 +       return Add2Ptr(attr, le16_to_cpu(attr->res.data_off));
91651 +static inline void *attr_run(const struct ATTRIB *attr)
91653 +       return Add2Ptr(attr, le16_to_cpu(attr->nres.run_off));
91656 +/* Standard information attribute (0x10) */
91657 +struct ATTR_STD_INFO {
91658 +       __le64 cr_time;         // 0x00: File creation time
91659 +       __le64 m_time;          // 0x08: File modification time
91660 +       __le64 c_time;          // 0x10: Last time any attribute was modified
91661 +       __le64 a_time;          // 0x18: File last access time
91662 +       enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more
91663 +       __le32 max_ver_num;     // 0x24: Maximum Number of Versions
91664 +       __le32 ver_num;         // 0x28: Version Number
91665 +       __le32 class_id;        // 0x2C: Class Id from bidirectional Class Id index
91668 +static_assert(sizeof(struct ATTR_STD_INFO) == 0x30);
91670 +#define SECURITY_ID_INVALID 0x00000000
91671 +#define SECURITY_ID_FIRST 0x00000100
91673 +struct ATTR_STD_INFO5 {
91674 +       __le64 cr_time;         // 0x00: File creation time
91675 +       __le64 m_time;          // 0x08: File modification time
91676 +       __le64 c_time;          // 0x10: Last time any attribute was modified
91677 +       __le64 a_time;          // 0x18: File last access time
91678 +       enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more
91679 +       __le32 max_ver_num;     // 0x24: Maximum Number of Versions
91680 +       __le32 ver_num;         // 0x28: Version Number
91681 +       __le32 class_id;        // 0x2C: Class Id from bidirectional Class Id index
91683 +       __le32 owner_id;        // 0x30: Owner Id of the user owning the file.
91684 +       __le32 security_id;     // 0x34: The Security Id is a key in the $SII Index and $SDS
91685 +       __le64 quota_charge;    // 0x38:
91686 +       __le64 usn;             // 0x40: Last Update Sequence Number of the file. This is a direct
91687 +                               // index into the file $UsnJrnl. If zero, the USN Journal is
91688 +                               // disabled.
91691 +static_assert(sizeof(struct ATTR_STD_INFO5) == 0x48);
91693 +/* attribute list entry structure (0x20) */
91694 +struct ATTR_LIST_ENTRY {
91695 +       enum ATTR_TYPE type;    // 0x00: The type of attribute
91696 +       __le16 size;            // 0x04: The size of this record
91697 +       u8 name_len;            // 0x06: The length of attribute name
91698 +       u8 name_off;            // 0x07: The offset to attribute name
91699 +       __le64 vcn;             // 0x08: Starting VCN of this attribute
91700 +       struct MFT_REF ref;     // 0x10: MFT record number with attribute
91701 +       __le16 id;              // 0x18: struct ATTRIB ID
91702 +       __le16 name[3];         // 0x1A: Just to align. To get the real name, use name_off
91704 +}; // sizeof(0x20)
91706 +static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20);
91708 +static inline u32 le_size(u8 name_len)
91710 +       return QuadAlign(offsetof(struct ATTR_LIST_ENTRY, name) +
91711 +                        name_len * sizeof(short));
91714 +/* returns 0 if 'attr' has the same type and name */
91715 +static inline int le_cmp(const struct ATTR_LIST_ENTRY *le,
91716 +                        const struct ATTRIB *attr)
91718 +       return le->type != attr->type || le->name_len != attr->name_len ||
91719 +              (le->name_len &&
91720 +               memcmp(Add2Ptr(le, le->name_off),
91721 +                      Add2Ptr(attr, le16_to_cpu(attr->name_off)),
91722 +                      le->name_len * sizeof(short)));
91725 +static inline __le16 const *le_name(const struct ATTR_LIST_ENTRY *le)
91727 +       return Add2Ptr(le, le->name_off);
91730 +/* File name types (the field type in struct ATTR_FILE_NAME ) */
91731 +#define FILE_NAME_POSIX   0
91732 +#define FILE_NAME_UNICODE 1
91733 +#define FILE_NAME_DOS    2
91734 +#define FILE_NAME_UNICODE_AND_DOS (FILE_NAME_DOS | FILE_NAME_UNICODE)
91736 +/* Filename attribute structure (0x30) */
91737 +struct NTFS_DUP_INFO {
91738 +       __le64 cr_time;         // 0x00: File creation time
91739 +       __le64 m_time;          // 0x08: File modification time
91740 +       __le64 c_time;          // 0x10: Last time any attribute was modified
91741 +       __le64 a_time;          // 0x18: File last access time
91742 +       __le64 alloc_size;      // 0x20: Data attribute allocated size, multiple of cluster size
91743 +       __le64 data_size;       // 0x28: Data attribute size <= alloc_size
91744 +       enum FILE_ATTRIBUTE fa; // 0x30: Standard DOS attributes & more
91745 +       __le16 ea_size;         // 0x34: Packed EAs
91746 +       __le16 reparse;         // 0x36: Used by Reparse
91748 +}; // 0x38
91750 +struct ATTR_FILE_NAME {
91751 +       struct MFT_REF home;    // 0x00: MFT record for directory
91752 +       struct NTFS_DUP_INFO dup;// 0x08
91753 +       u8 name_len;            // 0x40: File name length in words
91754 +       u8 type;                // 0x41: File name type
91755 +       __le16 name[];          // 0x42: File name
91758 +static_assert(sizeof(((struct ATTR_FILE_NAME *)NULL)->dup) == 0x38);
91759 +static_assert(offsetof(struct ATTR_FILE_NAME, name) == 0x42);
91760 +#define SIZEOF_ATTRIBUTE_FILENAME     0x44
91761 +#define SIZEOF_ATTRIBUTE_FILENAME_MAX (0x42 + 255 * 2)
91763 +static inline struct ATTRIB *attr_from_name(struct ATTR_FILE_NAME *fname)
91765 +       return (struct ATTRIB *)((char *)fname - SIZEOF_RESIDENT);
91768 +static inline u16 fname_full_size(const struct ATTR_FILE_NAME *fname)
91770 +       // don't return struct_size(fname, name, fname->name_len);
91771 +       return offsetof(struct ATTR_FILE_NAME, name) +
91772 +              fname->name_len * sizeof(short);
91775 +static inline u8 paired_name(u8 type)
91777 +       if (type == FILE_NAME_UNICODE)
91778 +               return FILE_NAME_DOS;
91779 +       if (type == FILE_NAME_DOS)
91780 +               return FILE_NAME_UNICODE;
91781 +       return FILE_NAME_POSIX;
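paired_name() encodes the DOS/Win32 pairing: NTFS may store a short FILE_NAME_DOS and a long FILE_NAME_UNICODE entry for the same record, and removing one (as ntfs_rename() in namei.c above does) requires finding its partner. A tiny illustration with a hypothetical helper:

/* True if a name of this type comes with a paired second name. */
static bool name_has_partner(u8 type)
{
        /* POSIX and combined UNICODE_AND_DOS names stand alone. */
        return paired_name(type) != FILE_NAME_POSIX;
}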
91784 +/* Index entry defines ( the field flags in NtfsDirEntry ) */
91785 +#define NTFS_IE_HAS_SUBNODES   cpu_to_le16(1)
91786 +#define NTFS_IE_LAST           cpu_to_le16(2)
91788 +/* Directory entry structure */
91789 +struct NTFS_DE {
91790 +       union {
91791 +               struct MFT_REF ref; // 0x00: MFT record number with this file
91792 +               struct {
91793 +                       __le16 data_off;  // 0x00:
91794 +                       __le16 data_size; // 0x02:
91795 +                       __le32 res;       // 0x04: must be 0
91796 +               } view;
91797 +       };
91798 +       __le16 size;            // 0x08: The size of this entry
91799 +       __le16 key_size;        // 0x0A: Key size: file name length in bytes + 0x42
91800 +       __le16 flags;           // 0x0C: Entry flags: NTFS_IE_XXX
91801 +       __le16 res;             // 0x0E:
91803 +       // Here any indexed attribute can be placed
91804 +       // One of them is:
91805 +       // struct ATTR_FILE_NAME AttrFileName;
91806 +       //
91808 +       // The last 8 bytes of this structure contain
91809 +       // the VBN of the subnode.
91810 +       // !!! Note !!!
91811 +       // This field is present only if (flags & NTFS_IE_HAS_SUBNODES)
91812 +       // __le64 vbn;
91815 +static_assert(sizeof(struct NTFS_DE) == 0x10);
91817 +static inline void de_set_vbn_le(struct NTFS_DE *e, __le64 vcn)
91819 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
91821 +       *v = vcn;
91824 +static inline void de_set_vbn(struct NTFS_DE *e, CLST vcn)
91826 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
91828 +       *v = cpu_to_le64(vcn);
91831 +static inline __le64 de_get_vbn_le(const struct NTFS_DE *e)
91833 +       return *(__le64 *)Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
91836 +static inline CLST de_get_vbn(const struct NTFS_DE *e)
91838 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
91840 +       return le64_to_cpu(*v);
91843 +static inline struct NTFS_DE *de_get_next(const struct NTFS_DE *e)
91845 +       return Add2Ptr(e, le16_to_cpu(e->size));
91848 +static inline struct ATTR_FILE_NAME *de_get_fname(const struct NTFS_DE *e)
91850 +       return le16_to_cpu(e->key_size) >= SIZEOF_ATTRIBUTE_FILENAME ?
91851 +                      Add2Ptr(e, sizeof(struct NTFS_DE)) :
91852 +                      NULL;
91855 +static inline bool de_is_last(const struct NTFS_DE *e)
91857 +       return e->flags & NTFS_IE_LAST;
91860 +static inline bool de_has_vcn(const struct NTFS_DE *e)
91862 +       return e->flags & NTFS_IE_HAS_SUBNODES;
91865 +static inline bool de_has_vcn_ex(const struct NTFS_DE *e)
91867 +       return (e->flags & NTFS_IE_HAS_SUBNODES) &&
91868 +              (u64)(-1) != *((u64 *)Add2Ptr(e, le16_to_cpu(e->size) -
91869 +                                                       sizeof(__le64)));
91872 +#define MAX_BYTES_PER_NAME_ENTRY                                              \
91873 +       QuadAlign(sizeof(struct NTFS_DE) +                                     \
91874 +                 offsetof(struct ATTR_FILE_NAME, name) +                      \
91875 +                 NTFS_NAME_LEN * sizeof(short))
91877 +struct INDEX_HDR {
91878 +       __le32 de_off;  // 0x00: The offset from the start of this structure
91879 +                       // to the first NTFS_DE
91880 +       __le32 used;    // 0x04: The size of this structure plus all
91881 +                       // entries (quad-word aligned)
91882 +       __le32 total;   // 0x08: The allocated size for this structure plus all entries
91883 +       u8 flags;       // 0x0C: 0x00 = Small directory, 0x01 = Large directory
91884 +       u8 res[3];
91886 +       //
91887 +       // de_off + used <= total
91888 +       //
91891 +static_assert(sizeof(struct INDEX_HDR) == 0x10);
91893 +static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr)
91895 +       u32 de_off = le32_to_cpu(hdr->de_off);
91896 +       u32 used = le32_to_cpu(hdr->used);
91897 +       struct NTFS_DE *e = Add2Ptr(hdr, de_off);
91898 +       u16 esize;
91900 +       if (de_off >= used || de_off >= le32_to_cpu(hdr->total))
91901 +               return NULL;
91903 +       esize = le16_to_cpu(e->size);
91904 +       if (esize < sizeof(struct NTFS_DE) || de_off + esize > used)
91905 +               return NULL;
91907 +       return e;
91910 +static inline struct NTFS_DE *hdr_next_de(const struct INDEX_HDR *hdr,
91911 +                                         const struct NTFS_DE *e)
91913 +       size_t off = PtrOffset(hdr, e);
91914 +       u32 used = le32_to_cpu(hdr->used);
91915 +       u16 esize;
91917 +       if (off >= used)
91918 +               return NULL;
91920 +       esize = le16_to_cpu(e->size);
91922 +       if (esize < sizeof(struct NTFS_DE) ||
91923 +           off + esize + sizeof(struct NTFS_DE) > used)
91924 +               return NULL;
91926 +       return Add2Ptr(e, esize);
91929 +static inline bool hdr_has_subnode(const struct INDEX_HDR *hdr)
91931 +       return hdr->flags & 1;
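With hdr_first_de()/hdr_next_de() doing the bounds checks, enumerating an index header reduces to a simple loop; the chain ends at the entry flagged NTFS_IE_LAST, which carries no key. A sketch with a hypothetical walker:

/* Visit every real (non-sentinel) entry in an index header. */
static void hdr_for_each_de(const struct INDEX_HDR *hdr,
                            void (*visit)(const struct NTFS_DE *e))
{
        const struct NTFS_DE *e;

        for (e = hdr_first_de(hdr); e; e = hdr_next_de(hdr, e)) {
                if (de_is_last(e))
                        break;  /* sentinel: no key, maybe a subnode VBN */
                visit(e);
        }
}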
91934 +struct INDEX_BUFFER {
91935 +       struct NTFS_RECORD_HEADER rhdr; // 'INDX'
91936 +       __le64 vbn; // 0x10: vcn if index >= cluster, or vsn if index < cluster
91937 +       struct INDEX_HDR ihdr; // 0x18:
91940 +static_assert(sizeof(struct INDEX_BUFFER) == 0x28);
91942 +static inline bool ib_is_empty(const struct INDEX_BUFFER *ib)
91944 +       const struct NTFS_DE *first = hdr_first_de(&ib->ihdr);
91946 +       return !first || de_is_last(first);
91949 +static inline bool ib_is_leaf(const struct INDEX_BUFFER *ib)
91951 +       return !(ib->ihdr.flags & 1);
91954 +/* Index root structure ( 0x90 ) */
91955 +enum COLLATION_RULE {
91956 +       NTFS_COLLATION_TYPE_BINARY      = cpu_to_le32(0),
91957 +       // $I30
91958 +       NTFS_COLLATION_TYPE_FILENAME    = cpu_to_le32(0x01),
91959 +       // $SII of $Secure and $Q of Quota
91960 +       NTFS_COLLATION_TYPE_UINT        = cpu_to_le32(0x10),
91961 +       // $O of Quota
91962 +       NTFS_COLLATION_TYPE_SID         = cpu_to_le32(0x11),
91963 +       // $SDH of $Secure
91964 +       NTFS_COLLATION_TYPE_SECURITY_HASH = cpu_to_le32(0x12),
91965 +       // $O of ObjId and "$R" for Reparse
91966 +       NTFS_COLLATION_TYPE_UINTS       = cpu_to_le32(0x13)
91969 +static_assert(sizeof(enum COLLATION_RULE) == 4);
91972 +struct INDEX_ROOT {
91973 +       enum ATTR_TYPE type;    // 0x00: The type of attribute to index on
91974 +       enum COLLATION_RULE rule; // 0x04: The rule
91975 +       __le32 index_block_size;// 0x08: The size of index record
91976 +       u8 index_block_clst;    // 0x0C: The number of clusters or sectors per index
91977 +       u8 res[3];
91978 +       struct INDEX_HDR ihdr;  // 0x10:
91981 +static_assert(sizeof(struct INDEX_ROOT) == 0x20);
91982 +static_assert(offsetof(struct INDEX_ROOT, ihdr) == 0x10);
91984 +#define VOLUME_FLAG_DIRTY          cpu_to_le16(0x0001)
91985 +#define VOLUME_FLAG_RESIZE_LOG_FILE cpu_to_le16(0x0002)
91987 +struct VOLUME_INFO {
91988 +       __le64 res1;    // 0x00
91989 +       u8 major_ver;   // 0x08: NTFS major version number (before .)
91990 +       u8 minor_ver;   // 0x09: NTFS minor version number (after .)
91991 +       __le16 flags;   // 0x0A: Volume flags, see VOLUME_FLAG_XXX
91993 +}; // sizeof=0xC
91995 +#define SIZEOF_ATTRIBUTE_VOLUME_INFO 0xc
91997 +#define NTFS_LABEL_MAX_LENGTH          (0x100 / sizeof(short))
91998 +#define NTFS_ATTR_INDEXABLE            cpu_to_le32(0x00000002)
91999 +#define NTFS_ATTR_DUPALLOWED           cpu_to_le32(0x00000004)
92000 +#define NTFS_ATTR_MUST_BE_INDEXED      cpu_to_le32(0x00000010)
92001 +#define NTFS_ATTR_MUST_BE_NAMED                cpu_to_le32(0x00000020)
92002 +#define NTFS_ATTR_MUST_BE_RESIDENT     cpu_to_le32(0x00000040)
92003 +#define NTFS_ATTR_LOG_ALWAYS           cpu_to_le32(0x00000080)
92005 +/* $AttrDef file entry */
92006 +struct ATTR_DEF_ENTRY {
92007 +       __le16 name[0x40];      // 0x00: Attr name
92008 +       enum ATTR_TYPE type;    // 0x80: struct ATTRIB type
92009 +       __le32 res;             // 0x84:
92010 +       enum COLLATION_RULE rule; // 0x88:
92011 +       __le32 flags;           // 0x8C: NTFS_ATTR_XXX (see above)
92012 +       __le64 min_sz;          // 0x90: Minimum attribute data size
92013 +       __le64 max_sz;          // 0x98: Maximum attribute data size
92016 +static_assert(sizeof(struct ATTR_DEF_ENTRY) == 0xa0);
92018 +/* Object ID (0x40) */
92019 +struct OBJECT_ID {
92020 +       struct GUID ObjId;      // 0x00: Unique Id assigned to file
92021 +       struct GUID BirthVolumeId;// 0x10: Birth Volume Id is the Object Id of the Volume on
92022 +                               // which the Object Id was allocated. It never changes
92023 +       struct GUID BirthObjectId; // 0x20: Birth Object Id is the first Object Id that was
92024 +                               // ever assigned to this MFT Record. I.e. If the Object Id
92025 +                               // is changed for some reason, this field will reflect the
92026 +                               // original value of the Object Id.
92027 +       struct GUID DomainId;   // 0x30: Domain Id is currently unused but it is intended to be
92028 +                               // used in a network environment where the local machine is
92029 +                               // part of a Windows 2000 Domain. This may be used in a Windows
92030 +                               // 2000 Advanced Server managed domain.
92033 +static_assert(sizeof(struct OBJECT_ID) == 0x40);
92035 +/* O Directory entry structure ( rule = 0x13 ) */
92036 +struct NTFS_DE_O {
92037 +       struct NTFS_DE de;
92038 +       struct GUID ObjId;      // 0x10: Unique Id assigned to file
92039 +       struct MFT_REF ref;     // 0x20: MFT record number with this file
92040 +       struct GUID BirthVolumeId; // 0x28: Birth Volume Id is the Object Id of the Volume on
92041 +                               // which the Object Id was allocated. It never changes
92042 +       struct GUID BirthObjectId; // 0x38: Birth Object Id is the first Object Id that was
92043 +                               // ever assigned to this MFT Record. I.e. If the Object Id
92044 +                               // is changed for some reason, this field will reflect the
92045 +                               // original value of the Object Id.
92046 +                               // This field is valid if data_size == 0x48
92047 +       struct GUID BirthDomainId; // 0x48: Domain Id is currently unused but it is intended
92048 +                               // to be used in a network environment where the local
92049 +                               // machine is part of a Windows 2000 Domain. This may be
92050 +                               // used in a Windows 2000 Advanced Server managed domain.
92053 +static_assert(sizeof(struct NTFS_DE_O) == 0x58);
92055 +#define NTFS_OBJECT_ENTRY_DATA_SIZE1                                          \
92056 +       0x38 // struct NTFS_DE_O.BirthDomainId is not used
92057 +#define NTFS_OBJECT_ENTRY_DATA_SIZE2                                          \
92058 +       0x48 // struct NTFS_DE_O.BirthDomainId is used
92060 +/* Q Directory entry structure ( rule = 0x11 ) */
92061 +struct NTFS_DE_Q {
92062 +       struct NTFS_DE de;
92063 +       __le32 owner_id;        // 0x10: Unique Id assigned to file
92064 +       __le32 Version;         // 0x14: 0x02
92065 +       __le32 flags2;          // 0x18: Quota flags, see above
92066 +       __le64 BytesUsed;       // 0x1C:
92067 +       __le64 ChangeTime;      // 0x24:
92068 +       __le64 WarningLimit;    // 0x2C:
92069 +       __le64 HardLimit;       // 0x34:
92070 +       __le64 ExceededTime;    // 0x3C:
92072 +       // SID is placed here
92073 +}; // sizeof() = 0x44
92075 +#define SIZEOF_NTFS_DE_Q 0x44
92077 +#define SecurityDescriptorsBlockSize 0x40000 // 256K
92078 +#define SecurityDescriptorMaxSize    0x20000 // 128K
92079 +#define Log2OfSecurityDescriptorsBlockSize 18
92081 +struct SECURITY_KEY {
92082 +       __le32 hash; //  Hash value for descriptor
92083 +       __le32 sec_id; //  Security Id (guaranteed unique)
92086 +/* Security descriptors (the content of $Secure::SDS data stream) */
92087 +struct SECURITY_HDR {
92088 +       struct SECURITY_KEY key;        // 0x00: Security Key
92089 +       __le64 off;                     // 0x08: Offset of this entry in the file
92090 +       __le32 size;                    // 0x10: Size of this entry, 8 byte aligned
92091 +       //
92092 +       // Security descriptor itself is placed here
92093 +       // Total size is 16 byte aligned
92094 +       //
92095 +} __packed;
92097 +#define SIZEOF_SECURITY_HDR 0x14
92099 +/* SII Directory entry structure */
92100 +struct NTFS_DE_SII {
92101 +       struct NTFS_DE de;
92102 +       __le32 sec_id;                  // 0x10: Key: sizeof(security_id) = key_size
92103 +       struct SECURITY_HDR sec_hdr;    // 0x14:
92104 +} __packed;
92106 +#define SIZEOF_SII_DIRENTRY 0x28
92108 +/* SDH Directory entry structure */
92109 +struct NTFS_DE_SDH {
92110 +       struct NTFS_DE de;
92111 +       struct SECURITY_KEY key;        // 0x10: Key
92112 +       struct SECURITY_HDR sec_hdr;    // 0x18: Data
92113 +       __le16 magic[2];                // 0x2C: 0x00490049 "I I"
92116 +#define SIZEOF_SDH_DIRENTRY 0x30
92118 +struct REPARSE_KEY {
92119 +       __le32 ReparseTag;              // 0x00: Reparse Tag
92120 +       struct MFT_REF ref;             // 0x04: MFT record number with this file
92121 +}; // sizeof() = 0x0C
92123 +static_assert(offsetof(struct REPARSE_KEY, ref) == 0x04);
92124 +#define SIZEOF_REPARSE_KEY 0x0C
92126 +/* Reparse Directory entry structure */
92127 +struct NTFS_DE_R {
92128 +       struct NTFS_DE de;
92129 +       struct REPARSE_KEY key;         // 0x10: Reparse Key
92130 +       u32 zero;                       // 0x1c
92131 +}; // sizeof() = 0x20
92133 +static_assert(sizeof(struct NTFS_DE_R) == 0x20);
92135 +/* CompressReparseBuffer.WofVersion */
92136 +#define WOF_CURRENT_VERSION            cpu_to_le32(1)
92137 +/* CompressReparseBuffer.WofProvider */
92138 +#define WOF_PROVIDER_WIM               cpu_to_le32(1)
92139 +/* CompressReparseBuffer.WofProvider */
92140 +#define WOF_PROVIDER_SYSTEM            cpu_to_le32(2)
92141 +/* CompressReparseBuffer.ProviderVer */
92142 +#define WOF_PROVIDER_CURRENT_VERSION   cpu_to_le32(1)
92144 +#define WOF_COMPRESSION_XPRESS4K       cpu_to_le32(0) // 4k
92145 +#define WOF_COMPRESSION_LZX32K         cpu_to_le32(1) // 32k
92146 +#define WOF_COMPRESSION_XPRESS8K       cpu_to_le32(2) // 8k
92147 +#define WOF_COMPRESSION_XPRESS16K      cpu_to_le32(3) // 16k
92150 + * ATTR_REPARSE (0xC0)
92151 + *
92152 + * The reparse struct GUID structure is used by all 3rd party layered drivers to
92153 + * store data in a reparse point. For non-Microsoft tags, The struct GUID field
92154 + * cannot be GUID_NULL.
92155 + * The constraints on reparse tags are defined below.
92156 + * Microsoft tags can also be used with this format of the reparse point buffer.
92157 + */
92158 +struct REPARSE_POINT {
92159 +       __le32 ReparseTag;      // 0x00:
92160 +       __le16 ReparseDataLength;// 0x04:
92161 +       __le16 Reserved;
92163 +       struct GUID Guid;       // 0x08:
92165 +       //
92166 +       // Here GenericReparseBuffer is placed
92167 +       //
92170 +static_assert(sizeof(struct REPARSE_POINT) == 0x18);
92173 +// Maximum allowed size of the reparse data.
92175 +#define MAXIMUM_REPARSE_DATA_BUFFER_SIZE       (16 * 1024)
92178 +// The value of the following constant needs to satisfy the following
92179 +// conditions:
92180 +//  (1) Be at least as large as the largest of the reserved tags.
92181 +//  (2) Be strictly smaller than all the tags in use.
92183 +#define IO_REPARSE_TAG_RESERVED_RANGE          1
92186 +// The reparse tags are a ULONG. The 32 bits are laid out as follows:
92188 +//   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
92189 +//   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
92190 +//  +-+-+-+-+-----------------------+-------------------------------+
92191 +//  |M|R|N|R|     Reserved bits     |       Reparse Tag Value       |
92192 +//  +-+-+-+-+-----------------------+-------------------------------+
92194 +// M is the Microsoft bit. When set to 1, it denotes a tag owned by Microsoft.
92195 +//   All ISVs must use a tag with a 0 in this position.
92196 +//   Note: If a Microsoft tag is used by non-Microsoft software, the
92197 +//   behavior is not defined.
92199 +// R is reserved.  Must be zero for non-Microsoft tags.
92201 +// N is name surrogate. When set to 1, the file represents another named
92202 +//   entity in the system.
92204 +// The M and N bits are OR-able.
92205 +// The following macros check for the M and N bit values:
92209 +// Macro to determine whether a reparse point tag corresponds to a tag
92210 +// owned by Microsoft.
92212 +#define IsReparseTagMicrosoft(_tag)    (((_tag)&IO_REPARSE_TAG_MICROSOFT))
92215 +// Macro to determine whether a reparse point tag is a name surrogate
92217 +#define IsReparseTagNameSurrogate(_tag)        (((_tag)&IO_REPARSE_TAG_NAME_SURROGATE))
92220 +// The following constant represents the bits that are valid to use in
92221 +// reparse tags.
92223 +#define IO_REPARSE_TAG_VALID_VALUES    0xF000FFFF
92226 +// Macro to determine whether a reparse tag is a valid tag.
92228 +#define IsReparseTagValid(_tag)                                                       \
92229 +       (!((_tag) & ~IO_REPARSE_TAG_VALID_VALUES) &&                           \
92230 +        ((_tag) > IO_REPARSE_TAG_RESERVED_RANGE))
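For example, IO_REPARSE_TAG_SYMLINK (0xA000000C) has both the M and N bits set. A sketch combining the two checks (hypothetical helper, not part of this patch):

	static inline bool is_ms_name_surrogate(__le32 tag)
	{
		return IsReparseTagMicrosoft(tag) &&
		       IsReparseTagNameSurrogate(tag);
	}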
92233 +// Microsoft tags for reparse points.
92236 +enum IO_REPARSE_TAG {
92237 +       IO_REPARSE_TAG_SYMBOLIC_LINK    = cpu_to_le32(0),
92238 +       IO_REPARSE_TAG_NAME_SURROGATE   = cpu_to_le32(0x20000000),
92239 +       IO_REPARSE_TAG_MICROSOFT        = cpu_to_le32(0x80000000),
92240 +       IO_REPARSE_TAG_MOUNT_POINT      = cpu_to_le32(0xA0000003),
92241 +       IO_REPARSE_TAG_SYMLINK          = cpu_to_le32(0xA000000C),
92242 +       IO_REPARSE_TAG_HSM              = cpu_to_le32(0xC0000004),
92243 +       IO_REPARSE_TAG_SIS              = cpu_to_le32(0x80000007),
92244 +       IO_REPARSE_TAG_DEDUP            = cpu_to_le32(0x80000013),
92245 +       IO_REPARSE_TAG_COMPRESS         = cpu_to_le32(0x80000017),
92247 +       //
92248 +       // The reparse tag 0x80000008 is reserved for Microsoft internal use
92249 +       // (may be published in the future)
92250 +       //
92252 +       //
92253 +       // Microsoft reparse tag reserved for DFS
92254 +       //
92255 +       IO_REPARSE_TAG_DFS              = cpu_to_le32(0x8000000A),
92257 +       //
92258 +       // Microsoft reparse tag reserved for the file system filter manager
92259 +       //
92260 +       IO_REPARSE_TAG_FILTER_MANAGER   = cpu_to_le32(0x8000000B),
92262 +       //
92263 +       // Non-Microsoft tags for reparse points
92264 +       //
92266 +       //
92267 +       // Tag allocated to CONGRUENT, May 2000. Used by IFSTEST
92268 +       //
92269 +       IO_REPARSE_TAG_IFSTEST_CONGRUENT = cpu_to_le32(0x00000009),
92271 +       //
92272 +       // Tag allocated to ARKIVIO
92273 +       //
92274 +       IO_REPARSE_TAG_ARKIVIO          = cpu_to_le32(0x0000000C),
92276 +       //
92277 +       //  Tag allocated to SOLUTIONSOFT
92278 +       //
92279 +       IO_REPARSE_TAG_SOLUTIONSOFT     = cpu_to_le32(0x2000000D),
92281 +       //
92282 +       //  Tag allocated to COMMVAULT
92283 +       //
92284 +       IO_REPARSE_TAG_COMMVAULT        = cpu_to_le32(0x0000000E),
92286 +       // OneDrive??
92287 +       IO_REPARSE_TAG_CLOUD            = cpu_to_le32(0x9000001A),
92288 +       IO_REPARSE_TAG_CLOUD_1          = cpu_to_le32(0x9000101A),
92289 +       IO_REPARSE_TAG_CLOUD_2          = cpu_to_le32(0x9000201A),
92290 +       IO_REPARSE_TAG_CLOUD_3          = cpu_to_le32(0x9000301A),
92291 +       IO_REPARSE_TAG_CLOUD_4          = cpu_to_le32(0x9000401A),
92292 +       IO_REPARSE_TAG_CLOUD_5          = cpu_to_le32(0x9000501A),
92293 +       IO_REPARSE_TAG_CLOUD_6          = cpu_to_le32(0x9000601A),
92294 +       IO_REPARSE_TAG_CLOUD_7          = cpu_to_le32(0x9000701A),
92295 +       IO_REPARSE_TAG_CLOUD_8          = cpu_to_le32(0x9000801A),
92296 +       IO_REPARSE_TAG_CLOUD_9          = cpu_to_le32(0x9000901A),
92297 +       IO_REPARSE_TAG_CLOUD_A          = cpu_to_le32(0x9000A01A),
92298 +       IO_REPARSE_TAG_CLOUD_B          = cpu_to_le32(0x9000B01A),
92299 +       IO_REPARSE_TAG_CLOUD_C          = cpu_to_le32(0x9000C01A),
92300 +       IO_REPARSE_TAG_CLOUD_D          = cpu_to_le32(0x9000D01A),
92301 +       IO_REPARSE_TAG_CLOUD_E          = cpu_to_le32(0x9000E01A),
92302 +       IO_REPARSE_TAG_CLOUD_F          = cpu_to_le32(0x9000F01A),
92306 +#define SYMLINK_FLAG_RELATIVE          1
92308 +/* Microsoft reparse buffer. (see DDK for details) */
92309 +struct REPARSE_DATA_BUFFER {
92310 +       __le32 ReparseTag;              // 0x00:
92311 +       __le16 ReparseDataLength;       // 0x04:
92312 +       __le16 Reserved;
92314 +       union {
92315 +               // If ReparseTag == 0xA0000003 (IO_REPARSE_TAG_MOUNT_POINT)
92316 +               struct {
92317 +                       __le16 SubstituteNameOffset; // 0x08
92318 +                       __le16 SubstituteNameLength; // 0x0A
92319 +                       __le16 PrintNameOffset;      // 0x0C
92320 +                       __le16 PrintNameLength;      // 0x0E
92321 +                       __le16 PathBuffer[];         // 0x10
92322 +               } MountPointReparseBuffer;
92324 +               // If ReparseTag == 0xA000000C (IO_REPARSE_TAG_SYMLINK)
92325 +               // https://msdn.microsoft.com/en-us/library/cc232006.aspx
92326 +               struct {
92327 +                       __le16 SubstituteNameOffset; // 0x08
92328 +                       __le16 SubstituteNameLength; // 0x0A
92329 +                       __le16 PrintNameOffset;      // 0x0C
92330 +                       __le16 PrintNameLength;      // 0x0E
92331 +                       // Flags: 0 - absolute path, 1 - relative path (SYMLINK_FLAG_RELATIVE)
92332 +                       __le32 Flags;                // 0x10
92333 +                       __le16 PathBuffer[];         // 0x14
92334 +               } SymbolicLinkReparseBuffer;
92336 +               // If ReparseTag == 0x80000017U
92337 +               struct {
92338 +                       __le32 WofVersion;  // 0x08 == 1
92339 +                       /* 1 - WIM backing provider ("WIMBoot"),
92340 +                        * 2 - System compressed file provider
92341 +                        */
92342 +                       __le32 WofProvider; // 0x0C
92343 +                       __le32 ProviderVer; // 0x10: == WOF_PROVIDER_CURRENT_VERSION (1)
92344 +                       __le32 CompressionFormat; // 0x14: 0, 1, 2, 3. See WOF_COMPRESSION_XXX
92345 +               } CompressReparseBuffer;
92347 +               struct {
92348 +                       u8 DataBuffer[1];   // 0x08
92349 +               } GenericReparseBuffer;
92350 +       };
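A sketch of how the variable-length part is addressed (hypothetical helper, not part of this patch): the name offsets are byte offsets relative to PathBuffer, and the lengths are byte counts of UTF-16 data:

	static inline const __le16 *
	symlink_print_name(const struct REPARSE_DATA_BUFFER *rp, u16 *bytes)
	{
		u16 off = le16_to_cpu(rp->SymbolicLinkReparseBuffer.PrintNameOffset);

		*bytes = le16_to_cpu(rp->SymbolicLinkReparseBuffer.PrintNameLength);
		return (const __le16 *)((const u8 *)rp->SymbolicLinkReparseBuffer.PathBuffer + off);
	}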
92353 +/* ATTR_EA_INFO (0xD0) */
92355 +#define FILE_NEED_EA 0x80 // See ntifs.h
92356 +/* FILE_NEED_EA indicates that the file to which the EA belongs cannot be
92357 + * interpreted without understanding the associated extended attributes.
92358 + */
92359 +struct EA_INFO {
92360 +       __le16 size_pack;       // 0x00: Size of buffer to hold in packed form
92361 +       __le16 count;           // 0x02: Count of EA's with FILE_NEED_EA bit set
92362 +       __le32 size;            // 0x04: Size of buffer to hold in unpacked form
92365 +static_assert(sizeof(struct EA_INFO) == 8);
92367 +/* ATTR_EA (0xE0) */
92368 +struct EA_FULL {
92369 +       __le32 size;            // 0x00: (not in packed)
92370 +       u8 flags;               // 0x04
92371 +       u8 name_len;            // 0x05
92372 +       __le16 elength;         // 0x06
92373 +       u8 name[];              // 0x08
92376 +static_assert(offsetof(struct EA_FULL, name) == 8);
92378 +#define ACL_REVISION   2
92379 +#define ACL_REVISION_DS 4
92381 +#define SE_SELF_RELATIVE cpu_to_le16(0x8000)
92383 +struct SECURITY_DESCRIPTOR_RELATIVE {
92384 +       u8 Revision;
92385 +       u8 Sbz1;
92386 +       __le16 Control;
92387 +       __le32 Owner;
92388 +       __le32 Group;
92389 +       __le32 Sacl;
92390 +       __le32 Dacl;
92392 +static_assert(sizeof(struct SECURITY_DESCRIPTOR_RELATIVE) == 0x14);
92394 +struct ACE_HEADER {
92395 +       u8 AceType;
92396 +       u8 AceFlags;
92397 +       __le16 AceSize;
92399 +static_assert(sizeof(struct ACE_HEADER) == 4);
92401 +struct ACL {
92402 +       u8 AclRevision;
92403 +       u8 Sbz1;
92404 +       __le16 AclSize;
92405 +       __le16 AceCount;
92406 +       __le16 Sbz2;
92408 +static_assert(sizeof(struct ACL) == 8);
92410 +struct SID {
92411 +       u8 Revision;
92412 +       u8 SubAuthorityCount;
92413 +       u8 IdentifierAuthority[6];
92414 +       __le32 SubAuthority[];
92416 +static_assert(offsetof(struct SID, SubAuthority) == 8);
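A SID is variable-length: 8 fixed bytes followed by SubAuthorityCount 32-bit sub-authorities. A minimal sketch of the on-disk size (hypothetical helper, not part of this patch):

	static inline size_t sid_size(const struct SID *sid)
	{
		return offsetof(struct SID, SubAuthority) +
		       sid->SubAuthorityCount * sizeof(__le32);
	}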
92418 +// clang-format on
92419 diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
92420 new file mode 100644
92421 index 000000000000..5e1dd628d3cc
92422 --- /dev/null
92423 +++ b/fs/ntfs3/ntfs_fs.h
92424 @@ -0,0 +1,1085 @@
92425 +/* SPDX-License-Identifier: GPL-2.0 */
92427 + *
92428 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
92429 + *
92430 + */
92432 +// clang-format off
92433 +#define MINUS_ONE_T                    ((size_t)(-1))
92434 +/* Biggest MFT / smallest cluster */
92435 +#define MAXIMUM_BYTES_PER_MFT          4096
92436 +#define NTFS_BLOCKS_PER_MFT_RECORD     (MAXIMUM_BYTES_PER_MFT / 512)
92438 +#define MAXIMUM_BYTES_PER_INDEX                4096
92439 +#define NTFS_BLOCKS_PER_INODE          (MAXIMUM_BYTES_PER_INDEX / 512)
92441 +/* NTFS-specific error code: fixup failed */
92442 +#define E_NTFS_FIXUP                   555
92443 +/* NTFS-specific error code: attribute must become nonresident */
92444 +#define E_NTFS_NONRESIDENT             556
92446 +/* sbi->flags */
92447 +#define NTFS_FLAGS_NODISCARD           0x00000001
92448 +/* Set when LogFile is replaying */
92449 +#define NTFS_FLAGS_LOG_REPLAYING       0x00000008
92450 +/* Set when the first MFT records changed and their copy must be updated in $MftMirr */
92451 +#define NTFS_FLAGS_MFTMIRR             0x00001000
92452 +#define NTFS_FLAGS_NEED_REPLAY         0x04000000
92455 +/* ni->ni_flags */
92457 + * Data attribute is external compressed (lzx/xpress)
92458 + * 1 - WOF_COMPRESSION_XPRESS4K
92459 + * 2 - WOF_COMPRESSION_XPRESS8K
92460 + * 3 - WOF_COMPRESSION_XPRESS16K
92461 + * 4 - WOF_COMPRESSION_LZX32K
92462 + */
92463 +#define NI_FLAG_COMPRESSED_MASK                0x0000000f
92464 +/* Data attribute is deduplicated */
92465 +#define NI_FLAG_DEDUPLICATED           0x00000010
92466 +#define NI_FLAG_EA                     0x00000020
92467 +#define NI_FLAG_DIR                    0x00000040
92468 +#define NI_FLAG_RESIDENT               0x00000080
92469 +#define NI_FLAG_UPDATE_PARENT          0x00000100
92470 +// clang-format on
92472 +struct ntfs_mount_options {
92473 +       struct nls_table *nls;
92475 +       kuid_t fs_uid;
92476 +       kgid_t fs_gid;
92477 +       u16 fs_fmask_inv;
92478 +       u16 fs_dmask_inv;
92480 +       unsigned uid : 1, /* uid was set */
92481 +               gid : 1, /* gid was set */
92482 +               fmask : 1, /* fmask was set */
92483 +               dmask : 1, /* dmask was set */
92484 +               sys_immutable : 1, /* immutable system files */
92485 +               discard : 1, /* issue discard requests on deletions */
92486 +               sparse : 1, /* create sparse files */
92487 +               showmeta : 1, /* show meta files */
92488 +               nohidden : 1, /* do not show hidden files */
92489 +               force : 1, /* rw mount of a dirty volume */
92490 +               no_acs_rules : 1, /* exclude acs rules */
92491 +               prealloc : 1 /* preallocate space when file is growing */
92492 +               ;
92495 +/* Special value to unpack and deallocate */
92496 +#define RUN_DEALLOCATE ((struct runs_tree *)(size_t)1)
92498 +/* TODO: use rb tree instead of array */
92499 +struct runs_tree {
92500 +       struct ntfs_run *runs;
92501 +       size_t count; // Currently used size of the ntfs_run storage.
92502 +       size_t allocated; // Currently allocated ntfs_run storage size.
92505 +struct ntfs_buffers {
92506 +       /* Biggest MFT / smallest cluster = 4096 / 512 = 8 */
92507 +       /* Biggest index / smallest cluster = 4096 / 512 = 8 */
92508 +       struct buffer_head *bh[PAGE_SIZE >> SECTOR_SHIFT];
92509 +       u32 bytes;
92510 +       u32 nbufs;
92511 +       u32 off;
92514 +enum ALLOCATE_OPT {
92515 +       ALLOCATE_DEF = 0, // Allocate all clusters
92516 +       ALLOCATE_MFT = 1, // Allocate for MFT
92519 +enum bitmap_mutex_classes {
92520 +       BITMAP_MUTEX_CLUSTERS = 0,
92521 +       BITMAP_MUTEX_MFT = 1,
92524 +struct wnd_bitmap {
92525 +       struct super_block *sb;
92526 +       struct rw_semaphore rw_lock;
92528 +       struct runs_tree run;
92529 +       size_t nbits;
92531 +       size_t total_zeroes; // total number of free bits
92532 +       u16 *free_bits; // free bits in each window
92533 +       size_t nwnd;
92534 +       u32 bits_last; // bits in last window
92536 +       struct rb_root start_tree; // extents, sorted by 'start'
92537 +       struct rb_root count_tree; // extents, sorted by 'count + start'
92538 +       size_t count; // extents count
92540 +       /*
92541 +        * -1 - Tree is activated but not updated (too many fragments)
92542 +        * 0 - Tree is not activated
92543 +        * 1 - Tree is activated and updated
92544 +        */
92545 +       int uptodated;
92546 +       size_t extent_min; // Minimal extent used while building
92547 +       size_t extent_max; // Upper estimate of biggest free block
92549 +       /* Zone [bit, end) */
92550 +       size_t zone_bit;
92551 +       size_t zone_end;
92553 +       bool set_tail; // not necessary in driver
92554 +       bool inited;
92557 +typedef int (*NTFS_CMP_FUNC)(const void *key1, size_t len1, const void *key2,
92558 +                            size_t len2, const void *param);
92560 +enum index_mutex_classed {
92561 +       INDEX_MUTEX_I30 = 0,
92562 +       INDEX_MUTEX_SII = 1,
92563 +       INDEX_MUTEX_SDH = 2,
92564 +       INDEX_MUTEX_SO = 3,
92565 +       INDEX_MUTEX_SQ = 4,
92566 +       INDEX_MUTEX_SR = 5,
92567 +       INDEX_MUTEX_TOTAL
92570 +/* ntfs_index - allocation unit inside directory */
92571 +struct ntfs_index {
92572 +       struct runs_tree bitmap_run;
92573 +       struct runs_tree alloc_run;
92574 +       /* read/write access to 'bitmap_run'/'alloc_run' while ntfs_readdir */
92575 +       struct rw_semaphore run_lock;
92577 +       /* TODO: remove 'cmp' */
92578 +       NTFS_CMP_FUNC cmp;
92580 +       u8 index_bits; // log2(root->index_block_size)
92581 +       u8 idx2vbn_bits; // log2(root->index_block_clst)
92582 +       u8 vbn2vbo_bits; // index_block_size < cluster? 9 : cluster_bits
92583 +       u8 type; // index_mutex_classed
92586 +/* Minimum mft zone */
92587 +#define NTFS_MIN_MFT_ZONE 100
92589 +/* ntfs file system in-core superblock data */
92590 +struct ntfs_sb_info {
92591 +       struct super_block *sb;
92593 +       u32 discard_granularity;
92594 +       u64 discard_granularity_mask_inv; // ~(discard_granularity - 1)
92596 +       u32 cluster_size; // bytes per cluster
92597 +       u32 cluster_mask; // == cluster_size - 1
92598 +       u64 cluster_mask_inv; // ~(cluster_size - 1)
92599 +       u32 block_mask; // sb->s_blocksize - 1
92600 +       u32 blocks_per_cluster; // cluster_size / sb->s_blocksize
92602 +       u32 record_size;
92603 +       u32 sector_size;
92604 +       u32 index_size;
92606 +       u8 sector_bits;
92607 +       u8 cluster_bits;
92608 +       u8 record_bits;
92610 +       u64 maxbytes; // Maximum size for normal files
92611 +       u64 maxbytes_sparse; // Maximum size for sparse file
92613 +       u32 flags; // See NTFS_FLAGS_XXX
92615 +       CLST bad_clusters; // The count of marked bad clusters
92617 +       u16 max_bytes_per_attr; // maximum attribute size in record
92618 +       u16 attr_size_tr; // attribute size threshold (320 bytes)
92620 +       /* Records in $Extend */
92621 +       CLST objid_no;
92622 +       CLST quota_no;
92623 +       CLST reparse_no;
92624 +       CLST usn_jrnl_no;
92626 +       struct ATTR_DEF_ENTRY *def_table; // attribute definition table
92627 +       u32 def_entries;
92628 +       u32 ea_max_size;
92630 +       struct MFT_REC *new_rec;
92632 +       u16 *upcase;
92634 +       struct {
92635 +               u64 lbo, lbo2;
92636 +               struct ntfs_inode *ni;
92637 +               struct wnd_bitmap bitmap; // $MFT::Bitmap
92638 +               /*
92639 +                * MFT records [11-24) are used to expand the MFT itself.
92640 +                * They are always marked as used in $MFT::Bitmap;
92641 +                * 'reserved_bitmap' contains the real bitmap of these records.
92642 +                */
92643 +               ulong reserved_bitmap; // bitmap of used records [11 - 24)
92644 +               size_t next_free; // The next record to allocate from
92645 +               size_t used; // mft valid size in records
92646 +               u32 recs_mirr; // Number of records in MFTMirr
92647 +               u8 next_reserved;
92648 +               u8 reserved_bitmap_inited;
92649 +       } mft;
92651 +       struct {
92652 +               struct wnd_bitmap bitmap; // $Bitmap::Data
92653 +               CLST next_free_lcn;
92654 +       } used;
92656 +       struct {
92657 +               u64 size; // in bytes
92658 +               u64 blocks; // in blocks
92659 +               u64 ser_num;
92660 +               struct ntfs_inode *ni;
92661 +               __le16 flags; // cached current VOLUME_INFO::flags, VOLUME_FLAG_DIRTY
92662 +               u8 major_ver;
92663 +               u8 minor_ver;
92664 +               char label[65];
92665 +               bool real_dirty; /* real fs state */
92666 +       } volume;
92668 +       struct {
92669 +               struct ntfs_index index_sii;
92670 +               struct ntfs_index index_sdh;
92671 +               struct ntfs_inode *ni;
92672 +               u32 next_id;
92673 +               u64 next_off;
92675 +               __le32 def_security_id;
92676 +       } security;
92678 +       struct {
92679 +               struct ntfs_index index_r;
92680 +               struct ntfs_inode *ni;
92681 +               u64 max_size; // 16K
92682 +       } reparse;
92684 +       struct {
92685 +               struct ntfs_index index_o;
92686 +               struct ntfs_inode *ni;
92687 +       } objid;
92689 +       struct {
92690 +               struct mutex mtx_lznt;
92691 +               struct lznt *lznt;
92692 +#ifdef CONFIG_NTFS3_LZX_XPRESS
92693 +               struct mutex mtx_xpress;
92694 +               struct xpress_decompressor *xpress;
92695 +               struct mutex mtx_lzx;
92696 +               struct lzx_decompressor *lzx;
92697 +#endif
92698 +       } compress;
92700 +       struct ntfs_mount_options options;
92701 +       struct ratelimit_state msg_ratelimit;
92705 + * One MFT record (usually 1024 bytes), consisting of attributes.
92706 + */
92707 +struct mft_inode {
92708 +       struct rb_node node;
92709 +       struct ntfs_sb_info *sbi;
92711 +       struct MFT_REC *mrec;
92712 +       struct ntfs_buffers nb;
92714 +       CLST rno;
92715 +       bool dirty;
92718 +/* nested class for ntfs_inode::ni_lock */
92719 +enum ntfs_inode_mutex_lock_class {
92720 +       NTFS_INODE_MUTEX_DIRTY,
92721 +       NTFS_INODE_MUTEX_SECURITY,
92722 +       NTFS_INODE_MUTEX_OBJID,
92723 +       NTFS_INODE_MUTEX_REPARSE,
92724 +       NTFS_INODE_MUTEX_NORMAL,
92725 +       NTFS_INODE_MUTEX_PARENT,
92729 + * ntfs_inode - extends the Linux inode; consists of one or more mft_inode records.
92730 + */
92731 +struct ntfs_inode {
92732 +       struct mft_inode mi; // base record
92734 +       /*
92735 +        * Valid size: the range [0, i_valid) of the file contains valid data;
92736 +        * the range [i_valid, inode->i_size) reads as zeros.
92737 +        * Usually i_valid <= inode->i_size.
92738 +        */
92739 +       u64 i_valid;
92740 +       struct timespec64 i_crtime;
92742 +       struct mutex ni_lock;
92744 +       /* file attributes from std */
92745 +       enum FILE_ATTRIBUTE std_fa;
92746 +       __le32 std_security_id;
92748 +       /*
92749 +        * Tree of mft_inode.
92750 +        * Not empty when the primary MFT record (usually 1024 bytes) can't
92751 +        * hold all attributes, e.g. the file is too fragmented or has many names.
92752 +        */
92753 +       struct rb_root mi_tree;
92755 +       /*
92756 +        * This member is used in ntfs_readdir to ensure that all subrecords are loaded
92757 +        */
92758 +       u8 mi_loaded;
92760 +       union {
92761 +               struct ntfs_index dir;
92762 +               struct {
92763 +                       struct rw_semaphore run_lock;
92764 +                       struct runs_tree run;
92765 +#ifdef CONFIG_NTFS3_LZX_XPRESS
92766 +                       struct page *offs_page;
92767 +#endif
92768 +               } file;
92769 +       };
92771 +       struct {
92772 +               struct runs_tree run;
92773 +               struct ATTR_LIST_ENTRY *le; // 1K aligned memory
92774 +               size_t size;
92775 +               bool dirty;
92776 +       } attr_list;
92778 +       size_t ni_flags; // NI_FLAG_XXX
92780 +       struct inode vfs_inode;
92783 +struct indx_node {
92784 +       struct ntfs_buffers nb;
92785 +       struct INDEX_BUFFER *index;
92788 +struct ntfs_fnd {
92789 +       int level;
92790 +       struct indx_node *nodes[20];
92791 +       struct NTFS_DE *de[20];
92792 +       struct NTFS_DE *root_de;
92795 +enum REPARSE_SIGN {
92796 +       REPARSE_NONE = 0,
92797 +       REPARSE_COMPRESSED = 1,
92798 +       REPARSE_DEDUPLICATED = 2,
92799 +       REPARSE_LINK = 3
92802 +/* functions from attrib.c */
92803 +int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
92804 +                  struct runs_tree *run, const CLST *vcn);
92805 +int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
92806 +                          CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
92807 +                          enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
92808 +                          CLST *new_lcn);
92809 +int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
92810 +                         struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
92811 +                         u64 new_size, struct runs_tree *run,
92812 +                         struct ATTRIB **ins_attr, struct page *page);
92813 +int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
92814 +                 const __le16 *name, u8 name_len, struct runs_tree *run,
92815 +                 u64 new_size, const u64 *new_valid, bool keep_prealloc,
92816 +                 struct ATTRIB **ret);
92817 +int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
92818 +                       CLST *len, bool *new);
92819 +int attr_data_read_resident(struct ntfs_inode *ni, struct page *page);
92820 +int attr_data_write_resident(struct ntfs_inode *ni, struct page *page);
92821 +int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
92822 +                      const __le16 *name, u8 name_len, struct runs_tree *run,
92823 +                      CLST vcn);
92824 +int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
92825 +                        const __le16 *name, u8 name_len, struct runs_tree *run,
92826 +                        u64 from, u64 to);
92827 +int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
92828 +                       struct runs_tree *run, u64 frame, u64 frames,
92829 +                       u8 frame_bits, u32 *ondisk_size, u64 *vbo_data);
92830 +int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
92831 +                            CLST frame, CLST *clst_data);
92832 +int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
92833 +                       u64 new_valid);
92834 +int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
92835 +int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes);
92837 +/* functions from attrlist.c */
92838 +void al_destroy(struct ntfs_inode *ni);
92839 +bool al_verify(struct ntfs_inode *ni);
92840 +int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr);
92841 +struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
92842 +                                    struct ATTR_LIST_ENTRY *le);
92843 +struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni,
92844 +                                  struct ATTR_LIST_ENTRY *le,
92845 +                                  const struct ATTRIB *attr);
92846 +struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
92847 +                                  struct ATTR_LIST_ENTRY *le,
92848 +                                  enum ATTR_TYPE type, const __le16 *name,
92849 +                                  u8 name_len, const CLST *vcn);
92850 +int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
92851 +             u8 name_len, CLST svcn, __le16 id, const struct MFT_REF *ref,
92852 +             struct ATTR_LIST_ENTRY **new_le);
92853 +bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le);
92854 +bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
92855 +                 const __le16 *name, size_t name_len,
92856 +                 const struct MFT_REF *ref);
92857 +int al_update(struct ntfs_inode *ni);
92858 +static inline size_t al_aligned(size_t size)
92860 +       return (size + 1023) & ~(size_t)1023;
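For illustration: al_aligned() rounds a byte count up to the next 1 KiB boundary, matching the "1K aligned" attribute-list buffers, so al_aligned(1) == al_aligned(1024) == 1024 and al_aligned(1025) == 2048.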
92863 +/* globals from bitfunc.c */
92864 +bool are_bits_clear(const ulong *map, size_t bit, size_t nbits);
92865 +bool are_bits_set(const ulong *map, size_t bit, size_t nbits);
92866 +size_t get_set_bits_ex(const ulong *map, size_t bit, size_t nbits);
92868 +/* globals from dir.c */
92869 +int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
92870 +                     u8 *buf, int buf_len);
92871 +int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
92872 +                     struct cpu_str *uni, u32 max_ulen,
92873 +                     enum utf16_endian endian);
92874 +struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
92875 +                          struct ntfs_fnd *fnd);
92876 +bool dir_is_empty(struct inode *dir);
92877 +extern const struct file_operations ntfs_dir_operations;
92879 +/* globals from file.c */
92880 +int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
92881 +                struct kstat *stat, u32 request_mask, u32 flags);
92882 +void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
92883 +                        CLST len);
92884 +int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
92885 +                 struct iattr *attr);
92886 +int ntfs_file_open(struct inode *inode, struct file *file);
92887 +int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
92888 +               __u64 start, __u64 len);
92889 +extern const struct inode_operations ntfs_special_inode_operations;
92890 +extern const struct inode_operations ntfs_file_inode_operations;
92891 +extern const struct file_operations ntfs_file_operations;
92893 +/* globals from frecord.c */
92894 +void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi);
92895 +struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni);
92896 +struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni);
92897 +void ni_clear(struct ntfs_inode *ni);
92898 +int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
92899 +int ni_load_mi(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
92900 +              struct mft_inode **mi);
92901 +struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
92902 +                           struct ATTR_LIST_ENTRY **entry_o,
92903 +                           enum ATTR_TYPE type, const __le16 *name,
92904 +                           u8 name_len, const CLST *vcn,
92905 +                           struct mft_inode **mi);
92906 +struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
92907 +                              struct ATTR_LIST_ENTRY **le,
92908 +                              struct mft_inode **mi);
92909 +struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
92910 +                           const __le16 *name, u8 name_len, CLST vcn,
92911 +                           struct mft_inode **pmi);
92912 +int ni_load_all_mi(struct ntfs_inode *ni);
92913 +bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
92914 +int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
92915 +                  const __le16 *name, size_t name_len, bool base_only,
92916 +                  const __le16 *id);
92917 +int ni_create_attr_list(struct ntfs_inode *ni);
92918 +int ni_expand_list(struct ntfs_inode *ni);
92919 +int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
92920 +                         const __le16 *name, u8 name_len,
92921 +                         const struct runs_tree *run, CLST svcn, CLST len,
92922 +                         __le16 flags, struct ATTRIB **new_attr,
92923 +                         struct mft_inode **mi);
92924 +int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
92925 +                      enum ATTR_TYPE type, const __le16 *name, u8 name_len,
92926 +                      struct ATTRIB **new_attr, struct mft_inode **mi);
92927 +int ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
92928 +                     struct ATTR_LIST_ENTRY *le);
92929 +int ni_delete_all(struct ntfs_inode *ni);
92930 +struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
92931 +                                    const struct cpu_str *uni,
92932 +                                    const struct MFT_REF *home,
92933 +                                    struct ATTR_LIST_ENTRY **entry);
92934 +struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
92935 +                                    struct ATTR_LIST_ENTRY **entry);
92936 +int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa);
92937 +enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
92938 +                                  void *buffer);
92939 +int ni_write_inode(struct inode *inode, int sync, const char *hint);
92940 +#define _ni_write_inode(i, w) ni_write_inode(i, w, __func__)
92941 +int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
92942 +             __u64 vbo, __u64 len);
92943 +int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page);
92944 +int ni_decompress_file(struct ntfs_inode *ni);
92945 +int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
92946 +                 u32 pages_per_frame);
92947 +int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
92948 +                  u32 pages_per_frame);
92950 +/* globals from fslog.c */
92951 +int log_replay(struct ntfs_inode *ni, bool *initialized);
92953 +/* globals from fsntfs.c */
92954 +bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes);
92955 +int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
92956 +                      bool simple);
92957 +int ntfs_extend_init(struct ntfs_sb_info *sbi);
92958 +int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi);
92959 +const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
92960 +                                           enum ATTR_TYPE Type);
92961 +int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
92962 +                            CLST *new_lcn, CLST *new_len,
92963 +                            enum ALLOCATE_OPT opt);
92964 +int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
92965 +                      struct ntfs_inode *ni, struct mft_inode **mi);
92966 +void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno);
92967 +int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to);
92968 +int ntfs_refresh_zone(struct ntfs_sb_info *sbi);
92969 +int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
92970 +enum NTFS_DIRTY_FLAGS {
92971 +       NTFS_DIRTY_CLEAR = 0,
92972 +       NTFS_DIRTY_DIRTY = 1,
92973 +       NTFS_DIRTY_ERROR = 2,
92975 +int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty);
92976 +int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer);
92977 +int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
92978 +                 const void *buffer, int wait);
92979 +int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
92980 +                     u64 vbo, const void *buf, size_t bytes);
92981 +struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
92982 +                                  const struct runs_tree *run, u64 vbo);
92983 +int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
92984 +                    u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb);
92985 +int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
92986 +                struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
92987 +                struct ntfs_buffers *nb);
92988 +int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
92989 +               u32 bytes, struct ntfs_buffers *nb);
92990 +int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
92991 +                 struct ntfs_buffers *nb, int sync);
92992 +int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
92993 +                  struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
92994 +                  u32 op);
92995 +int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run);
92996 +int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
92997 +                   u64 vbo, u64 *lbo, u64 *bytes);
92998 +struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST nRec,
92999 +                                 bool dir);
93000 +extern const u8 s_default_security[0x50];
93001 +bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len);
93002 +int ntfs_security_init(struct ntfs_sb_info *sbi);
93003 +int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
93004 +                           struct SECURITY_DESCRIPTOR_RELATIVE **sd,
93005 +                           size_t *size);
93006 +int ntfs_insert_security(struct ntfs_sb_info *sbi,
93007 +                        const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
93008 +                        u32 size, __le32 *security_id, bool *inserted);
93009 +int ntfs_reparse_init(struct ntfs_sb_info *sbi);
93010 +int ntfs_objid_init(struct ntfs_sb_info *sbi);
93011 +int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid);
93012 +int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
93013 +                       const struct MFT_REF *ref);
93014 +int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
93015 +                       const struct MFT_REF *ref);
93016 +void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim);
93017 +int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim);
93019 +/* globals from index.c */
93020 +int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit);
93021 +void fnd_clear(struct ntfs_fnd *fnd);
93022 +static inline struct ntfs_fnd *fnd_get(void)
93024 +       return ntfs_zalloc(sizeof(struct ntfs_fnd));
93026 +static inline void fnd_put(struct ntfs_fnd *fnd)
93028 +       if (fnd) {
93029 +               fnd_clear(fnd);
93030 +               ntfs_free(fnd);
93031 +       }
93033 +void indx_clear(struct ntfs_index *idx);
93034 +int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
93035 +             const struct ATTRIB *attr, enum index_mutex_classed type);
93036 +struct INDEX_ROOT *indx_get_root(struct ntfs_index *indx, struct ntfs_inode *ni,
93037 +                                struct ATTRIB **attr, struct mft_inode **mi);
93038 +int indx_read(struct ntfs_index *idx, struct ntfs_inode *ni, CLST vbn,
93039 +             struct indx_node **node);
93040 +int indx_find(struct ntfs_index *indx, struct ntfs_inode *dir,
93041 +             const struct INDEX_ROOT *root, const void *Key, size_t KeyLen,
93042 +             const void *param, int *diff, struct NTFS_DE **entry,
93043 +             struct ntfs_fnd *fnd);
93044 +int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,
93045 +                  const struct INDEX_ROOT *root, struct NTFS_DE **entry,
93046 +                  struct ntfs_fnd *fnd);
93047 +int indx_find_raw(struct ntfs_index *indx, struct ntfs_inode *ni,
93048 +                 const struct INDEX_ROOT *root, struct NTFS_DE **entry,
93049 +                 size_t *off, struct ntfs_fnd *fnd);
93050 +int indx_insert_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
93051 +                     const struct NTFS_DE *new_de, const void *param,
93052 +                     struct ntfs_fnd *fnd);
93053 +int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
93054 +                     const void *key, u32 key_len, const void *param);
93055 +int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
93056 +                   const struct ATTR_FILE_NAME *fname,
93057 +                   const struct NTFS_DUP_INFO *dup, int sync);
93059 +/* globals from inode.c */
93060 +struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
93061 +                        const struct cpu_str *name);
93062 +int ntfs_set_size(struct inode *inode, u64 new_size);
93063 +int reset_log_file(struct inode *inode);
93064 +int ntfs_get_block(struct inode *inode, sector_t vbn,
93065 +                  struct buffer_head *bh_result, int create);
93066 +int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
93067 +int ntfs_sync_inode(struct inode *inode);
93068 +int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
93069 +                     struct inode *i2);
93070 +int inode_write_data(struct inode *inode, const void *data, size_t bytes);
93071 +struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
93072 +                               struct inode *dir, struct dentry *dentry,
93073 +                               const struct cpu_str *uni, umode_t mode,
93074 +                               dev_t dev, const char *symname, u32 size,
93075 +                               int excl, struct ntfs_fnd *fnd);
93076 +int ntfs_link_inode(struct inode *inode, struct dentry *dentry);
93077 +int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry);
93078 +void ntfs_evict_inode(struct inode *inode);
93079 +extern const struct inode_operations ntfs_link_inode_operations;
93080 +extern const struct address_space_operations ntfs_aops;
93081 +extern const struct address_space_operations ntfs_aops_cmpr;
93083 +/* globals from name_i.c */
93084 +int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
93085 +                const struct cpu_str *uni);
93086 +struct dentry *ntfs3_get_parent(struct dentry *child);
93088 +extern const struct inode_operations ntfs_dir_inode_operations;
93090 +/* globals from record.c */
93091 +int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi);
93092 +void mi_put(struct mft_inode *mi);
93093 +int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno);
93094 +int mi_read(struct mft_inode *mi, bool is_mft);
93095 +struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr);
93096 +// TODO: id?
93097 +struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
93098 +                           enum ATTR_TYPE type, const __le16 *name,
93099 +                           size_t name_len, const __le16 *id);
93100 +static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
93101 +                                             struct ATTR_LIST_ENTRY *le)
93103 +       return mi_find_attr(rec, NULL, le->type, le_name(le), le->name_len,
93104 +                           &le->id);
93106 +int mi_write(struct mft_inode *mi, int wait);
93107 +int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
93108 +                 __le16 flags, bool is_mft);
93109 +void mi_mark_free(struct mft_inode *mi);
93110 +struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
93111 +                             const __le16 *name, u8 name_len, u32 asize,
93112 +                             u16 name_off);
93114 +bool mi_remove_attr(struct mft_inode *mi, struct ATTRIB *attr);
93115 +bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes);
93116 +int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
93117 +                struct runs_tree *run, CLST len);
93118 +static inline bool mi_is_ref(const struct mft_inode *mi,
93119 +                            const struct MFT_REF *ref)
93121 +       if (le32_to_cpu(ref->low) != mi->rno)
93122 +               return false;
93123 +       if (ref->seq != mi->mrec->seq)
93124 +               return false;
93126 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
93127 +       return le16_to_cpu(ref->high) == (mi->rno >> 32);
93128 +#else
93129 +       return !ref->high;
93130 +#endif
93133 +static inline void mi_get_ref(const struct mft_inode *mi, struct MFT_REF *ref)
93135 +       ref->low = cpu_to_le32(mi->rno);
93136 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
93137 +       ref->high = cpu_to_le16(mi->rno >> 32);
93138 +#else
93139 +       ref->high = 0;
93140 +#endif
93141 +       ref->seq = mi->mrec->seq;
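A sketch of the intended round-trip (hypothetical call site): the record number is split into 'low'/'high' and guarded by the 16-bit 'seq' counter, so a reference produced by mi_get_ref() must satisfy mi_is_ref():

	struct MFT_REF ref;

	mi_get_ref(mi, &ref);
	WARN_ON(!mi_is_ref(mi, &ref));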
93144 +/* globals from run.c */
93145 +bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
93146 +                     CLST *len, size_t *index);
93147 +void run_truncate(struct runs_tree *run, CLST vcn);
93148 +void run_truncate_head(struct runs_tree *run, CLST vcn);
93149 +void run_truncate_around(struct runs_tree *run, CLST vcn);
93150 +bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *Index);
93151 +bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
93152 +                  bool is_mft);
93153 +bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
93154 +bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
93155 +                  CLST *lcn, CLST *len);
93156 +bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn);
93158 +int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
93159 +            u32 run_buf_size, CLST *packed_vcns);
93160 +int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
93161 +              CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
93162 +              u32 run_buf_size);
93164 +#ifdef NTFS3_CHECK_FREE_CLST
93165 +int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
93166 +                 CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
93167 +                 u32 run_buf_size);
93168 +#else
93169 +#define run_unpack_ex run_unpack
93170 +#endif
93171 +int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn);
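A usage sketch for the run API (hypothetical values): map VCNs [0, 16) to LCNs [100, 116) and VCNs [16, 24) to LCNs [200, 208), then resolve VCN 20:

	struct runs_tree run;
	CLST lcn, len;

	run_init(&run);
	run_add_entry(&run, 0, 100, 16, false);
	run_add_entry(&run, 16, 200, 8, false);
	if (run_lookup_entry(&run, 20, &lcn, &len, NULL)) {
		/* lcn == 204; len == 4 clusters left in this extent */
	}
	run_close(&run);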
93173 +/* globals from super.c */
93174 +void *ntfs_set_shared(void *ptr, u32 bytes);
93175 +void *ntfs_put_shared(void *ptr);
93176 +void ntfs_unmap_meta(struct super_block *sb, CLST lcn, CLST len);
93177 +int ntfs_discard(struct ntfs_sb_info *sbi, CLST Lcn, CLST Len);
93179 +/* globals from bitmap.c */
93180 +int __init ntfs3_init_bitmap(void);
93181 +void ntfs3_exit_bitmap(void);
93182 +void wnd_close(struct wnd_bitmap *wnd);
93183 +static inline size_t wnd_zeroes(const struct wnd_bitmap *wnd)
93185 +       return wnd->total_zeroes;
93187 +int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits);
93188 +int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits);
93189 +int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
93190 +bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits);
93191 +bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
93193 +/* Possible values for the 'flags' argument of wnd_find() */
93194 +#define BITMAP_FIND_MARK_AS_USED 0x01
93195 +#define BITMAP_FIND_FULL 0x02
93196 +size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
93197 +               size_t flags, size_t *allocated);
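The flags may be combined; a hypothetical call site asking for 'to_alloc' contiguous bits, refusing partial results and marking the range used in one step (here 'start' is assumed to receive the starting bit):

	size_t start, found;

	found = wnd_find(wnd, to_alloc, hint,
			 BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &start);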
93198 +int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits);
93199 +void wnd_zone_set(struct wnd_bitmap *wnd, size_t Lcn, size_t Len);
93200 +int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range);
93202 +/* globals from upcase.c */
93203 +int ntfs_cmp_names(const __le16 *s1, size_t l1, const __le16 *s2, size_t l2,
93204 +                  const u16 *upcase, bool bothcase);
93205 +int ntfs_cmp_names_cpu(const struct cpu_str *uni1, const struct le_str *uni2,
93206 +                      const u16 *upcase, bool bothcase);
93208 +/* globals from xattr.c */
93209 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
93210 +struct posix_acl *ntfs_get_acl(struct inode *inode, int type);
93211 +int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
93212 +                struct posix_acl *acl, int type);
93213 +int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
93214 +                 struct inode *dir);
93215 +#else
93216 +#define ntfs_get_acl NULL
93217 +#define ntfs_set_acl NULL
93218 +#endif
93220 +int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode);
93221 +int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
93222 +                   int mask);
93223 +ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
93224 +extern const struct xattr_handler *ntfs_xattr_handlers[];
93226 +/* globals from lznt.c */
93227 +struct lznt *get_lznt_ctx(int level);
93228 +size_t compress_lznt(const void *uncompressed, size_t uncompressed_size,
93229 +                    void *compressed, size_t compressed_size,
93230 +                    struct lznt *ctx);
93231 +ssize_t decompress_lznt(const void *compressed, size_t compressed_size,
93232 +                       void *uncompressed, size_t uncompressed_size);
93234 +static inline bool is_ntfs3(struct ntfs_sb_info *sbi)
93236 +       return sbi->volume.major_ver >= 3;
93239 +/*(sb->s_flags & SB_ACTIVE)*/
93240 +static inline bool is_mounted(struct ntfs_sb_info *sbi)
93242 +       return !!sbi->sb->s_root;
93245 +static inline bool ntfs_is_meta_file(struct ntfs_sb_info *sbi, CLST rno)
93247 +       return rno < MFT_REC_FREE || rno == sbi->objid_no ||
93248 +              rno == sbi->quota_no || rno == sbi->reparse_no ||
93249 +              rno == sbi->usn_jrnl_no;
93252 +static inline void ntfs_unmap_page(struct page *page)
93254 +       kunmap(page);
93255 +       put_page(page);
93258 +static inline struct page *ntfs_map_page(struct address_space *mapping,
93259 +                                        unsigned long index)
93261 +       struct page *page = read_mapping_page(mapping, index, NULL);
93263 +       if (!IS_ERR(page)) {
93264 +               kmap(page);
93265 +               if (!PageError(page))
93266 +                       return page;
93267 +               ntfs_unmap_page(page);
93268 +               return ERR_PTR(-EIO);
93269 +       }
93270 +       return page;
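A usage sketch (hypothetical call site; 'mapping', 'idx' and 'buf' are assumed): the helper returns a kmap()-ed, error-free page or an ERR_PTR, so callers pair it with ntfs_unmap_page():

	struct page *page = ntfs_map_page(mapping, idx);

	if (IS_ERR(page))
		return PTR_ERR(page);
	memcpy(buf, page_address(page), PAGE_SIZE);
	ntfs_unmap_page(page);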
93273 +static inline size_t wnd_zone_bit(const struct wnd_bitmap *wnd)
93275 +       return wnd->zone_bit;
93278 +static inline size_t wnd_zone_len(const struct wnd_bitmap *wnd)
93280 +       return wnd->zone_end - wnd->zone_bit;
93283 +static inline void run_init(struct runs_tree *run)
93285 +       run->runs = NULL;
93286 +       run->count = 0;
93287 +       run->allocated = 0;
93290 +static inline struct runs_tree *run_alloc(void)
93292 +       return ntfs_zalloc(sizeof(struct runs_tree));
93295 +static inline void run_close(struct runs_tree *run)
93297 +       ntfs_vfree(run->runs);
93298 +       memset(run, 0, sizeof(*run));
93301 +static inline void run_free(struct runs_tree *run)
93303 +       if (run) {
93304 +               ntfs_vfree(run->runs);
93305 +               ntfs_free(run);
93306 +       }
93309 +static inline bool run_is_empty(struct runs_tree *run)
93311 +       return !run->count;
93314 +/* NTFS uses quad-aligned bitmaps */
93315 +static inline size_t bitmap_size(size_t bits)
93317 +       return QuadAlign((bits + 7) >> 3);
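For illustration: bitmap_size(1) == 8 and bitmap_size(65) == 16, since the byte count (bits + 7) >> 3 is rounded up to a multiple of 8 bytes.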
93320 +#define _100ns2seconds 10000000
93321 +#define SecondsToStartOf1970 0x00000002B6109100
93323 +#define NTFS_TIME_GRAN 100
93326 + * kernel2nt
93327 + *
93328 + * Converts an in-memory kernel timestamp into NT time.
93329 + */
93330 +static inline __le64 kernel2nt(const struct timespec64 *ts)
93332 +       // 10^7 units of 100 nanoseconds == one second
93333 +       return cpu_to_le64(_100ns2seconds *
93334 +                                  (ts->tv_sec + SecondsToStartOf1970) +
93335 +                          ts->tv_nsec / NTFS_TIME_GRAN);
93339 + * nt2kernel
93340 + *
93341 + * Converts on-disk NT time into a kernel timestamp.
93342 + */
93343 +static inline void nt2kernel(const __le64 tm, struct timespec64 *ts)
93345 +       u64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970;
93347 +       // WARNING: do_div changes its first argument(!)
93348 +       ts->tv_nsec = do_div(t, _100ns2seconds) * 100;
93349 +       ts->tv_sec = t;
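For illustration: NT time counts 100 ns units since 1601-01-01, so the Unix epoch corresponds to SecondsToStartOf1970 * _100ns2seconds = 116444736000000000; kernel2nt() of a zero timespec64 yields exactly that value, and nt2kernel() inverts the conversion.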
93352 +static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)
93354 +       return sb->s_fs_info;
93357 +/* Align up on cluster boundary */
93358 +static inline u64 ntfs_up_cluster(const struct ntfs_sb_info *sbi, u64 size)
93360 +       return (size + sbi->cluster_mask) & sbi->cluster_mask_inv;
93363 +/* Align up on block boundary */
93364 +static inline u64 ntfs_up_block(const struct super_block *sb, u64 size)
93366 +       return (size + sb->s_blocksize - 1) & ~(u64)(sb->s_blocksize - 1);
93369 +static inline CLST bytes_to_cluster(const struct ntfs_sb_info *sbi, u64 size)
93371 +       return (size + sbi->cluster_mask) >> sbi->cluster_bits;
93374 +static inline u64 bytes_to_block(const struct super_block *sb, u64 size)
93376 +       return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
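For illustration, with 4 KiB clusters (cluster_bits == 12, cluster_mask == 0xfff): ntfs_up_cluster(sbi, 1) == 4096 and bytes_to_cluster(sbi, 4097) == 2.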
93379 +static inline struct buffer_head *ntfs_bread(struct super_block *sb,
93380 +                                            sector_t block)
93382 +       struct buffer_head *bh = sb_bread(sb, block);
93384 +       if (bh)
93385 +               return bh;
93387 +       ntfs_err(sb, "failed to read volume at offset 0x%llx",
93388 +                (u64)block << sb->s_blocksize_bits);
93389 +       return NULL;
93392 +static inline bool is_power_of2(size_t v)
93394 +       return v && !(v & (v - 1));
93397 +static inline struct ntfs_inode *ntfs_i(struct inode *inode)
93399 +       return container_of(inode, struct ntfs_inode, vfs_inode);
93402 +static inline bool is_compressed(const struct ntfs_inode *ni)
93404 +       return (ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) ||
93405 +              (ni->ni_flags & NI_FLAG_COMPRESSED_MASK);
93408 +static inline int ni_ext_compress_bits(const struct ntfs_inode *ni)
93410 +       return 0xb + (ni->ni_flags & NI_FLAG_COMPRESSED_MASK);
93413 +/* bits - 0xc, 0xd, 0xe, 0xf, 0x10 */
93414 +static inline void ni_set_ext_compress_bits(struct ntfs_inode *ni, u8 bits)
93416 +       ni->ni_flags |= (bits - 0xb) & NI_FLAG_COMPRESSED_MASK;
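For illustration: compression value 1 in ni_flags (XPRESS4K, see NI_FLAG_COMPRESSED_MASK above) makes ni_ext_compress_bits() return 0xc, i.e. 1 << 0xc == 4 KiB frames; value 4 (LZX32K) gives 0xf, i.e. 32 KiB frames.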
93419 +static inline bool is_dedup(const struct ntfs_inode *ni)
93421 +       return ni->ni_flags & NI_FLAG_DEDUPLICATED;
93424 +static inline bool is_encrypted(const struct ntfs_inode *ni)
93426 +       return ni->std_fa & FILE_ATTRIBUTE_ENCRYPTED;
93429 +static inline bool is_sparsed(const struct ntfs_inode *ni)
93431 +       return ni->std_fa & FILE_ATTRIBUTE_SPARSE_FILE;
93434 +static inline int is_resident(struct ntfs_inode *ni)
93436 +       return ni->ni_flags & NI_FLAG_RESIDENT;
93439 +static inline void le16_sub_cpu(__le16 *var, u16 val)
93441 +       *var = cpu_to_le16(le16_to_cpu(*var) - val);
93444 +static inline void le32_sub_cpu(__le32 *var, u32 val)
93446 +       *var = cpu_to_le32(le32_to_cpu(*var) - val);
93449 +static inline void nb_put(struct ntfs_buffers *nb)
93451 +       u32 i, nbufs = nb->nbufs;
93453 +       if (!nbufs)
93454 +               return;
93456 +       for (i = 0; i < nbufs; i++)
93457 +               put_bh(nb->bh[i]);
93458 +       nb->nbufs = 0;
93461 +static inline void put_indx_node(struct indx_node *in)
93463 +       if (!in)
93464 +               return;
93466 +       ntfs_free(in->index);
93467 +       nb_put(&in->nb);
93468 +       ntfs_free(in);
93471 +static inline void mi_clear(struct mft_inode *mi)
93473 +       nb_put(&mi->nb);
93474 +       ntfs_free(mi->mrec);
93475 +       mi->mrec = NULL;
93478 +static inline void ni_lock(struct ntfs_inode *ni)
93480 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_NORMAL);
93483 +static inline void ni_lock_dir(struct ntfs_inode *ni)
93485 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_PARENT);
93488 +static inline void ni_unlock(struct ntfs_inode *ni)
93490 +       mutex_unlock(&ni->ni_lock);
93493 +static inline int ni_trylock(struct ntfs_inode *ni)
93495 +       return mutex_trylock(&ni->ni_lock);
93498 +static inline int attr_load_runs_attr(struct ntfs_inode *ni,
93499 +                                     struct ATTRIB *attr,
93500 +                                     struct runs_tree *run, CLST vcn)
93502 +       return attr_load_runs_vcn(ni, attr->type, attr_name(attr),
93503 +                                 attr->name_len, run, vcn);
93506 +static inline void le64_sub_cpu(__le64 *var, u64 val)
93508 +       *var = cpu_to_le64(le64_to_cpu(*var) - val);
93510 diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
93511 new file mode 100644
93512 index 000000000000..0d4a6251bddc
93513 --- /dev/null
93514 +++ b/fs/ntfs3/record.c
93515 @@ -0,0 +1,609 @@
93516 +// SPDX-License-Identifier: GPL-2.0
93518 + *
93519 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
93520 + *
93521 + */
93523 +#include <linux/blkdev.h>
93524 +#include <linux/buffer_head.h>
93525 +#include <linux/fs.h>
93526 +#include <linux/nls.h>
93528 +#include "debug.h"
93529 +#include "ntfs.h"
93530 +#include "ntfs_fs.h"
93532 +static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
93533 +                              const __le16 *name, u8 name_len,
93534 +                              const u16 *upcase)
93536 +       /* First, compare the type codes: */
93537 +       int diff = le32_to_cpu(left->type) - le32_to_cpu(type);
93539 +       if (diff)
93540 +               return diff;
93542 +       /*
93543 +        * They have the same type code, so we have to compare the names.
93544 +        */
93545 +       return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
93546 +                             upcase, true);
93550 + * mi_new_attt_id
93551 + *
93552 + * Returns an unused attribute id that is less than mrec->next_attr_id.
93553 + */
93554 +static __le16 mi_new_attt_id(struct mft_inode *mi)
93556 +       u16 free_id, max_id, t16;
93557 +       struct MFT_REC *rec = mi->mrec;
93558 +       struct ATTRIB *attr;
93559 +       __le16 id;
93561 +       id = rec->next_attr_id;
93562 +       free_id = le16_to_cpu(id);
93563 +       if (free_id < 0x7FFF) {
93564 +               rec->next_attr_id = cpu_to_le16(free_id + 1);
93565 +               return id;
93566 +       }
93568 +       /* One record can store up to 1024/24 ~= 42 attributes */
93569 +       free_id = 0;
93570 +       max_id = 0;
93572 +       attr = NULL;
93574 +       for (;;) {
93575 +               attr = mi_enum_attr(mi, attr);
93576 +               if (!attr) {
93577 +                       rec->next_attr_id = cpu_to_le16(max_id + 1);
93578 +                       mi->dirty = true;
93579 +                       return cpu_to_le16(free_id);
93580 +               }
93582 +               t16 = le16_to_cpu(attr->id);
93583 +               if (t16 == free_id) {
93584 +                       free_id += 1;
93585 +                       attr = NULL;
93586 +               } else if (max_id < t16)
93587 +                       max_id = t16;
93588 +       }
93591 +int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
93593 +       int err;
93594 +       struct mft_inode *m = ntfs_zalloc(sizeof(struct mft_inode));
93596 +       if (!m)
93597 +               return -ENOMEM;
93599 +       err = mi_init(m, sbi, rno);
93600 +       if (err) {
93601 +               ntfs_free(m);
93602 +               return err;
93603 +       }
93605 +       err = mi_read(m, false);
93606 +       if (err) {
93607 +               mi_put(m);
93608 +               return err;
93609 +       }
93611 +       *mi = m;
93612 +       return 0;
93615 +void mi_put(struct mft_inode *mi)
93617 +       mi_clear(mi);
93618 +       ntfs_free(mi);
93621 +int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
93623 +       mi->sbi = sbi;
93624 +       mi->rno = rno;
93625 +       mi->mrec = ntfs_malloc(sbi->record_size);
93626 +       if (!mi->mrec)
93627 +               return -ENOMEM;
93629 +       return 0;
93633 + * mi_read
93634 + *
93635 + * reads MFT data
93636 + */
93637 +int mi_read(struct mft_inode *mi, bool is_mft)
93639 +       int err;
93640 +       struct MFT_REC *rec = mi->mrec;
93641 +       struct ntfs_sb_info *sbi = mi->sbi;
93642 +       u32 bpr = sbi->record_size;
93643 +       u64 vbo = (u64)mi->rno << sbi->record_bits;
93644 +       struct ntfs_inode *mft_ni = sbi->mft.ni;
93645 +       struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
93646 +       struct rw_semaphore *rw_lock = NULL;
93648 +       if (is_mounted(sbi)) {
93649 +               if (!is_mft) {
93650 +                       rw_lock = &mft_ni->file.run_lock;
93651 +                       down_read(rw_lock);
93652 +               }
93653 +       }
93655 +       err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
93656 +       if (rw_lock)
93657 +               up_read(rw_lock);
93658 +       if (!err)
93659 +               goto ok;
93661 +       if (err == -E_NTFS_FIXUP) {
93662 +               mi->dirty = true;
93663 +               goto ok;
93664 +       }
93666 +       if (err != -ENOENT)
93667 +               goto out;
93669 +       if (rw_lock) {
93670 +               ni_lock(mft_ni);
93671 +               down_write(rw_lock);
93672 +       }
93673 +       err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run,
93674 +                                vbo >> sbi->cluster_bits);
93675 +       if (rw_lock) {
93676 +               up_write(rw_lock);
93677 +               ni_unlock(mft_ni);
93678 +       }
93679 +       if (err)
93680 +               goto out;
93682 +       if (rw_lock)
93683 +               down_read(rw_lock);
93684 +       err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
93685 +       if (rw_lock)
93686 +               up_read(rw_lock);
93688 +       if (err == -E_NTFS_FIXUP) {
93689 +               mi->dirty = true;
93690 +               goto ok;
93691 +       }
93692 +       if (err)
93693 +               goto out;
93695 +ok:
93696 +       /* check field 'total' only here */
93697 +       if (le32_to_cpu(rec->total) != bpr) {
93698 +               err = -EINVAL;
93699 +               goto out;
93700 +       }
93702 +       return 0;
93704 +out:
93705 +       return err;
93708 +struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
93710 +       const struct MFT_REC *rec = mi->mrec;
93711 +       u32 used = le32_to_cpu(rec->used);
93712 +       u32 t32, off, asize;
93713 +       u16 t16;
93715 +       if (!attr) {
93716 +               u32 total = le32_to_cpu(rec->total);
93718 +               off = le16_to_cpu(rec->attr_off);
93720 +               if (used > total)
93721 +                       return NULL;
93723 +               if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
93724 +                   !IsDwordAligned(off)) {
93725 +                       return NULL;
93726 +               }
93728 +               /* Skip records that are not in use */
93729 +               if (!is_rec_inuse(rec))
93730 +                       return NULL;
93732 +               attr = Add2Ptr(rec, off);
93733 +       } else {
93734 +               /* Check if the input attr is inside the record */
93735 +               off = PtrOffset(rec, attr);
93736 +               if (off >= used)
93737 +                       return NULL;
93739 +               asize = le32_to_cpu(attr->size);
93740 +               if (asize < SIZEOF_RESIDENT) {
93741 +                       /* Impossible: we should never have returned such an attribute */
93742 +                       return NULL;
93743 +               }
93745 +               attr = Add2Ptr(attr, asize);
93746 +               off += asize;
93747 +       }
93749 +       asize = le32_to_cpu(attr->size);
93751 +       /* Can we use the first field (attr->type)? */
93752 +       if (off + 8 > used) {
93753 +               static_assert(QuadAlign(sizeof(enum ATTR_TYPE)) == 8);
93754 +               return NULL;
93755 +       }
93757 +       if (attr->type == ATTR_END) {
93758 +               /* end of enumeration */
93759 +               return NULL;
93760 +       }
93762 +       /* 0x100 is the last known attribute for now */
93763 +       t32 = le32_to_cpu(attr->type);
93764 +       if ((t32 & 0xf) || (t32 > 0x100))
93765 +               return NULL;
93767 +       /* Check boundary */
93768 +       if (off + asize > used)
93769 +               return NULL;
93771 +       /* Check size of attribute */
93772 +       if (!attr->non_res) {
93773 +               if (asize < SIZEOF_RESIDENT)
93774 +                       return NULL;
93776 +               t16 = le16_to_cpu(attr->res.data_off);
93778 +               if (t16 > asize)
93779 +                       return NULL;
93781 +               t32 = le32_to_cpu(attr->res.data_size);
93782 +               if (t16 + t32 > asize)
93783 +                       return NULL;
93785 +               return attr;
93786 +       }
93788 +       /* Check some nonresident fields */
93789 +       if (attr->name_len &&
93790 +           le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len >
93791 +                   le16_to_cpu(attr->nres.run_off)) {
93792 +               return NULL;
93793 +       }
93795 +       if (attr->nres.svcn || !is_attr_ext(attr)) {
93796 +               if (asize + 8 < SIZEOF_NONRESIDENT)
93797 +                       return NULL;
93799 +               if (attr->nres.c_unit)
93800 +                       return NULL;
93801 +       } else if (asize + 8 < SIZEOF_NONRESIDENT_EX)
93802 +               return NULL;
93804 +       return attr;
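mi_enum_attr() is the workhorse of this file: pass NULL to get the first attribute of a record, then pass the previous return value to advance, and a NULL return marks the end; every attribute it returns has already passed the size and offset checks above. A minimal caller sketch (illustrative only, not part of the patch; 'mi' is assumed to be an already loaded struct mft_inode):

	static void example_walk_attrs(struct mft_inode *mi)
	{
		struct ATTRIB *attr = NULL;

		/* NULL in returns the first attribute; NULL out ends the walk. */
		while ((attr = mi_enum_attr(mi, attr))) {
			/* attr->type, attr->size and attr->id are safe to read here. */
		}
	}

mi_find_attr() and mi_insert_attr() below use exactly this pattern internally.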
93808 + * mi_find_attr
93809 + *
93810 + * finds the attribute by type, name and id
93811 + */
93812 +struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
93813 +                           enum ATTR_TYPE type, const __le16 *name,
93814 +                           size_t name_len, const __le16 *id)
93816 +       u32 type_in = le32_to_cpu(type);
93817 +       u32 atype;
93819 +next_attr:
93820 +       attr = mi_enum_attr(mi, attr);
93821 +       if (!attr)
93822 +               return NULL;
93824 +       atype = le32_to_cpu(attr->type);
93825 +       if (atype > type_in)
93826 +               return NULL;
93828 +       if (atype < type_in)
93829 +               goto next_attr;
93831 +       if (attr->name_len != name_len)
93832 +               goto next_attr;
93834 +       if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
93835 +               goto next_attr;
93837 +       if (id && *id != attr->id)
93838 +               goto next_attr;
93840 +       return attr;
93843 +int mi_write(struct mft_inode *mi, int wait)
93845 +       struct MFT_REC *rec;
93846 +       int err;
93847 +       struct ntfs_sb_info *sbi;
93849 +       if (!mi->dirty)
93850 +               return 0;
93852 +       sbi = mi->sbi;
93853 +       rec = mi->mrec;
93855 +       err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
93856 +       if (err)
93857 +               return err;
93859 +       if (mi->rno < sbi->mft.recs_mirr)
93860 +               sbi->flags |= NTFS_FLAGS_MFTMIRR;
93862 +       mi->dirty = false;
93864 +       return 0;
93867 +int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
93868 +                 __le16 flags, bool is_mft)
93870 +       int err;
93871 +       u16 seq = 1;
93872 +       struct MFT_REC *rec;
93873 +       u64 vbo = (u64)rno << sbi->record_bits;
93875 +       err = mi_init(mi, sbi, rno);
93876 +       if (err)
93877 +               return err;
93879 +       rec = mi->mrec;
93881 +       if (rno == MFT_REC_MFT) {
93882 +               ;
93883 +       } else if (rno < MFT_REC_FREE) {
93884 +               seq = rno;
93885 +       } else if (rno >= sbi->mft.used) {
93886 +               ;
93887 +       } else if (mi_read(mi, is_mft)) {
93888 +               ;
93889 +       } else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
93890 +               /* Record is reused. Update its sequence number */
93891 +               seq = le16_to_cpu(rec->seq) + 1;
93892 +               if (!seq)
93893 +                       seq = 1;
93894 +       }
93896 +       memcpy(rec, sbi->new_rec, sbi->record_size);
93898 +       rec->seq = cpu_to_le16(seq);
93899 +       rec->flags = RECORD_FLAG_IN_USE | flags;
93901 +       mi->dirty = true;
93903 +       if (!mi->nb.nbufs) {
93904 +               struct ntfs_inode *ni = sbi->mft.ni;
93905 +               bool lock = false;
93907 +               if (is_mounted(sbi) && !is_mft) {
93908 +                       down_read(&ni->file.run_lock);
93909 +                       lock = true;
93910 +               }
93912 +               err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
93913 +                                 &mi->nb);
93914 +               if (lock)
93915 +                       up_read(&ni->file.run_lock);
93916 +       }
93918 +       return err;
93922 + * mi_mark_free
93923 + *
93924 + * marks the record as unused and marks it as free in the bitmap
93925 + */
93926 +void mi_mark_free(struct mft_inode *mi)
93928 +       CLST rno = mi->rno;
93929 +       struct ntfs_sb_info *sbi = mi->sbi;
93931 +       if (rno >= MFT_REC_RESERVED && rno < MFT_REC_FREE) {
93932 +               ntfs_clear_mft_tail(sbi, rno, rno + 1);
93933 +               mi->dirty = false;
93934 +               return;
93935 +       }
93937 +       if (mi->mrec) {
93938 +               clear_rec_inuse(mi->mrec);
93939 +               mi->dirty = true;
93940 +               mi_write(mi, 0);
93941 +       }
93942 +       ntfs_mark_rec_free(sbi, rno);
93946 + * mi_insert_attr
93947 + *
93948 + * reserves space for a new attribute
93949 + * returns a not fully constructed attribute, or NULL if it cannot be created
93950 + */
93951 +struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
93952 +                             const __le16 *name, u8 name_len, u32 asize,
93953 +                             u16 name_off)
93955 +       size_t tail;
93956 +       struct ATTRIB *attr;
93957 +       __le16 id;
93958 +       struct MFT_REC *rec = mi->mrec;
93959 +       struct ntfs_sb_info *sbi = mi->sbi;
93960 +       u32 used = le32_to_cpu(rec->used);
93961 +       const u16 *upcase = sbi->upcase;
93962 +       int diff;
93964 +       /* Can we insert this attribute? */
93965 +       if (used + asize > mi->sbi->record_size)
93966 +               return NULL;
93968 +       /*
93969 +        * Scan through the list of attributes to find the point
93970 +        * at which we should insert it.
93971 +        */
93972 +       attr = NULL;
93973 +       while ((attr = mi_enum_attr(mi, attr))) {
93974 +               diff = compare_attr(attr, type, name, name_len, upcase);
93975 +               if (diff > 0)
93976 +                       break;
93977 +               if (diff < 0)
93978 +                       continue;
93980 +               if (!is_attr_indexed(attr))
93981 +                       return NULL;
93982 +               break;
93983 +       }
93985 +       if (!attr) {
93986 +               tail = 8; /* the record's trailing 8 bytes (end marker), moved below */
93987 +               attr = Add2Ptr(rec, used - 8);
93988 +       } else {
93989 +               tail = used - PtrOffset(rec, attr);
93990 +       }
93992 +       id = mi_new_attt_id(mi);
93994 +       memmove(Add2Ptr(attr, asize), attr, tail);
93995 +       memset(attr, 0, asize);
93997 +       attr->type = type;
93998 +       attr->size = cpu_to_le32(asize);
93999 +       attr->name_len = name_len;
94000 +       attr->name_off = cpu_to_le16(name_off);
94001 +       attr->id = id;
94003 +       memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
94004 +       rec->used = cpu_to_le32(used + asize);
94006 +       mi->dirty = true;
94008 +       return attr;
94012 + * mi_remove_attr
94013 + *
94014 + * removes the attribute from record
94015 + * NOTE: The source attr will point to the next attribute
94016 + */
94017 +bool mi_remove_attr(struct mft_inode *mi, struct ATTRIB *attr)
94019 +       struct MFT_REC *rec = mi->mrec;
94020 +       u32 aoff = PtrOffset(rec, attr);
94021 +       u32 used = le32_to_cpu(rec->used);
94022 +       u32 asize = le32_to_cpu(attr->size);
94024 +       if (aoff + asize > used)
94025 +               return false;
94027 +       used -= asize;
94028 +       memmove(attr, Add2Ptr(attr, asize), used - aoff);
94029 +       rec->used = cpu_to_le32(used);
94030 +       mi->dirty = true;
94032 +       return true;
94035 +/* bytes = "new attribute size" - "old attribute size" */
94036 +bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
94038 +       struct MFT_REC *rec = mi->mrec;
94039 +       u32 aoff = PtrOffset(rec, attr);
94040 +       u32 total, used = le32_to_cpu(rec->used);
94041 +       u32 nsize, asize = le32_to_cpu(attr->size);
94042 +       u32 rsize = le32_to_cpu(attr->res.data_size);
94043 +       int tail = (int)(used - aoff - asize);
94044 +       int dsize;
94045 +       char *next;
94047 +       if (tail < 0 || aoff >= used)
94048 +               return false;
94050 +       if (!bytes)
94051 +               return true;
94053 +       total = le32_to_cpu(rec->total);
94054 +       next = Add2Ptr(attr, asize);
94056 +       if (bytes > 0) {
94057 +               dsize = QuadAlign(bytes);
94058 +               if (used + dsize > total)
94059 +                       return false;
94060 +               nsize = asize + dsize;
94061 +               // move tail
94062 +               memmove(next + dsize, next, tail);
94063 +               memset(next, 0, dsize);
94064 +               used += dsize;
94065 +               rsize += dsize;
94066 +       } else {
94067 +               dsize = QuadAlign(-bytes);
94068 +               if (dsize > asize)
94069 +                       return false;
94070 +               nsize = asize - dsize;
94071 +               memmove(next - dsize, next, tail);
94072 +               used -= dsize;
94073 +               rsize -= dsize;
94074 +       }
94076 +       rec->used = cpu_to_le32(used);
94077 +       attr->size = cpu_to_le32(nsize);
94078 +       if (!attr->non_res)
94079 +               attr->res.data_size = cpu_to_le32(rsize);
94080 +       mi->dirty = true;
94082 +       return true;
94085 +int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
94086 +                struct runs_tree *run, CLST len)
94088 +       int err = 0;
94089 +       struct ntfs_sb_info *sbi = mi->sbi;
94090 +       u32 new_run_size;
94091 +       CLST plen;
94092 +       struct MFT_REC *rec = mi->mrec;
94093 +       CLST svcn = le64_to_cpu(attr->nres.svcn);
94094 +       u32 used = le32_to_cpu(rec->used);
94095 +       u32 aoff = PtrOffset(rec, attr);
94096 +       u32 asize = le32_to_cpu(attr->size);
94097 +       char *next = Add2Ptr(attr, asize);
94098 +       u16 run_off = le16_to_cpu(attr->nres.run_off);
94099 +       u32 run_size = asize - run_off;
94100 +       u32 tail = used - aoff - asize;
94101 +       u32 dsize = sbi->record_size - used;
94103 +       /* Make a maximum gap in current record */
94104 +       memmove(next + dsize, next, tail);
94106 +       /* Pack as much as possible */
94107 +       err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
94108 +                      &plen);
94109 +       if (err < 0) {
94110 +               memmove(next, next + dsize, tail);
94111 +               return err;
94112 +       }
94114 +       new_run_size = QuadAlign(err);
94116 +       memmove(next + new_run_size - run_size, next + dsize, tail);
94118 +       attr->size = cpu_to_le32(asize + new_run_size - run_size);
94119 +       attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
94120 +       rec->used = cpu_to_le32(used + new_run_size - run_size);
94121 +       mi->dirty = true;
94123 +       return 0;
94125 diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
94126 new file mode 100644
94127 index 000000000000..5cdf6efe67e0
94128 --- /dev/null
94129 +++ b/fs/ntfs3/run.c
94130 @@ -0,0 +1,1111 @@
94131 +// SPDX-License-Identifier: GPL-2.0
94133 + *
94134 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
94135 + *
94136 + * TODO: try to use extents tree (instead of array)
94137 + */
94139 +#include <linux/blkdev.h>
94140 +#include <linux/buffer_head.h>
94141 +#include <linux/fs.h>
94142 +#include <linux/nls.h>
94144 +#include "debug.h"
94145 +#include "ntfs.h"
94146 +#include "ntfs_fs.h"
94148 +/* runs_tree is contiguous memory. Try to avoid big sizes. */
94149 +#define NTFS3_RUN_MAX_BYTES 0x10000
94151 +struct ntfs_run {
94152 +       CLST vcn; /* virtual cluster number */
94153 +       CLST len; /* length in clusters */
94154 +       CLST lcn; /* logical cluster number */
94158 + * run_lookup
94159 + *
94160 + * Lookup the index of the first MCB entry that is <= vcn.
94161 + * In case of success it returns a non-zero value and sets the
94162 + * 'index' parameter to the index of the entry found.
94163 + * If the entry is missing from the list, 'index' will be set to
94164 + * the insertion position for the entry in question.
94165 + */
94166 +bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
94168 +       size_t min_idx, max_idx, mid_idx;
94169 +       struct ntfs_run *r;
94171 +       if (!run->count) {
94172 +               *index = 0;
94173 +               return false;
94174 +       }
94176 +       min_idx = 0;
94177 +       max_idx = run->count - 1;
94179 +       /* Check boundary cases specially, because they cover the most frequent requests */
94180 +       r = run->runs;
94181 +       if (vcn < r->vcn) {
94182 +               *index = 0;
94183 +               return false;
94184 +       }
94186 +       if (vcn < r->vcn + r->len) {
94187 +               *index = 0;
94188 +               return true;
94189 +       }
94191 +       r += max_idx;
94192 +       if (vcn >= r->vcn + r->len) {
94193 +               *index = run->count;
94194 +               return false;
94195 +       }
94197 +       if (vcn >= r->vcn) {
94198 +               *index = max_idx;
94199 +               return true;
94200 +       }
94202 +       do {
94203 +               mid_idx = min_idx + ((max_idx - min_idx) >> 1);
94204 +               r = run->runs + mid_idx;
94206 +               if (vcn < r->vcn) {
94207 +                       max_idx = mid_idx - 1;
94208 +                       if (!mid_idx)
94209 +                               break;
94210 +               } else if (vcn >= r->vcn + r->len) {
94211 +                       min_idx = mid_idx + 1;
94212 +               } else {
94213 +                       *index = mid_idx;
94214 +                       return true;
94215 +               }
94216 +       } while (min_idx <= max_idx);
94218 +       *index = max_idx + 1;
94219 +       return false;
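The binary search above reports, via 'index', either the run that contains vcn or the position where a run covering vcn would have to be inserted. A worked sketch of that contract (illustrative only, not part of the patch; run_add_entry() is the helper defined later in this file, run_close() is assumed from ntfs_fs.h, and lcns 100/200 are arbitrary):

	static void example_run_lookup(void)
	{
		struct runs_tree run = {};
		size_t i;

		/* Build [{vcn 0, len 4}, {vcn 8, len 2}]: a hole at vcn 4..7. */
		run_add_entry(&run, 0, 100, 4, false);
		run_add_entry(&run, 8, 200, 2, false);

		run_lookup(&run, 2, &i);  /* true,  i == 0: vcn 2 is inside run 0 */
		run_lookup(&run, 5, &i);  /* false, i == 1: insertion position    */
		run_lookup(&run, 20, &i); /* false, i == 2 == run->count: append  */

		run_close(&run);
	}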
94223 + * run_consolidate
94224 + *
94225 + * consolidate runs starting from a given one.
94226 + */
94227 +static void run_consolidate(struct runs_tree *run, size_t index)
94229 +       size_t i;
94230 +       struct ntfs_run *r = run->runs + index;
94232 +       while (index + 1 < run->count) {
94233 +               /*
94234 +                * I should merge the current run with the next one
94235 +                * if the start of the next run lies inside the one being tested.
94236 +                */
94237 +               struct ntfs_run *n = r + 1;
94238 +               CLST end = r->vcn + r->len;
94239 +               CLST dl;
94241 +               /* Stop if runs are not aligned one to another. */
94242 +               if (n->vcn > end)
94243 +                       break;
94245 +               dl = end - n->vcn;
94247 +               /*
94248 +                * If the range at index overlaps with the next one
94249 +                * then I will either adjust its start position
94250 +                * or (if it matches completely) just remove it from the list.
94251 +                */
94252 +               if (dl > 0) {
94253 +                       if (n->len <= dl)
94254 +                               goto remove_next_range;
94256 +                       n->len -= dl;
94257 +                       n->vcn += dl;
94258 +                       if (n->lcn != SPARSE_LCN)
94259 +                               n->lcn += dl;
94260 +                       dl = 0;
94261 +               }
94263 +               /*
94264 +                * Stop if sparse mode does not match
94265 +                * both current and next runs.
94266 +                */
94267 +               if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
94268 +                       index += 1;
94269 +                       r = n;
94270 +                       continue;
94271 +               }
94273 +               /*
94274 +                * Check if the volume block
94275 +                * of the next run's lcn does not follow
94276 +                * the last volume block of the current run.
94277 +                */
94278 +               if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
94279 +                       break;
94281 +               /*
94282 +                * Next and current are siblings.
94283 +                * Eat/join.
94284 +                */
94285 +               r->len += n->len - dl;
94287 +remove_next_range:
94288 +               i = run->count - (index + 1);
94289 +               if (i > 1)
94290 +                       memmove(n, n + 1, sizeof(*n) * (i - 1));
94292 +               run->count -= 1;
94293 +       }
94296 +/* returns true if the range [svcn, evcn] is mapped */
94297 +bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
94299 +       size_t i;
94300 +       const struct ntfs_run *r, *end;
94301 +       CLST next_vcn;
94303 +       if (!run_lookup(run, svcn, &i))
94304 +               return false;
94306 +       end = run->runs + run->count;
94307 +       r = run->runs + i;
94309 +       for (;;) {
94310 +               next_vcn = r->vcn + r->len;
94311 +               if (next_vcn > evcn)
94312 +                       return true;
94314 +               if (++r >= end)
94315 +                       return false;
94317 +               if (r->vcn != next_vcn)
94318 +                       return false;
94319 +       }
94322 +bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
94323 +                     CLST *len, size_t *index)
94325 +       size_t idx;
94326 +       CLST gap;
94327 +       struct ntfs_run *r;
94329 +       /* Fail immediately if run was not touched yet. */
94330 +       if (!run->runs)
94331 +               return false;
94333 +       if (!run_lookup(run, vcn, &idx))
94334 +               return false;
94336 +       r = run->runs + idx;
94338 +       if (vcn >= r->vcn + r->len)
94339 +               return false;
94341 +       gap = vcn - r->vcn;
94342 +       if (r->len <= gap)
94343 +               return false;
94345 +       *lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);
94347 +       if (len)
94348 +               *len = r->len - gap;
94349 +       if (index)
94350 +               *index = idx;
94352 +       return true;
94356 + * run_truncate_head
94357 + *
94358 + * decommit the range before vcn
94359 + */
94360 +void run_truncate_head(struct runs_tree *run, CLST vcn)
94362 +       size_t index;
94363 +       struct ntfs_run *r;
94365 +       if (run_lookup(run, vcn, &index)) {
94366 +               r = run->runs + index;
94368 +               if (vcn > r->vcn) {
94369 +                       CLST dlen = vcn - r->vcn;
94371 +                       r->vcn = vcn;
94372 +                       r->len -= dlen;
94373 +                       if (r->lcn != SPARSE_LCN)
94374 +                               r->lcn += dlen;
94375 +               }
94377 +               if (!index)
94378 +                       return;
94379 +       }
94380 +       r = run->runs;
94381 +       memmove(r, r + index, sizeof(*r) * (run->count - index));
94383 +       run->count -= index;
94385 +       if (!run->count) {
94386 +               ntfs_vfree(run->runs);
94387 +               run->runs = NULL;
94388 +               run->allocated = 0;
94389 +       }
94393 + * run_truncate
94394 + *
94395 + * decommit the range after vcn
94396 + */
94397 +void run_truncate(struct runs_tree *run, CLST vcn)
94399 +       size_t index;
94401 +       /*
94402 +        * If I hit a range then
94403 +        * I have to truncate it.
94404 +        * If the range to be truncated becomes empty
94405 +        * then it will be removed entirely.
94406 +        */
94407 +       if (run_lookup(run, vcn, &index)) {
94408 +               struct ntfs_run *r = run->runs + index;
94410 +               r->len = vcn - r->vcn;
94412 +               if (r->len > 0)
94413 +                       index += 1;
94414 +       }
94416 +       /*
94417 +        * At this point 'index' is set to the first
94418 +        * position that should be thrown away (including index itself).
94419 +        * Simple: just set the limit.
94420 +        */
94421 +       run->count = index;
94423 +       /* Do not reallocate array 'runs'. Only free if possible */
94424 +       if (!index) {
94425 +               ntfs_vfree(run->runs);
94426 +               run->runs = NULL;
94427 +               run->allocated = 0;
94428 +       }
94431 +/* trim head and tail if necessary */
94432 +void run_truncate_around(struct runs_tree *run, CLST vcn)
94434 +       run_truncate_head(run, vcn);
94436 +       if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
94437 +               run_truncate(run, (run->runs + (run->count >> 1))->vcn);
94441 + * run_add_entry
94442 + *
94443 + * sets location to a known state.
94444 + * The run to be added may overlap with an existing location.
94445 + * returns false if out of memory
94446 + */
94447 +bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
94448 +                  bool is_mft)
94450 +       size_t used, index;
94451 +       struct ntfs_run *r;
94452 +       bool inrange;
94453 +       CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
94454 +       bool should_add_tail = false;
94456 +       /*
94457 +        * Lookup the insertion point.
94458 +        *
94459 +        * Execute bsearch for the entry containing
94460 +        * the start position in question.
94461 +        */
94462 +       inrange = run_lookup(run, vcn, &index);
94464 +       /*
94465 +        * A shortcut here is the case where the
94466 +        * range was not found, but the one being added
94467 +        * continues the previous run.
94468 +        * In this case I can directly make use of the
94469 +        * existing range as my start point.
94470 +        */
94471 +       if (!inrange && index > 0) {
94472 +               struct ntfs_run *t = run->runs + index - 1;
94474 +               if (t->vcn + t->len == vcn &&
94475 +                   (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
94476 +                   (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
94477 +                       inrange = true;
94478 +                       index -= 1;
94479 +               }
94480 +       }
94482 +       /*
94483 +        * At this point 'index' either points to the range
94484 +        * containing the start position or to the insertion position
94485 +        * for a new range.
94486 +        * So first let's check if the range I'm probing is already here.
94487 +        */
94488 +       if (!inrange) {
94489 +requires_new_range:
94490 +               /*
94491 +                * Range was not found.
94492 +                * Insert at position 'index'
94493 +                */
94494 +               used = run->count * sizeof(struct ntfs_run);
94496 +               /*
94497 +                * Check allocated space.
94498 +                * If it is not enough to hold one more entry
94499 +                * then it will be reallocated.
94500 +                */
94501 +               if (run->allocated < used + sizeof(struct ntfs_run)) {
94502 +                       size_t bytes;
94503 +                       struct ntfs_run *new_ptr;
94505 +                       /* Use a power of 2 for 'bytes' */
94506 +                       if (!used) {
94507 +                               bytes = 64;
94508 +                       } else if (used <= 16 * PAGE_SIZE) {
94509 +                               if (is_power_of2(run->allocated))
94510 +                                       bytes = run->allocated << 1;
94511 +                               else
94512 +                                       bytes = (size_t)1
94513 +                                               << (2 + blksize_bits(used));
94514 +                       } else {
94515 +                               bytes = run->allocated + (16 * PAGE_SIZE);
94516 +                       }
94518 +                       WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);
94520 +                       new_ptr = ntfs_vmalloc(bytes);
94522 +                       if (!new_ptr)
94523 +                               return false;
94525 +                       r = new_ptr + index;
94526 +                       memcpy(new_ptr, run->runs,
94527 +                              index * sizeof(struct ntfs_run));
94528 +                       memcpy(r + 1, run->runs + index,
94529 +                              sizeof(struct ntfs_run) * (run->count - index));
94531 +                       ntfs_vfree(run->runs);
94532 +                       run->runs = new_ptr;
94533 +                       run->allocated = bytes;
94535 +               } else {
94536 +                       size_t i = run->count - index;
94538 +                       r = run->runs + index;
94540 +                       /* memmove appears to be a bottleneck here... */
94541 +                       if (i > 0)
94542 +                               memmove(r + 1, r, sizeof(struct ntfs_run) * i);
94543 +               }
94545 +               r->vcn = vcn;
94546 +               r->lcn = lcn;
94547 +               r->len = len;
94548 +               run->count += 1;
94549 +       } else {
94550 +               r = run->runs + index;
94552 +               /*
94553 +                * If one of the ranges was not allocated
94554 +                * then I have to split the location I just matched
94555 +                * and insert the current one.
94556 +                * In the common case this requires the tail to be
94557 +                * reinserted by a recursive call.
94558 +                */
94559 +               if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
94560 +                   (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
94561 +                       CLST to_eat = vcn - r->vcn;
94562 +                       CLST Tovcn = to_eat + len;
94564 +                       should_add_tail = Tovcn < r->len;
94566 +                       if (should_add_tail) {
94567 +                               tail_lcn = r->lcn == SPARSE_LCN
94568 +                                                  ? SPARSE_LCN
94569 +                                                  : (r->lcn + Tovcn);
94570 +                               tail_vcn = r->vcn + Tovcn;
94571 +                               tail_len = r->len - Tovcn;
94572 +                       }
94574 +                       if (to_eat > 0) {
94575 +                               r->len = to_eat;
94576 +                               inrange = false;
94577 +                               index += 1;
94578 +                               goto requires_new_range;
94579 +                       }
94581 +                       /* lcn should match the one I'm going to add. */
94582 +                       r->lcn = lcn;
94583 +               }
94585 +               /*
94586 +                * If the existing range fits then I'm done.
94587 +                * Otherwise extend the found one and fall back to the range join code.
94588 +                */
94589 +               if (r->vcn + r->len < vcn + len)
94590 +                       r->len += len - ((r->vcn + r->len) - vcn);
94591 +       }
94593 +       /*
94594 +        * And normalize it starting from the insertion point.
94595 +        * It's possible that no insertion was needed, in case the
94596 +        * start point lies within the range of the entry
94597 +        * that 'index' points to.
94598 +        */
94599 +       if (inrange && index > 0)
94600 +               index -= 1;
94601 +       run_consolidate(run, index);
94602 +       run_consolidate(run, index + 1);
94604 +       /*
94605 +        * A special case:
94606 +        * I have to add an extra range as a tail.
94607 +        */
94608 +       if (should_add_tail &&
94609 +           !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
94610 +               return false;
94612 +       return true;
94615 +/* Helper for attr_collapse_range, which is a helper for fallocate(collapse_range) */
94616 +bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
94618 +       size_t index, eat;
94619 +       struct ntfs_run *r, *e, *eat_start, *eat_end;
94620 +       CLST end;
94622 +       if (WARN_ON(!run_lookup(run, vcn, &index)))
94623 +               return true; /* should never be here */
94625 +       e = run->runs + run->count;
94626 +       r = run->runs + index;
94627 +       end = vcn + len;
94629 +       if (vcn > r->vcn) {
94630 +               if (r->vcn + r->len <= end) {
94631 +                       /* collapse tail of run */
94632 +                       r->len = vcn - r->vcn;
94633 +               } else if (r->lcn == SPARSE_LCN) {
94634 +                       /* collapse a middle part of a sparse run */
94635 +                       r->len -= len;
94636 +               } else {
94637 +                       /* collapse a middle part of normal run, split */
94638 +                       if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
94639 +                               return false;
94640 +                       return run_collapse_range(run, vcn, len);
94641 +               }
94643 +               r += 1;
94644 +       }
94646 +       eat_start = r;
94647 +       eat_end = r;
94649 +       for (; r < e; r++) {
94650 +               CLST d;
94652 +               if (r->vcn >= end) {
94653 +                       r->vcn -= len;
94654 +                       continue;
94655 +               }
94657 +               if (r->vcn + r->len <= end) {
94658 +                       /* eat this run */
94659 +                       eat_end = r + 1;
94660 +                       continue;
94661 +               }
94663 +               d = end - r->vcn;
94664 +               if (r->lcn != SPARSE_LCN)
94665 +                       r->lcn += d;
94666 +               r->len -= d;
94667 +               r->vcn -= len - d;
94668 +       }
94670 +       eat = eat_end - eat_start;
94671 +       memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
94672 +       run->count -= eat;
94674 +       return true;
94678 + * run_get_entry
94679 + *
94680 + * returns the index-th mapped region
94681 + */
94682 +bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
94683 +                  CLST *lcn, CLST *len)
94685 +       const struct ntfs_run *r;
94687 +       if (index >= run->count)
94688 +               return false;
94690 +       r = run->runs + index;
94692 +       if (!r->len)
94693 +               return false;
94695 +       if (vcn)
94696 +               *vcn = r->vcn;
94697 +       if (lcn)
94698 +               *lcn = r->lcn;
94699 +       if (len)
94700 +               *len = r->len;
94701 +       return true;
94705 + * run_packed_size
94706 + *
94707 + * calculates the size of a packed int64
94708 + */
94709 +#ifdef __BIG_ENDIAN
94710 +static inline int run_packed_size(const s64 n)
94712 +       const u8 *p = (const u8 *)&n + sizeof(n) - 1;
94714 +       if (n >= 0) {
94715 +               if (p[-7] || p[-6] || p[-5] || p[-4])
94716 +                       p -= 4;
94717 +               if (p[-3] || p[-2])
94718 +                       p -= 2;
94719 +               if (p[-1])
94720 +                       p -= 1;
94721 +               if (p[0] & 0x80)
94722 +                       p -= 1;
94723 +       } else {
94724 +               if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
94725 +                   p[-4] != 0xff)
94726 +                       p -= 4;
94727 +               if (p[-3] != 0xff || p[-2] != 0xff)
94728 +                       p -= 2;
94729 +               if (p[-1] != 0xff)
94730 +                       p -= 1;
94731 +               if (!(p[0] & 0x80))
94732 +                       p -= 1;
94733 +       }
94734 +       return (const u8 *)&n + sizeof(n) - p;
94737 +/* Fully trusted function. It does not check 'size' for errors. */
94738 +static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
94740 +       const u8 *p = (u8 *)&v;
94742 +       switch (size) {
94743 +       case 8:
94744 +               run_buf[7] = p[0];
94745 +               fallthrough;
94746 +       case 7:
94747 +               run_buf[6] = p[1];
94748 +               fallthrough;
94749 +       case 6:
94750 +               run_buf[5] = p[2];
94751 +               fallthrough;
94752 +       case 5:
94753 +               run_buf[4] = p[3];
94754 +               fallthrough;
94755 +       case 4:
94756 +               run_buf[3] = p[4];
94757 +               fallthrough;
94758 +       case 3:
94759 +               run_buf[2] = p[5];
94760 +               fallthrough;
94761 +       case 2:
94762 +               run_buf[1] = p[6];
94763 +               fallthrough;
94764 +       case 1:
94765 +               run_buf[0] = p[7];
94766 +       }
94769 +/* Fully trusted function. It does not check 'size' for errors. */
94770 +static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
94772 +       u8 *p = (u8 *)&v;
94774 +       switch (size) {
94775 +       case 8:
94776 +               p[0] = run_buf[7];
94777 +               fallthrough;
94778 +       case 7:
94779 +               p[1] = run_buf[6];
94780 +               fallthrough;
94781 +       case 6:
94782 +               p[2] = run_buf[5];
94783 +               fallthrough;
94784 +       case 5:
94785 +               p[3] = run_buf[4];
94786 +               fallthrough;
94787 +       case 4:
94788 +               p[4] = run_buf[3];
94789 +               fallthrough;
94790 +       case 3:
94791 +               p[5] = run_buf[2];
94792 +               fallthrough;
94793 +       case 2:
94794 +               p[6] = run_buf[1];
94795 +               fallthrough;
94796 +       case 1:
94797 +               p[7] = run_buf[0];
94798 +       }
94799 +       return v;
94802 +#else
94804 +static inline int run_packed_size(const s64 n)
94806 +       const u8 *p = (const u8 *)&n;
94808 +       if (n >= 0) {
94809 +               if (p[7] || p[6] || p[5] || p[4])
94810 +                       p += 4;
94811 +               if (p[3] || p[2])
94812 +                       p += 2;
94813 +               if (p[1])
94814 +                       p += 1;
94815 +               if (p[0] & 0x80)
94816 +                       p += 1;
94817 +       } else {
94818 +               if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
94819 +                   p[4] != 0xff)
94820 +                       p += 4;
94821 +               if (p[3] != 0xff || p[2] != 0xff)
94822 +                       p += 2;
94823 +               if (p[1] != 0xff)
94824 +                       p += 1;
94825 +               if (!(p[0] & 0x80))
94826 +                       p += 1;
94827 +       }
94829 +       return 1 + p - (const u8 *)&n;
94832 +/* Fully trusted function. It does not check 'size' for errors. */
94833 +static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
94835 +       const u8 *p = (u8 *)&v;
94837 +       /* memcpy( run_buf, &v, size); is it faster? */
94838 +       switch (size) {
94839 +       case 8:
94840 +               run_buf[7] = p[7];
94841 +               fallthrough;
94842 +       case 7:
94843 +               run_buf[6] = p[6];
94844 +               fallthrough;
94845 +       case 6:
94846 +               run_buf[5] = p[5];
94847 +               fallthrough;
94848 +       case 5:
94849 +               run_buf[4] = p[4];
94850 +               fallthrough;
94851 +       case 4:
94852 +               run_buf[3] = p[3];
94853 +               fallthrough;
94854 +       case 3:
94855 +               run_buf[2] = p[2];
94856 +               fallthrough;
94857 +       case 2:
94858 +               run_buf[1] = p[1];
94859 +               fallthrough;
94860 +       case 1:
94861 +               run_buf[0] = p[0];
94862 +       }
94865 +/* Fully trusted function. It does not check 'size' for errors. */
94866 +static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
94868 +       u8 *p = (u8 *)&v;
94870 +       /* memcpy( &v, run_buf, size); is it faster? */
94871 +       switch (size) {
94872 +       case 8:
94873 +               p[7] = run_buf[7];
94874 +               fallthrough;
94875 +       case 7:
94876 +               p[6] = run_buf[6];
94877 +               fallthrough;
94878 +       case 6:
94879 +               p[5] = run_buf[5];
94880 +               fallthrough;
94881 +       case 5:
94882 +               p[4] = run_buf[4];
94883 +               fallthrough;
94884 +       case 4:
94885 +               p[3] = run_buf[3];
94886 +               fallthrough;
94887 +       case 3:
94888 +               p[2] = run_buf[2];
94889 +               fallthrough;
94890 +       case 2:
94891 +               p[1] = run_buf[1];
94892 +               fallthrough;
94893 +       case 1:
94894 +               p[0] = run_buf[0];
94895 +       }
94896 +       return v;
94898 +#endif
94901 + * run_pack
94902 + *
94903 + * packs runs into a buffer
94904 + * packed_vcns - how many vcns we have packed
94905 + * packed_size - how many bytes of run_buf we have used
94906 + */
94907 +int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
94908 +            u32 run_buf_size, CLST *packed_vcns)
94910 +       CLST next_vcn, vcn, lcn;
94911 +       CLST prev_lcn = 0;
94912 +       CLST evcn1 = svcn + len;
94913 +       int packed_size = 0;
94914 +       size_t i;
94915 +       bool ok;
94916 +       s64 dlcn;
94917 +       int offset_size, size_size, tmp;
94919 +       next_vcn = vcn = svcn;
94921 +       *packed_vcns = 0;
94923 +       if (!len)
94924 +               goto out;
94926 +       ok = run_lookup_entry(run, vcn, &lcn, &len, &i);
94928 +       if (!ok)
94929 +               goto error;
94931 +       if (next_vcn != vcn)
94932 +               goto error;
94934 +       for (;;) {
94935 +               next_vcn = vcn + len;
94936 +               if (next_vcn > evcn1)
94937 +                       len = evcn1 - vcn;
94939 +               /* how many bytes are required to pack len */
94940 +               size_size = run_packed_size(len);
94942 +               /* offset_size - how many bytes the packed dlcn takes */
94943 +               if (lcn == SPARSE_LCN) {
94944 +                       offset_size = 0;
94945 +                       dlcn = 0;
94946 +               } else {
94947 +                       /* NOTE: lcn can be less than prev_lcn! */
94948 +                       dlcn = (s64)lcn - prev_lcn;
94949 +                       offset_size = run_packed_size(dlcn);
94950 +                       prev_lcn = lcn;
94951 +               }
94953 +               tmp = run_buf_size - packed_size - 2 - offset_size;
94954 +               if (tmp <= 0)
94955 +                       goto out;
94957 +               /* can we store this entire run? */
94958 +               if (tmp < size_size)
94959 +                       goto out;
94961 +               if (run_buf) {
94962 +                       /* pack run header */
94963 +                       run_buf[0] = ((u8)(size_size | (offset_size << 4)));
94964 +                       run_buf += 1;
94966 +                       /* Pack the length of run */
94967 +                       run_pack_s64(run_buf, size_size, len);
94969 +                       run_buf += size_size;
94970 +                       /* Pack the offset from previous lcn */
94971 +                       run_pack_s64(run_buf, offset_size, dlcn);
94972 +                       run_buf += offset_size;
94973 +               }
94975 +               packed_size += 1 + offset_size + size_size;
94976 +               *packed_vcns += len;
94978 +               if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
94979 +                       goto out;
94981 +               ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
94982 +               if (!ok)
94983 +                       goto error;
94985 +               if (next_vcn != vcn)
94986 +                       goto error;
94987 +       }
94989 +out:
94990 +       /* Store last zero */
94991 +       if (run_buf)
94992 +               run_buf[0] = 0;
94994 +       return packed_size + 1;
94996 +error:
94997 +       return -EOPNOTSUPP;
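Each packed entry thus starts with one header byte, size_size in the low nibble and offset_size in the high nibble, followed by the little-endian run length and then the signed delta from the previous lcn; sparse runs store offset_size 0 and no delta, and a lone zero byte terminates the stream. A worked example of the bytes run_pack() emits (illustrative only, not part of the patch):

	/*
	 * Two runs: {vcn 0, lcn 0x25, len 8} then {vcn 8, lcn 0x20, len 4}.
	 * The second delta is 0x20 - 0x25 = -5, which fits in one signed byte.
	 */
	static const u8 example_packed[] = {
		0x11, 0x08, 0x25, /* header 0x11: size_size 1, offset_size 1 */
		0x11, 0x04, 0xFB, /* len 4; dlcn -5 relative to previous lcn */
		0x00,             /* terminating zero header                 */
	};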
95001 + * run_unpack
95002 + *
95003 + * unpacks packed runs from "run_buf"
95005 + * returns a negative error code, or the number of bytes actually used
95005 + */
95006 +int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
95007 +              CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
95008 +              u32 run_buf_size)
95010 +       u64 prev_lcn, vcn64, lcn, next_vcn;
95011 +       const u8 *run_last, *run_0;
95012 +       bool is_mft = ino == MFT_REC_MFT;
95014 +       /* Check for empty */
95015 +       if (evcn + 1 == svcn)
95016 +               return 0;
95018 +       if (evcn < svcn)
95019 +               return -EINVAL;
95021 +       run_0 = run_buf;
95022 +       run_last = run_buf + run_buf_size;
95023 +       prev_lcn = 0;
95024 +       vcn64 = svcn;
95026 +       /* Read all runs in the chain */
95028 +       while (run_buf < run_last) {
95029 +               /* size_size - how many bytes the packed len takes */
95030 +               u8 size_size = *run_buf & 0xF;
95031 +               /* offset_size - how many bytes the packed dlcn takes */
95032 +               u8 offset_size = *run_buf++ >> 4;
95033 +               u64 len;
95035 +               if (!size_size)
95036 +                       break;
95038 +               /*
95039 +                * Unpack runs.
95040 +                * NOTE: runs are stored in little-endian order.
95041 +                * "len" is an unsigned value, "dlcn" is signed.
95042 +                * A large positive number requires 5 bytes to store,
95043 +                * e.g.: 05 FF 7E FF FF 00 00 00
95044 +                */
95045 +               if (size_size > 8)
95046 +                       return -EINVAL;
95048 +               len = run_unpack_s64(run_buf, size_size, 0);
95049 +               /* skip size_size */
95050 +               run_buf += size_size;
95052 +               if (!len)
95053 +                       return -EINVAL;
95055 +               if (!offset_size)
95056 +                       lcn = SPARSE_LCN64;
95057 +               else if (offset_size <= 8) {
95058 +                       s64 dlcn;
95060 +                       /* initial value of dlcn is -1 or 0 */
95061 +                       dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
95062 +                       dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
95063 +                       /* skip offset_size */
95064 +                       run_buf += offset_size;
95066 +                       if (!dlcn)
95067 +                               return -EINVAL;
95068 +                       lcn = prev_lcn + dlcn;
95069 +                       prev_lcn = lcn;
95070 +               } else
95071 +                       return -EINVAL;
95073 +               next_vcn = vcn64 + len;
95074 +               /* check boundary */
95075 +               if (next_vcn > evcn + 1)
95076 +                       return -EINVAL;
95078 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
95079 +               if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
95080 +                       ntfs_err(
95081 +                               sbi->sb,
95082 +                               "This driver is compiled whitout CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
95083 +                               "Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
95084 +                               "Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
95085 +                               vcn64, lcn, len);
95086 +                       return -EOPNOTSUPP;
95087 +               }
95088 +#endif
95089 +               if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
95090 +                       /* lcn range is out of volume */
95091 +                       return -EINVAL;
95092 +               }
95094 +               if (!run)
95095 +                       ; /* called from check_attr(fslog.c) to check run */
95096 +               else if (run == RUN_DEALLOCATE) {
95097 +                       /* called from ni_delete_all to free clusters without storing in run */
95098 +                       if (lcn != SPARSE_LCN64)
95099 +                               mark_as_free_ex(sbi, lcn, len, true);
95100 +               } else if (vcn64 >= vcn) {
95101 +                       if (!run_add_entry(run, vcn64, lcn, len, is_mft))
95102 +                               return -ENOMEM;
95103 +               } else if (next_vcn > vcn) {
95104 +                       u64 dlen = vcn - vcn64;
95106 +                       if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
95107 +                                          is_mft))
95108 +                               return -ENOMEM;
95109 +               }
95111 +               vcn64 = next_vcn;
95112 +       }
95114 +       if (vcn64 != evcn + 1) {
95115 +               /* unexpected length of unpacked runs */
95116 +               return -EINVAL;
95117 +       }
95119 +       return run_buf - run_0;
95122 +#ifdef NTFS3_CHECK_FREE_CLST
95124 + * run_unpack_ex
95125 + *
95126 + * unpacks packed runs from "run_buf"
95127 + * checks that the unpacked runs are marked as used in the bitmap
95128 + * returns a negative error code, or the number of bytes actually used
95129 + */
95130 +int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
95131 +                 CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
95132 +                 u32 run_buf_size)
95134 +       int ret, err;
95135 +       CLST next_vcn, lcn, len;
95136 +       size_t index;
95137 +       bool ok;
95138 +       struct wnd_bitmap *wnd;
95140 +       ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
95141 +       if (ret <= 0)
95142 +               return ret;
95144 +       if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
95145 +               return ret;
95147 +       if (ino == MFT_REC_BADCLUST)
95148 +               return ret;
95150 +       next_vcn = vcn = svcn;
95151 +       wnd = &sbi->used.bitmap;
95153 +       for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
95154 +            next_vcn <= evcn;
95155 +            ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
95156 +               if (!ok || next_vcn != vcn)
95157 +                       return -EINVAL;
95159 +               next_vcn = vcn + len;
95161 +               if (lcn == SPARSE_LCN)
95162 +                       continue;
95164 +               if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
95165 +                       continue;
95167 +               down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
95168 +               /* Check for free blocks */
95169 +               ok = wnd_is_used(wnd, lcn, len);
95170 +               up_read(&wnd->rw_lock);
95171 +               if (ok)
95172 +                       continue;
95174 +               /* Looks like volume is corrupted */
95175 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
95177 +               if (down_write_trylock(&wnd->rw_lock)) {
95178 +                       /* mark all zero bits as used in range [lcn, lcn+len) */
95179 +                       CLST i, lcn_f = 0, len_f = 0;
95181 +                       err = 0;
95182 +                       for (i = 0; i < len; i++) {
95183 +                               if (wnd_is_free(wnd, lcn + i, 1)) {
95184 +                                       if (!len_f)
95185 +                                               lcn_f = lcn + i;
95186 +                                       len_f += 1;
95187 +                               } else if (len_f) {
95188 +                                       err = wnd_set_used(wnd, lcn_f, len_f);
95189 +                                       len_f = 0;
95190 +                                       if (err)
95191 +                                               break;
95192 +                               }
95193 +                       }
95195 +                       if (len_f)
95196 +                               err = wnd_set_used(wnd, lcn_f, len_f);
95198 +                       up_write(&wnd->rw_lock);
95199 +                       if (err)
95200 +                               return err;
95201 +               }
95202 +       }
95204 +       return ret;
95206 +#endif
95209 + * run_get_highest_vcn
95210 + *
95211 + * returns the highest vcn from a mapping pairs array
95212 + * it is used while replaying the log file
95213 + */
95214 +int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
95216 +       u64 vcn64 = vcn;
95217 +       u8 size_size;
95219 +       while ((size_size = *run_buf & 0xF)) {
95220 +               u8 offset_size = *run_buf++ >> 4;
95221 +               u64 len;
95223 +               if (size_size > 8 || offset_size > 8)
95224 +                       return -EINVAL;
95226 +               len = run_unpack_s64(run_buf, size_size, 0);
95227 +               if (!len)
95228 +                       return -EINVAL;
95230 +               run_buf += size_size + offset_size;
95231 +               vcn64 += len;
95233 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
95234 +               if (vcn64 > 0x100000000ull)
95235 +                       return -EINVAL;
95236 +#endif
95237 +       }
95239 +       *highest_vcn = vcn64 - 1;
95240 +       return 0;
95242 diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
95243 new file mode 100644
95244 index 000000000000..c563431248bf
95245 --- /dev/null
95246 +++ b/fs/ntfs3/super.c
95247 @@ -0,0 +1,1500 @@
95248 +// SPDX-License-Identifier: GPL-2.0
95250 + *
95251 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
95252 + *
95253 + *
95254 + *                 terminology
95255 + *
95256 + * cluster - allocation unit     - 512,1K,2K,4K,...,2M
95257 + * vcn - virtual cluster number  - offset inside the file in clusters
95258 + * vbo - virtual byte offset     - offset inside the file in bytes
95259 + * lcn - logical cluster number  - 0 based cluster in clusters heap
95260 + * lbo - logical byte offset     - absolute position inside volume
95261 + * run - maps vcn to lcn         - stored in attributes in packed form
95262 + * attr - attribute segment      - std/name/data etc records inside MFT
95263 + * mi  - mft inode               - one MFT record(usually 1024 bytes or 4K), consists of attributes
95264 + * ni  - ntfs inode              - extends linux inode. consists of one or more mft inodes
95265 + * index - unit inside directory - 2K, 4K, <=page size, does not depend on cluster size
95266 + *
95267 + * TODO: Implement
95268 + * https://docs.microsoft.com/en-us/windows/wsl/file-permissions
95269 + */
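Numerically, the terminology above reduces to simple shifts (illustrative only; a 4K cluster, i.e. sbi->cluster_bits == 12, is assumed):

	u64 vbo = (u64)vcn << sbi->cluster_bits; /* byte offset inside the file   */
	u64 lbo = (u64)lcn << sbi->cluster_bits; /* byte offset inside the volume */

So vcn 3 of a file covers file bytes 0x3000..0x3FFF, and a run mapping vcn 3 to lcn 0x25 places those bytes at volume offsets 0x25000..0x25FFF.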
95271 +#include <linux/backing-dev.h>
95272 +#include <linux/blkdev.h>
95273 +#include <linux/buffer_head.h>
95274 +#include <linux/exportfs.h>
95275 +#include <linux/fs.h>
95276 +#include <linux/iversion.h>
95277 +#include <linux/module.h>
95278 +#include <linux/nls.h>
95279 +#include <linux/parser.h>
95280 +#include <linux/seq_file.h>
95281 +#include <linux/statfs.h>
95283 +#include "debug.h"
95284 +#include "ntfs.h"
95285 +#include "ntfs_fs.h"
95286 +#ifdef CONFIG_NTFS3_LZX_XPRESS
95287 +#include "lib/lib.h"
95288 +#endif
95290 +#ifdef CONFIG_PRINTK
95292 + * Trace warnings/notices/errors
95293 + * Thanks Joe Perches <joe@perches.com> for implementation
95294 + */
95295 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
95297 +       struct va_format vaf;
95298 +       va_list args;
95299 +       int level;
95300 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95302 +       /* should we use different ratelimits for warnings/notices/errors? */
95303 +       if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
95304 +               return;
95306 +       va_start(args, fmt);
95308 +       level = printk_get_level(fmt);
95309 +       vaf.fmt = printk_skip_level(fmt);
95310 +       vaf.va = &args;
95311 +       printk("%c%cntfs3: %s: %pV\n", KERN_SOH_ASCII, level, sb->s_id, &vaf);
95313 +       va_end(args);
95316 +static char s_name_buf[512];
95317 +static atomic_t s_name_buf_cnt = ATOMIC_INIT(1); // 1 means 'free s_name_buf'
95319 +/* Print warnings/notices/errors about an inode, using its name or inode number */
95320 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
95322 +       struct super_block *sb = inode->i_sb;
95323 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95324 +       char *name;
95325 +       va_list args;
95326 +       struct va_format vaf;
95327 +       int level;
95329 +       if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
95330 +               return;
95332 +       /* Use a statically allocated buffer, if possible */
95333 +       name = atomic_dec_and_test(&s_name_buf_cnt)
95334 +                      ? s_name_buf
95335 +                      : kmalloc(sizeof(s_name_buf), GFP_NOFS);
95337 +       if (name) {
95338 +               struct dentry *de = d_find_alias(inode);
95339 +               const u32 name_len = ARRAY_SIZE(s_name_buf) - 1;
95341 +               if (de) {
95342 +                       spin_lock(&de->d_lock);
95343 +                       snprintf(name, name_len, " \"%s\"", de->d_name.name);
95344 +                       spin_unlock(&de->d_lock);
95345 +                       name[name_len] = 0; /* to be sure */
95346 +               } else {
95347 +                       name[0] = 0;
95348 +               }
95349 +               dput(de); /* cocci warns if placed in branch "if (de)" */
95350 +       }
95352 +       va_start(args, fmt);
95354 +       level = printk_get_level(fmt);
95355 +       vaf.fmt = printk_skip_level(fmt);
95356 +       vaf.va = &args;
95358 +       printk("%c%cntfs3: %s: ino=%lx,%s %pV\n", KERN_SOH_ASCII, level,
95359 +              sb->s_id, inode->i_ino, name ? name : "", &vaf);
95361 +       va_end(args);
95363 +       atomic_inc(&s_name_buf_cnt);
95364 +       if (name != s_name_buf)
95365 +               kfree(name);
95367 +#endif
95370 + * Shared memory struct.
95371 + *
95372 + * The on-disk ntfs upcase table is created by the ntfs formatter.
95373 + * The 'upcase' table is 128K bytes of memory,
95374 + * which we read into memory when mounting.
95375 + * Several ntfs volumes are likely to use the same 'upcase' table,
95376 + * so it is a good idea to share the in-memory table between volumes.
95377 + * Unfortunately winxp/vista/win7 use different upcase tables.
95378 + */
95379 +static DEFINE_SPINLOCK(s_shared_lock);
95381 +static struct {
95382 +       void *ptr;
95383 +       u32 len;
95384 +       int cnt;
95385 +} s_shared[8];
95388 + * ntfs_set_shared
95389 + *
95390 + * Returns 'ptr' if pointer was saved in shared memory
95391 + * Returns NULL if pointer was not shared
95392 + */
95393 +void *ntfs_set_shared(void *ptr, u32 bytes)
95395 +       void *ret = NULL;
95396 +       int i, j = -1;
95398 +       spin_lock(&s_shared_lock);
95399 +       for (i = 0; i < ARRAY_SIZE(s_shared); i++) {
95400 +               if (!s_shared[i].cnt) {
95401 +                       j = i;
95402 +               } else if (bytes == s_shared[i].len &&
95403 +                          !memcmp(s_shared[i].ptr, ptr, bytes)) {
95404 +                       s_shared[i].cnt += 1;
95405 +                       ret = s_shared[i].ptr;
95406 +                       break;
95407 +               }
95408 +       }
95410 +       if (!ret && j != -1) {
95411 +               s_shared[j].ptr = ptr;
95412 +               s_shared[j].len = bytes;
95413 +               s_shared[j].cnt = 1;
95414 +               ret = ptr;
95415 +       }
95416 +       spin_unlock(&s_shared_lock);
95418 +       return ret;
95422 + * ntfs_put_shared
95423 + *
95424 + * Returns 'ptr' if pointer is not shared anymore
95425 + * Returns NULL if pointer is still shared
95426 + */
95427 +void *ntfs_put_shared(void *ptr)
95429 +       void *ret = ptr;
95430 +       int i;
95432 +       spin_lock(&s_shared_lock);
95433 +       for (i = 0; i < ARRAY_SIZE(s_shared); i++) {
95434 +               if (s_shared[i].cnt && s_shared[i].ptr == ptr) {
95435 +                       if (--s_shared[i].cnt)
95436 +                               ret = NULL;
95437 +                       break;
95438 +               }
95439 +       }
95440 +       spin_unlock(&s_shared_lock);
95442 +       return ret;
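A minimal sketch of the intended lifecycle, mirroring how ntfs_fill_super() and put_ntfs() below use this pair:

	u16 *upcase = ntfs_vmalloc(0x10000 * sizeof(short));
	/* ... fill the table from disk ... */
	u16 *shared = ntfs_set_shared(upcase, 0x10000 * sizeof(short));
	if (shared && shared != upcase)
		ntfs_vfree(upcase);  /* identical table already shared: reuse it */
	/* on unmount, free only when the last reference is dropped */
	ntfs_vfree(ntfs_put_shared(shared ? shared : upcase));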
95445 +static inline void clear_mount_options(struct ntfs_mount_options *options)
95447 +       unload_nls(options->nls);
95450 +enum Opt {
95451 +       Opt_uid,
95452 +       Opt_gid,
95453 +       Opt_umask,
95454 +       Opt_dmask,
95455 +       Opt_fmask,
95456 +       Opt_immutable,
95457 +       Opt_discard,
95458 +       Opt_force,
95459 +       Opt_sparse,
95460 +       Opt_nohidden,
95461 +       Opt_showmeta,
95462 +       Opt_acl,
95463 +       Opt_noatime,
95464 +       Opt_nls,
95465 +       Opt_prealloc,
95466 +       Opt_no_acs_rules,
95467 +       Opt_err,
95470 +static const match_table_t ntfs_tokens = {
95471 +       { Opt_uid, "uid=%u" },
95472 +       { Opt_gid, "gid=%u" },
95473 +       { Opt_umask, "umask=%o" },
95474 +       { Opt_dmask, "dmask=%o" },
95475 +       { Opt_fmask, "fmask=%o" },
95476 +       { Opt_immutable, "sys_immutable" },
95477 +       { Opt_discard, "discard" },
95478 +       { Opt_force, "force" },
95479 +       { Opt_sparse, "sparse" },
95480 +       { Opt_nohidden, "nohidden" },
95481 +       { Opt_acl, "acl" },
95482 +       { Opt_noatime, "noatime" },
95483 +       { Opt_showmeta, "showmeta" },
95484 +       { Opt_nls, "nls=%s" },
95485 +       { Opt_prealloc, "prealloc" },
95486 +       { Opt_no_acs_rules, "no_acs_rules" },
95487 +       { Opt_err, NULL },
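All tokens above are ordinary mount options; an illustrative invocation (device and mount point hypothetical) could be:

	/* mount -t ntfs3 -o uid=1000,gid=1000,umask=0022,nls=utf8,discard /dev/sdb1 /mnt */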
95490 +static noinline int ntfs_parse_options(struct super_block *sb, char *options,
95491 +                                      int silent,
95492 +                                      struct ntfs_mount_options *opts)
95494 +       char *p;
95495 +       substring_t args[MAX_OPT_ARGS];
95496 +       int option;
95497 +       char nls_name[30];
95498 +       struct nls_table *nls;
95500 +       opts->fs_uid = current_uid();
95501 +       opts->fs_gid = current_gid();
95502 +       opts->fs_fmask_inv = opts->fs_dmask_inv = ~current_umask();
95503 +       nls_name[0] = 0;
95505 +       if (!options)
95506 +               goto out;
95508 +       while ((p = strsep(&options, ","))) {
95509 +               int token;
95511 +               if (!*p)
95512 +                       continue;
95514 +               token = match_token(p, ntfs_tokens, args);
95515 +               switch (token) {
95516 +               case Opt_immutable:
95517 +                       opts->sys_immutable = 1;
95518 +                       break;
95519 +               case Opt_uid:
95520 +                       if (match_int(&args[0], &option))
95521 +                               return -EINVAL;
95522 +                       opts->fs_uid = make_kuid(current_user_ns(), option);
95523 +                       if (!uid_valid(opts->fs_uid))
95524 +                               return -EINVAL;
95525 +                       opts->uid = 1;
95526 +                       break;
95527 +               case Opt_gid:
95528 +                       if (match_int(&args[0], &option))
95529 +                               return -EINVAL;
95530 +                       opts->fs_gid = make_kgid(current_user_ns(), option);
95531 +                       if (!gid_valid(opts->fs_gid))
95532 +                               return -EINVAL;
95533 +                       opts->gid = 1;
95534 +                       break;
95535 +               case Opt_umask:
95536 +                       if (match_octal(&args[0], &option))
95537 +                               return -EINVAL;
95538 +                       opts->fs_fmask_inv = opts->fs_dmask_inv = ~option;
95539 +                       opts->fmask = opts->dmask = 1;
95540 +                       break;
95541 +               case Opt_dmask:
95542 +                       if (match_octal(&args[0], &option))
95543 +                               return -EINVAL;
95544 +                       opts->fs_dmask_inv = ~option;
95545 +                       opts->dmask = 1;
95546 +                       break;
95547 +               case Opt_fmask:
95548 +                       if (match_octal(&args[0], &option))
95549 +                               return -EINVAL;
95550 +                       opts->fs_fmask_inv = ~option;
95551 +                       opts->fmask = 1;
95552 +                       break;
95553 +               case Opt_discard:
95554 +                       opts->discard = 1;
95555 +                       break;
95556 +               case Opt_force:
95557 +                       opts->force = 1;
95558 +                       break;
95559 +               case Opt_sparse:
95560 +                       opts->sparse = 1;
95561 +                       break;
95562 +               case Opt_nohidden:
95563 +                       opts->nohidden = 1;
95564 +                       break;
95565 +               case Opt_acl:
95566 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
95567 +                       sb->s_flags |= SB_POSIXACL;
95568 +                       break;
95569 +#else
95570 +                       ntfs_err(sb, "support for ACL not compiled in!");
95571 +                       return -EINVAL;
95572 +#endif
95573 +               case Opt_noatime:
95574 +                       sb->s_flags |= SB_NOATIME;
95575 +                       break;
95576 +               case Opt_showmeta:
95577 +                       opts->showmeta = 1;
95578 +                       break;
95579 +               case Opt_nls:
95580 +                       match_strlcpy(nls_name, &args[0], sizeof(nls_name));
95581 +                       break;
95582 +               case Opt_prealloc:
95583 +                       opts->prealloc = 1;
95584 +                       break;
95585 +               case Opt_no_acs_rules:
95586 +                       opts->no_acs_rules = 1;
95587 +                       break;
95588 +               default:
95589 +                       if (!silent)
95590 +                               ntfs_err(
95591 +                                       sb,
95592 +                                       "Unrecognized mount option \"%s\" or missing value",
95593 +                                       p);
95594 +                       //return -EINVAL;
95595 +               }
95596 +       }
95598 +out:
95599 +       if (!strcmp(nls_name[0] ? nls_name : CONFIG_NLS_DEFAULT, "utf8")) {
95600 +               /* For UTF-8 use utf16s_to_utf8s/utf8s_to_utf16s instead of nls */
95601 +               nls = NULL;
95602 +       } else if (nls_name[0]) {
95603 +               nls = load_nls(nls_name);
95604 +               if (!nls) {
95605 +                       ntfs_err(sb, "failed to load \"%s\"", nls_name);
95606 +                       return -EINVAL;
95607 +               }
95608 +       } else {
95609 +               nls = load_nls_default();
95610 +               if (!nls) {
95611 +                       ntfs_err(sb, "failed to load default nls");
95612 +                       return -EINVAL;
95613 +               }
95614 +       }
95615 +       opts->nls = nls;
95617 +       return 0;
95620 +static int ntfs_remount(struct super_block *sb, int *flags, char *data)
95622 +       int err, ro_rw;
95623 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95624 +       struct ntfs_mount_options old_opts;
95625 +       char *orig_data = kstrdup(data, GFP_KERNEL);
95627 +       if (data && !orig_data)
95628 +               return -ENOMEM;
95630 +       /* Store original options */
95631 +       memcpy(&old_opts, &sbi->options, sizeof(old_opts));
95632 +       clear_mount_options(&sbi->options);
95633 +       memset(&sbi->options, 0, sizeof(sbi->options));
95635 +       err = ntfs_parse_options(sb, data, 0, &sbi->options);
95636 +       if (err)
95637 +               goto restore_opts;
95639 +       ro_rw = sb_rdonly(sb) && !(*flags & SB_RDONLY);
95640 +       if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
95641 +               ntfs_warn(
95642 +                       sb,
95643 +                       "Couldn't remount rw because the journal is not replayed. Please umount/remount instead");
95644 +               err = -EINVAL;
95645 +               goto restore_opts;
95646 +       }
95648 +       sync_filesystem(sb);
95650 +       if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
95651 +           !sbi->options.force) {
95652 +               ntfs_warn(sb, "volume is dirty and \"force\" flag is not set!");
95653 +               err = -EINVAL;
95654 +               goto restore_opts;
95655 +       }
95657 +       clear_mount_options(&old_opts);
95659 +       *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME) |
95660 +                SB_NODIRATIME | SB_NOATIME;
95661 +       ntfs_info(sb, "re-mounted. Opts: %s", orig_data);
95662 +       err = 0;
95663 +       goto out;
95665 +restore_opts:
95666 +       clear_mount_options(&sbi->options);
95667 +       memcpy(&sbi->options, &old_opts, sizeof(old_opts));
95669 +out:
95670 +       kfree(orig_data);
95671 +       return err;
95674 +static struct kmem_cache *ntfs_inode_cachep;
95676 +static struct inode *ntfs_alloc_inode(struct super_block *sb)
95678 +       struct ntfs_inode *ni = kmem_cache_alloc(ntfs_inode_cachep, GFP_NOFS);
95680 +       if (!ni)
95681 +               return NULL;
95683 +       memset(ni, 0, offsetof(struct ntfs_inode, vfs_inode));
95685 +       mutex_init(&ni->ni_lock);
95687 +       return &ni->vfs_inode;
95690 +static void ntfs_i_callback(struct rcu_head *head)
95692 +       struct inode *inode = container_of(head, struct inode, i_rcu);
95693 +       struct ntfs_inode *ni = ntfs_i(inode);
95695 +       mutex_destroy(&ni->ni_lock);
95697 +       kmem_cache_free(ntfs_inode_cachep, ni);
95700 +static void ntfs_destroy_inode(struct inode *inode)
95702 +       call_rcu(&inode->i_rcu, ntfs_i_callback);
95705 +static void init_once(void *foo)
95707 +       struct ntfs_inode *ni = foo;
95709 +       inode_init_once(&ni->vfs_inode);
95712 +/* noinline to reduce binary size */
95713 +static noinline void put_ntfs(struct ntfs_sb_info *sbi)
95715 +       ntfs_free(sbi->new_rec);
95716 +       ntfs_vfree(ntfs_put_shared(sbi->upcase));
95717 +       ntfs_free(sbi->def_table);
95719 +       wnd_close(&sbi->mft.bitmap);
95720 +       wnd_close(&sbi->used.bitmap);
95722 +       if (sbi->mft.ni)
95723 +               iput(&sbi->mft.ni->vfs_inode);
95725 +       if (sbi->security.ni)
95726 +               iput(&sbi->security.ni->vfs_inode);
95728 +       if (sbi->reparse.ni)
95729 +               iput(&sbi->reparse.ni->vfs_inode);
95731 +       if (sbi->objid.ni)
95732 +               iput(&sbi->objid.ni->vfs_inode);
95734 +       if (sbi->volume.ni)
95735 +               iput(&sbi->volume.ni->vfs_inode);
95737 +       ntfs_update_mftmirr(sbi, 0);
95739 +       indx_clear(&sbi->security.index_sii);
95740 +       indx_clear(&sbi->security.index_sdh);
95741 +       indx_clear(&sbi->reparse.index_r);
95742 +       indx_clear(&sbi->objid.index_o);
95743 +       ntfs_free(sbi->compress.lznt);
95744 +#ifdef CONFIG_NTFS3_LZX_XPRESS
95745 +       xpress_free_decompressor(sbi->compress.xpress);
95746 +       lzx_free_decompressor(sbi->compress.lzx);
95747 +#endif
95748 +       clear_mount_options(&sbi->options);
95750 +       ntfs_free(sbi);
95753 +static void ntfs_put_super(struct super_block *sb)
95755 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95757 +       /* Mark rw ntfs volume as clean, if possible */
95758 +       ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
95760 +       put_ntfs(sbi);
95762 +       sync_blockdev(sb->s_bdev);
95765 +static int ntfs_statfs(struct dentry *dentry, struct kstatfs *buf)
95767 +       struct super_block *sb = dentry->d_sb;
95768 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95769 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
95771 +       buf->f_type = sb->s_magic;
95772 +       buf->f_bsize = sbi->cluster_size;
95773 +       buf->f_blocks = wnd->nbits;
95775 +       buf->f_bfree = buf->f_bavail = wnd_zeroes(wnd);
95776 +       buf->f_fsid.val[0] = sbi->volume.ser_num;
95777 +       buf->f_fsid.val[1] = (sbi->volume.ser_num >> 32);
95778 +       buf->f_namelen = NTFS_NAME_LEN;
95780 +       return 0;
95783 +static int ntfs_show_options(struct seq_file *m, struct dentry *root)
95785 +       struct super_block *sb = root->d_sb;
95786 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95787 +       struct ntfs_mount_options *opts = &sbi->options;
95788 +       struct user_namespace *user_ns = seq_user_ns(m);
95790 +       if (opts->uid)
95791 +               seq_printf(m, ",uid=%u",
95792 +                          from_kuid_munged(user_ns, opts->fs_uid));
95793 +       if (opts->gid)
95794 +               seq_printf(m, ",gid=%u",
95795 +                          from_kgid_munged(user_ns, opts->fs_gid));
95796 +       if (opts->fmask)
95797 +               seq_printf(m, ",fmask=%04o", ~opts->fs_fmask_inv);
95798 +       if (opts->dmask)
95799 +               seq_printf(m, ",dmask=%04o", ~opts->fs_dmask_inv);
95800 +       if (opts->nls)
95801 +               seq_printf(m, ",nls=%s", opts->nls->charset);
95802 +       else
95803 +               seq_puts(m, ",nls=utf8");
95804 +       if (opts->sys_immutable)
95805 +               seq_puts(m, ",sys_immutable");
95806 +       if (opts->discard)
95807 +               seq_puts(m, ",discard");
95808 +       if (opts->sparse)
95809 +               seq_puts(m, ",sparse");
95810 +       if (opts->showmeta)
95811 +               seq_puts(m, ",showmeta");
95812 +       if (opts->nohidden)
95813 +               seq_puts(m, ",nohidden");
95814 +       if (opts->force)
95815 +               seq_puts(m, ",force");
95816 +       if (opts->no_acs_rules)
95817 +               seq_puts(m, ",no_acs_rules");
95818 +       if (opts->prealloc)
95819 +               seq_puts(m, ",prealloc");
95820 +       if (sb->s_flags & SB_POSIXACL)
95821 +               seq_puts(m, ",acl");
95822 +       if (sb->s_flags & SB_NOATIME)
95823 +               seq_puts(m, ",noatime");
95825 +       return 0;
95828 +/* super_operations::sync_fs */
95829 +static int ntfs_sync_fs(struct super_block *sb, int wait)
95831 +       int err = 0, err2;
95832 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95833 +       struct ntfs_inode *ni;
95834 +       struct inode *inode;
95836 +       ni = sbi->security.ni;
95837 +       if (ni) {
95838 +               inode = &ni->vfs_inode;
95839 +               err2 = _ni_write_inode(inode, wait);
95840 +               if (err2 && !err)
95841 +                       err = err2;
95842 +       }
95844 +       ni = sbi->objid.ni;
95845 +       if (ni) {
95846 +               inode = &ni->vfs_inode;
95847 +               err2 = _ni_write_inode(inode, wait);
95848 +               if (err2 && !err)
95849 +                       err = err2;
95850 +       }
95852 +       ni = sbi->reparse.ni;
95853 +       if (ni) {
95854 +               inode = &ni->vfs_inode;
95855 +               err2 = _ni_write_inode(inode, wait);
95856 +               if (err2 && !err)
95857 +                       err = err2;
95858 +       }
95860 +       if (!err)
95861 +               ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
95863 +       ntfs_update_mftmirr(sbi, wait);
95865 +       return err;
95868 +static const struct super_operations ntfs_sops = {
95869 +       .alloc_inode = ntfs_alloc_inode,
95870 +       .destroy_inode = ntfs_destroy_inode,
95871 +       .evict_inode = ntfs_evict_inode,
95872 +       .put_super = ntfs_put_super,
95873 +       .statfs = ntfs_statfs,
95874 +       .show_options = ntfs_show_options,
95875 +       .sync_fs = ntfs_sync_fs,
95876 +       .remount_fs = ntfs_remount,
95877 +       .write_inode = ntfs3_write_inode,
95880 +static struct inode *ntfs_export_get_inode(struct super_block *sb, u64 ino,
95881 +                                          u32 generation)
95883 +       struct MFT_REF ref;
95884 +       struct inode *inode;
95886 +       ref.low = cpu_to_le32(ino);
95887 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
95888 +       ref.high = cpu_to_le16(ino >> 32);
95889 +#else
95890 +       ref.high = 0;
95891 +#endif
95892 +       ref.seq = cpu_to_le16(generation);
95894 +       inode = ntfs_iget5(sb, &ref, NULL);
95895 +       if (!IS_ERR(inode) && is_bad_inode(inode)) {
95896 +               iput(inode);
95897 +               inode = ERR_PTR(-ESTALE);
95898 +       }
95900 +       return inode;
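A worked sketch of the packing above, assuming a hypothetical ino of 0x123456789 on a CONFIG_NTFS3_64BIT_CLUSTER build:

	/* ref.low  = cpu_to_le32(0x23456789)  low 32 bits of the MFT record number
	 * ref.high = cpu_to_le16(0x0001)      bits 32..47 of the record number
	 * ref.seq  = cpu_to_le16(generation)  NFS generation doubles as the sequence number
	 */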
95903 +static struct dentry *ntfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
95904 +                                       int fh_len, int fh_type)
95906 +       return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
95907 +                                   ntfs_export_get_inode);
95910 +static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid,
95911 +                                       int fh_len, int fh_type)
95913 +       return generic_fh_to_parent(sb, fid, fh_len, fh_type,
95914 +                                   ntfs_export_get_inode);
95917 +/* TODO: == ntfs_sync_inode */
95918 +static int ntfs_nfs_commit_metadata(struct inode *inode)
95920 +       return _ni_write_inode(inode, 1);
95923 +static const struct export_operations ntfs_export_ops = {
95924 +       .fh_to_dentry = ntfs_fh_to_dentry,
95925 +       .fh_to_parent = ntfs_fh_to_parent,
95926 +       .get_parent = ntfs3_get_parent,
95927 +       .commit_metadata = ntfs_nfs_commit_metadata,
95930 +/* Returns Gb,Mb to print with "%u.%02u Gb" */
95931 +static u32 format_size_gb(const u64 bytes, u32 *mb)
95933 +       /* Do a simple right 30-bit shift of the 64-bit value, in two steps */
95934 +       u64 kbytes = bytes >> 10;
95935 +       u32 kbytes32 = kbytes;
95937 +       *mb = (100 * (kbytes32 & 0xfffff) + 0x7ffff) >> 20;
95938 +       if (*mb >= 100)
95939 +               *mb = 99;
95941 +       return (kbytes32 >> 20) | (((u32)(kbytes >> 32)) << 12);
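A worked example of the math above, assuming bytes = 3758096384 (3.5 GiB):

	/* kbytes = 3758096384 >> 10 = 3670016
	 * *mb    = (100 * (3670016 & 0xfffff) + 0x7ffff) >> 20 = 50
	 * return = 3670016 >> 20 = 3          -> printed as "3.50 Gb"
	 */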
95944 +static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
95946 +       return boot->sectors_per_clusters <= 0x80
95947 +                      ? boot->sectors_per_clusters
95948 +                      : (1u << (0 - boot->sectors_per_clusters));
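Two encodings meet in this helper; a sketch, assuming values above 0x80 are meant to be read as negative signed 8-bit shift counts:

	/* 0x08 -> 8 sectors per cluster (plain count)
	 * 0xF9 -> (s8)-7, so 1 << 7 = 128 sectors per cluster (power-of-two form)
	 */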
95951 +/* Initialize internal info from the on-disk boot sector */
95952 +static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
95953 +                              u64 dev_size)
95955 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
95956 +       int err;
95957 +       u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
95958 +       u64 sectors, clusters, fs_size, mlcn, mlcn2;
95959 +       struct NTFS_BOOT *boot;
95960 +       struct buffer_head *bh;
95961 +       struct MFT_REC *rec;
95962 +       u16 fn, ao;
95964 +       sbi->volume.blocks = dev_size >> PAGE_SHIFT;
95966 +       bh = ntfs_bread(sb, 0);
95967 +       if (!bh)
95968 +               return -EIO;
95970 +       err = -EINVAL;
95971 +       boot = (struct NTFS_BOOT *)bh->b_data;
95973 +       if (memcmp(boot->system_id, "NTFS    ", sizeof("NTFS    ") - 1))
95974 +               goto out;
95976 +       /* 0x55AA is not mandatory. Thanks to Maxim Suhanov */
95977 +       /*if (0x55 != boot->boot_magic[0] || 0xAA != boot->boot_magic[1])
95978 +        *      goto out;
95979 +        */
95981 +       boot_sector_size = (u32)boot->bytes_per_sector[1] << 8;
95982 +       if (boot->bytes_per_sector[0] || boot_sector_size < SECTOR_SIZE ||
95983 +           !is_power_of2(boot_sector_size)) {
95984 +               goto out;
95985 +       }
95987 +       /* cluster size: 512, 1K, 2K, 4K, ... 2M */
95988 +       sct_per_clst = true_sectors_per_clst(boot);
95989 +       if (!is_power_of2(sct_per_clst))
95990 +               goto out;
95992 +       mlcn = le64_to_cpu(boot->mft_clst);
95993 +       mlcn2 = le64_to_cpu(boot->mft2_clst);
95994 +       sectors = le64_to_cpu(boot->sectors_per_volume);
95996 +       if (mlcn * sct_per_clst >= sectors)
95997 +               goto out;
95999 +       if (mlcn2 * sct_per_clst >= sectors)
96000 +               goto out;
96002 +       /* Check MFT record size */
96003 +       if ((boot->record_size < 0 &&
96004 +            SECTOR_SIZE > (2U << (-boot->record_size))) ||
96005 +           (boot->record_size >= 0 && !is_power_of2(boot->record_size))) {
96006 +               goto out;
96007 +       }
96009 +       /* Check index record size */
96010 +       if ((boot->index_size < 0 &&
96011 +            SECTOR_SIZE > (2U << (-boot->index_size))) ||
96012 +           (boot->index_size >= 0 && !is_power_of2(boot->index_size))) {
96013 +               goto out;
96014 +       }
96016 +       sbi->sector_size = boot_sector_size;
96017 +       sbi->sector_bits = blksize_bits(boot_sector_size);
96018 +       fs_size = (sectors + 1) << sbi->sector_bits;
96020 +       gb = format_size_gb(fs_size, &mb);
96022 +       /*
96023 +        * - Volume formatted and mounted with the same sector size
96024 +        * - Volume formatted 4K and mounted as 512
96025 +        * - Volume formatted 512 and mounted as 4K
96026 +        */
96027 +       if (sbi->sector_size != sector_size) {
96028 +               ntfs_warn(sb,
96029 +                         "Different NTFS' sector size and media sector size");
96030 +               dev_size += sector_size - 1;
96031 +       }
96033 +       sbi->cluster_size = boot_sector_size * sct_per_clst;
96034 +       sbi->cluster_bits = blksize_bits(sbi->cluster_size);
96036 +       sbi->mft.lbo = mlcn << sbi->cluster_bits;
96037 +       sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits;
96039 +       if (sbi->cluster_size < sbi->sector_size)
96040 +               goto out;
96042 +       sbi->cluster_mask = sbi->cluster_size - 1;
96043 +       sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask;
96044 +       sbi->record_size = record_size = boot->record_size < 0
96045 +                                                ? 1 << (-boot->record_size)
96046 +                                                : (u32)boot->record_size
96047 +                                                          << sbi->cluster_bits;
96049 +       if (record_size > MAXIMUM_BYTES_PER_MFT)
96050 +               goto out;
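A worked example of the record_size encoding above (the 4K cluster in the second line is an assumption for illustration):

	/* boot->record_size = -10 -> 1 << 10           = 1024-byte MFT records
	 * boot->record_size =   1 -> 1 << cluster_bits = 4096-byte MFT records
	 */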
96052 +       sbi->record_bits = blksize_bits(record_size);
96053 +       sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes
96055 +       sbi->max_bytes_per_attr =
96056 +               record_size - QuadAlign(MFTRECORD_FIXUP_OFFSET_1) -
96057 +               QuadAlign(((record_size >> SECTOR_SHIFT) * sizeof(short))) -
96058 +               QuadAlign(sizeof(enum ATTR_TYPE));
96060 +       sbi->index_size = boot->index_size < 0
96061 +                                 ? 1u << (-boot->index_size)
96062 +                                 : (u32)boot->index_size << sbi->cluster_bits;
96064 +       sbi->volume.ser_num = le64_to_cpu(boot->serial_num);
96065 +       sbi->volume.size = sectors << sbi->sector_bits;
96067 +       /* Warn if the filesystem is larger than the device (RAW volume) */
96068 +       if (dev_size < fs_size) {
96069 +               u32 mb0, gb0;
96071 +               gb0 = format_size_gb(dev_size, &mb0);
96072 +               ntfs_warn(
96073 +                       sb,
96074 +                       "RAW NTFS volume: Filesystem size %u.%02u Gb > volume size %u.%02u Gb. Mount in read-only",
96075 +                       gb, mb, gb0, mb0);
96076 +               sb->s_flags |= SB_RDONLY;
96077 +       }
96079 +       clusters = sbi->volume.size >> sbi->cluster_bits;
96080 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
96081 +       /* 32 bits per cluster */
96082 +       if (clusters >> 32) {
96083 +               ntfs_notice(
96084 +                       sb,
96085 +                       "NTFS %u.%02u Gb is too big to use 32 bits per cluster",
96086 +                       gb, mb);
96087 +               goto out;
96088 +       }
96089 +#elif BITS_PER_LONG < 64
96090 +#error "CONFIG_NTFS3_64BIT_CLUSTER incompatible in 32 bit OS"
96091 +#endif
96093 +       sbi->used.bitmap.nbits = clusters;
96095 +       rec = ntfs_zalloc(record_size);
96096 +       if (!rec) {
96097 +               err = -ENOMEM;
96098 +               goto out;
96099 +       }
96101 +       sbi->new_rec = rec;
96102 +       rec->rhdr.sign = NTFS_FILE_SIGNATURE;
96103 +       rec->rhdr.fix_off = cpu_to_le16(MFTRECORD_FIXUP_OFFSET_1);
96104 +       fn = (sbi->record_size >> SECTOR_SHIFT) + 1;
96105 +       rec->rhdr.fix_num = cpu_to_le16(fn);
96106 +       ao = QuadAlign(MFTRECORD_FIXUP_OFFSET_1 + sizeof(short) * fn);
96107 +       rec->attr_off = cpu_to_le16(ao);
96108 +       rec->used = cpu_to_le32(ao + QuadAlign(sizeof(enum ATTR_TYPE)));
96109 +       rec->total = cpu_to_le32(sbi->record_size);
96110 +       ((struct ATTRIB *)Add2Ptr(rec, ao))->type = ATTR_END;
96112 +       if (sbi->cluster_size < PAGE_SIZE)
96113 +               sb_set_blocksize(sb, sbi->cluster_size);
96115 +       sbi->block_mask = sb->s_blocksize - 1;
96116 +       sbi->blocks_per_cluster = sbi->cluster_size >> sb->s_blocksize_bits;
96117 +       sbi->volume.blocks = sbi->volume.size >> sb->s_blocksize_bits;
96119 +       /* Maximum size for normal files */
96120 +       sbi->maxbytes = (clusters << sbi->cluster_bits) - 1;
96122 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
96123 +       if (clusters >= (1ull << (64 - sbi->cluster_bits)))
96124 +               sbi->maxbytes = -1;
96125 +       sbi->maxbytes_sparse = -1;
96126 +#else
96127 +       /* Maximum size for sparse file */
96128 +       sbi->maxbytes_sparse = (1ull << (sbi->cluster_bits + 32)) - 1;
96129 +#endif
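For example, with 4K clusters (cluster_bits = 12) the 32-bit-cluster branch above yields:

	/* maxbytes_sparse = (1ull << (12 + 32)) - 1 = 2^44 - 1, just under 16 TiB */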
96131 +       err = 0;
96133 +out:
96134 +       brelse(bh);
96136 +       return err;
96139 +/* Try to mount */
96140 +static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
96142 +       int err;
96143 +       struct ntfs_sb_info *sbi;
96144 +       struct block_device *bdev = sb->s_bdev;
96145 +       struct inode *bd_inode = bdev->bd_inode;
96146 +       struct request_queue *rq = bdev_get_queue(bdev);
96147 +       struct inode *inode = NULL;
96148 +       struct ntfs_inode *ni;
96149 +       size_t i, tt;
96150 +       CLST vcn, lcn, len;
96151 +       struct ATTRIB *attr;
96152 +       const struct VOLUME_INFO *info;
96153 +       u32 idx, done, bytes;
96154 +       struct ATTR_DEF_ENTRY *t;
96155 +       u16 *upcase = NULL;
96156 +       u16 *shared;
96157 +       bool is_ro;
96158 +       struct MFT_REF ref;
96160 +       ref.high = 0;
96162 +       sbi = ntfs_zalloc(sizeof(struct ntfs_sb_info));
96163 +       if (!sbi)
96164 +               return -ENOMEM;
96166 +       sb->s_fs_info = sbi;
96167 +       sbi->sb = sb;
96168 +       sb->s_flags |= SB_NODIRATIME;
96169 +       sb->s_magic = 0x7366746e; // "ntfs"
96170 +       sb->s_op = &ntfs_sops;
96171 +       sb->s_export_op = &ntfs_export_ops;
96172 +       sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
96173 +       sb->s_xattr = ntfs_xattr_handlers;
96175 +       ratelimit_state_init(&sbi->msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL,
96176 +                            DEFAULT_RATELIMIT_BURST);
96178 +       err = ntfs_parse_options(sb, data, silent, &sbi->options);
96179 +       if (err)
96180 +               goto out;
96182 +       if (!rq || !blk_queue_discard(rq) || !rq->limits.discard_granularity) {
96183 +               ;
96184 +       } else {
96185 +               sbi->discard_granularity = rq->limits.discard_granularity;
96186 +               sbi->discard_granularity_mask_inv =
96187 +                       ~(u64)(sbi->discard_granularity - 1);
96188 +       }
96190 +       sb_set_blocksize(sb, PAGE_SIZE);
96192 +       /* parse boot */
96193 +       err = ntfs_init_from_boot(sb, rq ? queue_logical_block_size(rq) : 512,
96194 +                                 bd_inode->i_size);
96195 +       if (err)
96196 +               goto out;
96198 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
96199 +       sb->s_maxbytes = MAX_LFS_FILESIZE;
96200 +#else
96201 +       sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
96202 +#endif
96204 +       mutex_init(&sbi->compress.mtx_lznt);
96205 +#ifdef CONFIG_NTFS3_LZX_XPRESS
96206 +       mutex_init(&sbi->compress.mtx_xpress);
96207 +       mutex_init(&sbi->compress.mtx_lzx);
96208 +#endif
96210 +       /*
96211 +        * Load $Volume. This should be done before LogFile,
96212 +        * because 'sbi->volume.ni' is used in 'ntfs_set_state'.
96213 +        */
96214 +       ref.low = cpu_to_le32(MFT_REC_VOL);
96215 +       ref.seq = cpu_to_le16(MFT_REC_VOL);
96216 +       inode = ntfs_iget5(sb, &ref, &NAME_VOLUME);
96217 +       if (IS_ERR(inode)) {
96218 +               err = PTR_ERR(inode);
96219 +               ntfs_err(sb, "Failed to load $Volume.");
96220 +               inode = NULL;
96221 +               goto out;
96222 +       }
96224 +       ni = ntfs_i(inode);
96226 +       /* Load and save label (not necessary) */
96227 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_LABEL, NULL, 0, NULL, NULL);
96229 +       if (!attr) {
96230 +               /* It is ok if there is no ATTR_LABEL */
96231 +       } else if (!attr->non_res && !is_attr_ext(attr)) {
96232 +               /* $AttrDef allows labels to be up to 128 symbols */
96233 +               err = utf16s_to_utf8s(resident_data(attr),
96234 +                                     le32_to_cpu(attr->res.data_size) >> 1,
96235 +                                     UTF16_LITTLE_ENDIAN, sbi->volume.label,
96236 +                                     sizeof(sbi->volume.label));
96237 +               if (err < 0)
96238 +                       sbi->volume.label[0] = 0;
96239 +       } else {
96240 +               /* should we break mounting here? */
96241 +               //err = -EINVAL;
96242 +               //goto out;
96243 +       }
96245 +       attr = ni_find_attr(ni, attr, NULL, ATTR_VOL_INFO, NULL, 0, NULL, NULL);
96246 +       if (!attr || is_attr_ext(attr)) {
96247 +               err = -EINVAL;
96248 +               goto out;
96249 +       }
96251 +       info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
96252 +       if (!info) {
96253 +               err = -EINVAL;
96254 +               goto out;
96255 +       }
96257 +       sbi->volume.major_ver = info->major_ver;
96258 +       sbi->volume.minor_ver = info->minor_ver;
96259 +       sbi->volume.flags = info->flags;
96261 +       sbi->volume.ni = ni;
96262 +       inode = NULL;
96264 +       /* Load $MFTMirr to estimate recs_mirr */
96265 +       ref.low = cpu_to_le32(MFT_REC_MIRR);
96266 +       ref.seq = cpu_to_le16(MFT_REC_MIRR);
96267 +       inode = ntfs_iget5(sb, &ref, &NAME_MIRROR);
96268 +       if (IS_ERR(inode)) {
96269 +               err = PTR_ERR(inode);
96270 +               ntfs_err(sb, "Failed to load $MFTMirr.");
96271 +               inode = NULL;
96272 +               goto out;
96273 +       }
96275 +       sbi->mft.recs_mirr =
96276 +               ntfs_up_cluster(sbi, inode->i_size) >> sbi->record_bits;
96278 +       iput(inode);
96280 +       /* Load LogFile to replay */
96281 +       ref.low = cpu_to_le32(MFT_REC_LOG);
96282 +       ref.seq = cpu_to_le16(MFT_REC_LOG);
96283 +       inode = ntfs_iget5(sb, &ref, &NAME_LOGFILE);
96284 +       if (IS_ERR(inode)) {
96285 +               err = PTR_ERR(inode);
96286 +               ntfs_err(sb, "Failed to load \x24LogFile.");
96287 +               inode = NULL;
96288 +               goto out;
96289 +       }
96291 +       ni = ntfs_i(inode);
96293 +       err = ntfs_loadlog_and_replay(ni, sbi);
96294 +       if (err)
96295 +               goto out;
96297 +       iput(inode);
96298 +       inode = NULL;
96300 +       is_ro = sb_rdonly(sbi->sb);
96302 +       if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
96303 +               if (!is_ro) {
96304 +                       ntfs_warn(sb,
96305 +                                 "failed to replay log file. Can't mount rw!");
96306 +                       err = -EINVAL;
96307 +                       goto out;
96308 +               }
96309 +       } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) {
96310 +               if (!is_ro && !sbi->options.force) {
96311 +                       ntfs_warn(
96312 +                               sb,
96313 +                               "volume is dirty and \"force\" flag is not set!");
96314 +                       err = -EINVAL;
96315 +                       goto out;
96316 +               }
96317 +       }
96319 +       /* Load $MFT */
96320 +       ref.low = cpu_to_le32(MFT_REC_MFT);
96321 +       ref.seq = cpu_to_le16(1);
96323 +       inode = ntfs_iget5(sb, &ref, &NAME_MFT);
96324 +       if (IS_ERR(inode)) {
96325 +               err = PTR_ERR(inode);
96326 +               ntfs_err(sb, "Failed to load $MFT.");
96327 +               inode = NULL;
96328 +               goto out;
96329 +       }
96331 +       ni = ntfs_i(inode);
96333 +       sbi->mft.used = ni->i_valid >> sbi->record_bits;
96334 +       tt = inode->i_size >> sbi->record_bits;
96335 +       sbi->mft.next_free = MFT_REC_USER;
96337 +       err = wnd_init(&sbi->mft.bitmap, sb, tt);
96338 +       if (err)
96339 +               goto out;
96341 +       err = ni_load_all_mi(ni);
96342 +       if (err)
96343 +               goto out;
96345 +       sbi->mft.ni = ni;
96347 +       /* Load $BadClus */
96348 +       ref.low = cpu_to_le32(MFT_REC_BADCLUST);
96349 +       ref.seq = cpu_to_le16(MFT_REC_BADCLUST);
96350 +       inode = ntfs_iget5(sb, &ref, &NAME_BADCLUS);
96351 +       if (IS_ERR(inode)) {
96352 +               err = PTR_ERR(inode);
96353 +               ntfs_err(sb, "Failed to load $BadClus.");
96354 +               inode = NULL;
96355 +               goto out;
96356 +       }
96358 +       ni = ntfs_i(inode);
96360 +       for (i = 0; run_get_entry(&ni->file.run, i, &vcn, &lcn, &len); i++) {
96361 +               if (lcn == SPARSE_LCN)
96362 +                       continue;
96364 +               if (!sbi->bad_clusters)
96365 +                       ntfs_notice(sb, "Volume contains bad blocks");
96367 +               sbi->bad_clusters += len;
96368 +       }
96370 +       iput(inode);
96372 +       /* Load $Bitmap */
96373 +       ref.low = cpu_to_le32(MFT_REC_BITMAP);
96374 +       ref.seq = cpu_to_le16(MFT_REC_BITMAP);
96375 +       inode = ntfs_iget5(sb, &ref, &NAME_BITMAP);
96376 +       if (IS_ERR(inode)) {
96377 +               err = PTR_ERR(inode);
96378 +               ntfs_err(sb, "Failed to load $Bitmap.");
96379 +               inode = NULL;
96380 +               goto out;
96381 +       }
96383 +       ni = ntfs_i(inode);
96385 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
96386 +       if (inode->i_size >> 32) {
96387 +               err = -EINVAL;
96388 +               goto out;
96389 +       }
96390 +#endif
96392 +       /* Check bitmap boundary */
96393 +       tt = sbi->used.bitmap.nbits;
96394 +       if (inode->i_size < bitmap_size(tt)) {
96395 +               err = -EINVAL;
96396 +               goto out;
96397 +       }
96399 +       /* Not necessary */
96400 +       sbi->used.bitmap.set_tail = true;
96401 +       err = wnd_init(&sbi->used.bitmap, sbi->sb, tt);
96402 +       if (err)
96403 +               goto out;
96405 +       iput(inode);
96407 +       /* Compute the mft zone */
96408 +       err = ntfs_refresh_zone(sbi);
96409 +       if (err)
96410 +               goto out;
96412 +       /* Load $AttrDef */
96413 +       ref.low = cpu_to_le32(MFT_REC_ATTR);
96414 +       ref.seq = cpu_to_le16(MFT_REC_ATTR);
96415 +       inode = ntfs_iget5(sbi->sb, &ref, &NAME_ATTRDEF);
96416 +       if (IS_ERR(inode)) {
96417 +               err = PTR_ERR(inode);
96418 +               ntfs_err(sb, "Failed to load $AttrDef -> %d", err);
96419 +               inode = NULL;
96420 +               goto out;
96421 +       }
96423 +       if (inode->i_size < sizeof(struct ATTR_DEF_ENTRY)) {
96424 +               err = -EINVAL;
96425 +               goto out;
96426 +       }
96427 +       bytes = inode->i_size;
96428 +       sbi->def_table = t = ntfs_malloc(bytes);
96429 +       if (!t) {
96430 +               err = -ENOMEM;
96431 +               goto out;
96432 +       }
96434 +       for (done = idx = 0; done < bytes; done += PAGE_SIZE, idx++) {
96435 +               unsigned long tail = bytes - done;
96436 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
96438 +               if (IS_ERR(page)) {
96439 +                       err = PTR_ERR(page);
96440 +                       goto out;
96441 +               }
96442 +               memcpy(Add2Ptr(t, done), page_address(page),
96443 +                      min(PAGE_SIZE, tail));
96444 +               ntfs_unmap_page(page);
96446 +               if (!idx && ATTR_STD != t->type) {
96447 +                       err = -EINVAL;
96448 +                       goto out;
96449 +               }
96450 +       }
96452 +       t += 1;
96453 +       sbi->def_entries = 1;
96454 +       done = sizeof(struct ATTR_DEF_ENTRY);
96455 +       sbi->reparse.max_size = MAXIMUM_REPARSE_DATA_BUFFER_SIZE;
96456 +       sbi->ea_max_size = 0x10000; /* default formatter value */
96458 +       while (done + sizeof(struct ATTR_DEF_ENTRY) <= bytes) {
96459 +               u32 t32 = le32_to_cpu(t->type);
96460 +               u64 sz = le64_to_cpu(t->max_sz);
96462 +               if ((t32 & 0xF) || le32_to_cpu(t[-1].type) >= t32)
96463 +                       break;
96465 +               if (t->type == ATTR_REPARSE)
96466 +                       sbi->reparse.max_size = sz;
96467 +               else if (t->type == ATTR_EA)
96468 +                       sbi->ea_max_size = sz;
96470 +               done += sizeof(struct ATTR_DEF_ENTRY);
96471 +               t += 1;
96472 +               sbi->def_entries += 1;
96473 +       }
96474 +       iput(inode);
96476 +       /* Load $UpCase */
96477 +       ref.low = cpu_to_le32(MFT_REC_UPCASE);
96478 +       ref.seq = cpu_to_le16(MFT_REC_UPCASE);
96479 +       inode = ntfs_iget5(sb, &ref, &NAME_UPCASE);
96480 +       if (IS_ERR(inode)) {
96481 +               err = PTR_ERR(inode);
96482 +               ntfs_err(sb, "Failed to load \x24UpCase.");
96483 +               inode = NULL;
96484 +               goto out;
96485 +       }
96487 +       ni = ntfs_i(inode);
96489 +       if (inode->i_size != 0x10000 * sizeof(short)) {
96490 +               err = -EINVAL;
96491 +               goto out;
96492 +       }
96494 +       sbi->upcase = upcase = ntfs_vmalloc(0x10000 * sizeof(short));
96495 +       if (!upcase) {
96496 +               err = -ENOMEM;
96497 +               goto out;
96498 +       }
96500 +       for (idx = 0; idx < (0x10000 * sizeof(short) >> PAGE_SHIFT); idx++) {
96501 +               const __le16 *src;
96502 +               u16 *dst = Add2Ptr(upcase, idx << PAGE_SHIFT);
96503 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
96505 +               if (IS_ERR(page)) {
96506 +                       err = PTR_ERR(page);
96507 +                       goto out;
96508 +               }
96510 +               src = page_address(page);
96512 +#ifdef __BIG_ENDIAN
96513 +               for (i = 0; i < PAGE_SIZE / sizeof(u16); i++)
96514 +                       *dst++ = le16_to_cpu(*src++);
96515 +#else
96516 +               memcpy(dst, src, PAGE_SIZE);
96517 +#endif
96518 +               ntfs_unmap_page(page);
96519 +       }
96521 +       shared = ntfs_set_shared(upcase, 0x10000 * sizeof(short));
96522 +       if (shared && upcase != shared) {
96523 +               sbi->upcase = shared;
96524 +               ntfs_vfree(upcase);
96525 +       }
96527 +       iput(inode);
96528 +       inode = NULL;
96530 +       if (is_ntfs3(sbi)) {
96531 +               /* Load $Secure */
96532 +               err = ntfs_security_init(sbi);
96533 +               if (err)
96534 +                       goto out;
96536 +               /* Load $Extend */
96537 +               err = ntfs_extend_init(sbi);
96538 +               if (err)
96539 +                       goto load_root;
96541 +               /* Load $Extend\$Reparse */
96542 +               err = ntfs_reparse_init(sbi);
96543 +               if (err)
96544 +                       goto load_root;
96546 +               /* Load $Extend\$ObjId */
96547 +               err = ntfs_objid_init(sbi);
96548 +               if (err)
96549 +                       goto load_root;
96550 +       }
96552 +load_root:
96553 +       /* Load root */
96554 +       ref.low = cpu_to_le32(MFT_REC_ROOT);
96555 +       ref.seq = cpu_to_le16(MFT_REC_ROOT);
96556 +       inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
96557 +       if (IS_ERR(inode)) {
96558 +               err = PTR_ERR(inode);
96559 +               ntfs_err(sb, "Failed to load root.");
96560 +               inode = NULL;
96561 +               goto out;
96562 +       }
96564 +       ni = ntfs_i(inode);
96566 +       sb->s_root = d_make_root(inode);
96568 +       if (!sb->s_root) {
96569 +               err = -EINVAL;
96570 +               goto out;
96571 +       }
96573 +       return 0;
96575 +out:
96576 +       iput(inode);
96578 +       if (sb->s_root) {
96579 +               d_drop(sb->s_root);
96580 +               sb->s_root = NULL;
96581 +       }
96583 +       put_ntfs(sbi);
96585 +       sb->s_fs_info = NULL;
96586 +       return err;
96589 +void ntfs_unmap_meta(struct super_block *sb, CLST lcn, CLST len)
96591 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
96592 +       struct block_device *bdev = sb->s_bdev;
96593 +       sector_t devblock = (u64)lcn * sbi->blocks_per_cluster;
96594 +       unsigned long blocks = (u64)len * sbi->blocks_per_cluster;
96595 +       unsigned long cnt = 0;
96596 +       unsigned long limit = global_zone_page_state(NR_FREE_PAGES)
96597 +                             << (PAGE_SHIFT - sb->s_blocksize_bits);
96599 +       if (limit >= 0x2000)
96600 +               limit -= 0x1000;
96601 +       else if (limit < 32)
96602 +               limit = 32;
96603 +       else
96604 +               limit >>= 1;
96606 +       while (blocks--) {
96607 +               clean_bdev_aliases(bdev, devblock++, 1);
96608 +               if (cnt++ >= limit) {
96609 +                       sync_blockdev(bdev);
96610 +                       cnt = 0;
96611 +               }
96612 +       }
96616 + * ntfs_discard
96617 + *
96618 + * issue a discard request (trim for SSD)
96619 + */
96620 +int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
96622 +       int err;
96623 +       u64 lbo, bytes, start, end;
96624 +       struct super_block *sb;
96626 +       if (sbi->used.next_free_lcn == lcn + len)
96627 +               sbi->used.next_free_lcn = lcn;
96629 +       if (sbi->flags & NTFS_FLAGS_NODISCARD)
96630 +               return -EOPNOTSUPP;
96632 +       if (!sbi->options.discard)
96633 +               return -EOPNOTSUPP;
96635 +       lbo = (u64)lcn << sbi->cluster_bits;
96636 +       bytes = (u64)len << sbi->cluster_bits;
96638 +       /* Align up 'start' on discard_granularity */
96639 +       start = (lbo + sbi->discard_granularity - 1) &
96640 +               sbi->discard_granularity_mask_inv;
96641 +       /* Align down 'end' on discard_granularity */
96642 +       end = (lbo + bytes) & sbi->discard_granularity_mask_inv;
96644 +       sb = sbi->sb;
96645 +       if (start >= end)
96646 +               return 0;
96648 +       err = blkdev_issue_discard(sb->s_bdev, start >> 9, (end - start) >> 9,
96649 +                                  GFP_NOFS, 0);
96651 +       if (err == -EOPNOTSUPP)
96652 +               sbi->flags |= NTFS_FLAGS_NODISCARD;
96654 +       return err;
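A worked example of the alignment above, assuming cluster_bits = 9 and discard_granularity = 4096:

	/* lcn = 10, len = 40 -> lbo = 5120, bytes = 20480
	 * start = (5120 + 4095) & ~4095  = 8192   (aligned up)
	 * end   = (5120 + 20480) & ~4095 = 24576  (aligned down)
	 * blkdev_issue_discard(bdev, 8192 >> 9, (24576 - 8192) >> 9, ...)
	 * trims sectors 16..47
	 */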
96657 +static struct dentry *ntfs_mount(struct file_system_type *fs_type, int flags,
96658 +                                const char *dev_name, void *data)
96660 +       return mount_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
96663 +static struct file_system_type ntfs_fs_type = {
96664 +       .owner = THIS_MODULE,
96665 +       .name = "ntfs3",
96666 +       .mount = ntfs_mount,
96667 +       .kill_sb = kill_block_super,
96668 +       .fs_flags = FS_REQUIRES_DEV,
96671 +static int __init init_ntfs_fs(void)
96673 +       int err;
96675 +       pr_notice("ntfs3: Index binary search\n");
96676 +       pr_notice("ntfs3: Hot fix free clusters\n");
96677 +       pr_notice("ntfs3: Max link count %u\n", NTFS_LINK_MAX);
96679 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
96680 +       pr_notice("ntfs3: Enabled Linux POSIX ACLs support\n");
96681 +#endif
96682 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
96683 +       pr_notice("ntfs3: Activated 64 bits per cluster\n");
96684 +#else
96685 +       pr_notice("ntfs3: Activated 32 bits per cluster\n");
96686 +#endif
96687 +#ifdef CONFIG_NTFS3_LZX_XPRESS
96688 +       pr_notice("ntfs3: Read-only lzx/xpress compression included\n");
96689 +#endif
96691 +       err = ntfs3_init_bitmap();
96692 +       if (err)
96693 +               return err;
96695 +       ntfs_inode_cachep = kmem_cache_create(
96696 +               "ntfs_inode_cache", sizeof(struct ntfs_inode), 0,
96697 +               (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
96698 +               init_once);
96699 +       if (!ntfs_inode_cachep) {
96700 +               err = -ENOMEM;
96701 +               goto out1;
96702 +       }
96704 +       err = register_filesystem(&ntfs_fs_type);
96705 +       if (err)
96706 +               goto out;
96708 +       return 0;
96709 +out:
96710 +       kmem_cache_destroy(ntfs_inode_cachep);
96711 +out1:
96712 +       ntfs3_exit_bitmap();
96713 +       return err;
96716 +static void __exit exit_ntfs_fs(void)
96718 +       if (ntfs_inode_cachep) {
96719 +               rcu_barrier();
96720 +               kmem_cache_destroy(ntfs_inode_cachep);
96721 +       }
96723 +       unregister_filesystem(&ntfs_fs_type);
96724 +       ntfs3_exit_bitmap();
96727 +MODULE_LICENSE("GPL");
96728 +MODULE_DESCRIPTION("ntfs3 read/write filesystem");
96729 +MODULE_INFO(behaviour, "Index binary search");
96730 +MODULE_INFO(behaviour, "Hot fix free clusters");
96731 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
96732 +MODULE_INFO(behaviour, "Enabled Linux POSIX ACLs support");
96733 +#endif
96734 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
96735 +MODULE_INFO(cluster, "Activated 64 bits per cluster");
96736 +#else
96737 +MODULE_INFO(cluster, "Activated 32 bits per cluster");
96738 +#endif
96739 +#ifdef CONFIG_NTFS3_LZX_XPRESS
96740 +MODULE_INFO(compression, "Read-only lzx/xpress compression included");
96741 +#endif
96743 +MODULE_AUTHOR("Konstantin Komarov");
96744 +MODULE_ALIAS_FS("ntfs3");
96746 +module_init(init_ntfs_fs);
96747 +module_exit(exit_ntfs_fs);
96748 diff --git a/fs/ntfs3/upcase.c b/fs/ntfs3/upcase.c
96749 new file mode 100644
96750 index 000000000000..9617382aca64
96751 --- /dev/null
96752 +++ b/fs/ntfs3/upcase.c
96753 @@ -0,0 +1,105 @@
96754 +// SPDX-License-Identifier: GPL-2.0
96756 + *
96757 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
96758 + *
96759 + */
96760 +#include <linux/blkdev.h>
96761 +#include <linux/buffer_head.h>
96762 +#include <linux/module.h>
96763 +#include <linux/nls.h>
96765 +#include "debug.h"
96766 +#include "ntfs.h"
96767 +#include "ntfs_fs.h"
96769 +static inline u16 upcase_unicode_char(const u16 *upcase, u16 chr)
96771 +       if (chr < 'a')
96772 +               return chr;
96774 +       if (chr <= 'z')
96775 +               return chr - ('a' - 'A');
96777 +       return upcase[chr];
96781 + * Thanks to Kari Argillander <kari.argillander@gmail.com> for the idea and implementation of 'bothcase'.
96782 + *
96783 + * The straightforward way to compare names:
96784 + * - compare case insensitive
96785 + * - if the names are equal and 'bothcase' is set, then
96786 + * - compare case sensitive
96787 + * The straightforward code scans both input names twice in the worst case;
96788 + * the optimized code below scans them only once.
96789 + */
96790 +int ntfs_cmp_names(const __le16 *s1, size_t l1, const __le16 *s2, size_t l2,
96791 +                  const u16 *upcase, bool bothcase)
96793 +       int diff1 = 0;
96794 +       int diff2;
96795 +       size_t len = min(l1, l2);
96797 +       if (!bothcase && upcase)
96798 +               goto case_insensitive;
96800 +       for (; len; s1++, s2++, len--) {
96801 +               diff1 = le16_to_cpu(*s1) - le16_to_cpu(*s2);
96802 +               if (diff1) {
96803 +                       if (bothcase && upcase)
96804 +                               goto case_insensitive;
96806 +                       return diff1;
96807 +               }
96808 +       }
96809 +       return l1 - l2;
96811 +case_insensitive:
96812 +       for (; len; s1++, s2++, len--) {
96813 +               diff2 = upcase_unicode_char(upcase, le16_to_cpu(*s1)) -
96814 +                       upcase_unicode_char(upcase, le16_to_cpu(*s2));
96815 +               if (diff2)
96816 +                       return diff2;
96817 +       }
96819 +       diff2 = l1 - l2;
96820 +       return diff2 ? diff2 : diff1;
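A worked example of the single-pass 'bothcase' logic above, comparing "bar" (s1) with "Bar" (s2):

	/* case-sensitive pass: diff1 = 'b' - 'B' = 32 at position 0 -> jump to the
	 * case-insensitive scan, which finds no difference and l1 - l2 == 0, so the
	 * saved diff1 (32) is returned as a stable case-sensitive tiebreak.
	 */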
96823 +int ntfs_cmp_names_cpu(const struct cpu_str *uni1, const struct le_str *uni2,
96824 +                      const u16 *upcase, bool bothcase)
96826 +       const u16 *s1 = uni1->name;
96827 +       const __le16 *s2 = uni2->name;
96828 +       size_t l1 = uni1->len;
96829 +       size_t l2 = uni2->len;
96830 +       size_t len = min(l1, l2);
96831 +       int diff1 = 0;
96832 +       int diff2;
96834 +       if (!bothcase && upcase)
96835 +               goto case_insensitive;
96837 +       for (; len; s1++, s2++, len--) {
96838 +               diff1 = *s1 - le16_to_cpu(*s2);
96839 +               if (diff1) {
96840 +                       if (bothcase && upcase)
96841 +                               goto case_insensitive;
96843 +                       return diff1;
96844 +               }
96845 +       }
96846 +       return l1 - l2;
96848 +case_insensitive:
96849 +       for (; len; s1++, s2++, len--) {
96850 +               diff2 = upcase_unicode_char(upcase, *s1) -
96851 +                       upcase_unicode_char(upcase, le16_to_cpu(*s2));
96852 +               if (diff2)
96853 +                       return diff2;
96854 +       }
96856 +       diff2 = l1 - l2;
96857 +       return diff2 ? diff2 : diff1;
96859 diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
96860 new file mode 100644
96861 index 000000000000..759df507c92c
96862 --- /dev/null
96863 +++ b/fs/ntfs3/xattr.c
96864 @@ -0,0 +1,1046 @@
96865 +// SPDX-License-Identifier: GPL-2.0
96867 + *
96868 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
96869 + *
96870 + */
96872 +#include <linux/blkdev.h>
96873 +#include <linux/buffer_head.h>
96874 +#include <linux/fs.h>
96875 +#include <linux/nls.h>
96876 +#include <linux/posix_acl.h>
96877 +#include <linux/posix_acl_xattr.h>
96878 +#include <linux/xattr.h>
96880 +#include "debug.h"
96881 +#include "ntfs.h"
96882 +#include "ntfs_fs.h"
96884 +// clang-format off
96885 +#define SYSTEM_DOS_ATTRIB    "system.dos_attrib"
96886 +#define SYSTEM_NTFS_ATTRIB   "system.ntfs_attrib"
96887 +#define SYSTEM_NTFS_SECURITY "system.ntfs_security"
96888 +// clang-format on
96890 +static inline size_t unpacked_ea_size(const struct EA_FULL *ea)
96892 +       return ea->size ? le32_to_cpu(ea->size)
96893 +                       : DwordAlign(struct_size(
96894 +                                 ea, name,
96895 +                                 1 + ea->name_len + le16_to_cpu(ea->elength)));
96898 +static inline size_t packed_ea_size(const struct EA_FULL *ea)
96900 +       return struct_size(ea, name,
96901 +                          1 + ea->name_len + le16_to_cpu(ea->elength)) -
96902 +              offsetof(struct EA_FULL, flags);
96906 + * find_ea
96907 + *
96908 + * assume there is at least one xattr in the list
96909 + */
96910 +static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes,
96911 +                          const char *name, u8 name_len, u32 *off)
96913 +       *off = 0;
96915 +       if (!ea_all || !bytes)
96916 +               return false;
96918 +       for (;;) {
96919 +               const struct EA_FULL *ea = Add2Ptr(ea_all, *off);
96920 +               u32 next_off = *off + unpacked_ea_size(ea);
96922 +               if (next_off > bytes)
96923 +                       return false;
96925 +               if (ea->name_len == name_len &&
96926 +                   !memcmp(ea->name, name, name_len))
96927 +                       return true;
96929 +               *off = next_off;
96930 +               if (next_off >= bytes)
96931 +                       return false;
96932 +       }
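A sketch of the walk above: EA_FULL entries are laid out back to back, so the cursor simply advances by each unpacked size (the field values are hypothetical):

	/* off = 0:         ea->name_len = 9, ea->name = "user.test"
	 * next_off         = unpacked_ea_size(ea)
	 * off = next_off:  compare name/name_len again; stop once next_off >= bytes
	 */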
96936 + * ntfs_read_ea
96937 + *
96938 + * Reads all extended attributes.
96939 + * ea - newly allocated memory
96940 + * info - pointer into resident data
96941 + */
96942 +static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
96943 +                       size_t add_bytes, const struct EA_INFO **info)
96945 +       int err;
96946 +       struct ATTR_LIST_ENTRY *le = NULL;
96947 +       struct ATTRIB *attr_info, *attr_ea;
96948 +       void *ea_p;
96949 +       u32 size;
96951 +       static_assert(le32_to_cpu(ATTR_EA_INFO) < le32_to_cpu(ATTR_EA));
96953 +       *ea = NULL;
96954 +       *info = NULL;
96956 +       attr_info =
96957 +               ni_find_attr(ni, NULL, &le, ATTR_EA_INFO, NULL, 0, NULL, NULL);
96958 +       attr_ea =
96959 +               ni_find_attr(ni, attr_info, &le, ATTR_EA, NULL, 0, NULL, NULL);
96961 +       if (!attr_ea || !attr_info)
96962 +               return 0;
96964 +       *info = resident_data_ex(attr_info, sizeof(struct EA_INFO));
96965 +       if (!*info)
96966 +               return -EINVAL;
96968 +       /* Check EA limit */
96969 +       size = le32_to_cpu((*info)->size);
96970 +       if (size > ni->mi.sbi->ea_max_size)
96971 +               return -EFBIG;
96973 +       if (attr_size(attr_ea) > ni->mi.sbi->ea_max_size)
96974 +               return -EFBIG;
96976 +       /* Allocate memory for packed EA */
96977 +       ea_p = ntfs_malloc(size + add_bytes);
96978 +       if (!ea_p)
96979 +               return -ENOMEM;
96981 +       if (attr_ea->non_res) {
96982 +               struct runs_tree run;
96984 +               run_init(&run);
96986 +               err = attr_load_runs(attr_ea, ni, &run, NULL);
96987 +               if (!err)
96988 +                       err = ntfs_read_run_nb(ni->mi.sbi, &run, 0, ea_p, size,
96989 +                                              NULL);
96990 +               run_close(&run);
96992 +               if (err)
96993 +                       goto out;
96994 +       } else {
96995 +               void *p = resident_data_ex(attr_ea, size);
96997 +               if (!p) {
96998 +                       err = -EINVAL;
96999 +                       goto out;
97000 +               }
97001 +               memcpy(ea_p, p, size);
97002 +       }
97004 +       memset(Add2Ptr(ea_p, size), 0, add_bytes);
97005 +       *ea = ea_p;
97006 +       return 0;
97008 +out:
97009 +       ntfs_free(ea_p);
97010 +       *ea = NULL;
97011 +       return err;
97015 + * ntfs_list_ea
97016 + *
97017 + * Copies the list of xattr names into the buffer
97018 + * provided, or computes the buffer size required.
97019 + *
97020 + * Returns a negative error number on failure, or the number of bytes
97021 + * used / required on success.
97022 + */
97023 +static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
97024 +                           size_t bytes_per_buffer)
97026 +       const struct EA_INFO *info;
97027 +       struct EA_FULL *ea_all = NULL;
97028 +       const struct EA_FULL *ea;
97029 +       u32 off, size;
97030 +       int err;
97031 +       size_t ret;
97033 +       err = ntfs_read_ea(ni, &ea_all, 0, &info);
97034 +       if (err)
97035 +               return err;
97037 +       if (!info || !ea_all)
97038 +               return 0;
97040 +       size = le32_to_cpu(info->size);
97042 +       /* Enumerate all xattrs */
97043 +       for (ret = 0, off = 0; off < size; off += unpacked_ea_size(ea)) {
97044 +               ea = Add2Ptr(ea_all, off);
97046 +               if (buffer) {
97047 +                       if (ret + ea->name_len + 1 > bytes_per_buffer) {
97048 +                               err = -ERANGE;
97049 +                               goto out;
97050 +                       }
97052 +                       memcpy(buffer + ret, ea->name, ea->name_len);
97053 +                       buffer[ret + ea->name_len] = 0;
97054 +               }
97056 +               ret += ea->name_len + 1;
97057 +       }
97059 +out:
97060 +       ntfs_free(ea_all);
97061 +       return err ? err : ret;
97064 +static int ntfs_get_ea(struct inode *inode, const char *name, size_t name_len,
97065 +                      void *buffer, size_t size, size_t *required)
97067 +       struct ntfs_inode *ni = ntfs_i(inode);
97068 +       const struct EA_INFO *info;
97069 +       struct EA_FULL *ea_all = NULL;
97070 +       const struct EA_FULL *ea;
97071 +       u32 off, len;
97072 +       int err;
97074 +       if (!(ni->ni_flags & NI_FLAG_EA))
97075 +               return -ENODATA;
97077 +       if (!required)
97078 +               ni_lock(ni);
97080 +       len = 0;
97082 +       if (name_len > 255) {
97083 +               err = -ENAMETOOLONG;
97084 +               goto out;
97085 +       }
97087 +       err = ntfs_read_ea(ni, &ea_all, 0, &info);
97088 +       if (err)
97089 +               goto out;
97091 +       if (!info)
97092 +               goto out;
97094 +       /* Enumerate all xattrs */
97095 +       if (!find_ea(ea_all, le32_to_cpu(info->size), name, name_len, &off)) {
97096 +               err = -ENODATA;
97097 +               goto out;
97098 +       }
97099 +       ea = Add2Ptr(ea_all, off);
97101 +       len = le16_to_cpu(ea->elength);
97102 +       if (!buffer) {
97103 +               err = 0;
97104 +               goto out;
97105 +       }
97107 +       if (len > size) {
97108 +               err = -ERANGE;
97109 +               if (required)
97110 +                       *required = len;
97111 +               goto out;
97112 +       }
97114 +       memcpy(buffer, ea->name + ea->name_len + 1, len);
97115 +       err = 0;
97117 +out:
97118 +       ntfs_free(ea_all);
97119 +       if (!required)
97120 +               ni_unlock(ni);
97122 +       return err ? err : len;
97125 +static noinline int ntfs_set_ea(struct inode *inode, const char *name,
97126 +                               size_t name_len, const void *value,
97127 +                               size_t val_size, int flags, int locked)
97129 +       struct ntfs_inode *ni = ntfs_i(inode);
97130 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
97131 +       int err;
97132 +       struct EA_INFO ea_info;
97133 +       const struct EA_INFO *info;
97134 +       struct EA_FULL *new_ea;
97135 +       struct EA_FULL *ea_all = NULL;
97136 +       size_t add, new_pack;
97137 +       u32 off, size;
97138 +       __le16 size_pack;
97139 +       struct ATTRIB *attr;
97140 +       struct ATTR_LIST_ENTRY *le;
97141 +       struct mft_inode *mi;
97142 +       struct runs_tree ea_run;
97143 +       u64 new_sz;
97144 +       void *p;
97146 +       if (!locked)
97147 +               ni_lock(ni);
97149 +       run_init(&ea_run);
97151 +       if (name_len > 255) {
97152 +               err = -ENAMETOOLONG;
97153 +               goto out;
97154 +       }
97156 +       add = DwordAlign(struct_size(ea_all, name, 1 + name_len + val_size));
97158 +       err = ntfs_read_ea(ni, &ea_all, add, &info);
97159 +       if (err)
97160 +               goto out;
97162 +       if (!info) {
97163 +               memset(&ea_info, 0, sizeof(ea_info));
97164 +               size = 0;
97165 +               size_pack = 0;
97166 +       } else {
97167 +               memcpy(&ea_info, info, sizeof(ea_info));
97168 +               size = le32_to_cpu(ea_info.size);
97169 +               size_pack = ea_info.size_pack;
97170 +       }
97172 +       if (info && find_ea(ea_all, size, name, name_len, &off)) {
97173 +               struct EA_FULL *ea;
97174 +               size_t ea_sz;
97176 +               if (flags & XATTR_CREATE) {
97177 +                       err = -EEXIST;
97178 +                       goto out;
97179 +               }
97181 +               /* Remove current xattr */
97182 +               ea = Add2Ptr(ea_all, off);
97183 +               if (ea->flags & FILE_NEED_EA)
97184 +                       le16_add_cpu(&ea_info.count, -1);
97186 +               ea_sz = unpacked_ea_size(ea);
97188 +               le16_add_cpu(&ea_info.size_pack, 0 - packed_ea_size(ea));
97190 +               memmove(ea, Add2Ptr(ea, ea_sz), size - off - ea_sz);
97192 +               size -= ea_sz;
97193 +               memset(Add2Ptr(ea_all, size), 0, ea_sz);
97195 +               ea_info.size = cpu_to_le32(size);
97197 +               if ((flags & XATTR_REPLACE) && !val_size)
97198 +                       goto update_ea;
97199 +       } else {
97200 +               if (flags & XATTR_REPLACE) {
97201 +                       err = -ENODATA;
97202 +                       goto out;
97203 +               }
97205 +               if (!ea_all) {
97206 +                       ea_all = ntfs_zalloc(add);
97207 +                       if (!ea_all) {
97208 +                               err = -ENOMEM;
97209 +                               goto out;
97210 +                       }
97211 +               }
97212 +       }
97214 +       /* append new xattr */
97215 +       new_ea = Add2Ptr(ea_all, size);
97216 +       new_ea->size = cpu_to_le32(add);
97217 +       new_ea->flags = 0;
97218 +       new_ea->name_len = name_len;
97219 +       new_ea->elength = cpu_to_le16(val_size);
97220 +       memcpy(new_ea->name, name, name_len);
97221 +       new_ea->name[name_len] = 0;
97222 +       memcpy(new_ea->name + name_len + 1, value, val_size);
97223 +       new_pack = le16_to_cpu(ea_info.size_pack) + packed_ea_size(new_ea);
97225 +       /* should fit into 16 bits */
97226 +       if (new_pack > 0xffff) {
97227 +               err = -EFBIG; // -EINVAL?
97228 +               goto out;
97229 +       }
97230 +       ea_info.size_pack = cpu_to_le16(new_pack);
97232 +       /* new size of ATTR_EA */
97233 +       size += add;
97234 +       if (size > sbi->ea_max_size) {
97235 +               err = -EFBIG; // -EINVAL?
97236 +               goto out;
97237 +       }
97238 +       ea_info.size = cpu_to_le32(size);
97240 +update_ea:
97242 +       if (!info) {
97243 +               /* Create xattr */
97244 +               if (!size) {
97245 +                       err = 0;
97246 +                       goto out;
97247 +               }
97249 +               err = ni_insert_resident(ni, sizeof(struct EA_INFO),
97250 +                                        ATTR_EA_INFO, NULL, 0, NULL, NULL);
97251 +               if (err)
97252 +                       goto out;
97254 +               err = ni_insert_resident(ni, 0, ATTR_EA, NULL, 0, NULL, NULL);
97255 +               if (err)
97256 +                       goto out;
97257 +       }
97259 +       new_sz = size;
97260 +       err = attr_set_size(ni, ATTR_EA, NULL, 0, &ea_run, new_sz, &new_sz,
97261 +                           false, NULL);
97262 +       if (err)
97263 +               goto out;
97265 +       le = NULL;
97266 +       attr = ni_find_attr(ni, NULL, &le, ATTR_EA_INFO, NULL, 0, NULL, &mi);
97267 +       if (!attr) {
97268 +               err = -EINVAL;
97269 +               goto out;
97270 +       }
97272 +       if (!size) {
97273 +               /* delete xattr, ATTR_EA_INFO */
97274 +               err = ni_remove_attr_le(ni, attr, le);
97275 +               if (err)
97276 +                       goto out;
97277 +       } else {
97278 +               p = resident_data_ex(attr, sizeof(struct EA_INFO));
97279 +               if (!p) {
97280 +                       err = -EINVAL;
97281 +                       goto out;
97282 +               }
97283 +               memcpy(p, &ea_info, sizeof(struct EA_INFO));
97284 +               mi->dirty = true;
97285 +       }
97287 +       le = NULL;
97288 +       attr = ni_find_attr(ni, NULL, &le, ATTR_EA, NULL, 0, NULL, &mi);
97289 +       if (!attr) {
97290 +               err = -EINVAL;
97291 +               goto out;
97292 +       }
97294 +       if (!size) {
97295 +               /* delete xattr, ATTR_EA */
97296 +               err = ni_remove_attr_le(ni, attr, le);
97297 +               if (err)
97298 +                       goto out;
97299 +       } else if (attr->non_res) {
97300 +               err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size);
97301 +               if (err)
97302 +                       goto out;
97303 +       } else {
97304 +               p = resident_data_ex(attr, size);
97305 +               if (!p) {
97306 +                       err = -EINVAL;
97307 +                       goto out;
97308 +               }
97309 +               memcpy(p, ea_all, size);
97310 +               mi->dirty = true;
97311 +       }
97313 +       /* Check if we deleted the last xattr */
97314 +       if (size)
97315 +               ni->ni_flags |= NI_FLAG_EA;
97316 +       else
97317 +               ni->ni_flags &= ~NI_FLAG_EA;
97319 +       if (ea_info.size_pack != size_pack)
97320 +               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
97321 +       mark_inode_dirty(&ni->vfs_inode);
97323 +out:
97324 +       if (!locked)
97325 +               ni_unlock(ni);
97327 +       run_close(&ea_run);
97328 +       ntfs_free(ea_all);
97330 +       return err;
97333 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
97334 +static inline void ntfs_posix_acl_release(struct posix_acl *acl)
97336 +       if (acl && refcount_dec_and_test(&acl->a_refcount))
97337 +               kfree(acl);
97340 +static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
97341 +                                        struct inode *inode, int type,
97342 +                                        int locked)
97344 +       struct ntfs_inode *ni = ntfs_i(inode);
97345 +       const char *name;
97346 +       size_t name_len;
97347 +       struct posix_acl *acl;
97348 +       size_t req;
97349 +       int err;
97350 +       void *buf;
97352 +       /* allocate PATH_MAX bytes */
97353 +       buf = __getname();
97354 +       if (!buf)
97355 +               return ERR_PTR(-ENOMEM);
97357 +       /* Possible values of 'type' were already checked above */
97358 +       if (type == ACL_TYPE_ACCESS) {
97359 +               name = XATTR_NAME_POSIX_ACL_ACCESS;
97360 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
97361 +       } else {
97362 +               name = XATTR_NAME_POSIX_ACL_DEFAULT;
97363 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1;
97364 +       }
97366 +       if (!locked)
97367 +               ni_lock(ni);
97369 +       err = ntfs_get_ea(inode, name, name_len, buf, PATH_MAX, &req);
97371 +       if (!locked)
97372 +               ni_unlock(ni);
97374 +       /* Translate extended attribute to ACL */
97375 +       if (err > 0) {
97376 +               acl = posix_acl_from_xattr(mnt_userns, buf, err);
97377 +               if (!IS_ERR(acl))
97378 +                       set_cached_acl(inode, type, acl);
97379 +       } else {
97380 +               acl = err == -ENODATA ? NULL : ERR_PTR(err);
97381 +       }
97383 +       __putname(buf);
97385 +       return acl;
97389 + * ntfs_get_acl
97390 + *
97391 + * inode_operations::get_acl
97392 + */
97393 +struct posix_acl *ntfs_get_acl(struct inode *inode, int type)
97395 +       /* TODO: init_user_ns? */
97396 +       return ntfs_get_acl_ex(&init_user_ns, inode, type, 0);
97399 +static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
97400 +                                   struct inode *inode, struct posix_acl *acl,
97401 +                                   int type, int locked)
97403 +       const char *name;
97404 +       size_t size, name_len;
97405 +       void *value = NULL;
97406 +       int err = 0;
97408 +       if (S_ISLNK(inode->i_mode))
97409 +               return -EOPNOTSUPP;
97411 +       switch (type) {
97412 +       case ACL_TYPE_ACCESS:
97413 +               if (acl) {
97414 +                       umode_t mode = inode->i_mode;
97416 +                       err = posix_acl_equiv_mode(acl, &mode);
97417 +                       if (err < 0)
97418 +                               return err;
97420 +                       if (inode->i_mode != mode) {
97421 +                               inode->i_mode = mode;
97422 +                               mark_inode_dirty(inode);
97423 +                       }
97425 +                       if (!err) {
97426 +                               /*
97427 +                                * acl can be exactly represented in the
97428 +                                * traditional file mode permission bits
97429 +                                */
97430 +                               acl = NULL;
97431 +                               goto out;
97432 +                       }
97433 +               }
97434 +               name = XATTR_NAME_POSIX_ACL_ACCESS;
97435 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
97436 +               break;
97438 +       case ACL_TYPE_DEFAULT:
97439 +               if (!S_ISDIR(inode->i_mode))
97440 +                       return acl ? -EACCES : 0;
97441 +               name = XATTR_NAME_POSIX_ACL_DEFAULT;
97442 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1;
97443 +               break;
97445 +       default:
97446 +               return -EINVAL;
97447 +       }
97449 +       if (!acl)
97450 +               goto out;
97452 +       size = posix_acl_xattr_size(acl->a_count);
97453 +       value = ntfs_malloc(size);
97454 +       if (!value)
97455 +               return -ENOMEM;
97457 +       err = posix_acl_to_xattr(mnt_userns, acl, value, size);
97458 +       if (err)
97459 +               goto out;
97461 +       err = ntfs_set_ea(inode, name, name_len, value, size, 0, locked);
97462 +       if (err)
97463 +               goto out;
97465 +       inode->i_flags &= ~S_NOSEC;
97467 +out:
97468 +       if (!err)
97469 +               set_cached_acl(inode, type, acl);
97471 +       kfree(value);
97473 +       return err;
97477 + * ntfs_set_acl
97478 + *
97479 + * inode_operations::set_acl
97480 + */
97481 +int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
97482 +                struct posix_acl *acl, int type)
97484 +       return ntfs_set_acl_ex(mnt_userns, inode, acl, type, 0);
97487 +static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
97488 +                             struct inode *inode, int type, void *buffer,
97489 +                             size_t size)
97491 +       struct posix_acl *acl;
97492 +       int err;
97494 +       if (!(inode->i_sb->s_flags & SB_POSIXACL))
97495 +               return -EOPNOTSUPP;
97497 +       acl = ntfs_get_acl(inode, type);
97498 +       if (IS_ERR(acl))
97499 +               return PTR_ERR(acl);
97501 +       if (!acl)
97502 +               return -ENODATA;
97504 +       err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
97505 +       ntfs_posix_acl_release(acl);
97507 +       return err;
97510 +static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
97511 +                             struct inode *inode, int type, const void *value,
97512 +                             size_t size)
97514 +       struct posix_acl *acl;
97515 +       int err;
97517 +       if (!(inode->i_sb->s_flags & SB_POSIXACL))
97518 +               return -EOPNOTSUPP;
97520 +       if (!inode_owner_or_capable(mnt_userns, inode))
97521 +               return -EPERM;
97523 +       if (!value)
97524 +               return 0;
97526 +       acl = posix_acl_from_xattr(mnt_userns, value, size);
97527 +       if (IS_ERR(acl))
97528 +               return PTR_ERR(acl);
97530 +       if (acl) {
97531 +               err = posix_acl_valid(mnt_userns, acl);
97532 +               if (err)
97533 +                       goto release_and_out;
97534 +       }
97536 +       err = ntfs_set_acl(mnt_userns, inode, acl, type);
97538 +release_and_out:
97539 +       ntfs_posix_acl_release(acl);
97540 +       return err;
97544 + * Initialize the ACLs of a new inode. Called from ntfs_create_inode.
97545 + */
97546 +int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
97547 +                 struct inode *dir)
97549 +       struct posix_acl *default_acl, *acl;
97550 +       int err;
97552 +       /*
97553 +        * TODO: refactor locking
97554 +        * ni_lock(dir) ... -> posix_acl_create(dir,...) -> ntfs_get_acl -> ni_lock(dir)
97555 +        */
97556 +       inode->i_default_acl = NULL;
97558 +       default_acl = ntfs_get_acl_ex(mnt_userns, dir, ACL_TYPE_DEFAULT, 1);
97560 +       if (!default_acl || default_acl == ERR_PTR(-EOPNOTSUPP)) {
97561 +               inode->i_mode &= ~current_umask();
97562 +               err = 0;
97563 +               goto out;
97564 +       }
97566 +       if (IS_ERR(default_acl)) {
97567 +               err = PTR_ERR(default_acl);
97568 +               goto out;
97569 +       }
97571 +       acl = default_acl;
97572 +       err = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
97573 +       if (err < 0)
97574 +               goto out1;
97575 +       if (!err) {
97576 +               posix_acl_release(acl);
97577 +               acl = NULL;
97578 +       }
97580 +       if (!S_ISDIR(inode->i_mode)) {
97581 +               posix_acl_release(default_acl);
97582 +               default_acl = NULL;
97583 +       }
97585 +       if (default_acl)
97586 +               err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
97587 +                                     ACL_TYPE_DEFAULT, 1);
97589 +       if (!acl)
97590 +               inode->i_acl = NULL;
97591 +       else if (!err)
97592 +               err = ntfs_set_acl_ex(mnt_userns, inode, acl, ACL_TYPE_ACCESS,
97593 +                                     1);
97595 +       posix_acl_release(acl);
97596 +out1:
97597 +       posix_acl_release(default_acl);
97599 +out:
97600 +       return err;
97602 +#endif
97605 + * ntfs_acl_chmod
97606 + *
97607 + * helper for 'ntfs3_setattr'
97608 + */
97609 +int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode)
97611 +       struct super_block *sb = inode->i_sb;
97613 +       if (!(sb->s_flags & SB_POSIXACL))
97614 +               return 0;
97616 +       if (S_ISLNK(inode->i_mode))
97617 +               return -EOPNOTSUPP;
97619 +       return posix_acl_chmod(mnt_userns, inode, inode->i_mode);
97623 + * ntfs_permission
97624 + *
97625 + * inode_operations::permission
97626 + */
97627 +int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
97628 +                   int mask)
97630 +       if (ntfs_sb(inode->i_sb)->options.no_acs_rules) {
97631 +               /* "no access rules" mode - allow all changes */
97632 +               return 0;
97633 +       }
97635 +       return generic_permission(mnt_userns, inode, mask);
97639 + * ntfs_listxattr
97640 + *
97641 + * inode_operations::listxattr
97642 + */
97643 +ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
97645 +       struct inode *inode = d_inode(dentry);
97646 +       struct ntfs_inode *ni = ntfs_i(inode);
97647 +       ssize_t ret;
97649 +       if (!(ni->ni_flags & NI_FLAG_EA)) {
97650 +               /* no xattr in file */
97651 +               return 0;
97652 +       }
97654 +       ni_lock(ni);
97656 +       ret = ntfs_list_ea(ni, buffer, size);
97658 +       ni_unlock(ni);
97660 +       return ret;
97663 +static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
97664 +                        struct inode *inode, const char *name, void *buffer,
97665 +                        size_t size)
97667 +       int err;
97668 +       struct ntfs_inode *ni = ntfs_i(inode);
97669 +       size_t name_len = strlen(name);
97671 +       /* Dispatch request */
97672 +       if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
97673 +           !memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
97674 +               /* system.dos_attrib */
97675 +               if (!buffer) {
97676 +                       err = sizeof(u8);
97677 +               } else if (size < sizeof(u8)) {
97678 +                       err = -ENODATA;
97679 +               } else {
97680 +                       err = sizeof(u8);
97681 +                       *(u8 *)buffer = le32_to_cpu(ni->std_fa);
97682 +               }
97683 +               goto out;
97684 +       }
97686 +       if (name_len == sizeof(SYSTEM_NTFS_ATTRIB) - 1 &&
97687 +           !memcmp(name, SYSTEM_NTFS_ATTRIB, sizeof(SYSTEM_NTFS_ATTRIB))) {
97688 +               /* system.ntfs_attrib */
97689 +               if (!buffer) {
97690 +                       err = sizeof(u32);
97691 +               } else if (size < sizeof(u32)) {
97692 +                       err = -ENODATA;
97693 +               } else {
97694 +                       err = sizeof(u32);
97695 +                       *(u32 *)buffer = le32_to_cpu(ni->std_fa);
97696 +               }
97697 +               goto out;
97698 +       }
97700 +       if (name_len == sizeof(SYSTEM_NTFS_SECURITY) - 1 &&
97701 +           !memcmp(name, SYSTEM_NTFS_SECURITY, sizeof(SYSTEM_NTFS_SECURITY))) {
97702 +               /* system.ntfs_security */
97703 +               struct SECURITY_DESCRIPTOR_RELATIVE *sd = NULL;
97704 +               size_t sd_size = 0;
97706 +               if (!is_ntfs3(ni->mi.sbi)) {
97707 +                       /* would need to fetch NT4-style security; not supported */
97708 +                       err = -EINVAL;
97709 +                       goto out;
97710 +               } else if (le32_to_cpu(ni->std_security_id) <
97711 +                          SECURITY_ID_FIRST) {
97712 +                       err = -ENOENT;
97713 +                       goto out;
97714 +               }
97716 +               err = ntfs_get_security_by_id(ni->mi.sbi, ni->std_security_id,
97717 +                                             &sd, &sd_size);
97718 +               if (err)
97719 +                       goto out;
97721 +               if (!is_sd_valid(sd, sd_size)) {
97722 +                       ntfs_inode_warn(
97723 +                               inode,
97724 +                               "looks like you got an incorrect security descriptor id=%u",
97725 +                               ni->std_security_id);
97726 +               }
97728 +               if (!buffer) {
97729 +                       err = sd_size;
97730 +               } else if (size < sd_size) {
97731 +                       err = -ENODATA;
97732 +               } else {
97733 +                       err = sd_size;
97734 +                       memcpy(buffer, sd, sd_size);
97735 +               }
97736 +               ntfs_free(sd);
97737 +               goto out;
97738 +       }
97740 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
97741 +       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
97742 +            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
97743 +                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
97744 +           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
97745 +            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
97746 +                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
97747 +               /* TODO: init_user_ns? */
97748 +               err = ntfs_xattr_get_acl(
97749 +                       &init_user_ns, inode,
97750 +                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
97751 +                               ? ACL_TYPE_ACCESS
97752 +                               : ACL_TYPE_DEFAULT,
97753 +                       buffer, size);
97754 +               goto out;
97755 +       }
97756 +#endif
97757 +       /* Deal with NTFS extended attributes */
97758 +       err = ntfs_get_ea(inode, name, name_len, buffer, size, NULL);
97760 +out:
97761 +       return err;
97765 + * ntfs_setxattr
97766 + *
97767 + * inode_operations::setxattr
97768 + */
97769 +static noinline int ntfs_setxattr(const struct xattr_handler *handler,
97770 +                                 struct user_namespace *mnt_userns,
97771 +                                 struct dentry *de, struct inode *inode,
97772 +                                 const char *name, const void *value,
97773 +                                 size_t size, int flags)
97775 +       int err = -EINVAL;
97776 +       struct ntfs_inode *ni = ntfs_i(inode);
97777 +       size_t name_len = strlen(name);
97778 +       enum FILE_ATTRIBUTE new_fa;
97780 +       /* Dispatch request */
97781 +       if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
97782 +           !memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
97783 +               if (sizeof(u8) != size)
97784 +                       goto out;
97785 +               new_fa = cpu_to_le32(*(u8 *)value);
97786 +               goto set_new_fa;
97787 +       }
97789 +       if (name_len == sizeof(SYSTEM_NTFS_ATTRIB) - 1 &&
97790 +           !memcmp(name, SYSTEM_NTFS_ATTRIB, sizeof(SYSTEM_NTFS_ATTRIB))) {
97791 +               if (size != sizeof(u32))
97792 +                       goto out;
97793 +               new_fa = cpu_to_le32(*(u32 *)value);
97795 +               if (S_ISREG(inode->i_mode)) {
97796 +                       /* Process compressed/sparse files in a special way */
97797 +                       ni_lock(ni);
97798 +                       err = ni_new_attr_flags(ni, new_fa);
97799 +                       ni_unlock(ni);
97800 +                       if (err)
97801 +                               goto out;
97802 +               }
97803 +set_new_fa:
97804 +               /*
97805 +                * Thanks Mark Harmstone:
97806 +                * keep the directory bit consistent
97807 +                */
97808 +               if (S_ISDIR(inode->i_mode))
97809 +                       new_fa |= FILE_ATTRIBUTE_DIRECTORY;
97810 +               else
97811 +                       new_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
97813 +               if (ni->std_fa != new_fa) {
97814 +                       ni->std_fa = new_fa;
97815 +                       if (new_fa & FILE_ATTRIBUTE_READONLY)
97816 +                               inode->i_mode &= ~0222;
97817 +                       else
97818 +                               inode->i_mode |= 0222;
97819 +                       /* std attribute always in primary record */
97820 +                       ni->mi.dirty = true;
97821 +                       mark_inode_dirty(inode);
97822 +               }
97823 +               err = 0;
97825 +               goto out;
97826 +       }
97828 +       if (name_len == sizeof(SYSTEM_NTFS_SECURITY) - 1 &&
97829 +           !memcmp(name, SYSTEM_NTFS_SECURITY, sizeof(SYSTEM_NTFS_SECURITY))) {
97830 +               /* system.ntfs_security */
97831 +               __le32 security_id;
97832 +               bool inserted;
97833 +               struct ATTR_STD_INFO5 *std;
97835 +               if (!is_ntfs3(ni->mi.sbi)) {
97836 +                       /*
97837 +                        * We would need to replace ATTR_SECURE here;
97838 +                        * skip this path since it is an NT4 feature.
97839 +                        */
97840 +                       err = -EINVAL;
97841 +                       goto out;
97842 +               }
97844 +               if (!is_sd_valid(value, size)) {
97845 +                       err = -EINVAL;
97846 +                       ntfs_inode_warn(
97847 +                               inode,
97848 +                               "you are trying to set an invalid security descriptor");
97849 +                       goto out;
97850 +               }
97852 +               err = ntfs_insert_security(ni->mi.sbi, value, size,
97853 +                                          &security_id, &inserted);
97854 +               if (err)
97855 +                       goto out;
97857 +               ni_lock(ni);
97858 +               std = ni_std5(ni);
97859 +               if (!std) {
97860 +                       err = -EINVAL;
97861 +               } else if (std->security_id != security_id) {
97862 +                       std->security_id = ni->std_security_id = security_id;
97863 +                       /* std attribute always in primary record */
97864 +                       ni->mi.dirty = true;
97865 +                       mark_inode_dirty(&ni->vfs_inode);
97866 +               }
97867 +               ni_unlock(ni);
97868 +               goto out;
97869 +       }
97871 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
97872 +       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
97873 +            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
97874 +                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
97875 +           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
97876 +            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
97877 +                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
97878 +               /* TODO: init_user_ns? */
97879 +               err = ntfs_xattr_set_acl(
97880 +                       &init_user_ns, inode,
97881 +                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
97882 +                               ? ACL_TYPE_ACCESS
97883 +                               : ACL_TYPE_DEFAULT,
97884 +                       value, size);
97885 +               goto out;
97886 +       }
97887 +#endif
97888 +       /* Deal with NTFS extended attributes */
97889 +       err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
97891 +out:
97892 +       return err;
97895 +static bool ntfs_xattr_user_list(struct dentry *dentry)
97897 +       return true;
97900 +static const struct xattr_handler ntfs_xattr_handler = {
97901 +       .prefix = "",
97902 +       .get = ntfs_getxattr,
97903 +       .set = ntfs_setxattr,
97904 +       .list = ntfs_xattr_user_list,
97907 +const struct xattr_handler *ntfs_xattr_handlers[] = {
97908 +       &ntfs_xattr_handler,
97909 +       NULL,
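
For context: with this catch-all handler (empty prefix) registered, the special names defined at the top of the file are reachable through the ordinary xattr syscalls. A minimal userspace sketch, assuming an ntfs3 mount at a hypothetical path:

#include <stdio.h>
#include <stdint.h>
#include <sys/xattr.h>

int main(void)
{
	uint32_t attrib;
	/* Path is hypothetical; "system.ntfs_attrib" is dispatched above. */
	ssize_t n = getxattr("/mnt/ntfs/file.txt", "system.ntfs_attrib",
			     &attrib, sizeof(attrib));

	if (n == (ssize_t)sizeof(attrib))
		printf("NTFS attributes: 0x%08x\n", attrib);
	else
		perror("getxattr");
	return 0;
}
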
97911 diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
97912 index 0b2891c6c71e..2846b943e80c 100644
97913 --- a/fs/overlayfs/copy_up.c
97914 +++ b/fs/overlayfs/copy_up.c
97915 @@ -932,7 +932,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
97916  static int ovl_copy_up_flags(struct dentry *dentry, int flags)
97918         int err = 0;
97919 -       const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
97920 +       const struct cred *old_cred;
97921         bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);
97923         /*
97924 @@ -943,6 +943,7 @@ static int ovl_copy_up_flags(struct dentry *dentry, int flags)
97925         if (WARN_ON(disconnected && d_is_dir(dentry)))
97926                 return -EIO;
97928 +       old_cred = ovl_override_creds(dentry->d_sb);
97929         while (!err) {
97930                 struct dentry *next;
97931                 struct dentry *parent = NULL;
97932 diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
97933 index 3fe05fb5d145..71e264e2f16b 100644
97934 --- a/fs/overlayfs/namei.c
97935 +++ b/fs/overlayfs/namei.c
97936 @@ -919,6 +919,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
97937                         continue;
97939                 if ((uppermetacopy || d.metacopy) && !ofs->config.metacopy) {
97940 +                       dput(this);
97941                         err = -EPERM;
97942                         pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", dentry);
97943                         goto out_put;
97944 diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
97945 index 95cff83786a5..2322f854533c 100644
97946 --- a/fs/overlayfs/overlayfs.h
97947 +++ b/fs/overlayfs/overlayfs.h
97948 @@ -319,9 +319,6 @@ int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
97949                        enum ovl_xattr ox, const void *value, size_t size,
97950                        int xerr);
97951  int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
97952 -void ovl_set_flag(unsigned long flag, struct inode *inode);
97953 -void ovl_clear_flag(unsigned long flag, struct inode *inode);
97954 -bool ovl_test_flag(unsigned long flag, struct inode *inode);
97955  bool ovl_inuse_trylock(struct dentry *dentry);
97956  void ovl_inuse_unlock(struct dentry *dentry);
97957  bool ovl_is_inuse(struct dentry *dentry);
97958 @@ -335,6 +332,21 @@ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
97959                              int padding);
97960  int ovl_sync_status(struct ovl_fs *ofs);
97962 +static inline void ovl_set_flag(unsigned long flag, struct inode *inode)
97964 +       set_bit(flag, &OVL_I(inode)->flags);
97967 +static inline void ovl_clear_flag(unsigned long flag, struct inode *inode)
97969 +       clear_bit(flag, &OVL_I(inode)->flags);
97972 +static inline bool ovl_test_flag(unsigned long flag, struct inode *inode)
97974 +       return test_bit(flag, &OVL_I(inode)->flags);
97977  static inline bool ovl_is_impuredir(struct super_block *sb,
97978                                     struct dentry *dentry)
97980 @@ -439,6 +451,18 @@ int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
97981                         struct dentry *dentry, int level);
97982  int ovl_indexdir_cleanup(struct ovl_fs *ofs);
97985 + * Can we iterate real dir directly?
97986 + *
97987 + * Non-merge dir may contain whiteouts from a time it was a merge upper, before
97988 + * lower dir was removed under it and possibly before it was rotated from upper
97989 + * to lower layer.
97990 + */
97991 +static inline bool ovl_dir_is_real(struct dentry *dir)
97993 +       return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
97996  /* inode.c */
97997  int ovl_set_nlink_upper(struct dentry *dentry);
97998  int ovl_set_nlink_lower(struct dentry *dentry);
97999 diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
98000 index f404a78e6b60..cc1e80257064 100644
98001 --- a/fs/overlayfs/readdir.c
98002 +++ b/fs/overlayfs/readdir.c
98003 @@ -319,18 +319,6 @@ static inline int ovl_dir_read(struct path *realpath,
98004         return err;
98008 - * Can we iterate real dir directly?
98009 - *
98010 - * Non-merge dir may contain whiteouts from a time it was a merge upper, before
98011 - * lower dir was removed under it and possibly before it was rotated from upper
98012 - * to lower layer.
98013 - */
98014 -static bool ovl_dir_is_real(struct dentry *dir)
98016 -       return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
98019  static void ovl_dir_reset(struct file *file)
98021         struct ovl_dir_file *od = file->private_data;
98022 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
98023 index fdd72f1a9c5e..787ce7c38fba 100644
98024 --- a/fs/overlayfs/super.c
98025 +++ b/fs/overlayfs/super.c
98026 @@ -380,6 +380,8 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
98027                            ofs->config.metacopy ? "on" : "off");
98028         if (ofs->config.ovl_volatile)
98029                 seq_puts(m, ",volatile");
98030 +       if (ofs->config.userxattr)
98031 +               seq_puts(m, ",userxattr");
98032         return 0;
98035 @@ -1826,7 +1828,8 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
98036   * - upper/work dir of any overlayfs instance
98037   */
98038  static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
98039 -                          struct dentry *dentry, const char *name)
98040 +                          struct dentry *dentry, const char *name,
98041 +                          bool is_lower)
98043         struct dentry *next = dentry, *parent;
98044         int err = 0;
98045 @@ -1838,7 +1841,7 @@ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
98047         /* Walk back ancestors to root (inclusive) looking for traps */
98048         while (!err && parent != next) {
98049 -               if (ovl_lookup_trap_inode(sb, parent)) {
98050 +               if (is_lower && ovl_lookup_trap_inode(sb, parent)) {
98051                         err = -ELOOP;
98052                         pr_err("overlapping %s path\n", name);
98053                 } else if (ovl_is_inuse(parent)) {
98054 @@ -1864,7 +1867,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
98056         if (ovl_upper_mnt(ofs)) {
98057                 err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root,
98058 -                                     "upperdir");
98059 +                                     "upperdir", false);
98060                 if (err)
98061                         return err;
98063 @@ -1875,7 +1878,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
98064                  * workbasedir.  In that case, we already have their traps in
98065                  * inode cache and we will catch that case on lookup.
98066                  */
98067 -               err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
98068 +               err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir",
98069 +                                     false);
98070                 if (err)
98071                         return err;
98072         }
98073 @@ -1883,7 +1887,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
98074         for (i = 1; i < ofs->numlayer; i++) {
98075                 err = ovl_check_layer(sb, ofs,
98076                                       ofs->layers[i].mnt->mnt_root,
98077 -                                     "lowerdir");
98078 +                                     "lowerdir", true);
98079                 if (err)
98080                         return err;
98081         }
98082 diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
98083 index 7f5a01a11f97..404a0a32ddf6 100644
98084 --- a/fs/overlayfs/util.c
98085 +++ b/fs/overlayfs/util.c
98086 @@ -422,18 +422,20 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
98087         }
98090 -static void ovl_dentry_version_inc(struct dentry *dentry, bool impurity)
98091 +static void ovl_dir_version_inc(struct dentry *dentry, bool impurity)
98093         struct inode *inode = d_inode(dentry);
98095         WARN_ON(!inode_is_locked(inode));
98096 +       WARN_ON(!d_is_dir(dentry));
98097         /*
98098 -        * Version is used by readdir code to keep cache consistent.  For merge
98099 -        * dirs all changes need to be noted.  For non-merge dirs, cache only
98100 -        * contains impure (ones which have been copied up and have origins)
98101 -        * entries, so only need to note changes to impure entries.
98102 +        * Version is used by readdir code to keep cache consistent.
98103 +        * For merge dirs (or dirs with origin) all changes need to be noted.
98104 +        * For non-merge dirs, cache contains only impure entries (i.e. ones
98105 +        * which have been copied up and have origins), so only need to note
98106 +        * changes to impure entries.
98107          */
98108 -       if (OVL_TYPE_MERGE(ovl_path_type(dentry)) || impurity)
98109 +       if (!ovl_dir_is_real(dentry) || impurity)
98110                 OVL_I(inode)->version++;
98113 @@ -442,7 +444,7 @@ void ovl_dir_modified(struct dentry *dentry, bool impurity)
98114         /* Copy mtime/ctime */
98115         ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry));
98117 -       ovl_dentry_version_inc(dentry, impurity);
98118 +       ovl_dir_version_inc(dentry, impurity);
98121  u64 ovl_dentry_version_get(struct dentry *dentry)
98122 @@ -638,21 +640,6 @@ int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
98123         return err;
98126 -void ovl_set_flag(unsigned long flag, struct inode *inode)
98128 -       set_bit(flag, &OVL_I(inode)->flags);
98131 -void ovl_clear_flag(unsigned long flag, struct inode *inode)
98133 -       clear_bit(flag, &OVL_I(inode)->flags);
98136 -bool ovl_test_flag(unsigned long flag, struct inode *inode)
98138 -       return test_bit(flag, &OVL_I(inode)->flags);
98141  /**
98142   * Caller must hold a reference to inode to prevent it from being freed while
98143   * it is marked inuse.
98144 diff --git a/fs/proc/array.c b/fs/proc/array.c
98145 index bb87e4d89cd8..7ec59171f197 100644
98146 --- a/fs/proc/array.c
98147 +++ b/fs/proc/array.c
98148 @@ -342,8 +342,10 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
98149         seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p));
98150  #ifdef CONFIG_SECCOMP
98151         seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
98152 +#ifdef CONFIG_SECCOMP_FILTER
98153         seq_put_decimal_ull(m, "\nSeccomp_filters:\t",
98154                             atomic_read(&p->seccomp.filter_count));
98155 +#endif
98156  #endif
98157         seq_puts(m, "\nSpeculation_Store_Bypass:\t");
98158         switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
98159 diff --git a/fs/proc/generic.c b/fs/proc/generic.c
98160 index bc86aa87cc41..5600da30e289 100644
98161 --- a/fs/proc/generic.c
98162 +++ b/fs/proc/generic.c
98163 @@ -756,7 +756,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
98164         while (1) {
98165                 next = pde_subdir_first(de);
98166                 if (next) {
98167 -                       if (unlikely(pde_is_permanent(root))) {
98168 +                       if (unlikely(pde_is_permanent(next))) {
98169                                 write_unlock(&proc_subdir_lock);
98170                                 WARN(1, "removing permanent /proc entry '%s/%s'",
98171                                         next->parent->name, next->name);
98172 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
98173 index e862cab69583..d292f20c4e3d 100644
98174 --- a/fs/proc/task_mmu.c
98175 +++ b/fs/proc/task_mmu.c
98176 @@ -19,6 +19,7 @@
98177  #include <linux/shmem_fs.h>
98178  #include <linux/uaccess.h>
98179  #include <linux/pkeys.h>
98180 +#include <linux/mm_inline.h>
98182  #include <asm/elf.h>
98183  #include <asm/tlb.h>
98184 @@ -1718,7 +1719,7 @@ static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
98185         if (PageSwapCache(page))
98186                 md->swapcache += nr_pages;
98188 -       if (PageActive(page) || PageUnevictable(page))
98189 +       if (PageUnevictable(page) || page_is_active(compound_head(page), NULL))
98190                 md->active += nr_pages;
98192         if (PageWriteback(page))
98193 diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
98194 index d963ae7902f9..67b194ba1b03 100644
98195 --- a/fs/pstore/platform.c
98196 +++ b/fs/pstore/platform.c
98197 @@ -218,7 +218,7 @@ static int zbufsize_842(size_t size)
98198  #if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
98199  static int zbufsize_zstd(size_t size)
98201 -       return ZSTD_compressBound(size);
98202 +       return zstd_compress_bound(size);
98204  #endif
98206 diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
98207 index 7b1128398976..89d492916dea 100644
98208 --- a/fs/squashfs/file.c
98209 +++ b/fs/squashfs/file.c
98210 @@ -211,11 +211,11 @@ static long long read_indexes(struct super_block *sb, int n,
98211   * If the skip factor is limited in this way then the file will use multiple
98212   * slots.
98213   */
98214 -static inline int calculate_skip(int blocks)
98215 +static inline int calculate_skip(u64 blocks)
98217 -       int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
98218 +       u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
98219                  * SQUASHFS_META_INDEXES);
98220 -       return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
98221 +       return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
98225 diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
98226 index b7cb1faa652d..6967c0aae801 100644
98227 --- a/fs/squashfs/zstd_wrapper.c
98228 +++ b/fs/squashfs/zstd_wrapper.c
98229 @@ -34,7 +34,7 @@ static void *zstd_init(struct squashfs_sb_info *msblk, void *buff)
98230                 goto failed;
98231         wksp->window_size = max_t(size_t,
98232                         msblk->block_size, SQUASHFS_METADATA_SIZE);
98233 -       wksp->mem_size = ZSTD_DStreamWorkspaceBound(wksp->window_size);
98234 +       wksp->mem_size = zstd_dstream_workspace_bound(wksp->window_size);
98235         wksp->mem = vmalloc(wksp->mem_size);
98236         if (wksp->mem == NULL)
98237                 goto failed;
98238 @@ -63,15 +63,15 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
98239         struct squashfs_page_actor *output)
98241         struct workspace *wksp = strm;
98242 -       ZSTD_DStream *stream;
98243 +       zstd_dstream *stream;
98244         size_t total_out = 0;
98245         int error = 0;
98246 -       ZSTD_inBuffer in_buf = { NULL, 0, 0 };
98247 -       ZSTD_outBuffer out_buf = { NULL, 0, 0 };
98248 +       zstd_in_buffer in_buf = { NULL, 0, 0 };
98249 +       zstd_out_buffer out_buf = { NULL, 0, 0 };
98250         struct bvec_iter_all iter_all = {};
98251         struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
98253 -       stream = ZSTD_initDStream(wksp->window_size, wksp->mem, wksp->mem_size);
98254 +       stream = zstd_init_dstream(wksp->window_size, wksp->mem, wksp->mem_size);
98256         if (!stream) {
98257                 ERROR("Failed to initialize zstd decompressor\n");
98258 @@ -116,14 +116,14 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
98259                 }
98261                 total_out -= out_buf.pos;
98262 -               zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf);
98263 +               zstd_err = zstd_decompress_stream(stream, &out_buf, &in_buf);
98264                 total_out += out_buf.pos; /* add the additional data produced */
98265                 if (zstd_err == 0)
98266                         break;
98268 -               if (ZSTD_isError(zstd_err)) {
98269 +               if (zstd_is_error(zstd_err)) {
98270                         ERROR("zstd decompression error: %d\n",
98271 -                                       (int)ZSTD_getErrorCode(zstd_err));
98272 +                                       (int)zstd_get_error_code(zstd_err));
98273                         error = -EIO;
98274                         break;
98275                 }
98276 diff --git a/fs/stat.c b/fs/stat.c
98277 index fbc171d038aa..1fa38bdec1a6 100644
98278 --- a/fs/stat.c
98279 +++ b/fs/stat.c
98280 @@ -86,12 +86,20 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
98281         /* SB_NOATIME means filesystem supplies dummy atime value */
98282         if (inode->i_sb->s_flags & SB_NOATIME)
98283                 stat->result_mask &= ~STATX_ATIME;
98285 +       /*
98286 +        * Note: If you add another clause to set an attribute flag, please
98287 +        * update attributes_mask below.
98288 +        */
98289         if (IS_AUTOMOUNT(inode))
98290                 stat->attributes |= STATX_ATTR_AUTOMOUNT;
98292         if (IS_DAX(inode))
98293                 stat->attributes |= STATX_ATTR_DAX;
98295 +       stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
98296 +                                 STATX_ATTR_DAX);
98298         mnt_userns = mnt_user_ns(path->mnt);
98299         if (inode->i_op->getattr)
98300                 return inode->i_op->getattr(mnt_userns, path, stat,
98301 diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
98302 index 0f8a6a16421b..1929ec63a0cb 100644
98303 --- a/fs/ubifs/replay.c
98304 +++ b/fs/ubifs/replay.c
98305 @@ -223,7 +223,8 @@ static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
98306          */
98307         list_for_each_entry_reverse(r, &c->replay_list, list) {
98308                 ubifs_assert(c, r->sqnum >= rino->sqnum);
98309 -               if (key_inum(c, &r->key) == key_inum(c, &rino->key))
98310 +               if (key_inum(c, &r->key) == key_inum(c, &rino->key) &&
98311 +                   key_type(c, &r->key) == UBIFS_INO_KEY)
98312                         return r->deletion == 0;
98314         }
98315 diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
98316 index 472b3039eabb..902e5f7e6642 100644
98317 --- a/fs/xfs/libxfs/xfs_attr.c
98318 +++ b/fs/xfs/libxfs/xfs_attr.c
98319 @@ -928,6 +928,7 @@ xfs_attr_node_addname(
98320          * Search to see if name already exists, and get back a pointer
98321          * to where it should go.
98322          */
98323 +       error = 0;
98324         retval = xfs_attr_node_hasname(args, &state);
98325         if (retval != -ENOATTR && retval != -EEXIST)
98326                 goto out;
98327 diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
98328 index fcde59c65a81..cb3d6b1c655d 100644
98329 --- a/include/crypto/acompress.h
98330 +++ b/include/crypto/acompress.h
98331 @@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
98332   * crypto_free_acomp() -- free ACOMPRESS tfm handle
98333   *
98334   * @tfm:       ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
98335 + *
98336 + * If @tfm is a NULL or error pointer, this function does nothing.
98337   */
98338  static inline void crypto_free_acomp(struct crypto_acomp *tfm)
98340 diff --git a/include/crypto/aead.h b/include/crypto/aead.h
98341 index fcc12c593ef8..e728469c4ccc 100644
98342 --- a/include/crypto/aead.h
98343 +++ b/include/crypto/aead.h
98344 @@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
98345  /**
98346   * crypto_free_aead() - zeroize and free aead handle
98347   * @tfm: cipher handle to be freed
98348 + *
98349 + * If @tfm is a NULL or error pointer, this function does nothing.
98350   */
98351  static inline void crypto_free_aead(struct crypto_aead *tfm)
98353 diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
98354 index 1d3aa252caba..5764b46bd1ec 100644
98355 --- a/include/crypto/akcipher.h
98356 +++ b/include/crypto/akcipher.h
98357 @@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
98358   * crypto_free_akcipher() - free AKCIPHER tfm handle
98359   *
98360   * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
98361 + *
98362 + * If @tfm is a NULL or error pointer, this function does nothing.
98363   */
98364  static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
98366 diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
98367 index 3a1c72fdb7cf..dabaee698718 100644
98368 --- a/include/crypto/chacha.h
98369 +++ b/include/crypto/chacha.h
98370 @@ -47,13 +47,18 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
98371                 hchacha_block_generic(state, out, nrounds);
98374 -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
98375 -static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
98376 +static inline void chacha_init_consts(u32 *state)
98378         state[0]  = 0x61707865; /* "expa" */
98379         state[1]  = 0x3320646e; /* "nd 3" */
98380         state[2]  = 0x79622d32; /* "2-by" */
98381         state[3]  = 0x6b206574; /* "te k" */
98384 +void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
98385 +static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
98387 +       chacha_init_consts(state);
98388         state[4]  = key[0];
98389         state[5]  = key[1];
98390         state[6]  = key[2];
98391 diff --git a/include/crypto/hash.h b/include/crypto/hash.h
98392 index 13f8a6a54ca8..b2bc1e46e86a 100644
98393 --- a/include/crypto/hash.h
98394 +++ b/include/crypto/hash.h
98395 @@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
98396  /**
98397   * crypto_free_ahash() - zeroize and free the ahash handle
98398   * @tfm: cipher handle to be freed
98399 + *
98400 + * If @tfm is a NULL or error pointer, this function does nothing.
98401   */
98402  static inline void crypto_free_ahash(struct crypto_ahash *tfm)
98404 @@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
98405  /**
98406   * crypto_free_shash() - zeroize and free the message digest handle
98407   * @tfm: cipher handle to be freed
98408 + *
98409 + * If @tfm is a NULL or error pointer, this function does nothing.
98410   */
98411  static inline void crypto_free_shash(struct crypto_shash *tfm)
98413 diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
98414 index 064e52ca5248..196aa769f296 100644
98415 --- a/include/crypto/internal/poly1305.h
98416 +++ b/include/crypto/internal/poly1305.h
98417 @@ -18,7 +18,8 @@
98418   * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
98419   */
98421 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
98422 +void poly1305_core_setkey(struct poly1305_core_key *key,
98423 +                         const u8 raw_key[POLY1305_BLOCK_SIZE]);
98424  static inline void poly1305_core_init(struct poly1305_state *state)
98426         *state = (struct poly1305_state){};
98427 diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
98428 index 88b591215d5c..cccceadc164b 100644
98429 --- a/include/crypto/kpp.h
98430 +++ b/include/crypto/kpp.h
98431 @@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
98432   * crypto_free_kpp() - free KPP tfm handle
98433   *
98434   * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
98435 + *
98436 + * If @tfm is a NULL or error pointer, this function does nothing.
98437   */
98438  static inline void crypto_free_kpp(struct crypto_kpp *tfm)
98440 diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
98441 index f1f67fc749cf..090692ec3bc7 100644
98442 --- a/include/crypto/poly1305.h
98443 +++ b/include/crypto/poly1305.h
98444 @@ -58,8 +58,10 @@ struct poly1305_desc_ctx {
98445         };
98446  };
98448 -void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
98449 -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
98450 +void poly1305_init_arch(struct poly1305_desc_ctx *desc,
98451 +                       const u8 key[POLY1305_KEY_SIZE]);
98452 +void poly1305_init_generic(struct poly1305_desc_ctx *desc,
98453 +                          const u8 key[POLY1305_KEY_SIZE]);
98455  static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
98457 diff --git a/include/crypto/rng.h b/include/crypto/rng.h
98458 index 8b4b844b4eef..17bb3673d3c1 100644
98459 --- a/include/crypto/rng.h
98460 +++ b/include/crypto/rng.h
98461 @@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
98462  /**
98463   * crypto_free_rng() - zeroize and free RNG handle
98464   * @tfm: cipher handle to be freed
98465 + *
98466 + * If @tfm is a NULL or error pointer, this function does nothing.
98467   */
98468  static inline void crypto_free_rng(struct crypto_rng *tfm)
98470 diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
98471 index 6a733b171a5d..ef0fc9ed4342 100644
98472 --- a/include/crypto/skcipher.h
98473 +++ b/include/crypto/skcipher.h
98474 @@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm(
98475  /**
98476   * crypto_free_skcipher() - zeroize and free cipher handle
98477   * @tfm: cipher handle to be freed
98478 + *
98479 + * If @tfm is a NULL or error pointer, this function does nothing.
98480   */
98481  static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
98483 diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
98484 index a94c03a61d8f..b2ed3481c6a0 100644
98485 --- a/include/keys/trusted-type.h
98486 +++ b/include/keys/trusted-type.h
98487 @@ -30,6 +30,7 @@ struct trusted_key_options {
98488         uint16_t keytype;
98489         uint32_t keyhandle;
98490         unsigned char keyauth[TPM_DIGEST_SIZE];
98491 +       uint32_t blobauth_len;
98492         unsigned char blobauth[TPM_DIGEST_SIZE];
98493         uint32_t pcrinfo_len;
98494         unsigned char pcrinfo[MAX_PCRINFO_SIZE];
98495 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
98496 index 158aefae1030..40c48e30f1eb 100644
98497 --- a/include/linux/blkdev.h
98498 +++ b/include/linux/blkdev.h
98499 @@ -620,6 +620,7 @@ struct request_queue {
98501  #define QUEUE_FLAG_MQ_DEFAULT  ((1 << QUEUE_FLAG_IO_STAT) |            \
98502                                  (1 << QUEUE_FLAG_SAME_COMP) |          \
98503 +                                (1 << QUEUE_FLAG_SAME_FORCE) |         \
98504                                  (1 << QUEUE_FLAG_NOWAIT))
98506  void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
98507 diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
98508 index 971b33aca13d..99bc82342ca0 100644
98509 --- a/include/linux/bpf_verifier.h
98510 +++ b/include/linux/bpf_verifier.h
98511 @@ -299,10 +299,11 @@ struct bpf_verifier_state_list {
98512  };
98514  /* Possible states for alu_state member. */
98515 -#define BPF_ALU_SANITIZE_SRC           1U
98516 -#define BPF_ALU_SANITIZE_DST           2U
98517 +#define BPF_ALU_SANITIZE_SRC           (1U << 0)
98518 +#define BPF_ALU_SANITIZE_DST           (1U << 1)
98519  #define BPF_ALU_NEG_VALUE              (1U << 2)
98520  #define BPF_ALU_NON_POINTER            (1U << 3)
98521 +#define BPF_ALU_IMMEDIATE              (1U << 4)
98522  #define BPF_ALU_SANITIZE               (BPF_ALU_SANITIZE_SRC | \
98523                                          BPF_ALU_SANITIZE_DST)
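Rewriting the first two flags as explicit shifts makes it clear that alu_state is a bitmask: the new BPF_ALU_IMMEDIATE bit composes with the existing ones without collision, e.g. (BPF_ALU_SANITIZE_SRC | BPF_ALU_IMMEDIATE) == 0x11, and BPF_ALU_SANITIZE still tests both sanitize bits at once.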
98525 diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
98526 index 4f2f79de083e..bd5744360cfa 100644
98527 --- a/include/linux/cgroup.h
98528 +++ b/include/linux/cgroup.h
98529 @@ -432,6 +432,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
98530         css_put(&cgrp->self);
98533 +extern struct mutex cgroup_mutex;
98535 +static inline void cgroup_lock(void)
98537 +       mutex_lock(&cgroup_mutex);
98540 +static inline void cgroup_unlock(void)
98542 +       mutex_unlock(&cgroup_mutex);
98545  /**
98546   * task_css_set_check - obtain a task's css_set with extra access conditions
98547   * @task: the task to obtain css_set for
98548 @@ -446,7 +458,6 @@ static inline void cgroup_put(struct cgroup *cgrp)
98549   * as locks used during the cgroup_subsys::attach() methods.
98550   */
98551  #ifdef CONFIG_PROVE_RCU
98552 -extern struct mutex cgroup_mutex;
98553  extern spinlock_t css_set_lock;
98554  #define task_css_set_check(task, __c)                                  \
98555         rcu_dereference_check((task)->cgroups,                          \
98556 @@ -704,6 +715,8 @@ struct cgroup;
98557  static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
98558  static inline void css_get(struct cgroup_subsys_state *css) {}
98559  static inline void css_put(struct cgroup_subsys_state *css) {}
98560 +static inline void cgroup_lock(void) {}
98561 +static inline void cgroup_unlock(void) {}
98562  static inline int cgroup_attach_task_all(struct task_struct *from,
98563                                          struct task_struct *t) { return 0; }
98564  static inline int cgroupstats_build(struct cgroupstats *stats,
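The new wrappers let code outside kernel/cgroup/ serialize against hierarchy changes without touching cgroup_mutex directly, and the stubs in the !CONFIG_CGROUPS branch keep such callers compiling. A hypothetical caller (illustrative only; example_update_state() is a placeholder, not part of this patch):

static void example_update_state(void)
{
	cgroup_lock();
	/* the cgroup hierarchy cannot change while the mutex is held */
	/* ... read or update cgroup-wide state here ... */
	cgroup_unlock();
}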
98565 diff --git a/include/linux/compat.h b/include/linux/compat.h
98566 index 6e65be753603..d4c1b402b962 100644
98567 --- a/include/linux/compat.h
98568 +++ b/include/linux/compat.h
98569 @@ -365,6 +365,17 @@ struct compat_robust_list_head {
98570         compat_uptr_t                   list_op_pending;
98571  };
98573 +struct compat_futex_waitv {
98574 +       compat_uptr_t uaddr;
98575 +       compat_uint_t val;
98576 +       compat_uint_t flags;
98579 +struct compat_futex_requeue {
98580 +       compat_uptr_t uaddr;
98581 +       compat_uint_t flags;
98584  #ifdef CONFIG_COMPAT_OLD_SIGACTION
98585  struct compat_old_sigaction {
98586         compat_uptr_t                   sa_handler;
98587 @@ -654,6 +665,18 @@ asmlinkage long
98588  compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
98589                            compat_size_t __user *len_ptr);
98591 +/* kernel/futex2.c */
98592 +asmlinkage long compat_sys_futex_waitv(struct compat_futex_waitv *waiters,
98593 +                                      compat_uint_t nr_futexes, compat_uint_t flags,
98594 +                                      struct __kernel_timespec __user *timo);
98596 +asmlinkage long compat_sys_futex_requeue(struct compat_futex_requeue *uaddr1,
98597 +                                        struct compat_futex_requeue *uaddr2,
98598 +                                        compat_uint_t nr_wake,
98599 +                                        compat_uint_t nr_requeue,
98600 +                                        compat_uint_t cmpval,
98601 +                                        compat_uint_t flags);
98603  /* kernel/itimer.c */
98604  asmlinkage long compat_sys_getitimer(int which,
98605                                      struct old_itimerval32 __user *it);
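These compat declarations exist because a 32-bit task passes 32-bit pointers inside the structs, which the kernel must widen before use. A sketch of the usual conversion step (illustrative only; the output parameters and helper name are placeholders, not part of this patch):

static int copy_compat_waitv_sketch(void __user **uaddr, u32 *val, u32 *flags,
				    const struct compat_futex_waitv __user *src)
{
	struct compat_futex_waitv tmp;

	if (copy_from_user(&tmp, src, sizeof(tmp)))
		return -EFAULT;
	*uaddr = compat_ptr(tmp.uaddr);	/* widen the 32-bit user pointer */
	*val = tmp.val;
	*flags = tmp.flags;
	return 0;
}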
98606 diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
98607 index 153734816b49..d5b9c8d40c18 100644
98608 --- a/include/linux/console_struct.h
98609 +++ b/include/linux/console_struct.h
98610 @@ -101,6 +101,7 @@ struct vc_data {
98611         unsigned int    vc_rows;
98612         unsigned int    vc_size_row;            /* Bytes per row */
98613         unsigned int    vc_scan_lines;          /* # of scan lines */
98614 +       unsigned int    vc_cell_height;         /* CRTC character cell height */
98615         unsigned long   vc_origin;              /* [!] Start of real screen */
98616         unsigned long   vc_scr_end;             /* [!] End of real screen */
98617         unsigned long   vc_visible_origin;      /* [!] Top of visible window */
98618 diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
98619 index bceb06498521..4f4556232dcf 100644
98620 --- a/include/linux/context_tracking.h
98621 +++ b/include/linux/context_tracking.h
98622 @@ -131,16 +131,26 @@ static __always_inline void guest_enter_irqoff(void)
98623         }
98626 -static __always_inline void guest_exit_irqoff(void)
98627 +static __always_inline void context_tracking_guest_exit(void)
98629         if (context_tracking_enabled())
98630                 __context_tracking_exit(CONTEXT_GUEST);
98633 -       instrumentation_begin();
98634 +static __always_inline void vtime_account_guest_exit(void)
98636         if (vtime_accounting_enabled_this_cpu())
98637                 vtime_guest_exit(current);
98638         else
98639                 current->flags &= ~PF_VCPU;
98642 +static __always_inline void guest_exit_irqoff(void)
98644 +       context_tracking_guest_exit();
98646 +       instrumentation_begin();
98647 +       vtime_account_guest_exit();
98648         instrumentation_end();
98651 @@ -159,12 +169,19 @@ static __always_inline void guest_enter_irqoff(void)
98652         instrumentation_end();
98655 +static __always_inline void context_tracking_guest_exit(void) { }
98657 +static __always_inline void vtime_account_guest_exit(void)
98659 +       vtime_account_kernel(current);
98660 +       current->flags &= ~PF_VCPU;
98663  static __always_inline void guest_exit_irqoff(void)
98665         instrumentation_begin();
98666         /* Flush the guest cputime we spent on the guest */
98667 -       vtime_account_kernel(current);
98668 -       current->flags &= ~PF_VCPU;
98669 +       vtime_account_guest_exit();
98670         instrumentation_end();
98672  #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
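The point of the split above is that the context-tracking exit must remain safe to call from a noinstr region, while vtime accounting may be instrumented: a caller such as KVM can now invoke context_tracking_guest_exit() immediately on VM-exit and defer vtime_account_guest_exit() until instrumentation is legal, while guest_exit_irqoff() keeps the combined behavior for existing callers.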
98673 diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
98674 index f14adb882338..cc7c3fda2aa6 100644
98675 --- a/include/linux/cpuhotplug.h
98676 +++ b/include/linux/cpuhotplug.h
98677 @@ -135,6 +135,7 @@ enum cpuhp_state {
98678         CPUHP_AP_RISCV_TIMER_STARTING,
98679         CPUHP_AP_CLINT_TIMER_STARTING,
98680         CPUHP_AP_CSKY_TIMER_STARTING,
98681 +       CPUHP_AP_TI_GP_TIMER_STARTING,
98682         CPUHP_AP_HYPERV_TIMER_STARTING,
98683         CPUHP_AP_KVM_STARTING,
98684         CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
98685 diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
98686 index 706b68d1359b..13d1f4c14d7b 100644
98687 --- a/include/linux/dma-iommu.h
98688 +++ b/include/linux/dma-iommu.h
98689 @@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
98690  void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
98691                 struct iommu_domain *domain);
98693 +extern bool iommu_dma_forcedac;
98695  #else /* CONFIG_IOMMU_DMA */
98697  struct iommu_domain;
98698 diff --git a/include/linux/elevator.h b/include/linux/elevator.h
98699 index 1fe8e105b83b..dcb2f9022c1d 100644
98700 --- a/include/linux/elevator.h
98701 +++ b/include/linux/elevator.h
98702 @@ -34,7 +34,7 @@ struct elevator_mq_ops {
98703         void (*depth_updated)(struct blk_mq_hw_ctx *);
98705         bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
98706 -       bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
98707 +       bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
98708         int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
98709         void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
98710         void (*requests_merged)(struct request_queue *, struct request *, struct request *);
98711 diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
98712 index 71177b17eee5..66e2423d9feb 100644
98713 --- a/include/linux/firmware/xlnx-zynqmp.h
98714 +++ b/include/linux/firmware/xlnx-zynqmp.h
98715 @@ -354,11 +354,6 @@ int zynqmp_pm_read_pggs(u32 index, u32 *value);
98716  int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
98717  int zynqmp_pm_set_boot_health_status(u32 value);
98718  #else
98719 -static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
98721 -       return ERR_PTR(-ENODEV);
98724  static inline int zynqmp_pm_get_api_version(u32 *version)
98726         return -ENODEV;
98727 diff --git a/include/linux/freezer.h b/include/linux/freezer.h
98728 index 27828145ca09..504cc97bf475 100644
98729 --- a/include/linux/freezer.h
98730 +++ b/include/linux/freezer.h
98731 @@ -311,6 +311,7 @@ static inline void set_freezable(void) {}
98732  #define wait_event_freezekillable_unsafe(wq, condition)                        \
98733                 wait_event_killable(wq, condition)
98735 +#define pm_freezing (false)
98736  #endif /* !CONFIG_FREEZER */
98738  #endif /* FREEZER_H_INCLUDED */
98739 diff --git a/include/linux/fs.h b/include/linux/fs.h
98740 index ec8f3ddf4a6a..33683ff94cb3 100644
98741 --- a/include/linux/fs.h
98742 +++ b/include/linux/fs.h
98743 @@ -683,6 +683,7 @@ struct inode {
98744         };
98745         atomic64_t              i_version;
98746         atomic64_t              i_sequence; /* see futex */
98747 +       atomic64_t              i_sequence2; /* see futex2 */
98748         atomic_t                i_count;
98749         atomic_t                i_dio_count;
98750         atomic_t                i_writecount;
98751 diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
98752 index 286de0520574..ecf0032a0995 100644
98753 --- a/include/linux/gpio/driver.h
98754 +++ b/include/linux/gpio/driver.h
98755 @@ -624,8 +624,17 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
98756  bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
98757                                 unsigned int offset);
98759 +#ifdef CONFIG_GPIOLIB_IRQCHIP
98760  int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
98761                                 struct irq_domain *domain);
98762 +#else
98763 +static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
98764 +                                             struct irq_domain *domain)
98766 +       WARN_ON(1);
98767 +       return -EINVAL;
98769 +#endif
98771  int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
98772  void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
98773 diff --git a/include/linux/hid.h b/include/linux/hid.h
98774 index ef702b3f56e3..3e33eb14118c 100644
98775 --- a/include/linux/hid.h
98776 +++ b/include/linux/hid.h
98777 @@ -262,6 +262,8 @@ struct hid_item {
98778  #define HID_CP_SELECTION       0x000c0080
98779  #define HID_CP_MEDIASELECTION  0x000c0087
98780  #define HID_CP_SELECTDISC      0x000c00ba
98781 +#define HID_CP_VOLUMEUP                0x000c00e9
98782 +#define HID_CP_VOLUMEDOWN      0x000c00ea
98783  #define HID_CP_PLAYBACKSPEED   0x000c00f1
98784  #define HID_CP_PROXIMITY       0x000c0109
98785  #define HID_CP_SPEAKERSYSTEM   0x000c0160
98786 diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
98787 index ba973efcd369..0ba7b3f9029c 100644
98788 --- a/include/linux/huge_mm.h
98789 +++ b/include/linux/huge_mm.h
98790 @@ -443,6 +443,11 @@ static inline bool is_huge_zero_page(struct page *page)
98791         return false;
98794 +static inline bool is_huge_zero_pmd(pmd_t pmd)
98796 +       return false;
98799  static inline bool is_huge_zero_pud(pud_t pud)
98801         return false;
98802 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
98803 index 56622658b215..a670ae129f4b 100644
98804 --- a/include/linux/i2c.h
98805 +++ b/include/linux/i2c.h
98806 @@ -687,6 +687,8 @@ struct i2c_adapter_quirks {
98807  #define I2C_AQ_NO_ZERO_LEN_READ                BIT(5)
98808  #define I2C_AQ_NO_ZERO_LEN_WRITE       BIT(6)
98809  #define I2C_AQ_NO_ZERO_LEN             (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
98810 +/* adapter cannot do repeated START */
98811 +#define I2C_AQ_NO_REP_START            BIT(7)
98813  /*
98814   * i2c_adapter is the structure used to identify a physical i2c bus along
98815 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
98816 index 1bc46b88711a..d1f32b33415a 100644
98817 --- a/include/linux/intel-iommu.h
98818 +++ b/include/linux/intel-iommu.h
98819 @@ -372,6 +372,7 @@ enum {
98820  /* PASID cache invalidation granu */
98821  #define QI_PC_ALL_PASIDS       0
98822  #define QI_PC_PASID_SEL                1
98823 +#define QI_PC_GLOBAL           3
98825  #define QI_EIOTLB_ADDR(addr)   ((u64)(addr) & VTD_PAGE_MASK)
98826  #define QI_EIOTLB_IH(ih)       (((u64)ih) << 6)
98827 diff --git a/include/linux/iommu.h b/include/linux/iommu.h
98828 index 5e7fe519430a..9ca6e6b8084d 100644
98829 --- a/include/linux/iommu.h
98830 +++ b/include/linux/iommu.h
98831 @@ -547,7 +547,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
98832          * structure can be rewritten.
98833          */
98834         if (gather->pgsize != size ||
98835 -           end < gather->start || start > gather->end) {
98836 +           end + 1 < gather->start || start > gather->end + 1) {
98837                 if (gather->pgsize)
98838                         iommu_iotlb_sync(domain, gather);
98839                 gather->pgsize = size;
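Worked example for the adjacency fix, with inclusive ranges: if the gather covers [0x1000, 0x1fff] and the next page is [0x0000, 0x0fff], the old test end < gather->start (0x0fff < 0x1000) forced a flush even though the ranges touch; with end + 1 < gather->start (0x1000 < 0x1000 is false) they merge into [0x0000, 0x1fff].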
98840 diff --git a/include/linux/ioport.h b/include/linux/ioport.h
98841 index 55de385c839c..647744d8514e 100644
98842 --- a/include/linux/ioport.h
98843 +++ b/include/linux/ioport.h
98844 @@ -331,7 +331,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq)
98846         res->start = irq;
98847         res->end = irq;
98848 -       res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
98849 +       res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
98852  extern struct address_space *iomem_get_mapping(void);
98853 diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
98854 index 05e22770af51..3ccd19f13f5c 100644
98855 --- a/include/linux/ipc_namespace.h
98856 +++ b/include/linux/ipc_namespace.h
98857 @@ -120,6 +120,9 @@ extern int mq_init_ns(struct ipc_namespace *ns);
98858  static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
98859  #endif
98861 +extern struct ipc_namespace *get_ipc_ns_exported(struct ipc_namespace *ns);
98862 +extern struct ipc_namespace *show_init_ipc_ns(void);
98864  #if defined(CONFIG_IPC_NS)
98865  extern struct ipc_namespace *copy_ipcs(unsigned long flags,
98866         struct user_namespace *user_ns, struct ipc_namespace *ns);
98867 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
98868 index 1b65e7204344..99dccea4293c 100644
98869 --- a/include/linux/kvm_host.h
98870 +++ b/include/linux/kvm_host.h
98871 @@ -192,8 +192,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
98872                     int len, void *val);
98873  int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
98874                             int len, struct kvm_io_device *dev);
98875 -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
98876 -                              struct kvm_io_device *dev);
98877 +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
98878 +                             struct kvm_io_device *dev);
98879  struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
98880                                          gpa_t addr);
98882 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
98883 index 0c04d39a7967..cff95ed1ee2b 100644
98884 --- a/include/linux/memcontrol.h
98885 +++ b/include/linux/memcontrol.h
98886 @@ -212,6 +212,8 @@ struct obj_cgroup {
98887         };
98888  };
98890 +struct lru_gen_mm_list;
98892  /*
98893   * The memory controller data structure. The memory controller controls both
98894   * page cache and RSS per cgroup. We would eventually like to provide
98895 @@ -335,6 +337,10 @@ struct mem_cgroup {
98896         struct deferred_split deferred_split_queue;
98897  #endif
98899 +#ifdef CONFIG_LRU_GEN
98900 +       struct lru_gen_mm_list *mm_list;
98901 +#endif
98903         struct mem_cgroup_per_node *nodeinfo[0];
98904         /* WARNING: nodeinfo must be the last member here */
98905  };
98906 @@ -1077,7 +1083,6 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
98908  static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
98910 -       WARN_ON_ONCE(!rcu_read_lock_held());
98911         return NULL;
98914 diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
98915 index 1dbabf1b3cb8..6e0f66a2e727 100644
98916 --- a/include/linux/mfd/da9063/registers.h
98917 +++ b/include/linux/mfd/da9063/registers.h
98918 @@ -1037,6 +1037,9 @@
98919  #define                DA9063_NONKEY_PIN_AUTODOWN      0x02
98920  #define                DA9063_NONKEY_PIN_AUTOFLPRT     0x03
98922 +/* DA9063_REG_CONFIG_J (addr=0x10F) */
98923 +#define DA9063_TWOWIRE_TO                      0x40
98925  /* DA9063_REG_MON_REG_5 (addr=0x116) */
98926  #define DA9063_MON_A8_IDX_MASK                 0x07
98927  #define                DA9063_MON_A8_IDX_NONE          0x00
98928 diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
98929 index 74d4e193966a..9b54ca13eac3 100644
98930 --- a/include/linux/mfd/intel-m10-bmc.h
98931 +++ b/include/linux/mfd/intel-m10-bmc.h
98932 @@ -11,7 +11,7 @@
98934  #define M10BMC_LEGACY_SYS_BASE         0x300400
98935  #define M10BMC_SYS_BASE                        0x300800
98936 -#define M10BMC_MEM_END                 0x200000fc
98937 +#define M10BMC_MEM_END                 0x1fffffff
98939  /* Register offset of system registers */
98940  #define NIOS2_FW_VERSION               0x0
98941 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
98942 index 53b89631a1d9..ab07f09f2bad 100644
98943 --- a/include/linux/mlx5/driver.h
98944 +++ b/include/linux/mlx5/driver.h
98945 @@ -1226,7 +1226,7 @@ enum {
98946         MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
98947  };
98949 -static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
98950 +static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
98952         struct devlink *devlink = priv_to_devlink(dev);
98953         union devlink_param_value val;
98954 diff --git a/include/linux/mm.h b/include/linux/mm.h
98955 index 8ba434287387..c0ecb207198c 100644
98956 --- a/include/linux/mm.h
98957 +++ b/include/linux/mm.h
98958 @@ -203,6 +203,9 @@ static inline void __mm_zero_struct_page(struct page *page)
98960  extern int sysctl_max_map_count;
98962 +extern unsigned long sysctl_clean_low_kbytes;
98963 +extern unsigned long sysctl_clean_min_kbytes;
98965  extern unsigned long sysctl_user_reserve_kbytes;
98966  extern unsigned long sysctl_admin_reserve_kbytes;
98968 @@ -1070,6 +1073,8 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
98969  #define ZONES_PGOFF            (NODES_PGOFF - ZONES_WIDTH)
98970  #define LAST_CPUPID_PGOFF      (ZONES_PGOFF - LAST_CPUPID_WIDTH)
98971  #define KASAN_TAG_PGOFF                (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
98972 +#define LRU_GEN_PGOFF          (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
98973 +#define LRU_USAGE_PGOFF                (LRU_GEN_PGOFF - LRU_USAGE_WIDTH)
98975  /*
98976   * Define the bit shifts to access each section.  For non-existent
98977 @@ -3170,5 +3175,37 @@ extern int sysctl_nr_trim_pages;
98979  void mem_dump_obj(void *object);
98981 +/**
98982 + * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
98983 + * @seals: the seals to check
98984 + * @vma: the vma to operate on
98985 + *
98986 + * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
98987 + * the vma flags.  Return 0 if the check passes, or <0 on error.
98988 + */
98989 +static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
98991 +       if (seals & F_SEAL_FUTURE_WRITE) {
98992 +               /*
98993 +                * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
98994 +                * "future write" seal active.
98995 +                */
98996 +               if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
98997 +                       return -EPERM;
98999 +               /*
99000 +                * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
99001 +                * MAP_SHARED and read-only, take care to not allow mprotect to
99002 +                * revert protections on such mappings. Do this only for shared
99003 +                * mappings. For private mappings, we don't need to mask
99004 +                * VM_MAYWRITE as we still want them to be COW-writable.
99005 +                */
99006 +               if (vma->vm_flags & VM_SHARED)
99007 +                       vma->vm_flags &= ~(VM_MAYWRITE);
99008 +       }
99010 +       return 0;
99013  #endif /* __KERNEL__ */
99014  #endif /* _LINUX_MM_H */
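A sketch of how an mmap handler would consume the new helper (illustrative only; shmem/memfd is the real caller this patch targets, and example_get_seals() is a placeholder):

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret = seal_check_future_write(example_get_seals(file), vma);

	if (ret)
		return ret;
	/* ... regular mmap setup continues ... */
	return 0;
}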
99015 diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
99016 index 355ea1ee32bd..5eb4b12972ec 100644
99017 --- a/include/linux/mm_inline.h
99018 +++ b/include/linux/mm_inline.h
99019 @@ -79,11 +79,299 @@ static __always_inline enum lru_list page_lru(struct page *page)
99020         return lru;
99023 +#ifdef CONFIG_LRU_GEN
99025 +#ifdef CONFIG_LRU_GEN_ENABLED
99026 +DECLARE_STATIC_KEY_TRUE(lru_gen_static_key);
99027 +#define lru_gen_enabled() static_branch_likely(&lru_gen_static_key)
99028 +#else
99029 +DECLARE_STATIC_KEY_FALSE(lru_gen_static_key);
99030 +#define lru_gen_enabled() static_branch_unlikely(&lru_gen_static_key)
99031 +#endif
99033 +/* We track at most MAX_NR_GENS generations using the sliding window technique. */
99034 +static inline int lru_gen_from_seq(unsigned long seq)
99036 +       return seq % MAX_NR_GENS;
99039 +/* Convert the level of usage to a tier. See the comment on MAX_NR_TIERS. */
99040 +static inline int lru_tier_from_usage(int usage)
99042 +       return order_base_2(usage + 1);
99045 +/* Return a proper index regardless whether we keep a full history of stats. */
99046 +static inline int sid_from_seq_or_gen(int seq_or_gen)
99048 +       return seq_or_gen % NR_STAT_GENS;
99051 +/* The youngest and the second youngest generations are considered active. */
99052 +static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
99054 +       unsigned long max_seq = READ_ONCE(lruvec->evictable.max_seq);
99056 +       VM_BUG_ON(!max_seq);
99057 +       VM_BUG_ON(gen >= MAX_NR_GENS);
99059 +       return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
99062 +/* Update the sizes of the multigenerational lru. */
99063 +static inline void lru_gen_update_size(struct page *page, struct lruvec *lruvec,
99064 +                                      int old_gen, int new_gen)
99066 +       int file = page_is_file_lru(page);
99067 +       int zone = page_zonenum(page);
99068 +       int delta = thp_nr_pages(page);
99069 +       enum lru_list lru = LRU_FILE * file;
99070 +       struct lrugen *lrugen = &lruvec->evictable;
99072 +       lockdep_assert_held(&lruvec->lru_lock);
99073 +       VM_BUG_ON(old_gen != -1 && old_gen >= MAX_NR_GENS);
99074 +       VM_BUG_ON(new_gen != -1 && new_gen >= MAX_NR_GENS);
99075 +       VM_BUG_ON(old_gen == -1 && new_gen == -1);
99077 +       if (old_gen >= 0)
99078 +               WRITE_ONCE(lrugen->sizes[old_gen][file][zone],
99079 +                          lrugen->sizes[old_gen][file][zone] - delta);
99080 +       if (new_gen >= 0)
99081 +               WRITE_ONCE(lrugen->sizes[new_gen][file][zone],
99082 +                          lrugen->sizes[new_gen][file][zone] + delta);
99084 +       if (old_gen < 0) {
99085 +               if (lru_gen_is_active(lruvec, new_gen))
99086 +                       lru += LRU_ACTIVE;
99087 +               update_lru_size(lruvec, lru, zone, delta);
99088 +               return;
99089 +       }
99091 +       if (new_gen < 0) {
99092 +               if (lru_gen_is_active(lruvec, old_gen))
99093 +                       lru += LRU_ACTIVE;
99094 +               update_lru_size(lruvec, lru, zone, -delta);
99095 +               return;
99096 +       }
99098 +       if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
99099 +               update_lru_size(lruvec, lru, zone, -delta);
99100 +               update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
99101 +       }
99103 +       VM_BUG_ON(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
99106 +/* Add a page to a list of the multigenerational lru. Return true on success. */
99107 +static inline bool lru_gen_addition(struct page *page, struct lruvec *lruvec, bool front)
99109 +       int gen;
99110 +       unsigned long old_flags, new_flags;
99111 +       int file = page_is_file_lru(page);
99112 +       int zone = page_zonenum(page);
99113 +       struct lrugen *lrugen = &lruvec->evictable;
99115 +       if (PageUnevictable(page) || !lrugen->enabled[file])
99116 +               return false;
99117 +       /*
99118 +        * If a page is being faulted in, add it to the youngest generation.
99119 +        * try_walk_mm_list() may look at the size of the youngest generation to
99120 +        * determine if the aging is due.
99121 +        *
99122 +        * If a page can't be evicted immediately, i.e., a shmem page not in
99123 +        * swap cache, a dirty page waiting on writeback, or a page rejected by
99124 +        * evict_lru_gen_pages() due to races, dirty buffer heads, etc., add it
99125 +        * to the second oldest generation.
99126 +        *
99127 +        * If a page could be evicted immediately, i.e., deactivated, rotated by
99128 +        * writeback, or allocated for buffered io, add it to the oldest
99129 +        * generation.
99130 +        */
99131 +       if (PageActive(page))
99132 +               gen = lru_gen_from_seq(lrugen->max_seq);
99133 +       else if ((!file && !PageSwapCache(page)) ||
99134 +                (PageReclaim(page) && (PageDirty(page) || PageWriteback(page))) ||
99135 +                (!PageReferenced(page) && PageWorkingset(page)))
99136 +               gen = lru_gen_from_seq(lrugen->min_seq[file] + 1);
99137 +       else
99138 +               gen = lru_gen_from_seq(lrugen->min_seq[file]);
99140 +       do {
99141 +               old_flags = READ_ONCE(page->flags);
99142 +               VM_BUG_ON_PAGE(old_flags & LRU_GEN_MASK, page);
99144 +               new_flags = (old_flags & ~(LRU_GEN_MASK | BIT(PG_active))) |
99145 +                           ((gen + 1UL) << LRU_GEN_PGOFF);
99146 +               /* see the comment in evict_lru_gen_pages() */
99147 +               if (!(old_flags & BIT(PG_referenced)))
99148 +                       new_flags &= ~(LRU_USAGE_MASK | LRU_TIER_FLAGS);
99149 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
99151 +       lru_gen_update_size(page, lruvec, -1, gen);
99152 +       if (front)
99153 +               list_add(&page->lru, &lrugen->lists[gen][file][zone]);
99154 +       else
99155 +               list_add_tail(&page->lru, &lrugen->lists[gen][file][zone]);
99157 +       return true;
99160 +/* Delete a page from a list of the multigenerational lru. Return true on success. */
99161 +static inline bool lru_gen_deletion(struct page *page, struct lruvec *lruvec)
99163 +       int gen;
99164 +       unsigned long old_flags, new_flags;
99166 +       do {
99167 +               old_flags = READ_ONCE(page->flags);
99168 +               if (!(old_flags & LRU_GEN_MASK))
99169 +                       return false;
99171 +               VM_BUG_ON_PAGE(PageActive(page), page);
99172 +               VM_BUG_ON_PAGE(PageUnevictable(page), page);
99174 +               gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
99176 +               new_flags = old_flags & ~LRU_GEN_MASK;
99177 +               /* mark page active accordingly */
99178 +               if (lru_gen_is_active(lruvec, gen))
99179 +                       new_flags |= BIT(PG_active);
99180 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
99182 +       lru_gen_update_size(page, lruvec, gen, -1);
99183 +       list_del(&page->lru);
99185 +       return true;
99188 +/* Activate a page from page cache or swap cache after it's mapped. */
99189 +static inline void lru_gen_activation(struct page *page, struct vm_area_struct *vma)
99191 +       if (!lru_gen_enabled())
99192 +               return;
99194 +       if (PageActive(page) || PageUnevictable(page) || vma_is_dax(vma) ||
99195 +           (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)))
99196 +               return;
99197 +       /*
99198 +        * TODO: pass vm_fault to add_to_page_cache_lru() and
99199 +        * __read_swap_cache_async() so they can activate pages directly when in
99200 +        * the page fault path.
99201 +        */
99202 +       activate_page(page);
99205 +/* Return -1 when a page is not on a list of the multigenerational lru. */
99206 +static inline int page_lru_gen(struct page *page)
99208 +       return ((READ_ONCE(page->flags) & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
99211 +/* This function works regardless of whether the multigenerational lru is enabled. */
99212 +static inline bool page_is_active(struct page *page, struct lruvec *lruvec)
99214 +       struct mem_cgroup *memcg;
99215 +       int gen = page_lru_gen(page);
99216 +       bool active = false;
99218 +       VM_BUG_ON_PAGE(PageTail(page), page);
99220 +       if (gen < 0)
99221 +               return PageActive(page);
99223 +       if (lruvec) {
99224 +               VM_BUG_ON_PAGE(PageUnevictable(page), page);
99225 +               VM_BUG_ON_PAGE(PageActive(page), page);
99226 +               lockdep_assert_held(&lruvec->lru_lock);
99228 +               return lru_gen_is_active(lruvec, gen);
99229 +       }
99231 +       rcu_read_lock();
99233 +       memcg = page_memcg_rcu(page);
99234 +       lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
99235 +       active = lru_gen_is_active(lruvec, gen);
99237 +       rcu_read_unlock();
99239 +       return active;
99242 +/* Return the level of usage of a page. See the comment on MAX_NR_TIERS. */
99243 +static inline int page_tier_usage(struct page *page)
99245 +       unsigned long flags = READ_ONCE(page->flags);
99247 +       return flags & BIT(PG_workingset) ?
99248 +              ((flags & LRU_USAGE_MASK) >> LRU_USAGE_PGOFF) + 1 : 0;
99251 +/* Increment the usage counter after a page is accessed via file descriptors. */
99252 +static inline bool page_inc_usage(struct page *page)
99254 +       unsigned long old_flags, new_flags;
99256 +       if (!lru_gen_enabled())
99257 +               return PageActive(page);
99259 +       do {
99260 +               old_flags = READ_ONCE(page->flags);
99262 +               if (!(old_flags & BIT(PG_workingset)))
99263 +                       new_flags = old_flags | BIT(PG_workingset);
99264 +               else
99265 +                       new_flags = (old_flags & ~LRU_USAGE_MASK) | min(LRU_USAGE_MASK,
99266 +                                   (old_flags & LRU_USAGE_MASK) + BIT(LRU_USAGE_PGOFF));
99268 +               if (old_flags == new_flags)
99269 +                       break;
99270 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
99272 +       return true;
99275 +#else /* CONFIG_LRU_GEN */
99277 +static inline bool lru_gen_enabled(void)
99279 +       return false;
99282 +static inline bool lru_gen_addition(struct page *page, struct lruvec *lruvec, bool front)
99284 +       return false;
99287 +static inline bool lru_gen_deletion(struct page *page, struct lruvec *lruvec)
99289 +       return false;
99292 +static inline void lru_gen_activation(struct page *page, struct vm_area_struct *vma)
99296 +static inline bool page_is_active(struct page *page, struct lruvec *lruvec)
99298 +       return PageActive(page);
99301 +static inline bool page_inc_usage(struct page *page)
99303 +       return PageActive(page);
99306 +#endif /* CONFIG_LRU_GEN */
99308  static __always_inline void add_page_to_lru_list(struct page *page,
99309                                 struct lruvec *lruvec)
99311         enum lru_list lru = page_lru(page);
99313 +       if (lru_gen_addition(page, lruvec, true))
99314 +               return;
99316         update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
99317         list_add(&page->lru, &lruvec->lists[lru]);
99319 @@ -93,6 +381,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
99321         enum lru_list lru = page_lru(page);
99323 +       if (lru_gen_addition(page, lruvec, false))
99324 +               return;
99326         update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
99327         list_add_tail(&page->lru, &lruvec->lists[lru]);
99329 @@ -100,6 +391,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
99330  static __always_inline void del_page_from_lru_list(struct page *page,
99331                                 struct lruvec *lruvec)
99333 +       if (lru_gen_deletion(page, lruvec))
99334 +               return;
99336         list_del(&page->lru);
99337         update_lru_size(lruvec, page_lru(page), page_zonenum(page),
99338                         -thp_nr_pages(page));
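Worked example of the indexing helpers above, assuming CONFIG_NR_LRU_GENS = 4: lru_gen_from_seq(7) = 7 % 4 = 3, and the previous generation (seq 6) maps to slot 2; per lru_gen_is_active(), both count as active. For tiers, a page whose usage counter reads 3 lands in lru_tier_from_usage(3) = order_base_2(4) = 2.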
99339 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
99340 index 6613b26a8894..b936703a39a2 100644
99341 --- a/include/linux/mm_types.h
99342 +++ b/include/linux/mm_types.h
99343 @@ -15,6 +15,8 @@
99344  #include <linux/page-flags-layout.h>
99345  #include <linux/workqueue.h>
99346  #include <linux/seqlock.h>
99347 +#include <linux/nodemask.h>
99348 +#include <linux/mmdebug.h>
99350  #include <asm/mmu.h>
99352 @@ -97,10 +99,10 @@ struct page {
99353                 };
99354                 struct {        /* page_pool used by netstack */
99355                         /**
99356 -                        * @dma_addr: might require a 64-bit value even on
99357 +                        * @dma_addr: might require a 64-bit value on
99358                          * 32-bit architectures.
99359                          */
99360 -                       dma_addr_t dma_addr;
99361 +                       unsigned long dma_addr[2];
99362                 };
99363                 struct {        /* slab, slob and slub */
99364                         union {
99365 @@ -383,6 +385,8 @@ struct core_state {
99366         struct completion startup;
99367  };
99369 +#define ANON_AND_FILE 2
99371  struct kioctx_table;
99372  struct mm_struct {
99373         struct {
99374 @@ -561,6 +565,22 @@ struct mm_struct {
99376  #ifdef CONFIG_IOMMU_SUPPORT
99377                 u32 pasid;
99378 +#endif
99379 +#ifdef CONFIG_LRU_GEN
99380 +               struct {
99381 +                       /* the node of a global or per-memcg mm_struct list */
99382 +                       struct list_head list;
99383 +#ifdef CONFIG_MEMCG
99384 +                       /* points to memcg of the owner task above */
99385 +                       struct mem_cgroup *memcg;
99386 +#endif
99387 +                       /* whether this mm_struct has been used since the last walk */
99388 +                       nodemask_t nodes[ANON_AND_FILE];
99389 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
99390 +                       /* the number of CPUs using this mm_struct */
99391 +                       atomic_t nr_cpus;
99392 +#endif
99393 +               } lrugen;
99394  #endif
99395         } __randomize_layout;
99397 @@ -588,6 +608,103 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
99398         return (struct cpumask *)&mm->cpu_bitmap;
99401 +#ifdef CONFIG_LRU_GEN
99403 +void lru_gen_init_mm(struct mm_struct *mm);
99404 +void lru_gen_add_mm(struct mm_struct *mm);
99405 +void lru_gen_del_mm(struct mm_struct *mm);
99406 +#ifdef CONFIG_MEMCG
99407 +int lru_gen_alloc_mm_list(struct mem_cgroup *memcg);
99408 +void lru_gen_free_mm_list(struct mem_cgroup *memcg);
99409 +void lru_gen_migrate_mm(struct mm_struct *mm);
99410 +#endif
99413 + * Track usage so that mm_structs that haven't been used since the last walk
99414 + * can be skipped. This function adds a theoretical overhead to each context
99415 + * switch; in practice, the overhead has not been measurable.
99416 + */
99417 +static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
99419 +       int file;
99421 +       /* exclude init_mm, efi_mm, etc. */
99422 +       if (!core_kernel_data((unsigned long)old)) {
99423 +               VM_BUG_ON(old == &init_mm);
99425 +               for (file = 0; file < ANON_AND_FILE; file++)
99426 +                       nodes_setall(old->lrugen.nodes[file]);
99428 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
99429 +               atomic_dec(&old->lrugen.nr_cpus);
99430 +               VM_BUG_ON_MM(atomic_read(&old->lrugen.nr_cpus) < 0, old);
99431 +#endif
99432 +       } else
99433 +               VM_BUG_ON_MM(READ_ONCE(old->lrugen.list.prev) ||
99434 +                            READ_ONCE(old->lrugen.list.next), old);
99436 +       if (!core_kernel_data((unsigned long)new)) {
99437 +               VM_BUG_ON(new == &init_mm);
99439 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
99440 +               atomic_inc(&new->lrugen.nr_cpus);
99441 +               VM_BUG_ON_MM(atomic_read(&new->lrugen.nr_cpus) < 0, new);
99442 +#endif
99443 +       } else
99444 +               VM_BUG_ON_MM(READ_ONCE(new->lrugen.list.prev) ||
99445 +                            READ_ONCE(new->lrugen.list.next), new);
99448 +/* Return whether this mm_struct is being used on any CPUs. */
99449 +static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
99451 +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
99452 +       return !cpumask_empty(mm_cpumask(mm));
99453 +#else
99454 +       return atomic_read(&mm->lrugen.nr_cpus);
99455 +#endif
99458 +#else /* CONFIG_LRU_GEN */
99460 +static inline void lru_gen_init_mm(struct mm_struct *mm)
99464 +static inline void lru_gen_add_mm(struct mm_struct *mm)
99468 +static inline void lru_gen_del_mm(struct mm_struct *mm)
99472 +#ifdef CONFIG_MEMCG
99473 +static inline int lru_gen_alloc_mm_list(struct mem_cgroup *memcg)
99475 +       return 0;
99478 +static inline void lru_gen_free_mm_list(struct mem_cgroup *memcg)
99482 +static inline void lru_gen_migrate_mm(struct mm_struct *mm)
99485 +#endif
99487 +static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
99491 +static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
99493 +       return false;
99496 +#endif /* CONFIG_LRU_GEN */
99498  struct mmu_gather;
99499  extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
99500  extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
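A sketch of where the context-switch hook above would sit (illustrative only; the actual scheduler wiring lives outside this header):

	/* inside the scheduler's context_switch(), after the mm switch: */
	lru_gen_switch_mm(prev->active_mm, next->mm);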
99501 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
99502 index 26a3c7bc29ae..a3a4e374f802 100644
99503 --- a/include/linux/mmc/host.h
99504 +++ b/include/linux/mmc/host.h
99505 @@ -302,9 +302,6 @@ struct mmc_host {
99506         u32                     ocr_avail_sdio; /* SDIO-specific OCR */
99507         u32                     ocr_avail_sd;   /* SD-specific OCR */
99508         u32                     ocr_avail_mmc;  /* MMC-specific OCR */
99509 -#ifdef CONFIG_PM_SLEEP
99510 -       struct notifier_block   pm_notify;
99511 -#endif
99512         struct wakeup_source    *ws;            /* Enable consume of uevents */
99513         u32                     max_current_330;
99514         u32                     max_current_300;
99515 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
99516 index 47946cec7584..a22e9e40083f 100644
99517 --- a/include/linux/mmzone.h
99518 +++ b/include/linux/mmzone.h
99519 @@ -285,14 +285,124 @@ static inline bool is_active_lru(enum lru_list lru)
99520         return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
99523 -#define ANON_AND_FILE 2
99525  enum lruvec_flags {
99526         LRUVEC_CONGESTED,               /* lruvec has many dirty pages
99527                                          * backed by a congested BDI
99528                                          */
99529  };
99531 +struct lruvec;
99532 +struct page_vma_mapped_walk;
99534 +#define LRU_GEN_MASK           ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
99535 +#define LRU_USAGE_MASK         ((BIT(LRU_USAGE_WIDTH) - 1) << LRU_USAGE_PGOFF)
99537 +#ifdef CONFIG_LRU_GEN
99540 + * For each lruvec, evictable pages are divided into multiple generations. The
99541 + * youngest and the oldest generation numbers, AKA max_seq and min_seq, are
99542 + * monotonically increasing. The sliding window technique is used to track at
99543 + * most MAX_NR_GENS and at least MIN_NR_GENS generations. An offset within the
99544 + * window, AKA gen, indexes an array of per-type and per-zone lists for the
99545 + * corresponding generation. All pages from this array of lists have gen+1
99546 + * stored in page->flags. 0 is reserved to indicate that pages are not on the
99547 + * lists.
99548 + */
99549 +#define MAX_NR_GENS            ((unsigned int)CONFIG_NR_LRU_GENS)
99552 + * Each generation is then divided into multiple tiers. Tiers represent levels
99553 + * of usage from file descriptors, i.e., mark_page_accessed(). In contrast to
99554 + * moving across generations which requires the lru lock, moving across tiers
99555 + * only involves an atomic operation on page->flags and therefore has a
99556 + * negligible cost.
99557 + *
99558 + * The purposes of tiers are to:
99559 + *   1) estimate whether pages accessed multiple times via file descriptors are
99560 + *   more active than pages accessed only via page tables by separating the two
99561 + *   access types into upper tiers and the base tier and comparing refault rates
99562 + *   across tiers.
99563 + *   2) improve buffered io performance by deferring activations of pages
99564 + *   accessed multiple times until the eviction. That is activations happen in
99565 + *   accessed multiple times until the eviction. That is, activations happen in
99566 + *
99567 + * Pages accessed N times via file descriptors belong to tier order_base_2(N).
99568 + * The base tier uses the following page flag:
99569 + *   !PageReferenced() -- readahead pages
99570 + *   PageReferenced() -- single-access pages
99571 + * All upper tiers use the following page flags:
99572 + *   PageReferenced() && PageWorkingset() -- multi-access pages
99573 + * in addition to the bits storing N-2 accesses. Therefore, we can support one
99574 + * upper tier without using additional bits in page->flags.
99575 + *
99576 + * Note that
99577 + *   1) PageWorkingset() is always set for upper tiers because we want to
99578 + *    maintain the existing psi behavior.
99579 + *   2) !PageReferenced() && PageWorkingset() is not a valid tier. See the
99580 + *   comment in evict_lru_gen_pages().
99581 + *   3) pages accessed only via page tables belong to the base tier.
99582 + *
99583 + * Pages from the base tier are evicted regardless of the refault rate. Pages
99584 + * from upper tiers will be moved to the next generation, if their refault rates
99585 + * are higher than that of the base tier.
99586 + */
99587 +#define MAX_NR_TIERS           ((unsigned int)CONFIG_TIERS_PER_GEN)
99588 +#define LRU_TIER_FLAGS         (BIT(PG_referenced) | BIT(PG_workingset))
99589 +#define LRU_USAGE_SHIFT                (CONFIG_TIERS_PER_GEN - 1)
99591 +/* Whether to keep historical stats for each generation. */
99592 +#ifdef CONFIG_LRU_GEN_STATS
99593 +#define NR_STAT_GENS           ((unsigned int)CONFIG_NR_LRU_GENS)
99594 +#else
99595 +#define NR_STAT_GENS           1U
99596 +#endif
99598 +struct lrugen {
99599 +       /* the aging increments the max generation number */
99600 +       unsigned long max_seq;
99601 +       /* the eviction increments the min generation numbers */
99602 +       unsigned long min_seq[ANON_AND_FILE];
99603 +       /* the birth time of each generation in jiffies */
99604 +       unsigned long timestamps[MAX_NR_GENS];
99605 +       /* the lists of the multigenerational lru */
99606 +       struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
99607 +       /* the sizes of the multigenerational lru in pages */
99608 +       unsigned long sizes[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
99609 +       /* to determine which type and its tiers to evict */
99610 +       atomic_long_t evicted[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS];
99611 +       atomic_long_t refaulted[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS];
99612 +       /* the base tier is inactive and won't be activated */
99613 +       unsigned long activated[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
99614 +       /* arithmetic mean weighted by geometric series 1/2, 1/4, ... */
99615 +       unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
99616 +       unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
99617 +       /* reclaim priority to compare across memcgs */
99618 +       atomic_t priority;
99619 +       /* whether the multigenerational lru is enabled */
99620 +       bool enabled[ANON_AND_FILE];
99623 +void lru_gen_init_lruvec(struct lruvec *lruvec);
99624 +void lru_gen_set_state(bool enable, bool main, bool swap);
99625 +void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw);
99627 +#else /* CONFIG_LRU_GEN */
99629 +static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
99633 +static inline void lru_gen_set_state(bool enable, bool main, bool swap)
99637 +static inline void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw)
99641 +#endif /* CONFIG_LRU_GEN */
99643  struct lruvec {
99644         struct list_head                lists[NR_LRU_LISTS];
99645         /* per lruvec lru_lock for memcg */
99646 @@ -310,6 +420,10 @@ struct lruvec {
99647         unsigned long                   refaults[ANON_AND_FILE];
99648         /* Various lruvec state flags (enum lruvec_flags) */
99649         unsigned long                   flags;
99650 +#ifdef CONFIG_LRU_GEN
99651 +       /* unevictable pages are on LRU_UNEVICTABLE */
99652 +       struct lrugen                   evictable;
99653 +#endif
99654  #ifdef CONFIG_MEMCG
99655         struct pglist_data *pgdat;
99656  #endif
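A sketch of initializing the per-generation lists declared above (illustrative only; the real lru_gen_init_lruvec() is defined elsewhere in this patch):

static void lrugen_init_sketch(struct lrugen *lrugen)
{
	int gen, type, zone;

	for (gen = 0; gen < MAX_NR_GENS; gen++)
		for (type = 0; type < ANON_AND_FILE; type++)
			for (zone = 0; zone < MAX_NR_ZONES; zone++)
				INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
}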
99657 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
99658 index 3327239fa2f9..cc29dee508f7 100644
99659 --- a/include/linux/nfs_xdr.h
99660 +++ b/include/linux/nfs_xdr.h
99661 @@ -15,6 +15,8 @@
99662  #define NFS_DEF_FILE_IO_SIZE   (4096U)
99663  #define NFS_MIN_FILE_IO_SIZE   (1024U)
99665 +#define NFS_BITMASK_SZ         3
99667  struct nfs4_string {
99668         unsigned int len;
99669         char *data;
99670 @@ -525,7 +527,8 @@ struct nfs_closeargs {
99671         struct nfs_seqid *      seqid;
99672         fmode_t                 fmode;
99673         u32                     share_access;
99674 -       u32 *                   bitmask;
99675 +       const u32 *             bitmask;
99676 +       u32                     bitmask_store[NFS_BITMASK_SZ];
99677         struct nfs4_layoutreturn_args *lr_args;
99678  };
99680 @@ -608,7 +611,8 @@ struct nfs4_delegreturnargs {
99681         struct nfs4_sequence_args       seq_args;
99682         const struct nfs_fh *fhandle;
99683         const nfs4_stateid *stateid;
99684 -       u32 * bitmask;
99685 +       const u32 *bitmask;
99686 +       u32 bitmask_store[NFS_BITMASK_SZ];
99687         struct nfs4_layoutreturn_args *lr_args;
99688  };
99690 @@ -648,7 +652,8 @@ struct nfs_pgio_args {
99691         union {
99692                 unsigned int            replen;                 /* used by read */
99693                 struct {
99694 -                       u32 *                   bitmask;        /* used by write */
99695 +                       const u32 *             bitmask;        /* used by write */
99696 +                       u32 bitmask_store[NFS_BITMASK_SZ];      /* used by write */
99697                         enum nfs3_stable_how    stable;         /* used by write */
99698                 };
99699         };
99700 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
99701 index ac398e143c9a..89fe4e3592f9 100644
99702 --- a/include/linux/nodemask.h
99703 +++ b/include/linux/nodemask.h
99704 @@ -486,6 +486,7 @@ static inline int num_node_state(enum node_states state)
99705  #define first_online_node      0
99706  #define first_memory_node      0
99707  #define next_online_node(nid)  (MAX_NUMNODES)
99708 +#define next_memory_node(nid)  (MAX_NUMNODES)
99709  #define nr_node_ids            1U
99710  #define nr_online_nodes                1U
99712 diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
99713 index 7d4ec26d8a3e..df83aaec8498 100644
99714 --- a/include/linux/page-flags-layout.h
99715 +++ b/include/linux/page-flags-layout.h
99716 @@ -24,6 +24,17 @@
99717  #error ZONES_SHIFT -- too many zones configured adjust calculation
99718  #endif
99720 +#ifdef CONFIG_LRU_GEN
99722 + * LRU_GEN_WIDTH is generated from order_base_2(CONFIG_NR_LRU_GENS + 1); the
99723 + * comment on MAX_NR_TIERS explains why we offset by 2 here.
99724 + */
99725 +#define LRU_USAGE_WIDTH                (CONFIG_TIERS_PER_GEN - 2)
99726 +#else
99727 +#define LRU_GEN_WIDTH          0
99728 +#define LRU_USAGE_WIDTH                0
99729 +#endif
99731  #ifdef CONFIG_SPARSEMEM
99732  #include <asm/sparsemem.h>
99734 @@ -56,7 +67,8 @@
99736  #define ZONES_WIDTH            ZONES_SHIFT
99738 -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
99739 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+NODES_SHIFT \
99740 +       <= BITS_PER_LONG - NR_PAGEFLAGS
99741  #define NODES_WIDTH            NODES_SHIFT
99742  #else
99743  #ifdef CONFIG_SPARSEMEM_VMEMMAP
99744 @@ -83,14 +95,16 @@
99745  #define KASAN_TAG_WIDTH 0
99746  #endif
99748 -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
99749 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+ \
99750 +       NODES_WIDTH+KASAN_TAG_WIDTH+LAST_CPUPID_SHIFT \
99751         <= BITS_PER_LONG - NR_PAGEFLAGS
99752  #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
99753  #else
99754  #define LAST_CPUPID_WIDTH 0
99755  #endif
99757 -#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
99758 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+ \
99759 +       NODES_WIDTH+KASAN_TAG_WIDTH+LAST_CPUPID_WIDTH \
99760         > BITS_PER_LONG - NR_PAGEFLAGS
99761  #error "Not enough bits in page flags"
99762  #endif
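Worked example of the page->flags budget above, on 64-bit with SECTIONS_WIDTH = 0, ZONES_WIDTH = 2, NODES_SHIFT = 10, CONFIG_NR_LRU_GENS = 4 and CONFIG_TIERS_PER_GEN = 4: LRU_GEN_WIDTH = order_base_2(4 + 1) = 3 and LRU_USAGE_WIDTH = 4 - 2 = 2, so the first check needs 0 + 2 + 3 + 2 + 10 = 17 bits, comfortably within BITS_PER_LONG - NR_PAGEFLAGS.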
99763 diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
99764 index 04a34c08e0a6..e58984fca32a 100644
99765 --- a/include/linux/page-flags.h
99766 +++ b/include/linux/page-flags.h
99767 @@ -817,7 +817,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
99768          1UL << PG_private      | 1UL << PG_private_2   |       \
99769          1UL << PG_writeback    | 1UL << PG_reserved    |       \
99770          1UL << PG_slab         | 1UL << PG_active      |       \
99771 -        1UL << PG_unevictable  | __PG_MLOCKED)
99772 +        1UL << PG_unevictable  | __PG_MLOCKED | LRU_GEN_MASK)
99774  /*
99775   * Flags checked when a page is prepped for return by the page allocator.
99776 @@ -828,7 +828,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
99777   * alloc-free cycle to prevent from reusing the page.
99778   */
99779  #define PAGE_FLAGS_CHECK_AT_PREP       \
99780 -       (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
99781 +       ((((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_USAGE_MASK)
99783  #define PAGE_FLAGS_PRIVATE                             \
99784         (1UL << PG_private | 1UL << PG_private_2)
99785 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
99786 index 3f7f89ea5e51..3d478abf411c 100644
99787 --- a/include/linux/perf_event.h
99788 +++ b/include/linux/perf_event.h
99789 @@ -607,6 +607,7 @@ struct swevent_hlist {
99790  #define PERF_ATTACH_TASK_DATA  0x08
99791  #define PERF_ATTACH_ITRACE     0x10
99792  #define PERF_ATTACH_SCHED_CB   0x20
99793 +#define PERF_ATTACH_CHILD      0x40
99795  struct perf_cgroup;
99796  struct perf_buffer;
99797 diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
99798 index 5e772392a379..08dd9b8c055a 100644
99799 --- a/include/linux/pgtable.h
99800 +++ b/include/linux/pgtable.h
99801 @@ -193,7 +193,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
99802  #endif
99804  #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
99805 -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
99806 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
99807  static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
99808                                             unsigned long address,
99809                                             pmd_t *pmdp)
99810 @@ -214,7 +214,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
99811         BUILD_BUG();
99812         return 0;
99814 -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
99815 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG */
99816  #endif
99818  #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
99819 diff --git a/include/linux/phy.h b/include/linux/phy.h
99820 index 1a12e4436b5b..8644b097dea3 100644
99821 --- a/include/linux/phy.h
99822 +++ b/include/linux/phy.h
99823 @@ -493,6 +493,7 @@ struct macsec_ops;
99824   * @loopback_enabled: Set true if this PHY has been loopbacked successfully.
99825   * @downshifted_rate: Set true if link speed has been downshifted.
99826   * @is_on_sfp_module: Set true if PHY is located on an SFP module.
99826 + * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming the PHY
99828   * @state: State of the PHY for management purposes
99829   * @dev_flags: Device-specific flags used by the PHY driver.
99830   * @irq: IRQ number of the PHY's interrupt (-1 if none)
99831 @@ -567,6 +568,7 @@ struct phy_device {
99832         unsigned loopback_enabled:1;
99833         unsigned downshifted_rate:1;
99834         unsigned is_on_sfp_module:1;
99835 +       unsigned mac_managed_pm:1;
99837         unsigned autoneg:1;
99838         /* The most recently read link state */
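A minimal sketch of how a MAC driver might opt in to the new bit (the helper name is hypothetical; only the mac_managed_pm field comes from this hunk):

#include <linux/phy.h>

/* Hypothetical helper: tell phylib that the MAC driver owns PHY
 * suspend/resume, so the MDIO bus PM callbacks should skip this PHY.
 */
static inline void foo_phy_use_mac_pm(struct phy_device *phydev)
{
	phydev->mac_managed_pm = true;	/* field added above */
}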
99839 diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
99840 index 3f23f6e430bf..cd81e060863c 100644
99841 --- a/include/linux/platform_device.h
99842 +++ b/include/linux/platform_device.h
99843 @@ -359,4 +359,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev)
99845  #endif /* CONFIG_SUPERH */
99847 +/* For now only SuperH uses it */
99848 +void early_platform_cleanup(void);
99850  #endif /* _PLATFORM_DEVICE_H_ */
99851 diff --git a/include/linux/pm.h b/include/linux/pm.h
99852 index 482313a8ccfc..628718697679 100644
99853 --- a/include/linux/pm.h
99854 +++ b/include/linux/pm.h
99855 @@ -602,6 +602,7 @@ struct dev_pm_info {
99856         unsigned int            idle_notification:1;
99857         unsigned int            request_pending:1;
99858         unsigned int            deferred_resume:1;
99859 +       unsigned int            needs_force_resume:1;
99860         unsigned int            runtime_auto:1;
99861         bool                    ignore_children:1;
99862         unsigned int            no_callbacks:1;
99863 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
99864 index b492ae00cc90..6c08a085367b 100644
99865 --- a/include/linux/pm_runtime.h
99866 +++ b/include/linux/pm_runtime.h
99867 @@ -265,7 +265,7 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {}
99868  static inline void pm_runtime_irq_safe(struct device *dev) {}
99869  static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
99871 -static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
99872 +static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
99873  static inline void pm_runtime_mark_last_busy(struct device *dev) {}
99874  static inline void __pm_runtime_use_autosuspend(struct device *dev,
99875                                                 bool use) {}
99876 diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
99877 index 111a40d0d3d5..8d5f4f40fb41 100644
99878 --- a/include/linux/power/bq27xxx_battery.h
99879 +++ b/include/linux/power/bq27xxx_battery.h
99880 @@ -53,7 +53,6 @@ struct bq27xxx_reg_cache {
99881         int capacity;
99882         int energy;
99883         int flags;
99884 -       int power_avg;
99885         int health;
99886  };
99888 diff --git a/include/linux/reset.h b/include/linux/reset.h
99889 index b9109efa2a5c..9700124affa3 100644
99890 --- a/include/linux/reset.h
99891 +++ b/include/linux/reset.h
99892 @@ -47,6 +47,11 @@ static inline int reset_control_reset(struct reset_control *rstc)
99893         return 0;
99894  }
99896 +static inline int reset_control_rearm(struct reset_control *rstc)
99897 +{
99898 +       return 0;
99899 +}
99901  static inline int reset_control_assert(struct reset_control *rstc)
99902  {
99903         return 0;
99904 diff --git a/include/linux/sched.h b/include/linux/sched.h
99905 index ef00bb22164c..b4b0b69d76f1 100644
99906 --- a/include/linux/sched.h
99907 +++ b/include/linux/sched.h
99908 @@ -216,13 +216,40 @@ struct task_group;
99910  extern void scheduler_tick(void);
99912 -#define        MAX_SCHEDULE_TIMEOUT            LONG_MAX
99914 +#define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
99915  extern long schedule_timeout(long timeout);
99916  extern long schedule_timeout_interruptible(long timeout);
99917  extern long schedule_timeout_killable(long timeout);
99918  extern long schedule_timeout_uninterruptible(long timeout);
99919  extern long schedule_timeout_idle(long timeout);
99921 +#ifdef CONFIG_HIGH_RES_TIMERS
99922 +extern long schedule_msec_hrtimeout(long timeout);
99923 +extern long schedule_min_hrtimeout(void);
99924 +extern long schedule_msec_hrtimeout_interruptible(long timeout);
99925 +extern long schedule_msec_hrtimeout_uninterruptible(long timeout);
99926 +#else
99927 +static inline long schedule_msec_hrtimeout(long timeout)
99928 +{
99929 +       return schedule_timeout(msecs_to_jiffies(timeout));
99930 +}
99932 +static inline long schedule_min_hrtimeout(void)
99933 +{
99934 +       return schedule_timeout(1);
99935 +}
99937 +static inline long schedule_msec_hrtimeout_interruptible(long timeout)
99938 +{
99939 +       return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
99940 +}
99942 +static inline long schedule_msec_hrtimeout_uninterruptible(long timeout)
99943 +{
99944 +       return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
99945 +}
99946 +#endif
99948  asmlinkage void schedule(void);
99949  extern void schedule_preempt_disabled(void);
99950  asmlinkage void preempt_schedule_irq(void);
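A minimal usage sketch for the new helpers (the polling loop and names are hypothetical; only the schedule_msec_hrtimeout_*() calls come from this hunk). With CONFIG_HIGH_RES_TIMERS the extern variants sleep with hrtimer precision; otherwise the #else stubs above round the delay up to jiffies via schedule_timeout():

#include <linux/sched.h>

/* Hypothetical polling loop: sleep roughly 5 ms per iteration instead
 * of a full jiffy (10 ms at HZ=100) while waiting for a condition.
 */
static void foo_wait_ready(bool (*ready)(void))
{
	while (!ready())
		schedule_msec_hrtimeout_uninterruptible(5);
}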
99951 @@ -450,10 +477,22 @@ struct sched_statistics {
99952  #endif
99953  };
99955 +#ifdef CONFIG_CACULE_SCHED
99956 +struct cacule_node {
99957 +       struct cacule_node*             next;
99958 +       struct cacule_node*             prev;
99959 +       u64                             cacule_start_time;
99960 +       u64                             vruntime;
99961 +};
99962 +#endif
99964  struct sched_entity {
99965         /* For load-balancing: */
99966         struct load_weight              load;
99967         struct rb_node                  run_node;
99968 +#ifdef CONFIG_CACULE_SCHED
99969 +       struct cacule_node              cacule_node;
99970 +#endif
99971         struct list_head                group_node;
99972         unsigned int                    on_rq;
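Because cacule_node is embedded in sched_entity, the CacULE runqueue code can recover the owning entity from a list node with container_of(); a sketch of the usual idiom (this standalone version is illustrative; the fair.c part of the CacULE patch defines its own accessor):

/* Map an embedded cacule_node back to its owning sched_entity,
 * the standard pattern for intrusive doubly-linked lists.
 */
static inline struct sched_entity *se_of(struct cacule_node *cn)
{
	return container_of(cn, struct sched_entity, cacule_node);
}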
99974 diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
99975 index 3c31ba88aca5..cb819c3d86f3 100644
99976 --- a/include/linux/sched/sysctl.h
99977 +++ b/include/linux/sched/sysctl.h
99978 @@ -31,6 +31,12 @@ extern unsigned int sysctl_sched_min_granularity;
99979  extern unsigned int sysctl_sched_wakeup_granularity;
99980  extern unsigned int sysctl_sched_child_runs_first;
99982 +#ifdef CONFIG_CACULE_SCHED
99983 +extern int interactivity_factor;
99984 +extern unsigned int interactivity_threshold;
99985 +extern int cacule_max_lifetime;
99986 +#endif
99988  enum sched_tunable_scaling {
99989         SCHED_TUNABLESCALING_NONE,
99990         SCHED_TUNABLESCALING_LOG,
99991 diff --git a/include/linux/smp.h b/include/linux/smp.h
99992 index 70c6f6284dcf..238a3f97a415 100644
99993 --- a/include/linux/smp.h
99994 +++ b/include/linux/smp.h
99995 @@ -73,7 +73,7 @@ void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
99996  void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
99997                            void *info, bool wait, const struct cpumask *mask);
99999 -int smp_call_function_single_async(int cpu, call_single_data_t *csd);
100000 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
100002  #ifdef CONFIG_SMP
100004 diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
100005 index 592897fa4f03..643139b1eafe 100644
100006 --- a/include/linux/spi/spi.h
100007 +++ b/include/linux/spi/spi.h
100008 @@ -510,6 +510,9 @@ struct spi_controller {
100010  #define SPI_MASTER_GPIO_SS             BIT(5)  /* GPIO CS must select slave */
100012 +       /* flag indicating if the allocation of this struct is devres-managed */
100013 +       bool                    devm_allocated;
100015         /* flag indicating this is an SPI slave controller */
100016         bool                    slave;
100018 diff --git a/include/linux/swap.h b/include/linux/swap.h
100019 index 4cc6ec3bf0ab..0e7532c7db22 100644
100020 --- a/include/linux/swap.h
100021 +++ b/include/linux/swap.h
100022 @@ -344,13 +344,14 @@ extern void lru_add_drain_cpu(int cpu);
100023  extern void lru_add_drain_cpu_zone(struct zone *zone);
100024  extern void lru_add_drain_all(void);
100025  extern void rotate_reclaimable_page(struct page *page);
100026 +extern void activate_page(struct page *page);
100027  extern void deactivate_file_page(struct page *page);
100028  extern void deactivate_page(struct page *page);
100029  extern void mark_page_lazyfree(struct page *page);
100030  extern void swap_setup(void);
100032 -extern void lru_cache_add_inactive_or_unevictable(struct page *page,
100033 -                                               struct vm_area_struct *vma);
100034 +extern void lru_cache_add_page_vma(struct page *page, struct vm_area_struct *vma,
100035 +                                  bool faulting);
100037  /* linux/mm/vmscan.c */
100038  extern unsigned long zone_reclaimable_pages(struct zone *zone);
100039 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
100040 index 2839dc9a7c01..b07b7d4334a6 100644
100041 --- a/include/linux/syscalls.h
100042 +++ b/include/linux/syscalls.h
100043 @@ -69,6 +69,8 @@ struct io_uring_params;
100044  struct clone_args;
100045  struct open_how;
100046  struct mount_attr;
100047 +struct futex_waitv;
100048 +struct futex_requeue;
100050  #include <linux/types.h>
100051  #include <linux/aio_abi.h>
100052 @@ -619,6 +621,20 @@ asmlinkage long sys_get_robust_list(int pid,
100053  asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
100054                                     size_t len);
100056 +/* kernel/futex2.c */
100057 +asmlinkage long sys_futex_wait(void __user *uaddr, unsigned int val,
100058 +                              unsigned int flags,
100059 +                              struct __kernel_timespec __user *timo);
100060 +asmlinkage long sys_futex_wake(void __user *uaddr, unsigned int nr_wake,
100061 +                              unsigned int flags);
100062 +asmlinkage long sys_futex_waitv(struct futex_waitv __user *waiters,
100063 +                               unsigned int nr_futexes, unsigned int flags,
100064 +                               struct __kernel_timespec __user *timo);
100065 +asmlinkage long sys_futex_requeue(struct futex_requeue __user *uaddr1,
100066 +                                 struct futex_requeue __user *uaddr2,
100067 +                                 unsigned int nr_wake, unsigned int nr_requeue,
100068 +                                 unsigned int cmpval, unsigned int flags);
100070  /* kernel/hrtimer.c */
100071  asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
100072                               struct __kernel_timespec __user *rmtp);
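A hedged userspace sketch of driving the futex2 calls declared above via syscall(2). The syscall numbers depend on where this patch wires the entries into the arch syscall tables, so __NR_futex_wait/__NR_futex_wake and the FUTEX_32 size flag below are placeholders, not values taken from this hunk:

#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#define FUTEX_32	2u	/* placeholder size flag */

static long futex2_wait(uint32_t *uaddr, uint32_t expected)
{
	/* Blocks while *uaddr == expected, like classic FUTEX_WAIT. */
	return syscall(__NR_futex_wait, uaddr, expected, FUTEX_32, NULL);
}

static long futex2_wake(uint32_t *uaddr, unsigned int nr_wake)
{
	return syscall(__NR_futex_wake, uaddr, nr_wake, FUTEX_32);
}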
100073 @@ -1300,6 +1316,8 @@ int ksys_ipc(unsigned int call, int first, unsigned long second,
100074         unsigned long third, void __user * ptr, long fifth);
100075  int compat_ksys_ipc(u32 call, int first, int second,
100076         u32 third, u32 ptr, u32 fifth);
100077 +long ksys_futex_wake(void __user *uaddr, unsigned long nr_wake,
100078 +                    unsigned int flags);
100081   * The following kernel syscall equivalents are just wrappers to fs-internal
100082 diff --git a/include/linux/tcp.h b/include/linux/tcp.h
100083 index 48d8a363319e..1bd559c69e83 100644
100084 --- a/include/linux/tcp.h
100085 +++ b/include/linux/tcp.h
100086 @@ -225,7 +225,8 @@ struct tcp_sock {
100087         u8      compressed_ack;
100088         u8      dup_ack_counter:2,
100089                 tlp_retrans:1,  /* TLP is a retransmission */
100090 -               unused:5;
100091 +               fast_ack_mode:2, /* which fast ack mode? */
100092 +               unused:3;
100093         u32     chrono_start;   /* Start time in jiffies of a TCP chrono */
100094         u32     chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
100095         u8      chrono_type:2,  /* current chronograph type */
100096 diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
100097 index 61c3372d3f32..2f719b471d52 100644
100098 --- a/include/linux/tty_driver.h
100099 +++ b/include/linux/tty_driver.h
100100 @@ -228,7 +228,7 @@
100101   *
100102   *     Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
100103   *     structure to complete. This method is optional and will only be called
100104 - *     if provided (otherwise EINVAL will be returned).
100105 + *     if provided (otherwise ENOTTY will be returned).
100106   */
100108  #include <linux/export.h>
100109 diff --git a/include/linux/udp.h b/include/linux/udp.h
100110 index aa84597bdc33..ae58ff3b6b5b 100644
100111 --- a/include/linux/udp.h
100112 +++ b/include/linux/udp.h
100113 @@ -51,7 +51,9 @@ struct udp_sock {
100114                                            * different encapsulation layer set
100115                                            * this
100116                                            */
100117 -                        gro_enabled:1; /* Can accept GRO packets */
100118 +                        gro_enabled:1, /* Request GRO aggregation */
100119 +                        accept_udp_l4:1,
100120 +                        accept_udp_fraglist:1;
100121         /*
100122          * Following member retains the information to create a UDP header
100123          * when the socket is uncorked.
100124 @@ -131,8 +133,16 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
100126  static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
100127  {
100128 -       return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
100129 -              skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
100130 +       if (!skb_is_gso(skb))
100131 +               return false;
100132 +
100133 +       if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
100134 +               return true;
100135 +
100136 +       if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
100137 +               return true;
100138 +
100139 +       return false;
100140  }
100142  #define udp_portaddr_for_each_entry(__sk, list) \
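For context, a sketch of the userspace side that flips these bits: enabling the existing UDP_GRO socket option is what, in the net/ipv4/udp.c part of this change (not shown in this header hunk), sets gro_enabled and the new accept_udp_* flags, so udp_unexpected_gso() stops treating aggregated packets as unexpected:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udp.h>

static int enable_udp_gro(int fd)
{
	int on = 1;

	/* Request GRO aggregation; kernels with this patch then accept
	 * SKB_GSO_UDP_L4 and fraglist packets on the socket.
	 */
	return setsockopt(fd, SOL_UDP, UDP_GRO, &on, sizeof(on));
}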
100143 diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
100144 index 70d681918d01..bf00259493e0 100644
100145 --- a/include/linux/usb/pd.h
100146 +++ b/include/linux/usb/pd.h
100147 @@ -493,4 +493,6 @@ static inline unsigned int rdo_max_power(u32 rdo)
100148  #define PD_N_CAPS_COUNT                (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
100149  #define PD_N_HARD_RESET_COUNT  2
100151 +#define PD_P_SNK_STDBY_MW      2500    /* 2500 mW */
100153  #endif /* __LINUX_USB_PD_H */
100154 diff --git a/include/linux/zstd.h b/include/linux/zstd.h
100155 index e87f78c9b19c..446ecabcdd02 100644
100156 --- a/include/linux/zstd.h
100157 +++ b/include/linux/zstd.h
100158 @@ -1,138 +1,97 @@
100159 +/* SPDX-License-Identifier: GPL-2.0-only */
100161 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
100162 + * Copyright (c) Yann Collet, Facebook, Inc.
100163   * All rights reserved.
100164   *
100165 - * This source code is licensed under the BSD-style license found in the
100166 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
100167 - * An additional grant of patent rights can be found in the PATENTS file in the
100168 - * same directory.
100170 - * This program is free software; you can redistribute it and/or modify it under
100171 - * the terms of the GNU General Public License version 2 as published by the
100172 - * Free Software Foundation. This program is dual-licensed; you may select
100173 - * either version 2 of the GNU General Public License ("GPL") or BSD license
100174 - * ("BSD").
100175 + * This source code is licensed under both the BSD-style license (found in the
100176 + * LICENSE file in the root directory of https://github.com/facebook/zstd) and
100177 + * the GPLv2 (found in the COPYING file in the root directory of
100178 + * https://github.com/facebook/zstd). You may select, at your option, one of the
100179 + * above-listed licenses.
100180   */
100182 -#ifndef ZSTD_H
100183 -#define ZSTD_H
100184 +#ifndef LINUX_ZSTD_H
100185 +#define LINUX_ZSTD_H
100187 -/* ======   Dependency   ======*/
100188 -#include <linux/types.h>   /* size_t */
100189 +/**
100190 + * This is a kernel-style API that wraps the upstream zstd API, which cannot be
100191 + * used directly because the symbols aren't exported. It exposes the minimal
100192 + * functionality which is currently required by users of zstd in the kernel.
100193 + * Expose extra functions from lib/zstd/zstd.h as needed.
100194 + */
100196 +/* ======   Dependency   ====== */
100197 +#include <linux/types.h>
100198 +#include <linux/zstd_errors.h>
100199 +#include <linux/zstd_lib.h>
100201 -/*-*****************************************************************************
100202 - * Introduction
100203 +/* ======   Helper Functions   ====== */
100204 +/**
100205 + * zstd_compress_bound() - maximum compressed size in worst case scenario
100206 + * @src_size: The size of the data to compress.
100207   *
100208 - * zstd, short for Zstandard, is a fast lossless compression algorithm,
100209 - * targeting real-time compression scenarios at zlib-level and better
100210 - * compression ratios. The zstd compression library provides in-memory
100211 - * compression and decompression functions. The library supports compression
100212 - * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled
100213 - * ultra, should be used with caution, as they require more memory.
100214 - * Compression can be done in:
100215 - *  - a single step, reusing a context (described as Explicit memory management)
100216 - *  - unbounded multiple steps (described as Streaming compression)
100217 - * The compression ratio achievable on small data can be highly improved using
100218 - * compression with a dictionary in:
100219 - *  - a single step (described as Simple dictionary API)
100220 - *  - a single step, reusing a dictionary (described as Fast dictionary API)
100221 - ******************************************************************************/
100223 -/*======  Helper functions  ======*/
100224 + * Return:    The maximum compressed size in the worst case scenario.
100225 + */
100226 +size_t zstd_compress_bound(size_t src_size);
100228  /**
100229 - * enum ZSTD_ErrorCode - zstd error codes
100230 + * zstd_is_error() - tells if a size_t function result is an error code
100231 + * @code:  The function result to check for error.
100232   *
100233 - * Functions that return size_t can be checked for errors using ZSTD_isError()
100234 - * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
100235 + * Return: Non-zero iff the code is an error.
100236 + */
100237 +unsigned int zstd_is_error(size_t code);
100239 +/**
100240 + * enum zstd_error_code - zstd error codes
100241   */
100242 -typedef enum {
100243 -       ZSTD_error_no_error,
100244 -       ZSTD_error_GENERIC,
100245 -       ZSTD_error_prefix_unknown,
100246 -       ZSTD_error_version_unsupported,
100247 -       ZSTD_error_parameter_unknown,
100248 -       ZSTD_error_frameParameter_unsupported,
100249 -       ZSTD_error_frameParameter_unsupportedBy32bits,
100250 -       ZSTD_error_frameParameter_windowTooLarge,
100251 -       ZSTD_error_compressionParameter_unsupported,
100252 -       ZSTD_error_init_missing,
100253 -       ZSTD_error_memory_allocation,
100254 -       ZSTD_error_stage_wrong,
100255 -       ZSTD_error_dstSize_tooSmall,
100256 -       ZSTD_error_srcSize_wrong,
100257 -       ZSTD_error_corruption_detected,
100258 -       ZSTD_error_checksum_wrong,
100259 -       ZSTD_error_tableLog_tooLarge,
100260 -       ZSTD_error_maxSymbolValue_tooLarge,
100261 -       ZSTD_error_maxSymbolValue_tooSmall,
100262 -       ZSTD_error_dictionary_corrupted,
100263 -       ZSTD_error_dictionary_wrong,
100264 -       ZSTD_error_dictionaryCreation_failed,
100265 -       ZSTD_error_maxCode
100266 -} ZSTD_ErrorCode;
100267 +typedef ZSTD_ErrorCode zstd_error_code;
100269  /**
100270 - * ZSTD_maxCLevel() - maximum compression level available
100271 + * zstd_get_error_code() - translates an error function result to an error code
100272 + * @code:  The function result for which zstd_is_error(code) is true.
100273   *
100274 - * Return: Maximum compression level available.
100275 + * Return: A unique error code for this error.
100276   */
100277 -int ZSTD_maxCLevel(void);
100278 +zstd_error_code zstd_get_error_code(size_t code);
100280  /**
100281 - * ZSTD_compressBound() - maximum compressed size in worst case scenario
100282 - * @srcSize: The size of the data to compress.
100283 + * zstd_get_error_name() - translates an error function result to a string
100284 + * @code:  The function result for which zstd_is_error(code) is true.
100285   *
100286 - * Return:   The maximum compressed size in the worst case scenario.
100287 + * Return: An error string corresponding to the error code.
100288   */
100289 -size_t ZSTD_compressBound(size_t srcSize);
100290 +const char *zstd_get_error_name(size_t code);
100292  /**
100293 - * ZSTD_isError() - tells if a size_t function result is an error code
100294 - * @code:  The function result to check for error.
100295 + * zstd_min_clevel() - minimum allowed compression level
100296   *
100297 - * Return: Non-zero iff the code is an error.
100298 + * Return: The minimum allowed compression level.
100299   */
100300 -static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
100301 -{
100302 -       return code > (size_t)-ZSTD_error_maxCode;
100303 -}
100304 +int zstd_min_clevel(void);
100306  /**
100307 - * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
100308 - * @functionResult: The result of a function for which ZSTD_isError() is true.
100309 + * zstd_max_clevel() - maximum allowed compression level
100310   *
100311 - * Return:          The ZSTD_ErrorCode corresponding to the functionResult or 0
100312 - *                  if the functionResult isn't an error.
100313 + * Return: The maximum allowed compression level.
100314   */
100315 -static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode(
100316 -       size_t functionResult)
100317 -{
100318 -       if (!ZSTD_isError(functionResult))
100319 -               return (ZSTD_ErrorCode)0;
100320 -       return (ZSTD_ErrorCode)(0 - functionResult);
100321 -}
100322 +int zstd_max_clevel(void);
100324 +/* ======   Parameter Selection   ====== */
100326  /**
100327 - * enum ZSTD_strategy - zstd compression search strategy
100328 + * enum zstd_strategy - zstd compression search strategy
100329   *
100330 - * From faster to stronger.
100331 + * From faster to stronger. See zstd_lib.h.
100332   */
100333 -typedef enum {
100334 -       ZSTD_fast,
100335 -       ZSTD_dfast,
100336 -       ZSTD_greedy,
100337 -       ZSTD_lazy,
100338 -       ZSTD_lazy2,
100339 -       ZSTD_btlazy2,
100340 -       ZSTD_btopt,
100341 -       ZSTD_btopt2
100342 -} ZSTD_strategy;
100343 +typedef ZSTD_strategy zstd_strategy;
100345  /**
100346 - * struct ZSTD_compressionParameters - zstd compression parameters
100347 + * struct zstd_compression_parameters - zstd compression parameters
100348   * @windowLog:    Log of the largest match distance. Larger means more
100349   *                compression, and more memory needed during decompression.
100350 - * @chainLog:     Fully searched segment. Larger means more compression, slower,
100351 - *                and more memory (useless for fast).
100352 + * @chainLog:     Fully searched segment. Larger means more compression,
100353 + *                slower, and more memory (useless for fast).
100354   * @hashLog:      Dispatch table. Larger means more compression,
100355   *                slower, and more memory.
100356   * @searchLog:    Number of searches. Larger means more compression and slower.
100357 @@ -141,1017 +100,348 @@ typedef enum {
100358   * @targetLength: Acceptable match size for optimal parser (only). Larger means
100359   *                more compression, and slower.
100360   * @strategy:     The zstd compression strategy.
100362 + * See zstd_lib.h.
100363   */
100364 -typedef struct {
100365 -       unsigned int windowLog;
100366 -       unsigned int chainLog;
100367 -       unsigned int hashLog;
100368 -       unsigned int searchLog;
100369 -       unsigned int searchLength;
100370 -       unsigned int targetLength;
100371 -       ZSTD_strategy strategy;
100372 -} ZSTD_compressionParameters;
100373 +typedef ZSTD_compressionParameters zstd_compression_parameters;
100375  /**
100376 - * struct ZSTD_frameParameters - zstd frame parameters
100377 - * @contentSizeFlag: Controls whether content size will be present in the frame
100378 - *                   header (when known).
100379 - * @checksumFlag:    Controls whether a 32-bit checksum is generated at the end
100380 - *                   of the frame for error detection.
100381 - * @noDictIDFlag:    Controls whether dictID will be saved into the frame header
100382 - *                   when using dictionary compression.
100383 + * struct zstd_frame_parameters - zstd frame parameters
100384 + * @contentSizeFlag: Controls whether content size will be present in the
100385 + *                   frame header (when known).
100386 + * @checksumFlag:    Controls whether a 32-bit checksum is generated at the
100387 + *                   end of the frame for error detection.
100388 + * @noDictIDFlag:    Controls whether dictID will be saved into the frame
100389 + *                   header when using dictionary compression.
100390   *
100391 - * The default value is all fields set to 0.
100392 + * The default value is all fields set to 0. See zstd_lib.h.
100393   */
100394 -typedef struct {
100395 -       unsigned int contentSizeFlag;
100396 -       unsigned int checksumFlag;
100397 -       unsigned int noDictIDFlag;
100398 -} ZSTD_frameParameters;
100399 +typedef ZSTD_frameParameters zstd_frame_parameters;
100401  /**
100402 - * struct ZSTD_parameters - zstd parameters
100403 + * struct zstd_parameters - zstd parameters
100404   * @cParams: The compression parameters.
100405   * @fParams: The frame parameters.
100406   */
100407 -typedef struct {
100408 -       ZSTD_compressionParameters cParams;
100409 -       ZSTD_frameParameters fParams;
100410 -} ZSTD_parameters;
100411 +typedef ZSTD_parameters zstd_parameters;
100413  /**
100414 - * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level
100415 - * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
100416 - * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
100417 - * @dictSize:         The dictionary size or 0 if a dictionary isn't being used.
100418 + * zstd_get_params() - returns zstd_parameters for selected level
100419 + * @level:              The compression level
100420 + * @estimated_src_size: The estimated source size to compress or 0
100421 + *                      if unknown.
100422   *
100423 - * Return:            The selected ZSTD_compressionParameters.
100424 + * Return:              The selected zstd_parameters.
100425   */
100426 -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel,
100427 -       unsigned long long estimatedSrcSize, size_t dictSize);
100428 +zstd_parameters zstd_get_params(int level,
100429 +       unsigned long long estimated_src_size);
100432 - * ZSTD_getParams() - returns ZSTD_parameters for selected level
100433 - * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
100434 - * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
100435 - * @dictSize:         The dictionary size or 0 if a dictionary isn't being used.
100437 - * The same as ZSTD_getCParams() except also selects the default frame
100438 - * parameters (all zero).
100440 - * Return:            The selected ZSTD_parameters.
100441 - */
100442 -ZSTD_parameters ZSTD_getParams(int compressionLevel,
100443 -       unsigned long long estimatedSrcSize, size_t dictSize);
100444 +/* ======   Single-pass Compression   ====== */
100446 -/*-*************************************
100447 - * Explicit memory management
100448 - **************************************/
100449 +typedef ZSTD_CCtx zstd_cctx;
100451  /**
100452 - * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx
100453 - * @cParams: The compression parameters to be used for compression.
100454 + * zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx
100455 + * @parameters: The compression parameters to be used.
100456   *
100457   * If multiple compression parameters might be used, the caller must call
100458 - * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum
100459 + * zstd_cctx_workspace_bound() for each set of parameters and use the maximum
100460   * size.
100461   *
100462 - * Return:   A lower bound on the size of the workspace that is passed to
100463 - *           ZSTD_initCCtx().
100464 + * Return:      A lower bound on the size of the workspace that is passed to
100465 + *              zstd_init_cctx().
100466   */
100467 -size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams);
100468 +size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters);
100470  /**
100471 - * struct ZSTD_CCtx - the zstd compression context
100473 - * When compressing many times it is recommended to allocate a context just once
100474 - * and reuse it for each successive compression operation.
100475 - */
100476 -typedef struct ZSTD_CCtx_s ZSTD_CCtx;
100478 - * ZSTD_initCCtx() - initialize a zstd compression context
100479 - * @workspace:     The workspace to emplace the context into. It must outlive
100480 - *                 the returned context.
100481 - * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to
100482 - *                 determine how large the workspace must be.
100484 - * Return:         A compression context emplaced into workspace.
100485 - */
100486 -ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize);
100489 - * ZSTD_compressCCtx() - compress src into dst
100490 - * @ctx:         The context. Must have been initialized with a workspace at
100491 - *               least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
100492 - * @dst:         The buffer to compress src into.
100493 - * @dstCapacity: The size of the destination buffer. May be any size, but
100494 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
100495 - * @src:         The data to compress.
100496 - * @srcSize:     The size of the data to compress.
100497 - * @params:      The parameters to use for compression. See ZSTD_getParams().
100499 - * Return:       The compressed size or an error, which can be checked using
100500 - *               ZSTD_isError().
100501 - */
100502 -size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
100503 -       const void *src, size_t srcSize, ZSTD_parameters params);
100506 - * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx
100508 - * Return: A lower bound on the size of the workspace that is passed to
100509 - *         ZSTD_initDCtx().
100510 - */
100511 -size_t ZSTD_DCtxWorkspaceBound(void);
100514 - * struct ZSTD_DCtx - the zstd decompression context
100516 - * When decompressing many times it is recommended to allocate a context just
100517 - * once and reuse it for each successive decompression operation.
100518 - */
100519 -typedef struct ZSTD_DCtx_s ZSTD_DCtx;
100521 - * ZSTD_initDCtx() - initialize a zstd decompression context
100522 - * @workspace:     The workspace to emplace the context into. It must outlive
100523 - *                 the returned context.
100524 - * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to
100525 - *                 determine how large the workspace must be.
100527 - * Return:         A decompression context emplaced into workspace.
100528 - */
100529 -ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize);
100532 - * ZSTD_decompressDCtx() - decompress zstd compressed src into dst
100533 - * @ctx:         The decompression context.
100534 - * @dst:         The buffer to decompress src into.
100535 - * @dstCapacity: The size of the destination buffer. Must be at least as large
100536 - *               as the decompressed size. If the caller cannot upper bound the
100537 - *               decompressed size, then it's better to use the streaming API.
100538 - * @src:         The zstd compressed data to decompress. Multiple concatenated
100539 - *               frames and skippable frames are allowed.
100540 - * @srcSize:     The exact size of the data to decompress.
100542 - * Return:       The decompressed size or an error, which can be checked using
100543 - *               ZSTD_isError().
100544 - */
100545 -size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
100546 -       const void *src, size_t srcSize);
100548 -/*-************************
100549 - * Simple dictionary API
100550 - **************************/
100553 - * ZSTD_compress_usingDict() - compress src into dst using a dictionary
100554 - * @ctx:         The context. Must have been initialized with a workspace at
100555 - *               least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
100556 - * @dst:         The buffer to compress src into.
100557 - * @dstCapacity: The size of the destination buffer. May be any size, but
100558 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
100559 - * @src:         The data to compress.
100560 - * @srcSize:     The size of the data to compress.
100561 - * @dict:        The dictionary to use for compression.
100562 - * @dictSize:    The size of the dictionary.
100563 - * @params:      The parameters to use for compression. See ZSTD_getParams().
100565 - * Compression using a predefined dictionary. The same dictionary must be used
100566 - * during decompression.
100568 - * Return:       The compressed size or an error, which can be checked using
100569 - *               ZSTD_isError().
100570 - */
100571 -size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
100572 -       const void *src, size_t srcSize, const void *dict, size_t dictSize,
100573 -       ZSTD_parameters params);
100576 - * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary
100577 - * @ctx:         The decompression context.
100578 - * @dst:         The buffer to decompress src into.
100579 - * @dstCapacity: The size of the destination buffer. Must be at least as large
100580 - *               as the decompressed size. If the caller cannot upper bound the
100581 - *               decompressed size, then it's better to use the streaming API.
100582 - * @src:         The zstd compressed data to decompress. Multiple concatenated
100583 - *               frames and skippable frames are allowed.
100584 - * @srcSize:     The exact size of the data to decompress.
100585 - * @dict:        The dictionary to use for decompression. The same dictionary
100586 - *               must've been used to compress the data.
100587 - * @dictSize:    The size of the dictionary.
100589 - * Return:       The decompressed size or an error, which can be checked using
100590 - *               ZSTD_isError().
100591 - */
100592 -size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
100593 -       const void *src, size_t srcSize, const void *dict, size_t dictSize);
100595 -/*-**************************
100596 - * Fast dictionary API
100597 - ***************************/
100600 - * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict
100601 - * @cParams: The compression parameters to be used for compression.
100602 + * zstd_init_cctx() - initialize a zstd compression context
100603 + * @workspace:      The workspace to emplace the context into. It must outlive
100604 + *                  the returned context.
100605 + * @workspace_size: The size of workspace. Use zstd_cctx_workspace_bound() to
100606 + *                  determine how large the workspace must be.
100607   *
100608 - * Return:   A lower bound on the size of the workspace that is passed to
100609 - *           ZSTD_initCDict().
100610 - */
100611 -size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams);
100614 - * struct ZSTD_CDict - a digested dictionary to be used for compression
100615 + * Return:          A zstd compression context or NULL on error.
100616   */
100617 -typedef struct ZSTD_CDict_s ZSTD_CDict;
100618 +zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
100620  /**
100621 - * ZSTD_initCDict() - initialize a digested dictionary for compression
100622 - * @dictBuffer:    The dictionary to digest. The buffer is referenced by the
100623 - *                 ZSTD_CDict so it must outlive the returned ZSTD_CDict.
100624 - * @dictSize:      The size of the dictionary.
100625 - * @params:        The parameters to use for compression. See ZSTD_getParams().
100626 - * @workspace:     The workspace. It must outlive the returned ZSTD_CDict.
100627 - * @workspaceSize: The workspace size. Must be at least
100628 - *                 ZSTD_CDictWorkspaceBound(params.cParams).
100629 + * zstd_compress_cctx() - compress src into dst with the initialized parameters
100630 + * @cctx:         The context. Must have been initialized with zstd_init_cctx().
100631 + * @dst:          The buffer to compress src into.
100632 + * @dst_capacity: The size of the destination buffer. May be any size, but
100633 + *                zstd_compress_bound(src_size) is guaranteed to be large enough.
100634 + * @src:          The data to compress.
100635 + * @src_size:     The size of the data to compress.
100636 + * @parameters:   The compression parameters to be used.
100637   *
100638 - * When compressing multiple messages / blocks with the same dictionary it is
100639 - * recommended to load it just once. The ZSTD_CDict merely references the
100640 - * dictBuffer, so it must outlive the returned ZSTD_CDict.
100642 - * Return:         The digested dictionary emplaced into workspace.
100643 + * Return:        The compressed size or an error, which can be checked using
100644 + *                zstd_is_error().
100645   */
100646 -ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize,
100647 -       ZSTD_parameters params, void *workspace, size_t workspaceSize);
100648 +size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
100649 +       const void *src, size_t src_size, const zstd_parameters *parameters);
100652 - * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict
100653 - * @ctx:         The context. Must have been initialized with a workspace at
100654 - *               least as large as ZSTD_CCtxWorkspaceBound(cParams) where
100655 - *               cParams are the compression parameters used to initialize the
100656 - *               cdict.
100657 - * @dst:         The buffer to compress src into.
100658 - * @dstCapacity: The size of the destination buffer. May be any size, but
100659 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
100660 - * @src:         The data to compress.
100661 - * @srcSize:     The size of the data to compress.
100662 - * @cdict:       The digested dictionary to use for compression.
100663 - * @params:      The parameters to use for compression. See ZSTD_getParams().
100665 - * Compression using a digested dictionary. The same dictionary must be used
100666 - * during decompression.
100668 - * Return:       The compressed size or an error, which can be checked using
100669 - *               ZSTD_isError().
100670 - */
100671 -size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
100672 -       const void *src, size_t srcSize, const ZSTD_CDict *cdict);
100673 +/* ======   Single-pass Decompression   ====== */
100675 +typedef ZSTD_DCtx zstd_dctx;
100677  /**
100678 - * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict
100679 + * zstd_dctx_workspace_bound() - max memory needed to initialize a zstd_dctx
100680   *
100681 - * Return:  A lower bound on the size of the workspace that is passed to
100682 - *          ZSTD_initDDict().
100683 - */
100684 -size_t ZSTD_DDictWorkspaceBound(void);
100687 - * struct ZSTD_DDict - a digested dictionary to be used for decompression
100688 + * Return: A lower bound on the size of the workspace that is passed to
100689 + *         zstd_init_dctx().
100690   */
100691 -typedef struct ZSTD_DDict_s ZSTD_DDict;
100692 +size_t zstd_dctx_workspace_bound(void);
100694  /**
100695 - * ZSTD_initDDict() - initialize a digested dictionary for decompression
100696 - * @dictBuffer:    The dictionary to digest. The buffer is referenced by the
100697 - *                 ZSTD_DDict so it must outlive the returned ZSTD_DDict.
100698 - * @dictSize:      The size of the dictionary.
100699 - * @workspace:     The workspace. It must outlive the returned ZSTD_DDict.
100700 - * @workspaceSize: The workspace size. Must be at least
100701 - *                 ZSTD_DDictWorkspaceBound().
100703 - * When decompressing multiple messages / blocks with the same dictionary it is
100704 - * recommended to load it just once. The ZSTD_DDict merely references the
100705 - * dictBuffer, so it must outlive the returned ZSTD_DDict.
100706 + * zstd_init_dctx() - initialize a zstd decompression context
100707 + * @workspace:      The workspace to emplace the context into. It must outlive
100708 + *                  the returned context.
100709 + * @workspace_size: The size of workspace. Use zstd_dctx_workspace_bound() to
100710 + *                  determine how large the workspace must be.
100711   *
100712 - * Return:         The digested dictionary emplaced into workspace.
100713 + * Return:          A zstd decompression context or NULL on error.
100714   */
100715 -ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize,
100716 -       void *workspace, size_t workspaceSize);
100717 +zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
100719  /**
100720 - * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict
100721 - * @ctx:         The decompression context.
100722 - * @dst:         The buffer to decompress src into.
100723 - * @dstCapacity: The size of the destination buffer. Must be at least as large
100724 - *               as the decompressed size. If the caller cannot upper bound the
100725 - *               decompressed size, then it's better to use the streaming API.
100726 - * @src:         The zstd compressed data to decompress. Multiple concatenated
100727 - *               frames and skippable frames are allowed.
100728 - * @srcSize:     The exact size of the data to decompress.
100729 - * @ddict:       The digested dictionary to use for decompression. The same
100730 - *               dictionary must've been used to compress the data.
100731 + * zstd_decompress_dctx() - decompress zstd compressed src into dst
100732 + * @dctx:         The decompression context.
100733 + * @dst:          The buffer to decompress src into.
100734 + * @dst_capacity: The size of the destination buffer. Must be at least as large
100735 + *                as the decompressed size. If the caller cannot upper bound the
100736 + *                decompressed size, then it's better to use the streaming API.
100737 + * @src:          The zstd compressed data to decompress. Multiple concatenated
100738 + *                frames and skippable frames are allowed.
100739 + * @src_size:     The exact size of the data to decompress.
100740   *
100741 - * Return:       The decompressed size or an error, which can be checked using
100742 - *               ZSTD_isError().
100743 + * Return:        The decompressed size or an error, which can be checked using
100744 + *                zstd_is_error().
100745   */
100746 -size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst,
100747 -       size_t dstCapacity, const void *src, size_t srcSize,
100748 -       const ZSTD_DDict *ddict);
100749 +size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
100750 +       const void *src, size_t src_size);
100753 -/*-**************************
100754 - * Streaming
100755 - ***************************/
100756 +/* ======   Streaming Buffers   ====== */
100758  /**
100759 - * struct ZSTD_inBuffer - input buffer for streaming
100760 + * struct zstd_in_buffer - input buffer for streaming
100761   * @src:  Start of the input buffer.
100762   * @size: Size of the input buffer.
100763   * @pos:  Position where reading stopped. Will be updated.
100764   *        Necessarily 0 <= pos <= size.
100766 + * See zstd_lib.h.
100767   */
100768 -typedef struct ZSTD_inBuffer_s {
100769 -       const void *src;
100770 -       size_t size;
100771 -       size_t pos;
100772 -} ZSTD_inBuffer;
100773 +typedef ZSTD_inBuffer zstd_in_buffer;
100775  /**
100776 - * struct ZSTD_outBuffer - output buffer for streaming
100777 + * struct zstd_out_buffer - output buffer for streaming
100778   * @dst:  Start of the output buffer.
100779   * @size: Size of the output buffer.
100780   * @pos:  Position where writing stopped. Will be updated.
100781   *        Necessarily 0 <= pos <= size.
100783 + * See zstd_lib.h.
100784   */
100785 -typedef struct ZSTD_outBuffer_s {
100786 -       void *dst;
100787 -       size_t size;
100788 -       size_t pos;
100789 -} ZSTD_outBuffer;
100790 +typedef ZSTD_outBuffer zstd_out_buffer;
100792 +/* ======   Streaming Compression   ====== */
100795 -/*-*****************************************************************************
100796 - * Streaming compression - HowTo
100798 - * A ZSTD_CStream object is required to track streaming operation.
100799 - * Use ZSTD_initCStream() to initialize a ZSTD_CStream object.
100800 - * ZSTD_CStream objects can be reused multiple times on consecutive compression
100801 - * operations. It is recommended to re-use ZSTD_CStream in situations where many
100802 - * streaming operations will be achieved consecutively. Use one separate
100803 - * ZSTD_CStream per thread for parallel execution.
100805 - * Use ZSTD_compressStream() repetitively to consume input stream.
100806 - * The function will automatically update both `pos` fields.
100807 - * Note that it may not consume the entire input, in which case `pos < size`,
100808 - * and it's up to the caller to present again remaining data.
100809 - * It returns a hint for the preferred number of bytes to use as an input for
100810 - * the next function call.
100812 - * At any moment, it's possible to flush whatever data remains within internal
100813 - * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might
100814 - * still be some content left within the internal buffer if `output->size` is
100815 - * too small. It returns the number of bytes left in the internal buffer and
100816 - * must be called until it returns 0.
100818 - * ZSTD_endStream() instructs to finish a frame. It will perform a flush and
100819 - * write frame epilogue. The epilogue is required for decoders to consider a
100820 - * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush
100821 - * the full content if `output->size` is too small. In which case, call again
100822 - * ZSTD_endStream() to complete the flush. It returns the number of bytes left
100823 - * in the internal buffer and must be called until it returns 0.
100824 - ******************************************************************************/
100825 +typedef ZSTD_CStream zstd_cstream;
100827  /**
100828 - * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream
100829 - * @cParams: The compression parameters to be used for compression.
100830 + * zstd_cstream_workspace_bound() - memory needed to initialize a zstd_cstream
100831 + * @cparams: The compression parameters to be used for compression.
100832   *
100833   * Return:   A lower bound on the size of the workspace that is passed to
100834 - *           ZSTD_initCStream() and ZSTD_initCStream_usingCDict().
100835 - */
100836 -size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams);
100839 - * struct ZSTD_CStream - the zstd streaming compression context
100840 - */
100841 -typedef struct ZSTD_CStream_s ZSTD_CStream;
100843 -/*===== ZSTD_CStream management functions =====*/
100845 - * ZSTD_initCStream() - initialize a zstd streaming compression context
100846 - * @params:         The zstd compression parameters.
100847 - * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must
100848 - *                  pass the source size (zero means empty source). Otherwise,
100849 - *                  the caller may optionally pass the source size, or zero if
100850 - *                  unknown.
100851 - * @workspace:      The workspace to emplace the context into. It must outlive
100852 - *                  the returned context.
100853 - * @workspaceSize:  The size of workspace.
100854 - *                  Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine
100855 - *                  how large the workspace must be.
100857 - * Return:          The zstd streaming compression context.
100858 + *           zstd_init_cstream().
100859   */
100860 -ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params,
100861 -       unsigned long long pledgedSrcSize, void *workspace,
100862 -       size_t workspaceSize);
100863 +size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams);
100865  /**
100866 - * ZSTD_initCStream_usingCDict() - initialize a streaming compression context
100867 - * @cdict:          The digested dictionary to use for compression.
100868 - * @pledgedSrcSize: Optionally the source size, or zero if unknown.
100869 - * @workspace:      The workspace to emplace the context into. It must outlive
100870 - *                  the returned context.
100871 - * @workspaceSize:  The size of workspace. Call ZSTD_CStreamWorkspaceBound()
100872 - *                  with the cParams used to initialize the cdict to determine
100873 - *                  how large the workspace must be.
100874 + * zstd_init_cstream() - initialize a zstd streaming compression context
100875 + * @parameters:       The zstd parameters to use for compression.
100876 + * @pledged_src_size: If params.fParams.contentSizeFlag == 1 then the caller
100877 + *                    must pass the source size (zero means empty source).
100878 + *                    Otherwise, the caller may optionally pass the source
100879 + *                    size, or zero if unknown.
100880 + * @workspace:        The workspace to emplace the context into. It must outlive
100881 + *                    the returned context.
100882 + * @workspace_size:   The size of workspace.
100883 + *                    Use zstd_cstream_workspace_bound(&params->cParams) to
100884 + *                    determine how large the workspace must be.
100885   *
100886 - * Return:          The zstd streaming compression context.
100887 + * Return:            The zstd streaming compression context or NULL on error.
100888   */
100889 -ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict,
100890 -       unsigned long long pledgedSrcSize, void *workspace,
100891 -       size_t workspaceSize);
100892 +zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
100893 +       unsigned long long pledged_src_size, void *workspace, size_t workspace_size);
100895 -/*===== Streaming compression functions =====*/
100896  /**
100897 - * ZSTD_resetCStream() - reset the context using parameters from creation
100898 - * @zcs:            The zstd streaming compression context to reset.
100899 - * @pledgedSrcSize: Optionally the source size, or zero if unknown.
100900 + * zstd_reset_cstream() - reset the context using parameters from creation
100901 + * @cstream:          The zstd streaming compression context to reset.
100902 + * @pledged_src_size: Optionally the source size, or zero if unknown.
100903   *
100904   * Resets the context using the parameters from creation. Skips dictionary
100905 - * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame
100906 + * loading, since it can be reused. If `pledged_src_size` is non-zero the frame
100907   * content size is always written into the frame header.
100908   *
100909 - * Return:          Zero or an error, which can be checked using ZSTD_isError().
100910 + * Return:            Zero or an error, which can be checked using
100911 + *                    zstd_is_error().
100912   */
100913 -size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize);
100914 +size_t zstd_reset_cstream(zstd_cstream *cstream,
100915 +       unsigned long long pledged_src_size);
100917  /**
100918 - * ZSTD_compressStream() - streaming compress some of input into output
100919 - * @zcs:    The zstd streaming compression context.
100920 - * @output: Destination buffer. `output->pos` is updated to indicate how much
100921 - *          compressed data was written.
100922 - * @input:  Source buffer. `input->pos` is updated to indicate how much data was
100923 - *          read. Note that it may not consume the entire input, in which case
100924 - *          `input->pos < input->size`, and it's up to the caller to present
100925 - *          remaining data again.
100926 + * zstd_compress_stream() - streaming compress some of input into output
100927 + * @cstream: The zstd streaming compression context.
100928 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
100929 + *           compressed data was written.
100930 + * @input:   Source buffer. `input->pos` is updated to indicate how much data
100931 + *           was read. Note that it may not consume the entire input, in which
100932 + *           case `input->pos < input->size`, and it's up to the caller to
100933 + *           present remaining data again.
100934   *
100935   * The `input` and `output` buffers may be any size. Guaranteed to make some
100936   * forward progress if `input` and `output` are not empty.
100937   *
100938 - * Return:  A hint for the number of bytes to use as the input for the next
100939 - *          function call or an error, which can be checked using
100940 - *          ZSTD_isError().
100941 + * Return:   A hint for the number of bytes to use as the input for the next
100942 + *           function call or an error, which can be checked using
100943 + *           zstd_is_error().
100944   */
100945 -size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output,
100946 -       ZSTD_inBuffer *input);
100947 +size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
100948 +       zstd_in_buffer *input);
100950  /**
100951 - * ZSTD_flushStream() - flush internal buffers into output
100952 - * @zcs:    The zstd streaming compression context.
100953 - * @output: Destination buffer. `output->pos` is updated to indicate how much
100954 - *          compressed data was written.
100955 + * zstd_flush_stream() - flush internal buffers into output
100956 + * @cstream: The zstd streaming compression context.
100957 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
100958 + *           compressed data was written.
100959   *
100960 - * ZSTD_flushStream() must be called until it returns 0, meaning all the data
100961 - * has been flushed. Since ZSTD_flushStream() causes a block to be ended,
100962 + * zstd_flush_stream() must be called until it returns 0, meaning all the data
100963 + * has been flushed. Since zstd_flush_stream() causes a block to be ended,
100964   * calling it too often will degrade the compression ratio.
100965   *
100966 - * Return:  The number of bytes still present within internal buffers or an
100967 - *          error, which can be checked using ZSTD_isError().
100968 + * Return:   The number of bytes still present within internal buffers or an
100969 + *           error, which can be checked using zstd_is_error().
100970   */
100971 -size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
100973 - * ZSTD_endStream() - flush internal buffers into output and end the frame
100974 - * @zcs:    The zstd streaming compression context.
100975 - * @output: Destination buffer. `output->pos` is updated to indicate how much
100976 - *          compressed data was written.
100978 - * ZSTD_endStream() must be called until it returns 0, meaning all the data has
100979 - * been flushed and the frame epilogue has been written.
100981 - * Return:  The number of bytes still present within internal buffers or an
100982 - *          error, which can be checked using ZSTD_isError().
100983 - */
100984 -size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
100985 +size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output);
100987  /**
100988 - * ZSTD_CStreamInSize() - recommended size for the input buffer
100990 - * Return: The recommended size for the input buffer.
100991 - */
100992 -size_t ZSTD_CStreamInSize(void);
100994 - * ZSTD_CStreamOutSize() - recommended size for the output buffer
100995 + * zstd_end_stream() - flush internal buffers into output and end the frame
100996 + * @cstream: The zstd streaming compression context.
100997 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
100998 + *           compressed data was written.
100999   *
101000 - * When the output buffer is at least this large, it is guaranteed to be large
101001 - * enough to flush at least one complete compressed block.
101002 + * zstd_end_stream() must be called until it returns 0, meaning all the data has
101003 + * been flushed and the frame epilogue has been written.
101004   *
101005 - * Return: The recommended size for the output buffer.
101006 + * Return:   The number of bytes still present within internal buffers or an
101007 + *           error, which can be checked using zstd_is_error().
101008   */
101009 -size_t ZSTD_CStreamOutSize(void);
101010 +size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output);
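+/*
+ * Editor's sketch, not part of the original patch: the call-until-zero
+ * pattern shared by zstd_flush_stream() and zstd_end_stream(). `out` is the
+ * caller's zstd_out_buffer and must be drained between iterations once full:
+ *
+ *	size_t left;
+ *
+ *	do {
+ *		left = zstd_end_stream(cstream, &out);
+ *		if (zstd_is_error(left))
+ *			return -EINVAL;
+ *	} while (left != 0);
+ */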
101012 +/* ======   Streaming Decompression   ====== */
101015 -/*-*****************************************************************************
101016 - * Streaming decompression - HowTo
101018 - * A ZSTD_DStream object is required to track streaming operations.
101019 - * Use ZSTD_initDStream() to initialize a ZSTD_DStream object.
101020 - * ZSTD_DStream objects can be re-used multiple times.
101022 - * Use ZSTD_decompressStream() repetitively to consume your input.
101023 - * The function will update both `pos` fields.
101024 - * If `input->pos < input->size`, some input has not been consumed.
101025 - * It's up to the caller to present again remaining data.
101026 - * If `output->pos < output->size`, decoder has flushed everything it could.
101027 - * Returns 0 iff a frame is completely decoded and fully flushed.
101028 - * Otherwise it returns a suggested next input size that will never load more
101029 - * than the current frame.
101030 - ******************************************************************************/
101031 +typedef ZSTD_DStream zstd_dstream;
101033  /**
101034 - * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream
101035 - * @maxWindowSize: The maximum window size allowed for compressed frames.
101036 + * zstd_dstream_workspace_bound() - memory needed to initialize a zstd_dstream
101037 + * @max_window_size: The maximum window size allowed for compressed frames.
101038   *
101039 - * Return:         A lower bound on the size of the workspace that is passed to
101040 - *                 ZSTD_initDStream() and ZSTD_initDStream_usingDDict().
101041 + * Return:           A lower bound on the size of the workspace that is passed
101042 + *                   to zstd_init_dstream().
101043   */
101044 -size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize);
101045 +size_t zstd_dstream_workspace_bound(size_t max_window_size);
101047  /**
101048 - * struct ZSTD_DStream - the zstd streaming decompression context
101049 - */
101050 -typedef struct ZSTD_DStream_s ZSTD_DStream;
101051 -/*===== ZSTD_DStream management functions =====*/
101053 - * ZSTD_initDStream() - initialize a zstd streaming decompression context
101054 - * @maxWindowSize: The maximum window size allowed for compressed frames.
101055 - * @workspace:     The workspace to emplace the context into. It must outlive
101056 - *                 the returned context.
101057 - * @workspaceSize: The size of workspace.
101058 - *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
101059 - *                 how large the workspace must be.
101061 - * Return:         The zstd streaming decompression context.
101062 - */
101063 -ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace,
101064 -       size_t workspaceSize);
101066 - * ZSTD_initDStream_usingDDict() - initialize streaming decompression context
101067 - * @maxWindowSize: The maximum window size allowed for compressed frames.
101068 - * @ddict:         The digested dictionary to use for decompression.
101069 - * @workspace:     The workspace to emplace the context into. It must outlive
101070 - *                 the returned context.
101071 - * @workspaceSize: The size of workspace.
101072 - *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
101073 - *                 how large the workspace must be.
101074 + * zstd_init_dstream() - initialize a zstd streaming decompression context
101075 + * @max_window_size: The maximum window size allowed for compressed frames.
101076 + * @workspace:       The workspace to emplace the context into. It must outlive
101077 + *                   the returned context.
101078 + * @workspace_size:  The size of the workspace.
101079 + *                   Use zstd_dstream_workspace_bound(max_window_size) to
101080 + *                   determine how large the workspace must be.
101081   *
101082 - * Return:         The zstd streaming decompression context.
101083 + * Return:           The zstd streaming decompression context.
101084   */
101085 -ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize,
101086 -       const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize);
101087 +zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
101088 +       size_t workspace_size);
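+/*
+ * Editor's sketch, not part of the original patch: sizing the workspace with
+ * zstd_dstream_workspace_bound() before creating the context. The 8 MB
+ * window cap is an assumed application policy, and kvmalloc()/kvfree() are
+ * just one possible allocator choice:
+ *
+ *	size_t max_window_size = 8 * 1024 * 1024;
+ *	size_t ws_size = zstd_dstream_workspace_bound(max_window_size);
+ *	void *ws = kvmalloc(ws_size, GFP_KERNEL);
+ *	zstd_dstream *dstream;
+ *
+ *	if (!ws)
+ *		return -ENOMEM;
+ *	dstream = zstd_init_dstream(max_window_size, ws, ws_size);
+ *	if (!dstream) {
+ *		kvfree(ws);
+ *		return -EINVAL;
+ *	}
+ */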
101090 -/*===== Streaming decompression functions =====*/
101091  /**
101092 - * ZSTD_resetDStream() - reset the context using parameters from creation
101093 - * @zds:   The zstd streaming decompression context to reset.
101094 + * zstd_reset_dstream() - reset the context using parameters from creation
101095 + * @dstream: The zstd streaming decompression context to reset.
101096   *
101097   * Resets the context using the parameters from creation. Skips dictionary
101098   * loading, since it can be reused.
101099   *
101100 - * Return: Zero or an error, which can be checked using ZSTD_isError().
101101 + * Return:   Zero or an error, which can be checked using zstd_is_error().
101102   */
101103 -size_t ZSTD_resetDStream(ZSTD_DStream *zds);
101104 +size_t zstd_reset_dstream(zstd_dstream *dstream);
101106  /**
101107 - * ZSTD_decompressStream() - streaming decompress some of input into output
101108 - * @zds:    The zstd streaming decompression context.
101109 - * @output: Destination buffer. `output.pos` is updated to indicate how much
101110 - *          decompressed data was written.
101111 - * @input:  Source buffer. `input.pos` is updated to indicate how much data was
101112 - *          read. Note that it may not consume the entire input, in which case
101113 - *          `input.pos < input.size`, and it's up to the caller to present
101114 - *          remaining data again.
101115 + * zstd_decompress_stream() - streaming decompress some of input into output
101116 + * @dstream: The zstd streaming decompression context.
101117 + * @output:  Destination buffer. `output.pos` is updated to indicate how much
101118 + *           decompressed data was written.
101119 + * @input:   Source buffer. `input.pos` is updated to indicate how much data was
101120 + *           read. Note that it may not consume the entire input, in which case
101121 + *           `input.pos < input.size`, and it's up to the caller to present
101122 + *           remaining data again.
101123   *
101124   * The `input` and `output` buffers may be any size. Guaranteed to make some
101125   * forward progress if `input` and `output` are not empty.
101126 - * ZSTD_decompressStream() will not consume the last byte of the frame until
101127 + * zstd_decompress_stream() will not consume the last byte of the frame until
101128   * the entire frame is flushed.
101129   *
101130 - * Return:  Returns 0 iff a frame is completely decoded and fully flushed.
101131 - *          Otherwise returns a hint for the number of bytes to use as the input
101132 - *          for the next function call or an error, which can be checked using
101133 - *          ZSTD_isError(). The size hint will never load more than the frame.
101134 + * Return:   Returns 0 iff a frame is completely decoded and fully flushed.
101135 + *           Otherwise returns a hint for the number of bytes to use as the
101136 + *           input for the next function call or an error, which can be checked
101137 + *           using zstd_is_error(). The size hint will never load more than the
101138 + *           frame.
101139   */
101140 -size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output,
101141 -       ZSTD_inBuffer *input);
101142 +size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
101143 +       zstd_in_buffer *input);
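+/*
+ * Editor's sketch, not part of the original patch: decoding one frame.
+ * Per the contract above, a return of 0 means the frame is fully decoded
+ * and flushed; on a non-zero hint the caller refills `in` (and drains `out`)
+ * before the next call:
+ *
+ *	zstd_in_buffer in = { .src = src, .size = src_size, .pos = 0 };
+ *	zstd_out_buffer out = { .dst = dst, .size = dst_capacity, .pos = 0 };
+ *	size_t ret;
+ *
+ *	do {
+ *		ret = zstd_decompress_stream(dstream, &out, &in);
+ *		if (zstd_is_error(ret))
+ *			return -EIO;
+ *	} while (ret != 0);
+ */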
101146 - * ZSTD_DStreamInSize() - recommended size for the input buffer
101148 - * Return: The recommended size for the input buffer.
101149 - */
101150 -size_t ZSTD_DStreamInSize(void);
101152 - * ZSTD_DStreamOutSize() - recommended size for the output buffer
101154 - * When the output buffer is at least this large, it is guaranteed to be large
101155 - * enough to flush at least one complete decompressed block.
101157 - * Return: The recommended size for the output buffer.
101158 - */
101159 -size_t ZSTD_DStreamOutSize(void);
101162 -/* --- Constants ---*/
101163 -#define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
101164 -#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
101166 -#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
101167 -#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
101169 -#define ZSTD_WINDOWLOG_MAX_32  27
101170 -#define ZSTD_WINDOWLOG_MAX_64  27
101171 -#define ZSTD_WINDOWLOG_MAX \
101172 -       ((unsigned int)(sizeof(size_t) == 4 \
101173 -               ? ZSTD_WINDOWLOG_MAX_32 \
101174 -               : ZSTD_WINDOWLOG_MAX_64))
101175 -#define ZSTD_WINDOWLOG_MIN 10
101176 -#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
101177 -#define ZSTD_HASHLOG_MIN        6
101178 -#define ZSTD_CHAINLOG_MAX     (ZSTD_WINDOWLOG_MAX+1)
101179 -#define ZSTD_CHAINLOG_MIN      ZSTD_HASHLOG_MIN
101180 -#define ZSTD_HASHLOG3_MAX      17
101181 -#define ZSTD_SEARCHLOG_MAX    (ZSTD_WINDOWLOG_MAX-1)
101182 -#define ZSTD_SEARCHLOG_MIN      1
101183 -/* only for ZSTD_fast, other strategies are limited to 6 */
101184 -#define ZSTD_SEARCHLENGTH_MAX   7
101185 -/* only for ZSTD_btopt, other strategies are limited to 4 */
101186 -#define ZSTD_SEARCHLENGTH_MIN   3
101187 -#define ZSTD_TARGETLENGTH_MIN   4
101188 -#define ZSTD_TARGETLENGTH_MAX 999
101190 -/* for static allocation */
101191 -#define ZSTD_FRAMEHEADERSIZE_MAX 18
101192 -#define ZSTD_FRAMEHEADERSIZE_MIN  6
101193 -#define ZSTD_frameHeaderSize_prefix 5
101194 -#define ZSTD_frameHeaderSize_min ZSTD_FRAMEHEADERSIZE_MIN
101195 -#define ZSTD_frameHeaderSize_max ZSTD_FRAMEHEADERSIZE_MAX
101196 -/* magic number + skippable frame length */
101197 -#define ZSTD_skippableHeaderSize 8
101200 -/*-*************************************
101201 - * Compressed size functions
101202 - **************************************/
101205 - * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
101206 - * @src:     Source buffer. It should point to the start of a zstd encoded frame
101207 - *           or a skippable frame.
101208 - * @srcSize: The size of the source buffer. It must be at least as large as the
101209 - *           size of the frame.
101211 - * Return:   The compressed size of the frame pointed to by `src` or an error,
101212 - *           which can be check with ZSTD_isError().
101213 - *           Suitable to pass to ZSTD_decompress() or similar functions.
101214 - */
101215 -size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
101217 -/*-*************************************
101218 - * Decompressed size functions
101219 - **************************************/
101221 - * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
101222 - * @src:     It should point to the start of a zstd encoded frame.
101223 - * @srcSize: The size of the source buffer. It must be at least as large as the
101224 - *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
101226 - * Return:   The frame content size stored in the frame header if known.
101227 - *           `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
101228 - *           frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
101229 - */
101230 -unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
101231 +/* ======   Frame Inspection Functions ====== */
101233  /**
101234 - * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
101235 - * @src:     It should point to the start of a series of zstd encoded and/or
101236 - *           skippable frames.
101237 - * @srcSize: The exact size of the series of frames.
101238 + * zstd_find_frame_compressed_size() - returns the size of a compressed frame
101239 + * @src:      Source buffer. It should point to the start of a zstd encoded
101240 + *            frame or a skippable frame.
101241 + * @src_size: The size of the source buffer. It must be at least as large as the
101242 + *            size of the frame.
101243   *
101244 - * If any zstd encoded frame in the series doesn't have the frame content size
101245 - * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always
101246 - * set when using ZSTD_compress(). The decompressed size can be very large.
101247 - * If the source is untrusted, the decompressed size could be wrong or
101248 - * intentionally modified. Always ensure the result fits within the
101249 - * application's authorized limits. ZSTD_findDecompressedSize() handles multiple
101250 - * frames, and so it must traverse the input to read each frame header. This is
101251 - * efficient as most of the data is skipped, however it does mean that all frame
101252 - * data must be present and valid.
101254 - * Return:   Decompressed size of all the data contained in the frames if known.
101255 - *           `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown.
101256 - *           `ZSTD_CONTENTSIZE_ERROR` if an error occurred.
101257 - */
101258 -unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize);
101260 -/*-*************************************
101261 - * Advanced compression functions
101262 - **************************************/
101264 - * ZSTD_checkCParams() - ensure parameter values remain within authorized range
101265 - * @cParams: The zstd compression parameters.
101267 - * Return:   Zero or an error, which can be checked using ZSTD_isError().
101268 + * Return:    The compressed size of the frame pointed to by `src` or an error,
101269 + *            which can be checked using zstd_is_error().
101270 + *            Suitable to pass to zstd_decompress() or similar functions.
101271   */
101272 -size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams);
101273 +size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
101275  /**
101276 - * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize
101277 - * @srcSize:  Optionally the estimated source size, or zero if unknown.
101278 - * @dictSize: Optionally the estimated dictionary size, or zero if unknown.
101280 - * Return:    The optimized parameters.
101281 - */
101282 -ZSTD_compressionParameters ZSTD_adjustCParams(
101283 -       ZSTD_compressionParameters cParams, unsigned long long srcSize,
101284 -       size_t dictSize);
101286 -/*--- Advanced decompression functions ---*/
101289 - * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame
101290 - * @buffer: The source buffer to check.
101291 - * @size:   The size of the source buffer, must be at least 4 bytes.
101293 - * Return: True iff the buffer starts with a zstd or skippable frame identifier.
101294 - */
101295 -unsigned int ZSTD_isFrame(const void *buffer, size_t size);
101298 - * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary
101299 - * @dict:     The dictionary buffer.
101300 - * @dictSize: The size of the dictionary buffer.
101302 - * Return:    The dictionary id stored within the dictionary or 0 if the
101303 - *            dictionary is not a zstd dictionary. If it returns 0 the
101304 - *            dictionary can still be loaded as a content-only dictionary.
101305 - */
101306 -unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize);
101309 - * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict
101310 - * @ddict: The ddict to find the id of.
101312 - * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not
101313 - *         a zstd dictionary. If it returns 0 `ddict` will be loaded as a
101314 - *         content-only dictionary.
101315 - */
101316 -unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
101319 - * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
101320 - * @src:     Source buffer. It must be a zstd encoded frame.
101321 - * @srcSize: The size of the source buffer. It must be at least as large as the
101322 - *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
101324 - * Return:   The dictionary id required to decompress the frame stored within
101325 - *           `src` or 0 if the dictionary id could not be decoded. It can return
101326 - *           0 if the frame does not require a dictionary, the dictionary id
101327 - *           wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
101328 - *           is too small.
101329 - */
101330 -unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
101333 - * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
101334 - * @frameContentSize: The frame content size, or 0 if not present.
101335 + * struct zstd_frame_params - zstd frame parameters stored in the frame header
101336 + * @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not
101337 + *                    present.
101338   * @windowSize:       The window size, or 0 if the frame is a skippable frame.
101339 + * @blockSizeMax:     The maximum block size.
101340 + * @frameType:        The frame type (zstd or skippable).
101341 + * @headerSize:       The size of the frame header.
101342   * @dictID:           The dictionary id, or 0 if not present.
101343   * @checksumFlag:     Whether a checksum was used.
101345 + * See zstd_lib.h.
101346   */
101347 -typedef struct {
101348 -       unsigned long long frameContentSize;
101349 -       unsigned int windowSize;
101350 -       unsigned int dictID;
101351 -       unsigned int checksumFlag;
101352 -} ZSTD_frameParams;
101353 +typedef ZSTD_frameHeader zstd_frame_header;
101355  /**
101356 - * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
101357 - * @fparamsPtr: On success the frame parameters are written here.
101358 - * @src:        The source buffer. It must point to a zstd or skippable frame.
101359 - * @srcSize:    The size of the source buffer. `ZSTD_frameHeaderSize_max` is
101360 - *              always large enough to succeed.
101361 + * zstd_get_frame_header() - extracts parameters from a zstd or skippable frame
101362 + * @params:   On success the frame parameters are written here.
101363 + * @src:      The source buffer. It must point to a zstd or skippable frame.
101364 + * @src_size: The size of the source buffer.
101365   *
101366 - * Return:      0 on success. If more data is required it returns how many bytes
101367 - *              must be provided to make forward progress. Otherwise it returns
101368 - *              an error, which can be checked using ZSTD_isError().
101369 + * Return:    0 on success. If more data is required it returns how many bytes
101370 + *            must be provided to make forward progress. Otherwise it returns
101371 + *            an error, which can be checked using zstd_is_error().
101372   */
101373 -size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
101374 -       size_t srcSize);
101376 -/*-*****************************************************************************
101377 - * Buffer-less and synchronous inner streaming functions
101379 - * This is an advanced API, giving full control over buffer management, for
101380 - * users which need direct control over memory.
101381 - * But it's also a complex one, with many restrictions (documented below).
101382 - * Prefer using normal streaming API for an easier experience
101383 - ******************************************************************************/
101385 -/*-*****************************************************************************
101386 - * Buffer-less streaming compression (synchronous mode)
101388 - * A ZSTD_CCtx object is required to track streaming operations.
101389 - * Use ZSTD_initCCtx() to initialize a context.
101390 - * ZSTD_CCtx object can be re-used multiple times within successive compression
101391 - * operations.
101393 - * Start by initializing a context.
101394 - * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
101395 - * compression,
101396 - * or ZSTD_compressBegin_advanced(), for finer parameter control.
101397 - * It's also possible to duplicate a reference context which has already been
101398 - * initialized, using ZSTD_copyCCtx()
101400 - * Then, consume your input using ZSTD_compressContinue().
101401 - * There are some important considerations to keep in mind when using this
101402 - * advanced function :
101403 - * - ZSTD_compressContinue() has no internal buffer. It uses externally provided
101404 - *   buffer only.
101405 - * - Interface is synchronous : input is consumed entirely and produce 1+
101406 - *   (or more) compressed blocks.
101407 - * - Caller must ensure there is enough space in `dst` to store compressed data
101408 - *   under worst case scenario. Worst case evaluation is provided by
101409 - *   ZSTD_compressBound().
101410 - *   ZSTD_compressContinue() doesn't guarantee recover after a failed
101411 - *   compression.
101412 - * - ZSTD_compressContinue() presumes prior input ***is still accessible and
101413 - *   unmodified*** (up to maximum distance size, see WindowLog).
101414 - *   It remembers all previous contiguous blocks, plus one separated memory
101415 - *   segment (which can itself consists of multiple contiguous blocks)
101416 - * - ZSTD_compressContinue() detects that prior input has been overwritten when
101417 - *   `src` buffer overlaps. In which case, it will "discard" the relevant memory
101418 - *   section from its history.
101420 - * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
101421 - * and optional checksum. It's possible to use srcSize==0, in which case, it
101422 - * will write a final empty block to end the frame. Without last block mark,
101423 - * frames will be considered unfinished (corrupted) by decoders.
101425 - * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new
101426 - * frame.
101427 - ******************************************************************************/
101429 -/*=====   Buffer-less streaming compression functions  =====*/
101430 -size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
101431 -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
101432 -       size_t dictSize, int compressionLevel);
101433 -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
101434 -       size_t dictSize, ZSTD_parameters params,
101435 -       unsigned long long pledgedSrcSize);
101436 -size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
101437 -       unsigned long long pledgedSrcSize);
101438 -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
101439 -       unsigned long long pledgedSrcSize);
101440 -size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
101441 -       const void *src, size_t srcSize);
101442 -size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
101443 -       const void *src, size_t srcSize);
101447 -/*-*****************************************************************************
101448 - * Buffer-less streaming decompression (synchronous mode)
101450 - * A ZSTD_DCtx object is required to track streaming operations.
101451 - * Use ZSTD_initDCtx() to initialize a context.
101452 - * A ZSTD_DCtx object can be re-used multiple times.
101454 - * First typical operation is to retrieve frame parameters, using
101455 - * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide
101456 - * important information to correctly decode the frame, such as the minimum
101457 - * rolling buffer size to allocate to decompress data (`windowSize`), and the
101458 - * dictionary ID used.
101459 - * Note: content size is optional, it may not be present. 0 means unknown.
101460 - * Note that these values could be wrong, either because of data malformation,
101461 - * or because an attacker is spoofing deliberate false information. As a
101462 - * consequence, check that values remain within valid application range,
101463 - * especially `windowSize`, before allocation. Each application can set its own
101464 - * limit, depending on local restrictions. For extended interoperability, it is
101465 - * recommended to support at least 8 MB.
101466 - * Frame parameters are extracted from the beginning of the compressed frame.
101467 - * Data fragment must be large enough to ensure successful decoding, typically
101468 - * `ZSTD_frameHeaderSize_max` bytes.
101469 - * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled.
101470 - *        >0: `srcSize` is too small, provide at least this many bytes.
101471 - *        errorCode, which can be tested using ZSTD_isError().
101473 - * Start decompression, with ZSTD_decompressBegin() or
101474 - * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
101475 - * context, using ZSTD_copyDCtx().
101477 - * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
101478 - * alternatively.
101479 - * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
101480 - * to ZSTD_decompressContinue().
101481 - * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will
101482 - * fail.
101484 - * The result of ZSTD_decompressContinue() is the number of bytes regenerated
101485 - * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
101486 - * error; it just means ZSTD_decompressContinue() has decoded some metadata
101487 - * item. It can also be an error code, which can be tested with ZSTD_isError().
101489 - * ZSTD_decompressContinue() needs previous data blocks during decompression, up
101490 - * to `windowSize`. They should preferably be located contiguously, prior to
101491 - * current block. Alternatively, a round buffer of sufficient size is also
101492 - * possible. Sufficient size is determined by frame parameters.
101493 - * ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't
101494 - * follow each other, make sure that either the compressor breaks contiguity at
101495 - * the same place, or that previous contiguous segment is large enough to
101496 - * properly handle maximum back-reference.
101498 - * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
101499 - * Context can then be reset to start a new decompression.
101501 - * Note: it's possible to know if next input to present is a header or a block,
101502 - * using ZSTD_nextInputType(). This information is not required to properly
101503 - * decode a frame.
101505 - * == Special case: skippable frames ==
101507 - * Skippable frames allow integration of user-defined data into a flow of
101508 - * concatenated frames. Skippable frames will be ignored (skipped) by a
101509 - * decompressor. The format of skippable frames is as follows:
101510 - * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
101511 - *    0x184D2A50 to 0x184D2A5F
101512 - * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
101513 - * c) Frame Content - any content (User Data) of length equal to Frame Size
101514 - * For skippable frames ZSTD_decompressContinue() always returns 0.
101515 - * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0
101516 - * what means that a frame is skippable.
101517 - * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
101518 - *       actually be a zstd encoded frame with no content. For purposes of
101519 - *       decompression, it is valid in both cases to skip the frame using
101520 - *       ZSTD_findFrameCompressedSize() to find its size in bytes.
101521 - * It also returns frame size as fparamsPtr->frameContentSize.
101522 - ******************************************************************************/
101524 -/*=====   Buffer-less streaming decompression functions  =====*/
101525 -size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
101526 -size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
101527 -       size_t dictSize);
101528 -void   ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
101529 -size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
101530 -size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
101531 -       const void *src, size_t srcSize);
101532 -typedef enum {
101533 -       ZSTDnit_frameHeader,
101534 -       ZSTDnit_blockHeader,
101535 -       ZSTDnit_block,
101536 -       ZSTDnit_lastBlock,
101537 -       ZSTDnit_checksum,
101538 -       ZSTDnit_skippableFrame
101539 -} ZSTD_nextInputType_e;
101540 -ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
101542 -/*-*****************************************************************************
101543 - * Block functions
101545 - * Block functions produce and decode raw zstd blocks, without frame metadata.
101546 - * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
101547 - * very small blocks (< 100 bytes). User will have to take in charge required
101548 - * information to regenerate data, such as compressed and content sizes.
101550 - * A few rules to respect:
101551 - * - Compressing and decompressing require a context structure
101552 - *   + Use ZSTD_initCCtx() and ZSTD_initDCtx()
101553 - * - It is necessary to init context before starting
101554 - *   + compression : ZSTD_compressBegin()
101555 - *   + decompression : ZSTD_decompressBegin()
101556 - *   + variants _usingDict() are also allowed
101557 - *   + copyCCtx() and copyDCtx() work too
101558 - * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
101559 - *   + If you need to compress more, cut data into multiple blocks
101560 - *   + Consider using the regular ZSTD_compress() instead, as frame metadata
101561 - *     costs become negligible when source size is large.
101562 - * - When a block is considered not compressible enough, ZSTD_compressBlock()
101563 - *   result will be zero. In which case, nothing is produced into `dst`.
101564 - *   + User must test for such outcome and deal directly with uncompressed data
101565 - *   + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!!
101566 - *   + In case of multiple successive blocks, decoder must be informed of
101567 - *     uncompressed block existence to follow proper history. Use
101568 - *     ZSTD_insertBlock() in such a case.
101569 - ******************************************************************************/
101571 -/* Define for static allocation */
101572 -#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)
101573 -/*=====   Raw zstd block functions  =====*/
101574 -size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx);
101575 -size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
101576 -       const void *src, size_t srcSize);
101577 -size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
101578 -       const void *src, size_t srcSize);
101579 -size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart,
101580 -       size_t blockSize);
101581 +size_t zstd_get_frame_header(zstd_frame_header *params, const void *src,
101582 +       size_t src_size);
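+/*
+ * Editor's sketch, not part of the original patch: using
+ * zstd_get_frame_header() to validate a frame before allocating
+ * decompression state. The window-size cap is an assumed caller policy:
+ *
+ *	zstd_frame_header params;
+ *	size_t ret = zstd_get_frame_header(&params, src, src_size);
+ *
+ *	if (ret != 0)
+ *		return -EINVAL;
+ *	if (params.windowSize > 8 * 1024 * 1024)
+ *		return -E2BIG;
+ *
+ * (A non-zero, non-error return means more header bytes are required.)
+ */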
101584 -#endif  /* ZSTD_H */
101585 +#endif  /* LINUX_ZSTD_H */
101586 diff --git a/include/linux/zstd_errors.h b/include/linux/zstd_errors.h
101587 new file mode 100644
101588 index 000000000000..ccb92064ef03
101589 --- /dev/null
101590 +++ b/include/linux/zstd_errors.h
101591 @@ -0,0 +1,77 @@
101593 + * Copyright (c) Yann Collet, Facebook, Inc.
101594 + * All rights reserved.
101596 + * This source code is licensed under both the BSD-style license (found in the
101597 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
101598 + * in the COPYING file in the root directory of this source tree).
101599 + * You may select, at your option, one of the above-listed licenses.
101600 + */
101602 +#ifndef ZSTD_ERRORS_H_398273423
101603 +#define ZSTD_ERRORS_H_398273423
101606 +/*===== dependency =====*/
101607 +#include <linux/types.h>   /* size_t */
101610 +/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
101611 +#define ZSTDERRORLIB_VISIBILITY
101612 +#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
101614 +/*-*********************************************
101615 + *  Error codes list
101616 + *-*********************************************
101617 + *  Error code _values_ are pinned down only since v1.3.1.
101618 + *  Therefore, don't rely on values if you may link to any version < v1.3.1.
101620 + *  Only values < 100 are considered stable.
101622 + *  note 1 : this API shall be used with static linking only.
101623 + *           dynamic linking is not yet officially supported.
101624 + *  note 2 : Prefer relying on the enum than on its value whenever possible
101625 + *           This is the only supported way to use the error list < v1.3.1
101626 + *  note 3 : ZSTD_isError() is always correct, whatever the library version.
101627 + **********************************************/
101628 +typedef enum {
101629 +  ZSTD_error_no_error = 0,
101630 +  ZSTD_error_GENERIC  = 1,
101631 +  ZSTD_error_prefix_unknown                = 10,
101632 +  ZSTD_error_version_unsupported           = 12,
101633 +  ZSTD_error_frameParameter_unsupported    = 14,
101634 +  ZSTD_error_frameParameter_windowTooLarge = 16,
101635 +  ZSTD_error_corruption_detected = 20,
101636 +  ZSTD_error_checksum_wrong      = 22,
101637 +  ZSTD_error_dictionary_corrupted      = 30,
101638 +  ZSTD_error_dictionary_wrong          = 32,
101639 +  ZSTD_error_dictionaryCreation_failed = 34,
101640 +  ZSTD_error_parameter_unsupported   = 40,
101641 +  ZSTD_error_parameter_outOfBound    = 42,
101642 +  ZSTD_error_tableLog_tooLarge       = 44,
101643 +  ZSTD_error_maxSymbolValue_tooLarge = 46,
101644 +  ZSTD_error_maxSymbolValue_tooSmall = 48,
101645 +  ZSTD_error_stage_wrong       = 60,
101646 +  ZSTD_error_init_missing      = 62,
101647 +  ZSTD_error_memory_allocation = 64,
101648 +  ZSTD_error_workSpace_tooSmall = 66,
101649 +  ZSTD_error_dstSize_tooSmall = 70,
101650 +  ZSTD_error_srcSize_wrong    = 72,
101651 +  ZSTD_error_dstBuffer_null   = 74,
101652 +  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
101653 +  ZSTD_error_frameIndex_tooLarge = 100,
101654 +  ZSTD_error_seekableIO          = 102,
101655 +  ZSTD_error_dstBuffer_wrong     = 104,
101656 +  ZSTD_error_srcBuffer_wrong     = 105,
101657 +  ZSTD_error_maxCode = 120  /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
101658 +} ZSTD_ErrorCode;
101660 +/*! ZSTD_getErrorCode() :
101661 +    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
101662 +    which can be used to compare with enum list published above */
101663 +ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
101664 +ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);   /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
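+/*
+ * Editor's sketch, not part of the original patch: mapping a size_t result
+ * onto the stable error enum above:
+ *
+ *	size_t ret = ZSTD_decompress(dst, dst_capacity, src, src_size);
+ *
+ *	if (ZSTD_isError(ret)) {
+ *		if (ZSTD_getErrorCode(ret) == ZSTD_error_dstSize_tooSmall)
+ *			return -ENOSPC;
+ *		return -EINVAL;
+ *	}
+ */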
101668 +#endif /* ZSTD_ERRORS_H_398273423 */
101669 diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h
101670 new file mode 100644
101671 index 000000000000..d81779076217
101672 --- /dev/null
101673 +++ b/include/linux/zstd_lib.h
101674 @@ -0,0 +1,2431 @@
101676 + * Copyright (c) Yann Collet, Facebook, Inc.
101677 + * All rights reserved.
101679 + * This source code is licensed under both the BSD-style license (found in the
101680 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
101681 + * in the COPYING file in the root directory of this source tree).
101682 + * You may select, at your option, one of the above-listed licenses.
101683 + */
101685 +#ifndef ZSTD_H_235446
101686 +#define ZSTD_H_235446
101688 +/* ======   Dependency   ======*/
101689 +#include <linux/limits.h>   /* INT_MAX */
101690 +#include <linux/types.h>   /* size_t */
101693 +/* =====   ZSTDLIB_API : control library symbols visibility   ===== */
101694 +#define ZSTDLIB_VISIBILITY
101695 +#define ZSTDLIB_API ZSTDLIB_VISIBILITY
101698 +/*******************************************************************************
101699 +  Introduction
101701 +  zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
101702 +  real-time compression scenarios at zlib-level and better compression ratios.
101703 +  The zstd compression library provides in-memory compression and decompression
101704 +  functions.
101706 +  The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
101707 +  which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
101708 +  caution, as they require more memory. The library also offers negative
101709 +  compression levels, which extend the range of speed vs. ratio preferences.
101710 +  The lower the level, the faster the speed (at the cost of compression).
101712 +  Compression can be done in:
101713 +    - a single step (described as Simple API)
101714 +    - a single step, reusing a context (described as Explicit context)
101715 +    - unbounded multiple steps (described as Streaming compression)
101717 +  The compression ratio achievable on small data can be highly improved using
101718 +  a dictionary. Dictionary compression can be performed in:
101719 +    - a single step (described as Simple dictionary API)
101720 +    - a single step, reusing a dictionary (described as Bulk-processing
101721 +      dictionary API)
101723 +  Advanced experimental functions can be accessed using
101724 +  `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
101726 +  Advanced experimental APIs should never be used with a dynamically-linked
101727 +  library. They are not "stable"; their definitions or signatures may change in
101728 +  the future. Only static linking is allowed.
101729 +*******************************************************************************/
101731 +/*------   Version   ------*/
101732 +#define ZSTD_VERSION_MAJOR    1
101733 +#define ZSTD_VERSION_MINOR    4
101734 +#define ZSTD_VERSION_RELEASE  10
101735 +#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
101737 +/*! ZSTD_versionNumber() :
101738 + *  Return the runtime library version; the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */
101739 +ZSTDLIB_API unsigned ZSTD_versionNumber(void);
101741 +#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
101742 +#define ZSTD_QUOTE(str) #str
101743 +#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
101744 +#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
101746 +/*! ZSTD_versionString() :
101747 + *  Return runtime library version, like "1.4.5". Requires v1.3.0+. */
101748 +ZSTDLIB_API const char* ZSTD_versionString(void);
101750 +/* *************************************
101751 + *  Default constant
101752 + ***************************************/
101753 +#ifndef ZSTD_CLEVEL_DEFAULT
101754 +#  define ZSTD_CLEVEL_DEFAULT 3
101755 +#endif
101757 +/* *************************************
101758 + *  Constants
101759 + ***************************************/
101761 +/* All magic numbers are supposed to be read/written to/from files/memory using little-endian convention */
101762 +#define ZSTD_MAGICNUMBER            0xFD2FB528    /* valid since v0.8.0 */
101763 +#define ZSTD_MAGIC_DICTIONARY       0xEC30A437    /* valid since v0.7.0 */
101764 +#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50    /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
101765 +#define ZSTD_MAGIC_SKIPPABLE_MASK   0xFFFFFFF0
101767 +#define ZSTD_BLOCKSIZELOG_MAX  17
101768 +#define ZSTD_BLOCKSIZE_MAX     (1<<ZSTD_BLOCKSIZELOG_MAX)
101772 +/***************************************
101773 +*  Simple API
101774 +***************************************/
101775 +/*! ZSTD_compress() :
101776 + *  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
101777 + *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
101778 + *  @return : compressed size written into `dst` (<= `dstCapacity`),
101779 + *            or an error code if it fails (which can be tested using ZSTD_isError()). */
101780 +ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
101781 +                            const void* src, size_t srcSize,
101782 +                                  int compressionLevel);
101784 +/*! ZSTD_decompress() :
101785 + *  `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
101786 + *  `dstCapacity` is an upper bound of originalSize to regenerate.
101787 + *  If the user cannot infer a maximum upper bound, it's better to use streaming mode to decompress data.
101788 + *  @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
101789 + *            or an errorCode if it fails (which can be tested using ZSTD_isError()). */
101790 +ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
101791 +                              const void* src, size_t compressedSize);
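+/*
+ * Editor's sketch, not part of the original patch: a single-shot round trip.
+ * `cbuf` is assumed to hold at least ZSTD_compressBound(src_size) bytes (the
+ * helper is declared below), and `dbuf` at least src_size bytes:
+ *
+ *	size_t csize = ZSTD_compress(cbuf, cbuf_size, src, src_size,
+ *				     ZSTD_CLEVEL_DEFAULT);
+ *
+ *	if (ZSTD_isError(csize))
+ *		return -EINVAL;
+ *	if (ZSTD_isError(ZSTD_decompress(dbuf, src_size, cbuf, csize)))
+ *		return -EINVAL;
+ */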
101793 +/*! ZSTD_getFrameContentSize() : requires v1.3.0+
101794 + *  `src` should point to the start of a ZSTD encoded frame.
101795 + *  `srcSize` must be at least as large as the frame header.
101796 + *            hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
101797 + *  @return : - decompressed size of `src` frame content, if known
101798 + *            - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
101799 + *            - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
101800 + *   note 1 : a 0 return value means the frame is valid but "empty".
101801 + *   note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
101802 + *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
101803 + *            In which case, it's necessary to use streaming mode to decompress data.
101804 + *            Optionally, application can rely on some implicit limit,
101805 + *            as ZSTD_decompress() only needs an upper bound of decompressed size.
101806 + *            (For example, the data might be known to be cut into blocks <= 16 KB.)
101807 + *   note 3 : decompressed size is always present when compression is completed using single-pass functions,
101808 + *            such as ZSTD_compress(), ZSTD_compressCCtx(), ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
101809 + *   note 4 : decompressed size can be very large (64-bits value),
101810 + *            potentially larger than what local system can handle as a single memory segment.
101811 + *            In which case, it's necessary to use streaming mode to decompress data.
101812 + *   note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
101813 + *            Always ensure return value fits within application's authorized limits.
101814 + *            Each application can set its own limits.
101815 + *   note 6 : This function replaces ZSTD_getDecompressedSize() */
101816 +#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
101817 +#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
101818 +ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
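+/*
+ * Editor's sketch, not part of the original patch: handling the three-way
+ * result described in the notes above. APP_LIMIT and
+ * decompress_streaming() stand in for a hypothetical caller-defined cap
+ * and streaming fallback:
+ *
+ *	unsigned long long csize = ZSTD_getFrameContentSize(src, src_size);
+ *
+ *	if (csize == ZSTD_CONTENTSIZE_ERROR)
+ *		return -EINVAL;
+ *	if (csize == ZSTD_CONTENTSIZE_UNKNOWN)
+ *		return decompress_streaming(src, src_size);
+ *	if (csize > APP_LIMIT)
+ *		return -E2BIG;
+ */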
101820 +/*! ZSTD_getDecompressedSize() :
101821 + *  NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
101822 + *  Both functions work the same way, but ZSTD_getDecompressedSize() blends
101823 + *  "empty", "unknown" and "error" results into the same return value (0),
101824 + *  while ZSTD_getFrameContentSize() gives them separate return values.
101825 + * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
101826 +ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
101828 +/*! ZSTD_findFrameCompressedSize() :
101829 + * `src` should point to the start of a ZSTD frame or skippable frame.
101830 + * `srcSize` must be >= first frame size
101831 + * @return : the compressed size of the first frame starting at `src`,
101832 + *           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
101833 + *        or an error code if input is invalid */
101834 +ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
101837 +/*======  Helper functions  ======*/
101838 +#define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
101839 +ZSTDLIB_API size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
101840 +ZSTDLIB_API unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
101841 +ZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
101842 +ZSTDLIB_API int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed */
101843 +ZSTDLIB_API int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
101846 +/***************************************
101847 +*  Explicit context
101848 +***************************************/
101849 +/*= Compression context
101850 + *  When compressing many times,
101851 + *  it is recommended to allocate a context just once,
101852 + *  and re-use it for each successive compression operation.
101853 + *  This makes the workload friendlier to the system's memory.
101854 + *  Note : re-using context is just a speed / resource optimization.
101855 + *         It doesn't change the compression ratio, which remains identical.
101856 + *  Note 2 : In multi-threaded environments,
101857 + *         use a separate context per thread for parallel execution.
101858 + */
101859 +typedef struct ZSTD_CCtx_s ZSTD_CCtx;
101860 +ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
101861 +ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);  /* accept NULL pointer */
101863 +/*! ZSTD_compressCCtx() :
101864 + *  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
101865 + *  Important : in order to behave similarly to `ZSTD_compress()`,
101866 + *  this function compresses at requested compression level,
101867 + *  __ignoring any other parameter__ .
101868 + *  If any advanced parameters were set using the advanced API,
101869 + *  they will all be reset. Only `compressionLevel` remains.
101870 + */
101871 +ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
101872 +                                     void* dst, size_t dstCapacity,
101873 +                               const void* src, size_t srcSize,
101874 +                                     int compressionLevel);
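+/*
+ * Editor's sketch, not part of the original patch: reusing one context
+ * across many compressions, as recommended above:
+ *
+ *	ZSTD_CCtx *cctx = ZSTD_createCCtx();
+ *	size_t i;
+ *
+ *	if (!cctx)
+ *		return -ENOMEM;
+ *	for (i = 0; i < nr_bufs; i++)
+ *		ZSTD_compressCCtx(cctx, dst[i], dst_cap[i],
+ *				  src[i], src_size[i], ZSTD_CLEVEL_DEFAULT);
+ *	ZSTD_freeCCtx(cctx);
+ *
+ * (Error checking of each ZSTD_compressCCtx() result is elided.)
+ */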
101876 +/*= Decompression context
101877 + *  When decompressing many times,
101878 + *  it is recommended to allocate a context only once,
101879 + *  and re-use it for each successive decompression operation.
101880 + *  This makes the workload friendlier to the system's memory.
101881 + *  Use one context per thread for parallel execution. */
101882 +typedef struct ZSTD_DCtx_s ZSTD_DCtx;
101883 +ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
101884 +ZSTDLIB_API size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);  /* accept NULL pointer */
101886 +/*! ZSTD_decompressDCtx() :
101887 + *  Same as ZSTD_decompress(),
101888 + *  requires an allocated ZSTD_DCtx.
101889 + *  Compatible with sticky parameters.
101890 + */
101891 +ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
101892 +                                       void* dst, size_t dstCapacity,
101893 +                                 const void* src, size_t srcSize);
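+/*
+ * Editor's sketch, not part of the original patch: the decompression-side
+ * twin of the context-reuse pattern above:
+ *
+ *	ZSTD_DCtx *dctx = ZSTD_createDCtx();
+ *	size_t ret;
+ *
+ *	if (!dctx)
+ *		return -ENOMEM;
+ *	ret = ZSTD_decompressDCtx(dctx, dst, dst_capacity, src, src_size);
+ *	ZSTD_freeDCtx(dctx);
+ *	return ZSTD_isError(ret) ? -EINVAL : 0;
+ */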
101896 +/***************************************
101897 +*  Advanced compression API
101898 +***************************************/
101900 +/* API design :
101901 + *   Parameters are pushed one by one into an existing context,
101902 + *   using ZSTD_CCtx_set*() functions.
101903 + *   Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
101904 + *   "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()`!
101905 + *   __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__.
101907 + *   It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
101909 + *   This API supersedes all other "advanced" API entry points in the experimental section.
101910 + *   In the future, we expect to remove from the experimental API any entry points which are redundant with this API.
101911 + */
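+/*
+ * Editor's sketch, not part of the original patch: the push-parameter flow
+ * described above. ZSTD_CCtx_setParameter(), ZSTD_compress2() and
+ * ZSTD_CCtx_reset() are declared further down in this header:
+ *
+ *	ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
+ *	ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableLongDistanceMatching, 1);
+ *	ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 27);
+ *	ZSTD_compress2(cctx, dst, dst_capacity, src, src_size);
+ *
+ * The parameters stick to `cctx` for subsequent frames until
+ * ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters) is called.
+ */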
101914 +/* Compression strategies, listed from fastest to strongest */
101915 +typedef enum { ZSTD_fast=1,
101916 +               ZSTD_dfast=2,
101917 +               ZSTD_greedy=3,
101918 +               ZSTD_lazy=4,
101919 +               ZSTD_lazy2=5,
101920 +               ZSTD_btlazy2=6,
101921 +               ZSTD_btopt=7,
101922 +               ZSTD_btultra=8,
101923 +               ZSTD_btultra2=9
101924 +               /* note : new strategies _might_ be added in the future.
101925 +                         Only the order (from fast to strong) is guaranteed */
101926 +} ZSTD_strategy;
101929 +typedef enum {
101931 +    /* compression parameters
101932 +     * Note: When compressing with a ZSTD_CDict these parameters are superseded
101933 +     * by the parameters used to construct the ZSTD_CDict.
101934 +     * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
101935 +    ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
101936 +                              * Note that exact compression parameters are dynamically determined,
101937 +                              * depending on both compression level and srcSize (when known).
101938 +                              * Default level is ZSTD_CLEVEL_DEFAULT==3.
101939 +                              * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
101940 +                              * Note 1 : it's possible to pass a negative compression level.
101941 +                              * Note 2 : setting a level does not automatically set all other compression parameters
101942 +                              *   to default. Setting this will however eventually dynamically impact the compression
101943 +                              *   parameters which have not been manually set. The manually set
101944 +                              *   ones will 'stick'. */
101945 +    /* Advanced compression parameters :
101946 +     * It's possible to pin down compression parameters to some specific values,
101947 +     * in which case these values are no longer dynamically selected by the compressor. */
101948 +    ZSTD_c_windowLog=101,    /* Maximum allowed back-reference distance, expressed as power of 2.
101949 +                              * This will set a memory budget for streaming decompression,
101950 +                              * with larger values requiring more memory
101951 +                              * and typically compressing more.
101952 +                              * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
101953 +                              * Special: value 0 means "use default windowLog".
101954 +                              * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
101955 +                              *       requires explicitly allowing such size at streaming decompression stage. */
101956 +    ZSTD_c_hashLog=102,      /* Size of the initial probe table, as a power of 2.
101957 +                              * Resulting memory usage is (1 << (hashLog+2)).
101958 +                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
101959 +                              * Larger tables improve compression ratio of strategies <= dFast,
101960 +                              * and improve speed of strategies > dFast.
101961 +                              * Special: value 0 means "use default hashLog". */
101962 +    ZSTD_c_chainLog=103,     /* Size of the multi-probe search table, as a power of 2.
101963 +                              * Resulting memory usage is (1 << (chainLog+2)).
101964 +                              * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
101965 +                              * Larger tables result in better and slower compression.
101966 +                              * This parameter is useless for "fast" strategy.
101967 +                              * It's still useful when using "dfast" strategy,
101968 +                              * in which case it defines a secondary probe table.
101969 +                              * Special: value 0 means "use default chainLog". */
101970 +    ZSTD_c_searchLog=104,    /* Number of search attempts, as a power of 2.
101971 +                              * More attempts result in better and slower compression.
101972 +                              * This parameter is useless for "fast" and "dFast" strategies.
101973 +                              * Special: value 0 means "use default searchLog". */
101974 +    ZSTD_c_minMatch=105,     /* Minimum size of searched matches.
101975 +                              * Note that Zstandard can still find matches of smaller size,
101976 +                              * it just tweaks its search algorithm to look for this size and larger.
101977 +                              * Larger values increase compression and decompression speed, but decrease ratio.
101978 +                              * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
101979 +                              * Note that currently, for all strategies < btopt, the effective minimum is 4;
101980 +                              *                      for all strategies > fast, the effective maximum is 6.
101981 +                              * Special: value 0 means "use default minMatchLength". */
101982 +    ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
101983 +                              * For strategies btopt, btultra & btultra2:
101984 +                              *     Length of Match considered "good enough" to stop search.
101985 +                              *     Larger values make compression stronger, and slower.
101986 +                              * For strategy fast:
101987 +                              *     Distance between match sampling.
101988 +                              *     Larger values make compression faster, and weaker.
101989 +                              * Special: value 0 means "use default targetLength". */
101990 +    ZSTD_c_strategy=107,     /* See ZSTD_strategy enum definition.
101991 +                              * The higher the value of selected strategy, the more complex it is,
101992 +                              * resulting in stronger and slower compression.
101993 +                              * Special: value 0 means "use default strategy". */
101995 +    /* LDM mode parameters */
101996 +    ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
101997 +                                     * This parameter is designed to improve compression ratio
101998 +                                     * for large inputs, by finding large matches at long distance.
101999 +                                     * It increases memory usage and window size.
102000 +                                     * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
102001 +                                     * except when expressly set to a different value.
102002 +                                     * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and
102003 +                                     * compression strategy >= ZSTD_btopt (== compression level 16+) */
102004 +    ZSTD_c_ldmHashLog=161,   /* Size of the table for long distance matching, as a power of 2.
102005 +                              * Larger values increase memory usage and compression ratio,
102006 +                              * but decrease compression speed.
102007 +                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
102008 +                              * default: windowlog - 7.
102009 +                              * Special: value 0 means "automatically determine hashlog". */
102010 +    ZSTD_c_ldmMinMatch=162,  /* Minimum match size for long distance matcher.
102011 +                              * Values that are too large or too small usually decrease compression ratio.
102012 +                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
102013 +                              * Special: value 0 means "use default value" (default: 64). */
102014 +    ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
102015 +                              * Larger values improve collision resolution but decrease compression speed.
102016 +                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
102017 +                              * Special: value 0 means "use default value" (default: 3). */
102018 +    ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
102019 +                              * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
102020 +                              * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
102021 +                              * Larger values improve compression speed.
102022 +                              * Deviating far from default value will likely result in a compression ratio decrease.
102023 +                              * Special: value 0 means "automatically determine hashRateLog". */
102025 +    /* frame parameters */
102026 +    ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
102027 +                              * Content size must be known at the beginning of compression.
102028 +                              * This is automatically the case when using ZSTD_compress2().
102029 +                              * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize(). */
102030 +    ZSTD_c_checksumFlag=201, /* A 32-bit checksum of content is written at end of frame (default:0) */
102031 +    ZSTD_c_dictIDFlag=202,   /* When applicable, dictionary's ID is written into frame header (default:1) */
102033 +    /* multi-threading parameters */
102034 +    /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
102035 +     * Otherwise, trying to set any value other than the default (0) is a no-op and returns an error.
102036 +     * In a situation where it's unknown if the linked library supports multi-threading or not,
102037 +     * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property (see the sketch after this enum).
102038 +     */
102039 +    ZSTD_c_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.
102040 +                              * When nbWorkers >= 1, invoking ZSTD_compressStream*() triggers asynchronous mode :
102041 +                              * ZSTD_compressStream*() consumes input and flushes output if possible, but immediately gives back control to the caller,
102042 +                              * while compression is performed in parallel, within worker thread(s).
102043 +                              * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
102044 +                              *  in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
102045 +                              * More workers improve speed, but also increase memory usage.
102046 +                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned,
102047 +                              * compression is performed inside the caller's thread, and all invocations are blocking */
102048 +    ZSTD_c_jobSize=401,      /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
102049 +                              * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
102050 +                              * 0 means default, which is dynamically determined based on compression parameters.
102051 +                              * Job size must be a minimum of overlap size, or 1 MB, whichever is larger.
102052 +                              * The minimum size is automatically and transparently enforced. */
102053 +    ZSTD_c_overlapLog=402,   /* Control the overlap size, as a fraction of window size.
102054 +                              * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
102055 +                              * It helps preserve compression ratio, while each job is compressed in parallel.
102056 +                              * This value is enforced only when nbWorkers >= 1.
102057 +                              * Larger values increase compression ratio, but decrease speed.
102058 +                              * Possible values range from 0 to 9 :
102059 +                              * - 0 means "default" : value will be determined by the library, depending on strategy
102060 +                              * - 1 means "no overlap"
102061 +                              * - 9 means "full overlap", using a full window size.
102062 +                              * Each intermediate rank increases/decreases load size by a factor 2 :
102063 +                              * 9: full window;  8: w/2;  7: w/4;  6: w/8;  5:w/16;  4: w/32;  3:w/64;  2:w/128;  1:no overlap;  0:default
102064 +                              * default value varies between 6 and 9, depending on strategy */
102066 +    /* note : additional experimental parameters are also available
102067 +     * within the experimental section of the API.
102068 +     * At the time of this writing, they include :
102069 +     * ZSTD_c_rsyncable
102070 +     * ZSTD_c_format
102071 +     * ZSTD_c_forceMaxWindow
102072 +     * ZSTD_c_forceAttachDict
102073 +     * ZSTD_c_literalCompressionMode
102074 +     * ZSTD_c_targetCBlockSize
102075 +     * ZSTD_c_srcSizeHint
102076 +     * ZSTD_c_enableDedicatedDictSearch
102077 +     * ZSTD_c_stableInBuffer
102078 +     * ZSTD_c_stableOutBuffer
102079 +     * ZSTD_c_blockDelimiters
102080 +     * ZSTD_c_validateSequences
102081 +     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
102082 +     * note : never ever use experimentalParam? names directly;
102083 +     *        also, the enum values themselves are unstable and can still change.
102084 +     */
102085 +     ZSTD_c_experimentalParam1=500,
102086 +     ZSTD_c_experimentalParam2=10,
102087 +     ZSTD_c_experimentalParam3=1000,
102088 +     ZSTD_c_experimentalParam4=1001,
102089 +     ZSTD_c_experimentalParam5=1002,
102090 +     ZSTD_c_experimentalParam6=1003,
102091 +     ZSTD_c_experimentalParam7=1004,
102092 +     ZSTD_c_experimentalParam8=1005,
102093 +     ZSTD_c_experimentalParam9=1006,
102094 +     ZSTD_c_experimentalParam10=1007,
102095 +     ZSTD_c_experimentalParam11=1008,
102096 +     ZSTD_c_experimentalParam12=1009
102097 +} ZSTD_cParameter;
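+/* Illustrative sketch (not part of the upstream header) : probing whether the
+ * linked library supports multi-threading, as described for ZSTD_c_nbWorkers above.
+ * `cctx` is assumed to be a valid ZSTD_CCtx provided by the caller.
+ *
+ *     size_t const r = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
+ *     if (ZSTD_isError(r)) {
+ *         // built without ZSTD_MULTITHREAD : stay in single-threaded mode
+ *         ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 0);
+ *     }
+ */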
102099 +typedef struct {
102100 +    size_t error;
102101 +    int lowerBound;
102102 +    int upperBound;
102103 +} ZSTD_bounds;
102105 +/*! ZSTD_cParam_getBounds() :
102106 + *  All parameters must belong to an interval with lower and upper bounds,
102107 + *  otherwise they will either trigger an error or be automatically clamped.
102108 + * @return : a structure, ZSTD_bounds, which contains
102109 + *         - an error status field, which must be tested using ZSTD_isError()
102110 + *         - lower and upper bounds, both inclusive
102111 + */
102112 +ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);
102114 +/*! ZSTD_CCtx_setParameter() :
102115 + *  Set one compression parameter, selected by enum ZSTD_cParameter.
102116 + *  All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
102117 + *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
102118 + *  Setting a parameter is generally only possible during frame initialization (before starting compression).
102119 + *  Exception : when using multi-threading mode (nbWorkers >= 1),
102120 + *              the following parameters can be updated _during_ compression (within same frame):
102121 + *              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
102122 + *              new parameters will be active for next job only (after a flush()).
102123 + * @return : an error code (which can be tested using ZSTD_isError()).
102124 + */
102125 +ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);
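+/* Illustrative sketch (not part of the upstream header) : clamping a requested
+ * compression level into its valid interval before applying it. `cctx` and
+ * `level` are assumed to be provided by the caller.
+ *
+ *     ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
+ *     if (!ZSTD_isError(b.error)) {
+ *         if (level < b.lowerBound) level = b.lowerBound;
+ *         if (level > b.upperBound) level = b.upperBound;
+ *     }
+ *     size_t const r = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
+ *     // r must still be tested with ZSTD_isError()
+ */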
102127 +/*! ZSTD_CCtx_setPledgedSrcSize() :
102128 + *  Total input data size to be compressed as a single frame.
102129 + *  Value will be written in frame header, unless explicitly forbidden using ZSTD_c_contentSizeFlag.
102130 + *  This value will also be checked at end of frame, triggering an error if not respected.
102131 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102132 + *  Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
102133 + *           In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
102134 + *           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
102135 + *  Note 2 : pledgedSrcSize is only valid once, for the next frame.
102136 + *           It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
102137 + *  Note 3 : Whenever all input data is provided and consumed in a single round,
102138 + *           for example with ZSTD_compress2(),
102139 + *           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
102140 + *           this value is automatically overridden by srcSize instead.
102141 + */
102142 +ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
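+/* Illustrative sketch (not part of the upstream header) : pledging the total source
+ * size before a streaming compression, so it gets written into the frame header.
+ * `cctx` and `totalSize` are assumed to come from the caller.
+ *
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ *     size_t const r = ZSTD_CCtx_setPledgedSrcSize(cctx, (unsigned long long)totalSize);
+ *     // test r with ZSTD_isError();
+ *     // pass ZSTD_CONTENTSIZE_UNKNOWN instead when the size is not known upfront
+ */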
102144 +typedef enum {
102145 +    ZSTD_reset_session_only = 1,
102146 +    ZSTD_reset_parameters = 2,
102147 +    ZSTD_reset_session_and_parameters = 3
102148 +} ZSTD_ResetDirective;
102150 +/*! ZSTD_CCtx_reset() :
102151 + *  There are 2 different things that can be reset, independently or jointly :
102152 + *  - The session : will stop compressing current frame, and make CCtx ready to start a new one.
102153 + *                  Useful after an error, or to interrupt any ongoing compression.
102154 + *                  Any internal data not yet flushed is cancelled.
102155 + *                  Compression parameters and dictionary remain unchanged.
102156 + *                  They will be used to compress next frame.
102157 + *                  Resetting session never fails.
102158 + *  - The parameters : changes all parameters back to "default".
102159 + *                  This removes any reference to any dictionary too.
102160 + *                  Parameters can only be changed between 2 sessions (i.e. when no compression is ongoing);
102161 + *                  otherwise the reset fails, and the function returns an error value (which can be tested using ZSTD_isError()).
102162 + *  - Both : similar to resetting the session, followed by resetting parameters.
102163 + */
102164 +ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
102166 +/*! ZSTD_compress2() :
102167 + *  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
102168 + *  ZSTD_compress2() always starts a new frame.
102169 + *  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
102170 + *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
102171 + *  - The function is always blocking, returns when compression is completed.
102172 + *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
102173 + * @return : compressed size written into `dst` (<= `dstCapacity`),
102174 + *           or an error code if it fails (which can be tested using ZSTD_isError()).
102175 + */
102176 +ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
102177 +                                   void* dst, size_t dstCapacity,
102178 +                             const void* src, size_t srcSize);
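+/* Illustrative sketch (not part of the upstream header) : one-shot compression with
+ * the advanced API. `src`, `srcSize`, `dst` and `dstCapacity` are assumed to be
+ * caller-provided, with dstCapacity >= ZSTD_compressBound(srcSize) for best speed.
+ *
+ *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
+ *     size_t const csize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ *     // test csize with ZSTD_isError() before using dst
+ *     ZSTD_freeCCtx(cctx);
+ */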
102181 +/***************************************
102182 +*  Advanced decompression API
102183 +***************************************/
102185 +/* The advanced API pushes parameters one by one into an existing DCtx context.
102186 + * Parameters are sticky, and remain valid for all following frames
102187 + * using the same DCtx context.
102188 + * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
102189 + * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
102190 + *        Therefore, no new decompression function is necessary.
102191 + */
102193 +typedef enum {
102195 +    ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
102196 +                              * the streaming API will refuse to allocate memory buffer
102197 +                              * in order to protect the host from unreasonable memory requirements.
102198 +                              * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
102199 +                              * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
102200 +                              * Special: value 0 means "use default maximum windowLog". */
102202 +    /* note : additional experimental parameters are also available
102203 +     * within the experimental section of the API.
102204 +     * At the time of this writing, they include :
102205 +     * ZSTD_d_format
102206 +     * ZSTD_d_stableOutBuffer
102207 +     * ZSTD_d_forceIgnoreChecksum
102208 +     * ZSTD_d_refMultipleDDicts
102209 +     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
102210 +     * note : never ever use experimentalParam? names directly
102211 +     */
102212 +     ZSTD_d_experimentalParam1=1000,
102213 +     ZSTD_d_experimentalParam2=1001,
102214 +     ZSTD_d_experimentalParam3=1002,
102215 +     ZSTD_d_experimentalParam4=1003
102217 +} ZSTD_dParameter;
102219 +/*! ZSTD_dParam_getBounds() :
102220 + *  All parameters must belong to an interval with lower and upper bounds,
102221 + *  otherwise they will either trigger an error or be automatically clamped.
102222 + * @return : a structure, ZSTD_bounds, which contains
102223 + *         - an error status field, which must be tested using ZSTD_isError()
102224 + *         - both lower and upper bounds, inclusive
102225 + */
102226 +ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
102228 +/*! ZSTD_DCtx_setParameter() :
102229 + *  Set one decompression parameter, selected by enum ZSTD_dParameter.
102230 + *  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
102231 + *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
102232 + *  Setting a parameter is only possible during frame initialization (before starting decompression).
102233 + * @return : 0, or an error code (which can be tested using ZSTD_isError()).
102234 + */
102235 +ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
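+/* Illustrative sketch (not part of the upstream header) : raising the streaming
+ * decoder's window size limit, e.g. to accept frames produced with long distance
+ * matching. `dctx` is assumed to be a valid ZSTD_DCtx; 30 is just an example value
+ * and must stay within ZSTD_dParam_getBounds(ZSTD_d_windowLogMax).
+ *
+ *     size_t const r = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 30);
+ *     // test r with ZSTD_isError()
+ */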
102237 +/*! ZSTD_DCtx_reset() :
102238 + *  Return a DCtx to a clean state.
102239 + *  Session and parameters can be reset jointly or separately.
102240 + *  Parameters can only be reset when no active frame is being decompressed.
102241 + * @return : 0, or an error code, which can be tested with ZSTD_isError()
102242 + */
102243 +ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
102246 +/****************************
102247 +*  Streaming
102248 +****************************/
102250 +typedef struct ZSTD_inBuffer_s {
102251 +  const void* src;    /**< start of input buffer */
102252 +  size_t size;        /**< size of input buffer */
102253 +  size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
102254 +} ZSTD_inBuffer;
102256 +typedef struct ZSTD_outBuffer_s {
102257 +  void*  dst;         /**< start of output buffer */
102258 +  size_t size;        /**< size of output buffer */
102259 +  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
102260 +} ZSTD_outBuffer;
102264 +/*-***********************************************************************
102265 +*  Streaming compression - HowTo
102267 +*  A ZSTD_CStream object is required to track streaming operation.
102268 +*  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
102269 +*  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
102270 +*  Re-using a ZSTD_CStream is recommended, since it plays nicer with the system's memory by re-using already allocated buffers.
102272 +*  For parallel execution, use one separate ZSTD_CStream per thread.
102274 +*  note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
102276 +*  Parameters are sticky : when starting a new compression on the same context,
102277 +*  it will re-use the same sticky parameters as previous compression session.
102278 +*  When in doubt, it's recommended to fully initialize the context before usage.
102279 +*  Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
102280 +*  ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
102281 +*  set more specific parameters, the pledged source size, or load a dictionary.
102283 +*  Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
102284 +*  consume input stream. The function will automatically update both `pos`
102285 +*  fields within `input` and `output`.
102286 +*  Note that the function may not consume the entire input, for example, because
102287 +*  the output buffer is already full, in which case `input.pos < input.size`.
102288 +*  The caller must check if input has been entirely consumed.
102289 +*  If not, the caller must make some room to receive more compressed data,
102290 +*  and then present again remaining input data.
102291 +*  note: ZSTD_e_continue is guaranteed to make some forward progress when called,
102292 +*        but doesn't guarantee maximal forward progress. This is especially relevant
102293 +*        when compressing with multiple threads. The call won't block if it can
102294 +*        consume some input, but if it can't it will wait for some, but not all,
102295 +*        output to be flushed.
102296 +* @return : provides a minimum amount of data remaining to be flushed from internal buffers
102297 +*           or an error code, which can be tested using ZSTD_isError().
102299 +*  At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
102300 +*  using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
102301 +*  Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0),
102302 +*  in which case, make some room to receive more compressed data, and call ZSTD_compressStream2() again with ZSTD_e_flush.
102303 +*  You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
102304 +*  operation.
102305 +*  note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
102306 +*        block until the flush is complete or the output buffer is full.
102307 +*  @return : 0 if internal buffers are entirely flushed,
102308 +*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
102309 +*            or an error code, which can be tested using ZSTD_isError().
102311 +*  Calling ZSTD_compressStream2() with ZSTD_e_end instructs it to finish a frame.
102312 +*  It will perform a flush and write frame epilogue.
102313 +*  The epilogue is required for decoders to consider a frame completed.
102314 +*  The flush operation is the same, and follows the same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
102315 +*  You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
102316 +*  start a new frame.
102317 +*  note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
102318 +*        block until the flush is complete or the output buffer is full.
102319 +*  @return : 0 if frame fully completed and fully flushed,
102320 +*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
102321 +*            or an error code, which can be tested using ZSTD_isError().
102323 +* *******************************************************************/
102325 +typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
102326 +                                 /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
102327 +/*===== ZSTD_CStream management functions =====*/
102328 +ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
102329 +ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);  /* accept NULL pointer */
102331 +/*===== Streaming compression functions =====*/
102332 +typedef enum {
102333 +    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
102334 +    ZSTD_e_flush=1,    /* flush any data provided so far,
102335 +                        * it creates (at least) one new block, that can be decoded immediately on reception;
102336 +                        * frame will continue: any future data can still reference previously compressed data, improving compression.
102337 +                        * note : multithreaded compression will block to flush as much output as possible. */
102338 +    ZSTD_e_end=2       /* flush any remaining data _and_ close current frame.
102339 +                        * note that frame is only closed after compressed data is fully flushed (return value == 0).
102340 +                        * After that point, any additional data starts a new frame.
102341 +                        * note : each frame is independent (does not reference any content from previous frame).
102342 +                        * note : multithreaded compression will block to flush as much output as possible. */
102343 +} ZSTD_EndDirective;
102345 +/*! ZSTD_compressStream2() :
102346 + *  Behaves about the same as ZSTD_compressStream, with additional control on end directive.
102347 + *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
102348 + *  - Compression parameters cannot be changed once compression is started (save for a list of exceptions in multi-threading mode)
102349 + *  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
102350 + *  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
102351 + *  - endOp must be a valid directive
102352 + *  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
102353 + *  - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flushes to output whatever is available,
102354 + *                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.
102355 + *                                                  The function nonetheless guarantees forward progress : it will return only after it reads or writes at least one byte.
102356 + *  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
102357 + *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
102358 + *            or an error code, which can be tested using ZSTD_isError().
102359 + *            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
102360 + *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
102361 + *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
102362 + *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
102363 + *            only ZSTD_e_end or ZSTD_e_flush operations are allowed.
102364 + *            Before starting a new compression job, or changing compression parameters,
102365 + *            it is required to fully flush internal buffers.
102366 + */
102367 +ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
102368 +                                         ZSTD_outBuffer* output,
102369 +                                         ZSTD_inBuffer* input,
102370 +                                         ZSTD_EndDirective endOp);
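+/* Illustrative sketch (not part of the upstream header) : compressing a single
+ * in-memory buffer with the streaming API, looping on ZSTD_e_end until the frame
+ * is fully flushed. `cctx`, `src`/`srcSize` and the `outBuf` allocation are assumed
+ * to be managed by the caller; a real stream would refill `input` chunk by chunk.
+ *
+ *     ZSTD_inBuffer input = { src, srcSize, 0 };
+ *     size_t const outCap = ZSTD_CStreamOutSize();
+ *     size_t remaining;
+ *     do {
+ *         ZSTD_outBuffer output = { outBuf, outCap, 0 };
+ *         remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
+ *         // test remaining with ZSTD_isError(), then consume output.pos bytes of outBuf
+ *     } while (remaining != 0);
+ */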
102373 +/* These buffer sizes are softly recommended.
102374 + * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
102375 + * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
102376 + * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
102378 + * However, note that these recommendations are from the perspective of a C caller program.
102379 + * If the streaming interface is invoked from some other language,
102380 + * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
102381 + * a major performance rule is to reduce crossing such interface to an absolute minimum.
102382 + * It's not rare for more time to be spent crossing the interface than on compression itself.
102383 + * In such cases, prefer buffers as large as practical,
102384 + * for both input and output, to reduce the number of round trips.
102385 + */
102386 +ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
102387 +ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */
102390 +/* *****************************************************************************
102391 + * This following is a legacy streaming API.
102392 + * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
102393 + * It is redundant, but remains fully supported.
102394 + * Advanced parameters and dictionary compression can only be used through the
102395 + * new API.
102396 + ******************************************************************************/
102398 +/*!
102399 + * Equivalent to:
102401 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
102402 + *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
102403 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
102404 + */
102405 +ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
102406 +/*!
102407 + * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
102408 + * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
102409 + * the next read size (if non-zero and not an error). ZSTD_compressStream2()
102410 + * returns the minimum nb of bytes left to flush (if non-zero and not an error).
102411 + */
102412 +ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
102413 +/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
102414 +ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
102415 +/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
102416 +ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
102419 +/*-***************************************************************************
102420 +*  Streaming decompression - HowTo
102422 +*  A ZSTD_DStream object is required to track streaming operations.
102423 +*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
102424 +*  ZSTD_DStream objects can be re-used multiple times.
102426 +*  Use ZSTD_initDStream() to start a new decompression operation.
102427 +* @return : recommended first input size
102428 +*  Alternatively, use advanced API to set specific properties.
102430 +*  Use ZSTD_decompressStream() repetitively to consume your input.
102431 +*  The function will update both `pos` fields.
102432 +*  If `input.pos < input.size`, some input has not been consumed.
102433 +*  It's up to the caller to present again remaining data.
102434 +*  The function tries to flush all data decoded immediately, respecting output buffer size.
102435 +*  If `output.pos < output.size`, the decoder has flushed everything it could.
102436 +*  But if `output.pos == output.size`, there might be some data left within internal buffers,
102437 +*  in which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
102438 +*  Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
102439 +* @return : 0 when a frame is completely decoded and fully flushed,
102440 +*        or an error code, which can be tested using ZSTD_isError(),
102441 +*        or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
102442 +*                                the return value is a suggested next input size (just a hint for better latency)
102443 +*                                that will never request more than the remaining frame size.
102444 +* *******************************************************************************/
102446 +typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
102447 +                                 /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
102448 +/*===== ZSTD_DStream management functions =====*/
102449 +ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
102450 +ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);  /* accept NULL pointer */
102452 +/*===== Streaming decompression functions =====*/
102454 +/* This function is redundant with the advanced API and equivalent to:
102456 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
102457 + *     ZSTD_DCtx_refDDict(zds, NULL);
102458 + */
102459 +ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
102461 +ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
102463 +ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
102464 +ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
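+/* Illustrative sketch (not part of the upstream header) : draining one frame with
+ * ZSTD_decompressStream(). `zds`, `src`/`srcSize` and `outBuf` are caller-provided;
+ * ZSTD_DStreamOutSize() guarantees room to flush at least one full block per call.
+ *
+ *     ZSTD_inBuffer input = { src, srcSize, 0 };
+ *     size_t const outCap = ZSTD_DStreamOutSize();
+ *     while (input.pos < input.size) {
+ *         ZSTD_outBuffer output = { outBuf, outCap, 0 };
+ *         size_t const ret = ZSTD_decompressStream(zds, &output, &input);
+ *         // test ret with ZSTD_isError(); ret == 0 means the frame is complete
+ *         // consume output.pos bytes of outBuf here
+ *     }
+ */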
102467 +/**************************
102468 +*  Simple dictionary API
102469 +***************************/
102470 +/*! ZSTD_compress_usingDict() :
102471 + *  Compression at an explicit compression level using a Dictionary.
102472 + *  A dictionary can be any arbitrary data segment (also called a prefix),
102473 + *  or a buffer with specified information (see dictBuilder/zdict.h).
102474 + *  Note : This function loads the dictionary, resulting in significant startup delay.
102475 + *         It's intended for a dictionary used only once.
102476 + *  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
102477 +ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
102478 +                                           void* dst, size_t dstCapacity,
102479 +                                     const void* src, size_t srcSize,
102480 +                                     const void* dict,size_t dictSize,
102481 +                                           int compressionLevel);
102483 +/*! ZSTD_decompress_usingDict() :
102484 + *  Decompression using a known Dictionary.
102485 + *  Dictionary must be identical to the one used during compression.
102486 + *  Note : This function loads the dictionary, resulting in significant startup delay.
102487 + *         It's intended for a dictionary used only once.
102488 + *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
102489 +ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
102490 +                                             void* dst, size_t dstCapacity,
102491 +                                       const void* src, size_t srcSize,
102492 +                                       const void* dict,size_t dictSize);
102495 +/***********************************
102496 + *  Bulk processing dictionary API
102497 + **********************************/
102498 +typedef struct ZSTD_CDict_s ZSTD_CDict;
102500 +/*! ZSTD_createCDict() :
102501 + *  When compressing multiple messages or blocks using the same dictionary,
102502 + *  it's recommended to digest the dictionary only once, since it's a costly operation.
102503 + *  ZSTD_createCDict() will create a state from digesting a dictionary.
102504 + *  The resulting state can be used for future compression operations with very limited startup cost.
102505 + *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
102506 + * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
102507 + *  Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
102508 + *  Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
102509 + *      in which case the only thing that it transports is the @compressionLevel.
102510 + *      This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
102511 + *      expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
102512 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
102513 +                                         int compressionLevel);
102515 +/*! ZSTD_freeCDict() :
102516 + *  Function frees memory allocated by ZSTD_createCDict().
102517 + *  If a NULL pointer is passed, no operation is performed. */
102518 +ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
102520 +/*! ZSTD_compress_usingCDict() :
102521 + *  Compression using a digested Dictionary.
102522 + *  Recommended when same dictionary is used multiple times.
102523 + *  Note : compression level is _decided at dictionary creation time_,
102524 + *     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
102525 +ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
102526 +                                            void* dst, size_t dstCapacity,
102527 +                                      const void* src, size_t srcSize,
102528 +                                      const ZSTD_CDict* cdict);
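+/* Illustrative sketch (not part of the upstream header) : digesting a dictionary once,
+ * then re-using it for many messages. `dictBuf`/`dictSize` and the per-message buffers
+ * are assumed caller-provided; the same pattern applies to ZSTD_DDict for decompression.
+ *
+ *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
+ *     size_t const csize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
+ *                                                   src, srcSize, cdict);
+ *     // test csize with ZSTD_isError(); repeat for further messages, then :
+ *     ZSTD_freeCDict(cdict);
+ */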
102531 +typedef struct ZSTD_DDict_s ZSTD_DDict;
102533 +/*! ZSTD_createDDict() :
102534 + *  Create a digested dictionary, ready to start decompression operation without startup delay.
102535 + *  dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
102536 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
102538 +/*! ZSTD_freeDDict() :
102539 + *  Function frees memory allocated with ZSTD_createDDict()
102540 + *  If a NULL pointer is passed, no operation is performed. */
102541 +ZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
102543 +/*! ZSTD_decompress_usingDDict() :
102544 + *  Decompression using a digested Dictionary.
102545 + *  Recommended when same dictionary is used multiple times. */
102546 +ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
102547 +                                              void* dst, size_t dstCapacity,
102548 +                                        const void* src, size_t srcSize,
102549 +                                        const ZSTD_DDict* ddict);
102552 +/********************************
102553 + *  Dictionary helper functions
102554 + *******************************/
102556 +/*! ZSTD_getDictID_fromDict() :
102557 + *  Provides the dictID stored within dictionary.
102558 + *  if @return == 0, the dictionary is not conformant with Zstandard specification.
102559 + *  It can still be loaded, but as a content-only dictionary. */
102560 +ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
102562 +/*! ZSTD_getDictID_fromDDict() :
102563 + *  Provides the dictID of the dictionary loaded into `ddict`.
102564 + *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
102565 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
102566 +ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
102568 +/*! ZSTD_getDictID_fromFrame() :
102569 + *  Provides the dictID required to decompress the frame stored within `src`.
102570 + *  If @return == 0, the dictID could not be decoded.
102571 + *  This could be for one of the following reasons :
102572 + *  - The frame does not require a dictionary to be decoded (most common case).
102573 + *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
102574 + *    Note : this use case also happens when using a non-conformant dictionary.
102575 + *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
102576 + *  - This is not a Zstandard frame.
102577 + *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
102578 +ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
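+/* Illustrative sketch (not part of the upstream header) : checking that a loaded
+ * DDict matches the dictID announced by a frame before decompressing it.
+ * `ddict`, `src` and `srcSize` are assumed to come from the caller.
+ *
+ *     unsigned const id = ZSTD_getDictID_fromFrame(src, srcSize);
+ *     if (id != 0 && id == ZSTD_getDictID_fromDDict(ddict)) {
+ *         // the frame was built with this dictionary : decompress using ddict
+ *     }
+ */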
102581 +/*******************************************************************************
102582 + * Advanced dictionary and prefix API
102584 + * This API allows dictionaries to be used with ZSTD_compress2(),
102585 + * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
102586 + * are only cleared when the context is reset with ZSTD_reset_parameters or
102587 + * ZSTD_reset_session_and_parameters. Prefixes are single-use.
102588 + ******************************************************************************/
102591 +/*! ZSTD_CCtx_loadDictionary() :
102592 + *  Create an internal CDict from `dict` buffer.
102593 + *  Decompression will have to use same dictionary.
102594 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102595 + *  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
102596 + *           meaning "return to no-dictionary mode".
102597 + *  Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
102598 + *           To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
102599 + *  Note 2 : Loading a dictionary involves building tables.
102600 + *           It's also a CPU consuming operation, with non-negligible impact on latency.
102601 + *           Tables are dependent on compression parameters, and for this reason,
102602 + *           compression parameters can no longer be changed after loading a dictionary.
102603 + *  Note 3 :`dict` content will be copied internally.
102604 + *           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
102605 + *           In such a case, dictionary buffer must outlive its users.
102606 + *  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
102607 + *           to precisely select how dictionary content must be interpreted. */
102608 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
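+/* Illustrative sketch (not part of the upstream header) : dictionary compression
+ * through the advanced API. Compression parameters are set first, since the tables
+ * built while loading depend on them; buffers are assumed caller-provided.
+ *
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 7);
+ *     size_t const lr = ZSTD_CCtx_loadDictionary(cctx, dictBuf, dictSize);
+ *     // test lr with ZSTD_isError()
+ *     size_t const csize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+ */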
102610 +/*! ZSTD_CCtx_refCDict() :
102611 + *  Reference a prepared dictionary, to be used for all next compressed frames.
102612 + *  Note that compression parameters are enforced from within CDict,
102613 + *  and supersede any compression parameter previously set within CCtx.
102614 + *  The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
102615 + *  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
102616 + *  The dictionary will remain valid for future compressed frames using same CCtx.
102617 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102618 + *  Special : Referencing a NULL CDict means "return to no-dictionary mode".
102619 + *  Note 1 : Currently, only one dictionary can be managed.
102620 + *           Referencing a new dictionary effectively "discards" any previous one.
102621 + *  Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
102622 +ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
102624 +/*! ZSTD_CCtx_refPrefix() :
102625 + *  Reference a prefix (single-usage dictionary) for next compressed frame.
102626 + *  A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
102627 + *  Decompression will need same prefix to properly regenerate data.
102628 + *  Compressing with a prefix is similar in outcome to performing a diff and compressing it,
102629 + *  but performs much faster, especially during decompression (compression speed is tunable with compression level).
102630 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102631 + *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
102632 + *  Note 1 : Prefix buffer is referenced. It **must** outlive compression.
102633 + *           Its content must remain unmodified during compression.
102634 + *  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
102635 + *           ensure that the window size is large enough to contain the entire source.
102636 + *           See ZSTD_c_windowLog.
102637 + *  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
102638 + *           It's a CPU consuming operation, with non-negligible impact on latency.
102639 + *           If there is a need to use the same prefix multiple times, consider loadDictionary instead.
102640 + *  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
102641 + *           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
102642 +ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
102643 +                                 const void* prefix, size_t prefixSize);
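+/* Illustrative sketch (not part of the upstream header) : "diff"-style compression
+ * of a new version of a blob against its previous version, referenced as a
+ * single-use prefix. `oldBuf`/`newBuf` are caller-provided, and windowLog is
+ * assumed large enough to cover the whole prefix (see Note 2 above).
+ *
+ *     ZSTD_CCtx_refPrefix(cctx, oldBuf, oldSize);
+ *     size_t const csize = ZSTD_compress2(cctx, dst, dstCapacity, newBuf, newSize);
+ *     // the prefix reference is dropped once the frame ends
+ */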
102645 +/*! ZSTD_DCtx_loadDictionary() :
102646 + *  Create an internal DDict from dict buffer,
102647 + *  to be used to decompress next frames.
102648 + *  The dictionary remains valid for all future frames, until explicitly invalidated.
102649 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102650 + *  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
102651 + *            meaning "return to no-dictionary mode".
102652 + *  Note 1 : Loading a dictionary involves building tables,
102653 + *           which has a non-negligible impact on CPU usage and latency.
102654 + *           It's recommended to "load once, use many times", to amortize the cost.
102655 + *  Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.
102656 + *           Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
102657 + *  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
102658 + *           how dictionary content is loaded and interpreted.
102659 + */
102660 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
102662 +/*! ZSTD_DCtx_refDDict() :
102663 + *  Reference a prepared dictionary, to be used to decompress next frames.
102664 + *  The dictionary remains active for decompression of future frames using same DCtx.
102666 + *  If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
102667 + *  will store the DDict references in a table, and the DDict used for decompression
102668 + *  will be determined at decompression time, as per the dict ID in the frame.
102669 + *  The memory for the table is allocated on the first call to refDDict, and can be
102670 + *  freed with ZSTD_freeDCtx().
102672 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102673 + *  Note 1 : Currently, only one dictionary can be managed.
102674 + *           Referencing a new dictionary effectively "discards" any previous one.
102675 + *  Special: referencing a NULL DDict means "return to no-dictionary mode".
102676 + *  Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
102677 + */
102678 +ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
102680 +/*! ZSTD_DCtx_refPrefix() :
102681 + *  Reference a prefix (single-usage dictionary) to decompress next frame.
102682 + *  This is the reverse operation of ZSTD_CCtx_refPrefix(),
102683 + *  and must use the same prefix as the one used during compression.
102684 + *  Prefix is **only used once**. Reference is discarded at end of frame.
102685 + *  End of frame is reached when ZSTD_decompressStream() returns 0.
102686 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
102687 + *  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
102688 + *  Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
102689 + *           Prefix buffer must remain unmodified up to the end of frame,
102690 + *           reached when ZSTD_decompressStream() returns 0.
102691 + *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
102692 + *           Use ZSTD_DCtx_refPrefix_advanced() to alter dictMode (Experimental section)
102693 + *  Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
102694 + *           A full dictionary is more costly, as it requires building tables.
102695 + */
102696 +ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
102697 +                                 const void* prefix, size_t prefixSize);
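+/* Illustrative sketch (not part of the upstream header) : the decompression side of
+ * the prefix "diff" above. The exact same `oldBuf` used at compression time must be
+ * referenced again before decoding the frame.
+ *
+ *     ZSTD_DCtx_refPrefix(dctx, oldBuf, oldSize);
+ *     size_t const dsize = ZSTD_decompressDCtx(dctx, out, outCapacity, frame, frameSize);
+ *     // test dsize with ZSTD_isError()
+ */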
102699 +/* ===   Memory management   === */
102701 +/*! ZSTD_sizeof_*() :
102702 + *  These functions give the _current_ memory usage of selected object.
102703 + *  Note that object memory usage can evolve (increase or decrease) over time. */
102704 +ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
102705 +ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
102706 +ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
102707 +ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
102708 +ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
102709 +ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
102711 +#endif  /* ZSTD_H_235446 */
102714 +/* **************************************************************************************
102715 + *   ADVANCED AND EXPERIMENTAL FUNCTIONS
102716 + ****************************************************************************************
102717 + * The definitions in the following section are considered experimental.
102718 + * They are provided for advanced scenarios.
102719 + * They should never be used with a dynamic library, as prototypes may change in the future.
102720 + * Use them only in association with static linking.
102721 + * ***************************************************************************************/
102723 +#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
102724 +#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
102726 +/****************************************************************************************
102727 + *   experimental API (static linking only)
102728 + ****************************************************************************************
102729 + * The following symbols and constants
102730 + * are not planned to join "stable API" status in the near future.
102731 + * They can still change in future versions.
102732 + * Some of them are planned to remain in the static_only section indefinitely.
102733 + * Some of them might be removed in the future (especially when redundant with existing stable functions)
102734 + * ***************************************************************************************/
102736 +#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1)   /* minimum input size required to query frame header size */
102737 +#define ZSTD_FRAMEHEADERSIZE_MIN(format)    ((format) == ZSTD_f_zstd1 ? 6 : 2)
102738 +#define ZSTD_FRAMEHEADERSIZE_MAX   18   /* can be useful for static allocation */
102739 +#define ZSTD_SKIPPABLEHEADERSIZE    8
102741 +/* compression parameter bounds */
102742 +#define ZSTD_WINDOWLOG_MAX_32    30
102743 +#define ZSTD_WINDOWLOG_MAX_64    31
102744 +#define ZSTD_WINDOWLOG_MAX     ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
102745 +#define ZSTD_WINDOWLOG_MIN       10
102746 +#define ZSTD_HASHLOG_MAX       ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
102747 +#define ZSTD_HASHLOG_MIN          6
102748 +#define ZSTD_CHAINLOG_MAX_32     29
102749 +#define ZSTD_CHAINLOG_MAX_64     30
102750 +#define ZSTD_CHAINLOG_MAX      ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
102751 +#define ZSTD_CHAINLOG_MIN        ZSTD_HASHLOG_MIN
102752 +#define ZSTD_SEARCHLOG_MAX      (ZSTD_WINDOWLOG_MAX-1)
102753 +#define ZSTD_SEARCHLOG_MIN        1
102754 +#define ZSTD_MINMATCH_MAX         7   /* only for ZSTD_fast, other strategies are limited to 6 */
102755 +#define ZSTD_MINMATCH_MIN         3   /* only for ZSTD_btopt+, faster strategies are limited to 4 */
102756 +#define ZSTD_TARGETLENGTH_MAX    ZSTD_BLOCKSIZE_MAX
102757 +#define ZSTD_TARGETLENGTH_MIN     0   /* note : comparing this constant to an unsigned results in a tautological test */
102758 +#define ZSTD_STRATEGY_MIN        ZSTD_fast
102759 +#define ZSTD_STRATEGY_MAX        ZSTD_btultra2
102762 +#define ZSTD_OVERLAPLOG_MIN       0
102763 +#define ZSTD_OVERLAPLOG_MAX       9
102765 +#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27   /* by default, the streaming decoder will refuse any frame
102766 +                                           * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
102767 +                                           * to preserve host's memory from unreasonable requirements.
102768 +                                           * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
102769 +                                           * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
102772 +/* LDM parameter bounds */
102773 +#define ZSTD_LDM_HASHLOG_MIN      ZSTD_HASHLOG_MIN
102774 +#define ZSTD_LDM_HASHLOG_MAX      ZSTD_HASHLOG_MAX
102775 +#define ZSTD_LDM_MINMATCH_MIN        4
102776 +#define ZSTD_LDM_MINMATCH_MAX     4096
102777 +#define ZSTD_LDM_BUCKETSIZELOG_MIN   1
102778 +#define ZSTD_LDM_BUCKETSIZELOG_MAX   8
102779 +#define ZSTD_LDM_HASHRATELOG_MIN     0
102780 +#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
102782 +/* Advanced parameter bounds */
102783 +#define ZSTD_TARGETCBLOCKSIZE_MIN   64
102784 +#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
102785 +#define ZSTD_SRCSIZEHINT_MIN        0
102786 +#define ZSTD_SRCSIZEHINT_MAX        INT_MAX
102788 +/* internal */
102789 +#define ZSTD_HASHLOG3_MAX           17
102792 +/* ---  Advanced types  --- */
102794 +typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
102796 +typedef struct {
102797 +    unsigned int offset;      /* The offset of the match. (NOT the same as the offset code)
102798 +                               * If offset == 0 and matchLength == 0, this sequence represents the last
102799 +                               * literals in the block of litLength size.
102800 +                               */
102802 +    unsigned int litLength;   /* Literal length of the sequence. */
102803 +    unsigned int matchLength; /* Match length of the sequence. */
102805 +                              /* Note: Users of this API may provide a sequence with matchLength == litLength == offset == 0.
102806 +                               * In this case, we will treat the sequence as a marker for a block boundary.
102807 +                               */
102809 +    unsigned int rep;         /* Represents which repeat offset is represented by the field 'offset'.
102810 +                               * Ranges from [0, 3].
102811 +                               *
102812 +                               * Repeat offsets are essentially previous offsets from previous sequences sorted in
102813 +                               * recency order. For more detail, see doc/zstd_compression_format.md
102814 +                               *
102815 +                               * If rep == 0, then 'offset' does not contain a repeat offset.
102816 +                               * If rep > 0:
102817 +                               *  If litLength != 0:
102818 +                               *      rep == 1 --> offset == repeat_offset_1
102819 +                               *      rep == 2 --> offset == repeat_offset_2
102820 +                               *      rep == 3 --> offset == repeat_offset_3
102821 +                               *  If litLength == 0:
102822 +                               *      rep == 1 --> offset == repeat_offset_2
102823 +                               *      rep == 2 --> offset == repeat_offset_3
102824 +                               *      rep == 3 --> offset == repeat_offset_1 - 1
102825 +                               *
102826 +                               * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
102827 +                               * 'rep', but repeat offsets do not necessarily need to be calculated from an external
102828 +                               * sequence provider's perspective. For example, ZSTD_compressSequences() does not
102829 +                               * use this 'rep' field at all (as of now).
102830 +                               */
102831 +} ZSTD_Sequence;
102833 +typedef struct {
102834 +    unsigned windowLog;       /**< largest match distance : larger == more compression, more memory needed during decompression */
102835 +    unsigned chainLog;        /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
102836 +    unsigned hashLog;         /**< dispatch table : larger == faster, more memory */
102837 +    unsigned searchLog;       /**< nb of searches : larger == more compression, slower */
102838 +    unsigned minMatch;        /**< match length searched : larger == faster decompression, sometimes less compression */
102839 +    unsigned targetLength;    /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
102840 +    ZSTD_strategy strategy;   /**< see ZSTD_strategy definition above */
102841 +} ZSTD_compressionParameters;
102843 +typedef struct {
102844 +    int contentSizeFlag; /**< 1: content size will be in frame header (when known) */
102845 +    int checksumFlag;    /**< 1: generate a 32-bit checksum using the XXH64 algorithm at end of frame, for error detection */
102846 +    int noDictIDFlag;    /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
102847 +} ZSTD_frameParameters;
102849 +typedef struct {
102850 +    ZSTD_compressionParameters cParams;
102851 +    ZSTD_frameParameters fParams;
102852 +} ZSTD_parameters;
102854 +typedef enum {
102855 +    ZSTD_dct_auto = 0,       /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
102856 +    ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
102857 +    ZSTD_dct_fullDict = 2    /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
102858 +} ZSTD_dictContentType_e;
102860 +typedef enum {
102861 +    ZSTD_dlm_byCopy = 0,  /**< Copy dictionary content internally */
102862 +    ZSTD_dlm_byRef = 1    /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
102863 +} ZSTD_dictLoadMethod_e;
102865 +typedef enum {
102866 +    ZSTD_f_zstd1 = 0,           /* zstd frame format, specified in zstd_compression_format.md (default) */
102867 +    ZSTD_f_zstd1_magicless = 1  /* Variant of zstd frame format, without initial 4-bytes magic number.
102868 +                                 * Useful to save 4 bytes per generated frame.
102869 +                                 * The decoder cannot recognise this format automatically, so this instruction is required. */
102870 +} ZSTD_format_e;
102872 +typedef enum {
102873 +    /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
102874 +    ZSTD_d_validateChecksum = 0,
102875 +    ZSTD_d_ignoreChecksum = 1
102876 +} ZSTD_forceIgnoreChecksum_e;
102878 +typedef enum {
102879 +    /* Note: this enum controls ZSTD_d_refMultipleDDicts */
102880 +    ZSTD_rmd_refSingleDDict = 0,
102881 +    ZSTD_rmd_refMultipleDDicts = 1
102882 +} ZSTD_refMultipleDDicts_e;
102884 +typedef enum {
102885 +    /* Note: this enum and the behavior it controls are effectively internal
102886 +     * implementation details of the compressor. They are expected to continue
102887 +     * to evolve and should be considered only in the context of extremely
102888 +     * advanced performance tuning.
102889 +     *
102890 +     * Zstd currently supports the use of a CDict in three ways:
102891 +     *
102892 +     * - The contents of the CDict can be copied into the working context. This
102893 +     *   means that the compression can search both the dictionary and input
102894 +     *   while operating on a single set of internal tables. This makes
102895 +     *   the compression faster per-byte of input. However, the initial copy of
102896 +     *   the CDict's tables incurs a fixed cost at the beginning of the
102897 +     *   compression. For small compressions (< 8 KB), that copy can dominate
102898 +     *   the cost of the compression.
102899 +     *
102900 +     * - The CDict's tables can be used in-place. In this model, compression is
102901 +     *   slower per input byte, because the compressor has to search two sets of
102902 +     *   tables. However, this model incurs no start-up cost (as long as the
102903 +     *   working context's tables can be reused). For small inputs, this can be
102904 +     *   faster than copying the CDict's tables.
102905 +     *
102906 +     * - The CDict's tables are not used at all, and instead we use the working
102907 +     *   context alone to reload the dictionary and use params based on the source
102908 +     *   size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
102909 +     *   This method is effective when the dictionary sizes are very small relative
102910 +     *   to the input size, and the input size is fairly large to begin with.
102911 +     *
102912 +     * Zstd has a simple internal heuristic that selects which strategy to use
102913 +     * at the beginning of a compression. However, if experimentation shows that
102914 +     * Zstd is making poor choices, it is possible to override that choice with
102915 +     * this enum.
102916 +     */
102917 +    ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
102918 +    ZSTD_dictForceAttach   = 1, /* Never copy the dictionary. */
102919 +    ZSTD_dictForceCopy     = 2, /* Always copy the dictionary. */
102920 +    ZSTD_dictForceLoad     = 3  /* Always reload the dictionary */
102921 +} ZSTD_dictAttachPref_e;
102923 +typedef enum {
102924 +  ZSTD_lcm_auto = 0,          /**< Automatically determine the compression mode based on the compression level.
102925 +                               *   Negative compression levels will be uncompressed, and positive compression
102926 +                               *   levels will be compressed. */
102927 +  ZSTD_lcm_huffman = 1,       /**< Always attempt Huffman compression. Uncompressed literals will still be
102928 +                               *   emitted if Huffman compression is not profitable. */
102929 +  ZSTD_lcm_uncompressed = 2   /**< Always emit uncompressed literals. */
102930 +} ZSTD_literalCompressionMode_e;
102933 +/***************************************
102934 +*  Frame size functions
102935 +***************************************/
102937 +/*! ZSTD_findDecompressedSize() :
102938 + *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
102939 + *  `srcSize` must be the _exact_ size of this series
102940 + *       (i.e. there should be a frame boundary at `src + srcSize`)
102941 + *  @return : - decompressed size of all data in all successive frames
102942 + *            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
102943 + *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
102945 + *   note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
102946 + *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
102947 + *            In which case, it's necessary to use streaming mode to decompress data.
102948 + *   note 2 : decompressed size is always present when compression is done with ZSTD_compress()
102949 + *   note 3 : decompressed size can be very large (a 64-bit value),
102950 + *            potentially larger than what local system can handle as a single memory segment.
102951 + *            In which case, it's necessary to use streaming mode to decompress data.
102952 + *   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
102953 + *            Always ensure result fits within application's authorized limits.
102954 + *            Each application can set its own limits.
102955 + *   note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
102956 + *            read each contained frame header.  This is fast as most of the data is skipped,
102957 + *            however it does mean that all frame data must be present and valid. */
102958 +ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
102960 +/*! ZSTD_decompressBound() :
102961 + *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
102962 + *  `srcSize` must be the _exact_ size of this series
102963 + *       (i.e. there should be a frame boundary at `src + srcSize`)
102964 + *  @return : - upper-bound for the decompressed size of all data in all successive frames
102965 + *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
102967 + *  note 1  : an error can occur if `src` contains an invalid or incorrectly formatted frame.
102968 + *  note 2  : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
102969 + *            in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
102970 + *  note 3  : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
102971 + *              upper-bound = # blocks * min(128 KB, Window_Size)
102972 + */
102973 +ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
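
As a usage sketch of the two frame-size queries above (userspace-style code; function and buffer names are illustrative, and the experimental declarations are assumed to be exposed by defining ZSTD_STATIC_LINKING_ONLY before including zstd.h):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdlib.h>

    /* Decompress a series of frames into a malloc'd buffer, or return NULL.
     * Falls back to ZSTD_decompressBound() when the exact size is unknown. */
    static void* decompress_frames(const void* src, size_t srcSize, size_t* dstSize)
    {
        unsigned long long cap = ZSTD_findDecompressedSize(src, srcSize);
        if (cap == ZSTD_CONTENTSIZE_UNKNOWN)
            cap = ZSTD_decompressBound(src, srcSize);   /* exact when size fields exist */
        if (cap == ZSTD_CONTENTSIZE_ERROR) return NULL; /* malformed input */
        if (cap > (1ULL << 30)) return NULL;  /* note 4: enforce an app-chosen limit (1 GiB here) */
        void* const dst = malloc((size_t)cap);
        if (dst == NULL) return NULL;
        size_t const r = ZSTD_decompress(dst, (size_t)cap, src, srcSize);
        if (ZSTD_isError(r)) { free(dst); return NULL; }
        *dstSize = r;
        return dst;
    }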
102975 +/*! ZSTD_frameHeaderSize() :
102976 + *  srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
102977 + * @return : size of the Frame Header,
102978 + *           or an error code (if srcSize is too small) */
102979 +ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
102981 +typedef enum {
102982 +  ZSTD_sf_noBlockDelimiters = 0,         /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
102983 +  ZSTD_sf_explicitBlockDelimiters = 1    /* Representation of ZSTD_Sequence contains explicit block delimiters */
102984 +} ZSTD_sequenceFormat_e;
102986 +/*! ZSTD_generateSequences() :
102987 + * Generate sequences using ZSTD_compress2, given a source buffer.
102989 + * Each block will end with a dummy sequence
102990 + * with offset == 0, matchLength == 0, and litLength == length of last literals.
102991 + * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
102992 + * simply acts as a block delimiter.
102994 + * zc can be used to supply custom compression params.
102995 + * This function invokes ZSTD_compress2().
102997 + * The output of this function can be fed into ZSTD_compressSequences() with CCtx
102998 + * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
102999 + * @return : number of sequences generated
103000 + */
103002 +ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
103003 +                                          size_t outSeqsSize, const void* src, size_t srcSize);
103005 +/*! ZSTD_mergeBlockDelimiters() :
103006 + * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
103007 + * by merging them into the literals of the next sequence.
103009 + * As such, the final generated result has no explicit representation of block boundaries,
103010 + * and the final last literals segment is not represented in the sequences.
103012 + * The output of this function can be fed into ZSTD_compressSequences() with CCtx
103013 + * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
103014 + * @return : number of sequences left after merging
103015 + */
103016 +ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
103018 +/*! ZSTD_compressSequences() :
103019 + * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
103020 + * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
103021 + * The entire source is compressed into a single frame.
103023 + * The compression behavior changes based on cctx params. In particular:
103024 + *    If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
103025 + *    no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
103026 + *    the block size derived from the cctx, and sequences may be split. This is the default setting.
103028 + *    If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
103029 + *    block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
103031 + *    If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
103032 + *    behavior. If ZSTD_c_validateSequences == 1, then if a sequence is invalid (see doc/zstd_compression_format.md for
103033 + *    specifics regarding offset/matchlength requirements) then the function will bail out and return an error.
103035 + *    In addition to the two adjustable experimental params, there are other important cctx params.
103036 + *    - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
103037 + *    - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
103038 + *    - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
103039 + *      is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
103041 + * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
103042 + * Note 2: Once we integrate the ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
103043 + *         and cannot emit an RLE block that disagrees with the repcode history.
103044 + * @return : final compressed size or a ZSTD error.
103045 + */
103046 +ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
103047 +                                  const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
103048 +                                  const void* src, size_t srcSize);
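
Chaining the three calls above gives one plausible round trip through the sequence API. A rough sketch (the sequence-count bound is deliberately generous, and most error handling is elided; ZSTD_c_blockDelimiters and ZSTD_c_validateSequences are the experimental parameters defined further below):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdlib.h>

    size_t compress_via_sequences(void* dst, size_t dstCap, const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t const maxSeqs = srcSize + 1;   /* generous upper bound for this sketch */
        ZSTD_Sequence* const seqs = malloc(maxSeqs * sizeof(ZSTD_Sequence));
        if (cctx == NULL || seqs == NULL) {
            free(seqs); ZSTD_freeCCtx(cctx);
            return (size_t)-1;                /* reads as a generic ZSTD error code */
        }

        size_t nbSeqs = ZSTD_generateSequences(cctx, seqs, maxSeqs, src, srcSize);
        /* Drop the (of:0 ml:0) delimiters so the array matches ZSTD_sf_noBlockDelimiters. */
        nbSeqs = ZSTD_mergeBlockDelimiters(seqs, nbSeqs);

        ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);  /* defensive */
        size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCap, seqs, nbSeqs, src, srcSize);

        free(seqs);
        ZSTD_freeCCtx(cctx);
        return cSize;  /* final compressed size, or a ZSTD error code */
    }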
103051 +/*! ZSTD_writeSkippableFrame() :
103052 + * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
103054 + * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
103055 + * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
103056 + * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
103057 + * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
103059 + * Returns an error if destination buffer is not large enough, if the source size is not representable
103060 + * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
103062 + * @return : number of bytes written or a ZSTD error.
103063 + */
103064 +ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
103065 +                                            const void* src, size_t srcSize, unsigned magicVariant);
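
For instance, one plausible way to prepend application metadata that stock decoders will silently skip (the metadata layout is the application's own concern; names are illustrative):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Emit "<skippable metadata frame><compressed frame>" into dst. */
    size_t write_with_metadata(void* dst, size_t dstCap,
                               const void* meta, size_t metaSize,
                               const void* src, size_t srcSize)
    {
        /* magicVariant 0 => frame magic is ZSTD_MAGIC_SKIPPABLE_START + 0 */
        size_t const skipSize = ZSTD_writeSkippableFrame(dst, dstCap, meta, metaSize, 0);
        if (ZSTD_isError(skipSize)) return skipSize;
        size_t const cSize = ZSTD_compress((char*)dst + skipSize, dstCap - skipSize,
                                           src, srcSize, 3 /* arbitrary level */);
        if (ZSTD_isError(cSize)) return cSize;
        return skipSize + cSize;  /* decoders skip the metadata frame automatically */
    }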
103068 +/***************************************
103069 +*  Memory management
103070 +***************************************/
103072 +/*! ZSTD_estimate*() :
103073 + *  These functions make it possible to estimate memory usage
103074 + *  of a future {D,C}Ctx, before its creation.
103076 + *  ZSTD_estimateCCtxSize() will provide a memory budget large enough
103077 + *  for any compression level up to the selected one.
103078 + *  Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
103079 + *         does not include space for a window buffer.
103080 + *         Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
103081 + *  The estimate will assume the input may be arbitrarily large,
103082 + *  which is the worst case.
103084 + *  When srcSize can be bound by a known and rather "small" value,
103085 + *  this fact can be used to provide a tighter estimation
103086 + *  because the CCtx compression context will need less memory.
103087 + *  This tighter estimation can be provided by more advanced functions
103088 + *  ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
103089 + *  and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
103090 + *  Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
103092 + *  Note 2 : only single-threaded compression is supported.
103093 + *  ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
103094 + */
103095 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
103096 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
103097 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
103098 +ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
103100 +/*! ZSTD_estimateCStreamSize() :
103101 + *  ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to the selected one.
103102 + *  It will also consider src size to be arbitrarily "large", which is the worst case.
103103 + *  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
103104 + *  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
103105 + *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
103106 + *  Note : CStream size estimation is only correct for single-threaded compression.
103107 + *  ZSTD_DStream memory budget depends on window Size.
103108 + *  This information can be passed manually, using ZSTD_estimateDStreamSize,
103109 + *  or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame();
103110 + *  Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
103111 + *         an internal ?Dict will be created, whose additional size is not estimated here.
103112 + *         In this case, get total size by adding ZSTD_estimate?DictSize */
103113 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
103114 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
103115 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
103116 +ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
103117 +ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
103119 +/*! ZSTD_estimate?DictSize() :
103120 + *  ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
103121 + *  ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
103122 + *  Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
103123 + */
103124 +ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
103125 +ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
103126 +ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
103128 +/*! ZSTD_initStatic*() :
103129 + *  Initialize an object using a pre-allocated fixed-size buffer.
103130 + *  workspace: The memory area to emplace the object into.
103131 + *             Provided pointer *must be 8-byte aligned*.
103132 + *             Buffer must outlive object.
103133 + *  workspaceSize: Use ZSTD_estimate*Size() to determine
103134 + *                 how large workspace must be to support target scenario.
103135 + * @return : pointer to object (same address as workspace, just different type),
103136 + *           or NULL if error (size too small, incorrect alignment, etc.)
103137 + *  Note : zstd will never resize nor malloc() when using a static buffer.
103138 + *         If the object requires more memory than available,
103139 + *         zstd will just error out (typically ZSTD_error_memory_allocation).
103140 + *  Note 2 : there is no corresponding "free" function.
103141 + *           Since workspace is allocated externally, it must be freed externally too.
103142 + *  Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
103143 + *           into its associated cParams.
103144 + *  Limitation 1 : currently not compatible with internal dictionary creation, triggered by
103145 + *                 ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
103146 + *  Limitation 2 : static cctx currently not compatible with multi-threading.
103147 + *  Limitation 3 : static dctx is incompatible with legacy support.
103148 + */
103149 +ZSTDLIB_API ZSTD_CCtx*    ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
103150 +ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */
103152 +ZSTDLIB_API ZSTD_DCtx*    ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
103153 +ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */
103155 +ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
103156 +                                        void* workspace, size_t workspaceSize,
103157 +                                        const void* dict, size_t dictSize,
103158 +                                        ZSTD_dictLoadMethod_e dictLoadMethod,
103159 +                                        ZSTD_dictContentType_e dictContentType,
103160 +                                        ZSTD_compressionParameters cParams);
103162 +ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
103163 +                                        void* workspace, size_t workspaceSize,
103164 +                                        const void* dict, size_t dictSize,
103165 +                                        ZSTD_dictLoadMethod_e dictLoadMethod,
103166 +                                        ZSTD_dictContentType_e dictContentType);
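
A sketch of the intended pairing with ZSTD_estimate*() for a single-shot, single-threaded compression (malloc() is used for the workspace and already satisfies the 8-byte alignment rule; a kernel-side caller would substitute its own allocator):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdlib.h>

    size_t compress_with_static_cctx(void* dst, size_t dstCap,
                                     const void* src, size_t srcSize, int level)
    {
        size_t cSize = 0;  /* 0 signals setup failure in this sketch */
        size_t const wkspSize = ZSTD_estimateCCtxSize(level);
        void* const wksp = malloc(wkspSize);
        ZSTD_CCtx* const cctx = wksp ? ZSTD_initStaticCCtx(wksp, wkspSize) : NULL;
        if (cctx != NULL)  /* NULL => size too small or misaligned workspace */
            cSize = ZSTD_compressCCtx(cctx, dst, dstCap, src, srcSize, level);
        free(wksp);  /* no free function for the cctx; only the workspace is freed */
        return cSize;
    }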
103169 +/*! Custom memory allocation :
103170 + *  These prototypes make it possible to pass your own allocation/free functions.
103171 + *  ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
103172 + *  All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
103173 + */
103174 +typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
103175 +typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
103176 +typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
103177 +static
103178 +__attribute__((__unused__))
103179 +ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */
103181 +ZSTDLIB_API ZSTD_CCtx*    ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
103182 +ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
103183 +ZSTDLIB_API ZSTD_DCtx*    ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
103184 +ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
103186 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
103187 +                                                  ZSTD_dictLoadMethod_e dictLoadMethod,
103188 +                                                  ZSTD_dictContentType_e dictContentType,
103189 +                                                  ZSTD_compressionParameters cParams,
103190 +                                                  ZSTD_customMem customMem);
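
A minimal sketch of plugging in a custom allocator (the accounting struct and its semantics are illustrative only; note that customFree is not told the size of each allocation):

    #include <stdlib.h>
    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    typedef struct { size_t allocated; } AllocStats;  /* illustrative opaque state */

    static void* counting_alloc(void* opaque, size_t size)
    {
        ((AllocStats*)opaque)->allocated += size;  /* cumulative bytes requested */
        return malloc(size);
    }

    static void counting_free(void* opaque, void* address)
    {
        (void)opaque;
        free(address);
    }

    ZSTD_CCtx* create_tracked_cctx(AllocStats* stats)
    {
        ZSTD_customMem const cmem = { counting_alloc, counting_free, stats };
        return ZSTD_createCCtx_advanced(cmem);  /* all internal allocations use cmem */
    }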
103192 +/*! Thread pool :
103193 + * These prototypes make it possible to share a thread pool among multiple compression contexts.
103194 + * This can limit resources for applications with multiple threads where each one uses
103195 + * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
103196 + * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
103197 + * Note that the pool must remain alive for as long as it is being used.
103198 + * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
103199 + * to use an internal thread pool).
103200 + * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
103201 + */
103202 +typedef struct POOL_ctx_s ZSTD_threadPool;
103203 +ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
103204 +ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool);  /* accept NULL pointer */
103205 +ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
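
For example, two contexts drawing workers from one shared pool (thread and worker counts are arbitrary here):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    void shared_pool_example(void)
    {
        ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);
        ZSTD_CCtx* const a = ZSTD_createCCtx();
        ZSTD_CCtx* const b = ZSTD_createCCtx();
        ZSTD_CCtx_setParameter(a, ZSTD_c_nbWorkers, 2);
        ZSTD_CCtx_setParameter(b, ZSTD_c_nbWorkers, 2);
        ZSTD_CCtx_refThreadPool(a, pool);  /* both contexts now share the 4 threads */
        ZSTD_CCtx_refThreadPool(b, pool);
        /* ... run compressions on a and b ... */
        ZSTD_freeCCtx(a);
        ZSTD_freeCCtx(b);
        ZSTD_freeThreadPool(pool);  /* only after every user is freed */
    }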
103208 +/*
103209 + * This API is temporary and is expected to change or disappear in the future!
103210 + */
103211 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
103212 +    const void* dict, size_t dictSize,
103213 +    ZSTD_dictLoadMethod_e dictLoadMethod,
103214 +    ZSTD_dictContentType_e dictContentType,
103215 +    const ZSTD_CCtx_params* cctxParams,
103216 +    ZSTD_customMem customMem);
103218 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
103219 +    const void* dict, size_t dictSize,
103220 +    ZSTD_dictLoadMethod_e dictLoadMethod,
103221 +    ZSTD_dictContentType_e dictContentType,
103222 +    ZSTD_customMem customMem);
103225 +/***************************************
103226 +*  Advanced compression functions
103227 +***************************************/
103229 +/*! ZSTD_createCDict_byReference() :
103230 + *  Create a digested dictionary for compression
103231 + *  Dictionary content is just referenced, not duplicated.
103232 + *  As a consequence, `dictBuffer` **must** outlive CDict,
103233 + *  and its content must remain unmodified throughout the lifetime of CDict.
103234 + *  note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
103235 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
103237 +/*! ZSTD_getDictID_fromCDict() :
103238 + *  Provides the dictID of the dictionary loaded into `cdict`.
103239 + *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or empty.
103240 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
103241 +ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
103243 +/*! ZSTD_getCParams() :
103244 + * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
103245 + * `estimatedSrcSize` value is optional, select 0 if not known */
103246 +ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
103248 +/*! ZSTD_getParams() :
103249 + *  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
103250 + *  All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
103251 +ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
103253 +/*! ZSTD_checkCParams() :
103254 + *  Ensure param values remain within authorized range.
103255 + * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
103256 +ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
103258 +/*! ZSTD_adjustCParams() :
103259 + *  optimize params for a given `srcSize` and `dictSize`.
103260 + * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
103261 + * `dictSize` must be `0` when there is no dictionary.
103262 + *  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
103263 + *  This function never fails (wide contract) */
103264 +ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
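
The three helpers above compose naturally; a sketch (the level and the windowLog override are arbitrary choices):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    ZSTD_compressionParameters pick_cparams(unsigned long long srcSize, size_t dictSize)
    {
        /* Start from the defaults for level 19 at this input size. */
        ZSTD_compressionParameters cp = ZSTD_getCParams(19, srcSize, dictSize);
        cp.windowLog = 20;  /* hypothetical cap: 1 MiB window */
        cp = ZSTD_adjustCParams(cp, srcSize, dictSize);  /* clamps out-of-range fields */
        /* ZSTD_checkCParams(cp) would report 0 (success) at this point. */
        return cp;
    }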
103266 +/*! ZSTD_compress_advanced() :
103267 + *  Note : this function is now DEPRECATED.
103268 + *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
103269 + *  This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */
103270 +ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
103271 +                                          void* dst, size_t dstCapacity,
103272 +                                    const void* src, size_t srcSize,
103273 +                                    const void* dict,size_t dictSize,
103274 +                                          ZSTD_parameters params);
103276 +/*! ZSTD_compress_usingCDict_advanced() :
103277 + *  Note : this function is now REDUNDANT.
103278 + *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
103279 + *  This prototype will be marked as deprecated and generate compilation warning in some future version */
103280 +ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
103281 +                                              void* dst, size_t dstCapacity,
103282 +                                        const void* src, size_t srcSize,
103283 +                                        const ZSTD_CDict* cdict,
103284 +                                              ZSTD_frameParameters fParams);
103287 +/*! ZSTD_CCtx_loadDictionary_byReference() :
103288 + *  Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
103289 + *  It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
103290 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
103292 +/*! ZSTD_CCtx_loadDictionary_advanced() :
103293 + *  Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
103294 + *  how to load the dictionary (by copy ? by reference ?)
103295 + *  and how to interpret it (automatic ? force raw mode ? full mode only ?) */
103296 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
103298 +/*! ZSTD_CCtx_refPrefix_advanced() :
103299 + *  Same as ZSTD_CCtx_refPrefix(), but gives finer control over
103300 + *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
103301 +ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
103303 +/* ===   experimental parameters   === */
103304 +/* these parameters can be used with ZSTD_setParameter()
103305 + * they are not guaranteed to remain supported in the future */
103307 + /* Enables rsyncable mode,
103308 +  * which makes compressed files more rsync friendly
103309 +  * by adding periodic synchronization points to the compressed data.
103310 +  * The target average block size is ZSTD_c_jobSize / 2.
103311 +  * It's possible to modify the job size to increase or decrease
103312 +  * the granularity of the synchronization point.
103313 +  * Once the jobSize is smaller than the window size,
103314 +  * it will result in compression ratio degradation.
103315 +  * NOTE 1: rsyncable mode only works when multithreading is enabled.
103316 +  * NOTE 2: rsyncable performs poorly in combination with long range mode,
103317 +  * since it will decrease the effectiveness of synchronization points,
103318 +  * though mileage may vary.
103319 +  * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
103320 +  * If the selected compression level is already running significantly slower,
103321 +  * the overall speed won't be significantly impacted.
103322 +  */
103323 + #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1
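
A sketch of an rsync-friendly configuration following the notes above (level, worker count, and job size are arbitrary; a multithreading-enabled build is assumed):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    void make_rsyncable(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);  /* NOTE 1: required */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1);
        /* Smaller jobs give finer-grained sync points (target block = jobSize/2);
         * keep jobSize above the window size to avoid ratio degradation. */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_jobSize, 8 << 20);
    }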
103325 +/* Select a compression format.
103326 + * The value must be of type ZSTD_format_e.
103327 + * See ZSTD_format_e enum definition for details */
103328 +#define ZSTD_c_format ZSTD_c_experimentalParam2
103330 +/* Force back-reference distances to remain < windowSize,
103331 + * even when referencing into Dictionary content (default:0) */
103332 +#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3
103334 +/* Controls whether the contents of a CDict
103335 + * are used in place, or copied into the working context.
103336 + * Accepts values from the ZSTD_dictAttachPref_e enum.
103337 + * See the comments on that enum for an explanation of the feature. */
103338 +#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
103340 +/* Controls how the literals are compressed (default is auto).
103341 + * The value must be of type ZSTD_literalCompressionMode_e.
103342 + * See ZSTD_literalCompressionMode_e enum definition for details.
103343 + */
103344 +#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
103346 +/* Tries to fit compressed block size to be around targetCBlockSize.
103347 + * No target when targetCBlockSize == 0.
103348 + * There is no guarantee on compressed block size (default:0) */
103349 +#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
103351 +/* User's best guess of source size.
103352 + * Hint is not valid when srcSizeHint == 0.
103353 + * There is no guarantee that hint is close to actual source size,
103354 + * but compression ratio may regress significantly if the guess considerably underestimates it */
103355 +#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
103357 +/* Controls whether the new and experimental "dedicated dictionary search
103358 + * structure" can be used. This feature is still rough around the edges, be
103359 + * prepared for surprising behavior!
103361 + * How to use it:
103363 + * When using a CDict, whether to use this feature or not is controlled at
103364 + * CDict creation, and it must be set in a CCtxParams set passed into that
103365 + * construction (via ZSTD_createCDict_advanced2()). A compression will then
103366 + * use the feature or not based on how the CDict was constructed; the value of
103367 + * this param, set in the CCtx, will have no effect.
103369 + * However, when a dictionary buffer is passed into a CCtx, such as via
103370 + * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control
103371 + * whether the CDict that is created internally can use the feature or not.
103373 + * What it does:
103375 + * Normally, the internal data structures of the CDict are analogous to what
103376 + * would be stored in a CCtx after compressing the contents of a dictionary.
103377 + * To an approximation, a compression using a dictionary can then use those
103378 + * data structures to simply continue what is effectively a streaming
103379 + * compression where the simulated compression of the dictionary left off.
103380 + * Which is to say, the search structures in the CDict are normally the same
103381 + * format as in the CCtx.
103383 + * It is possible to do better, since the CDict is not like a CCtx: the search
103384 + * structures are written once during CDict creation, and then are only read
103385 + * after that, while the search structures in the CCtx are both read and
103386 + * written as the compression goes along. This means we can choose a search
103387 + * structure for the dictionary that is read-optimized.
103389 + * This feature enables the use of that different structure.
103391 + * Note that some of the members of the ZSTD_compressionParameters struct have
103392 + * different semantics and constraints in the dedicated search structure. It is
103393 + * highly recommended that you simply set a compression level in the CCtxParams
103394 + * you pass into the CDict creation call, and avoid messing with the cParams
103395 + * directly.
103397 + * Effects:
103399 + * This will only have any effect when the selected ZSTD_strategy
103400 + * implementation supports this feature. Currently, that's limited to
103401 + * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2.
103403 + * Note that this means that the CDict tables can no longer be copied into the
103404 + * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
103405 + * usable. The dictionary can only be attached or reloaded.
103407 + * In general, you should expect compression to be faster (sometimes very much
103408 + * so) and CDict creation to be slightly slower. Eventually, we will probably
103409 + * make this mode the default.
103410 + */
103411 +#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8
103413 +/* ZSTD_c_stableInBuffer
103414 + * Experimental parameter.
103415 + * Default is 0 == disabled. Set to 1 to enable.
103417 + * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
103418 + * between calls, except for the modifications that zstd makes to pos (the
103419 + * caller must not modify pos). This is checked by the compressor, and
103420 + * compression will fail if it ever changes. This means the only flush
103421 + * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
103422 + * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
103423 + * MUST not be modified during compression or you will get data corruption.
103425 + * When this flag is enabled zstd won't allocate an input window buffer,
103426 + * because the user guarantees it can reference the ZSTD_inBuffer until
103427 + * the frame is complete. But, it will still allocate an output buffer
103428 + * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
103429 + * avoid the memcpy() from the input buffer to the input window buffer.
103431 + * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
103432 + * That means this flag cannot be used with ZSTD_compressStream().
103434 + * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
103435 + * this flag is ALWAYS memory safe, and will never access out-of-bounds
103436 + * memory. However, compression WILL fail if you violate the preconditions.
103438 + * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
103439 + * not be modified during compression or you will get data corruption. This
103440 + * is because zstd needs to reference data in the ZSTD_inBuffer to find
103441 + * matches. Normally zstd maintains its own window buffer for this purpose,
103442 + * but passing this flag tells zstd to use the user provided buffer.
103443 + */
103444 +#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
103446 +/* ZSTD_c_stableOutBuffer
103447 + * Experimental parameter.
103448 + * Default is 0 == disabled. Set to 1 to enable.
103450 + * Tells the compressor that the ZSTD_outBuffer will not be resized between
103451 + * calls. Specifically: (out.size - out.pos) will never grow. This gives the
103452 + * compressor the freedom to say: If the compressed data doesn't fit in the
103453 + * output buffer then return ZSTD_error_dstSizeTooSmall. This allows the
103454 + * compressor to always write directly into the output buffer, instead of
103455 + * compressing into an internal buffer and copying to the output buffer.
103457 + * When this flag is enabled zstd won't allocate an output buffer, because
103458 + * it can write directly to the ZSTD_outBuffer. It will still allocate the
103459 + * input window buffer (see ZSTD_c_stableInBuffer).
103461 + * Zstd will check that (out.size - out.pos) never grows and return an error
103462 + * if it does. While not strictly necessary, this should prevent surprises.
103463 + */
103464 +#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10
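
Taken together, the two stable-buffer flags support a single-shot streaming pattern along these lines (caller-owned buffers; ZSTD_e_end is mandatory, per the notes above):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    size_t compress_stable(ZSTD_CCtx* cctx, void* dst, size_t dstCap,
                           const void* src, size_t srcSize)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableOutBuffer, 1);
        ZSTD_inBuffer  in  = { src, srcSize, 0 };  /* must stay valid and unmodified */
        ZSTD_outBuffer out = { dst, dstCap, 0 };   /* (out.size - out.pos) must not grow */
        size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;  /* e.g. dstSizeTooSmall */
        /* remaining == 0: the whole frame now sits in dst[0..out.pos). */
        return out.pos;
    }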
103466 +/* ZSTD_c_blockDelimiters
103467 + * Default is 0 == ZSTD_sf_noBlockDelimiters.
103469 + * For use with sequence compression API: ZSTD_compressSequences().
103471 + * Designates whether or not the given array of ZSTD_Sequence contains block delimiters
103472 + * and last literals, which are defined as sequences with offset == 0 and matchLength == 0.
103473 + * See the definition of ZSTD_Sequence for more specifics.
103474 + */
103475 +#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11
103477 +/* ZSTD_c_validateSequences
103478 + * Default is 0 == disabled. Set to 1 to enable sequence validation.
103480 + * For use with sequence compression API: ZSTD_compressSequences().
103481 + * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
103482 + * during function execution.
103484 + * Without validation, providing a sequence that does not conform to the zstd spec will cause
103485 + * undefined behavior, and may produce a corrupted block.
103487 + * With validation enabled, if a sequence is invalid (see doc/zstd_compression_format.md for
103488 + * specifics regarding offset/matchlength requirements) then the function will bail out and
103489 + * return an error.
103491 + */
103492 +#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
103494 +/*! ZSTD_CCtx_getParameter() :
103495 + *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,
103496 + *  and store it into int* value.
103497 + * @return : 0, or an error code (which can be tested with ZSTD_isError()).
103498 + */
103499 +ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
103502 +/*! ZSTD_CCtx_params :
103503 + *  Quick howto :
103504 + *  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
103505 + *  - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
103506 + *                                     an existing ZSTD_CCtx_params structure.
103507 + *                                     This is similar to
103508 + *                                     ZSTD_CCtx_setParameter().
103509 + *  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
103510 + *                                    an existing CCtx.
103511 + *                                    These parameters will be applied to
103512 + *                                    all subsequent frames.
103513 + *  - ZSTD_compressStream2() : Do compression using the CCtx.
103514 + *  - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.
103516 + *  This can be used with ZSTD_estimateCCtxSize_usingCCtxParams()
103517 + *  for static allocation of CCtx for single-threaded compression.
103518 + */
103519 +ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
103520 +ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);  /* accept NULL pointer */
103522 +/*! ZSTD_CCtxParams_reset() :
103523 + *  Reset params to default values.
103524 + */
103525 +ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
103527 +/*! ZSTD_CCtxParams_init() :
103528 + *  Initializes the compression parameters of cctxParams according to
103529 + *  compression level. All other parameters are reset to their default values.
103530 + */
103531 +ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
103533 +/*! ZSTD_CCtxParams_init_advanced() :
103534 + *  Initializes the compression and frame parameters of cctxParams according to
103535 + *  params. All other parameters are reset to their default values.
103536 + */
103537 +ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
103539 +/*! ZSTD_CCtxParams_setParameter() :
103540 + *  Similar to ZSTD_CCtx_setParameter.
103541 + *  Set one compression parameter, selected by enum ZSTD_cParameter.
103542 + *  Parameters must be applied to a ZSTD_CCtx using
103543 + *  ZSTD_CCtx_setParametersUsingCCtxParams().
103544 + * @result : a code representing success or failure (which can be tested with
103545 + *           ZSTD_isError()).
103546 + */
103547 +ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
103549 +/*! ZSTD_CCtxParams_getParameter() :
103550 + * Similar to ZSTD_CCtx_getParameter.
103551 + * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
103552 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
103553 + */
103554 +ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
103556 +/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
103557 + *  Apply a set of ZSTD_CCtx_params to the compression context.
103558 + *  This can be done even after compression is started,
103559 + *    if nbWorkers==0, this will have no impact until a new compression is started.
103560 + *    if nbWorkers>=1, new parameters will be picked up at next job,
103561 + *       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
103562 + */
103563 +ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
103564 +        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
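
Following the quick howto above, a compact sketch (the two parameter choices are arbitrary):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    size_t apply_params_example(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_params* const p = ZSTD_createCCtxParams();
        if (p == NULL) return (size_t)-1;  /* reads as a generic ZSTD error code */
        ZSTD_CCtxParams_init(p, 3);        /* level-3 defaults */
        ZSTD_CCtxParams_setParameter(p, ZSTD_c_checksumFlag, 1);
        ZSTD_CCtxParams_setParameter(p, ZSTD_c_windowLog, 23);
        size_t const r = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, p);
        ZSTD_freeCCtxParams(p);  /* the cctx retains its own copy of the values */
        return r;                /* 0 on success, or an error code */
    }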
103566 +/*! ZSTD_compressStream2_simpleArgs() :
103567 + *  Same as ZSTD_compressStream2(),
103568 + *  but using only integral types as arguments.
103569 + *  This variant might be helpful for binders from dynamic languages
103570 + *  which have trouble handling structures containing memory pointers.
103571 + */
103572 +ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
103573 +                            ZSTD_CCtx* cctx,
103574 +                            void* dst, size_t dstCapacity, size_t* dstPos,
103575 +                      const void* src, size_t srcSize, size_t* srcPos,
103576 +                            ZSTD_EndDirective endOp);
103579 +/***************************************
103580 +*  Advanced decompression functions
103581 +***************************************/
103583 +/*! ZSTD_isFrame() :
103584 + *  Tells if the content of `buffer` starts with a valid Frame Identifier.
103585 + *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
103586 + *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
103587 + *  Note 3 : Skippable Frame Identifiers are considered valid. */
103588 +ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
103590 +/*! ZSTD_createDDict_byReference() :
103591 + *  Create a digested dictionary, ready to start decompression operation without startup delay.
103592 + *  Dictionary content is referenced, and therefore stays in dictBuffer.
103593 + *  It is important that dictBuffer outlives DDict,
103594 + *  it must remain read accessible throughout the lifetime of DDict */
103595 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
103597 +/*! ZSTD_DCtx_loadDictionary_byReference() :
103598 + *  Same as ZSTD_DCtx_loadDictionary(),
103599 + *  but references `dict` content instead of copying it into `dctx`.
103600 + *  This saves memory if `dict` remains around.
103601 + *  However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
103602 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
103604 +/*! ZSTD_DCtx_loadDictionary_advanced() :
103605 + *  Same as ZSTD_DCtx_loadDictionary(),
103606 + *  but gives direct control over
103607 + *  how to load the dictionary (by copy ? by reference ?)
103608 + *  and how to interpret it (automatic ? force raw mode ? full mode only ?). */
103609 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
103611 +/*! ZSTD_DCtx_refPrefix_advanced() :
103612 + *  Same as ZSTD_DCtx_refPrefix(), but gives finer control over
103613 + *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
103614 +ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
103616 +/*! ZSTD_DCtx_setMaxWindowSize() :
103617 + *  Refuses to allocate internal buffers for frames requiring a window size larger than the provided limit.
103618 + *  This protects a decoder context from reserving too much memory for itself (potential attack scenario).
103619 + *  This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
103620 + *  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
103621 + * @return : 0, or an error code (which can be tested using ZSTD_isError()).
103622 + */
103623 +ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
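
For example, a decoder context that rejects frames demanding more than 8 MiB of window memory (the limit is arbitrary):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    ZSTD_DCtx* create_bounded_dctx(void)
    {
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        if (dctx != NULL)
            ZSTD_DCtx_setMaxWindowSize(dctx, 8u << 20);  /* larger frames now error out */
        return dctx;
    }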
103625 +/*! ZSTD_DCtx_getParameter() :
103626 + *  Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
103627 + *  and store it into int* value.
103628 + * @return : 0, or an error code (which can be tested with ZSTD_isError()).
103629 + */
103630 +ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
103632 +/* ZSTD_d_format
103633 + * experimental parameter,
103634 + * allowing selection between ZSTD_format_e input compression formats
103635 + */
103636 +#define ZSTD_d_format ZSTD_d_experimentalParam1
103637 +/* ZSTD_d_stableOutBuffer
103638 + * Experimental parameter.
103639 + * Default is 0 == disabled. Set to 1 to enable.
103641 + * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same
103642 + * between calls, except for the modifications that zstd makes to pos (the
103643 + * caller must not modify pos). This is checked by the decompressor, and
103644 + * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer
103645 + * MUST be large enough to fit the entire decompressed frame. This will be
103646 + * checked when the frame content size is known. The data in the ZSTD_outBuffer
103647 + * in the range [dst, dst + pos) MUST not be modified during decompression
103648 + * or you will get data corruption.
103650 + * When this flag is enabled zstd won't allocate an output buffer, because
103651 + * it can write directly to the ZSTD_outBuffer, but it will still allocate
103652 + * an input buffer large enough to fit any compressed block. This will also
103653 + * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
103654 + * If you need to avoid the input buffer allocation use the buffer-less
103655 + * streaming API.
103657 + * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using
103658 + * this flag is ALWAYS memory safe, and will never access out-of-bounds
103659 + * memory. However, decompression WILL fail if you violate the preconditions.
103661 + * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST
103662 + * not be modified during decompression or you will get data corruption. This
103663 + * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate
103664 + * matches. Normally zstd maintains its own buffer for this purpose, but passing
103665 + * this flag tells zstd to use the user provided buffer.
103666 + */
103667 +#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
103669 +/* ZSTD_d_forceIgnoreChecksum
103670 + * Experimental parameter.
103671 + * Default is 0 == disabled. Set to 1 to enable
103673 + * Tells the decompressor to skip checksum validation during decompression, regardless
103674 + * of whether checksumming was specified during compression. This offers some
103675 + * slight performance benefits, and may be useful for debugging.
103676 + * Param has values of type ZSTD_forceIgnoreChecksum_e
103677 + */
103678 +#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3
103680 +/* ZSTD_d_refMultipleDDicts
103681 + * Experimental parameter.
103682 + * Default is 0 == disabled. Set to 1 to enable
103684 + * If enabled and dctx is allocated on the heap, then additional memory will be allocated
103685 + * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_DCtx_refDDict()
103686 + * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead
103687 + * store all references. At decompression time, the appropriate dictID is selected
103688 + * from the set of DDicts based on the dictID in the frame.
103690 + * Usage is simply calling ZSTD_DCtx_refDDict() on multiple dict buffers.
103692 + * Param has values of type ZSTD_refMultipleDDicts_e
103694 + * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict() will trigger memory
103695 + * allocation for the hash table. ZSTD_freeDCtx() also frees this memory.
103696 + * Memory is allocated as per ZSTD_DCtx::customMem.
103698 + * Although this function allocates memory for the table, the user is still responsible for
103699 + * memory management of the underlying ZSTD_DDict* themselves.
103700 + */
103701 +#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
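
A sketch of decoding input whose frames may reference either of two dictionaries (the DDict arguments are assumed to be created elsewhere, e.g. with ZSTD_createDDict()):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    size_t decompress_with_two_dicts(void* dst, size_t dstCap,
                                     const void* src, size_t srcSize,
                                     const ZSTD_DDict* d1, const ZSTD_DDict* d2)
    {
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        if (dctx == NULL) return (size_t)-1;  /* reads as a generic ZSTD error code */
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, ZSTD_rmd_refMultipleDDicts);
        ZSTD_DCtx_refDDict(dctx, d1);  /* both references are retained, not replaced */
        ZSTD_DCtx_refDDict(dctx, d2);
        /* The dictID in each frame header now selects the matching DDict. */
        size_t const r = ZSTD_decompressDCtx(dctx, dst, dstCap, src, srcSize);
        ZSTD_freeDCtx(dctx);  /* also frees the internal reference table */
        return r;
    }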
103704 +/*! ZSTD_DCtx_setFormat() :
103705 + *  Instruct the decoder context about what kind of data to decode next.
103706 + *  This instruction is mandatory to decode data without a fully-formed header,
103707 + *  such as ZSTD_f_zstd1_magicless for example.
103708 + * @return : 0, or an error code (which can be tested using ZSTD_isError()). */
103709 +ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
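
A sketch of a magicless round trip; both sides must agree on the format out of band, since the frame itself can no longer be identified:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    size_t compress_magicless(ZSTD_CCtx* cctx, void* dst, size_t dstCap,
                              const void* src, size_t srcSize)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless);
        return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);  /* 4 bytes shorter */
    }

    size_t decompress_magicless(ZSTD_DCtx* dctx, void* dst, size_t dstCap,
                                const void* src, size_t srcSize)
    {
        ZSTD_DCtx_setFormat(dctx, ZSTD_f_zstd1_magicless);  /* mandatory: nothing to sniff */
        return ZSTD_decompressDCtx(dctx, dst, dstCap, src, srcSize);
    }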
103711 +/*! ZSTD_decompressStream_simpleArgs() :
103712 + *  Same as ZSTD_decompressStream(),
103713 + *  but using only integral types as arguments.
103714 + *  This can be helpful for binders from dynamic languages
103715 + *  which have trouble handling structures containing memory pointers.
103716 + */
103717 +ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
103718 +                            ZSTD_DCtx* dctx,
103719 +                            void* dst, size_t dstCapacity, size_t* dstPos,
103720 +                      const void* src, size_t srcSize, size_t* srcPos);
103723 +/********************************************************************
103724 +*  Advanced streaming functions
103725 +*  Warning : most of these functions are now redundant with the Advanced API.
103726 +*  Once Advanced API reaches "stable" status,
103727 +*  redundant functions will be deprecated, and then at some point removed.
103728 +********************************************************************/
103730 +/*=====   Advanced Streaming compression functions  =====*/
103732 +/*! ZSTD_initCStream_srcSize() :
103733 + * This function is deprecated, and equivalent to:
103734 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103735 + *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
103736 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
103737 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
103739 + * pledgedSrcSize must be correct. If it is not known at init time, use
103740 + * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
103741 + * "0" also disables frame content size field. It may be enabled in the future.
103742 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103743 + */
103744 +ZSTDLIB_API size_t
103745 +ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
103746 +                         int compressionLevel,
103747 +                         unsigned long long pledgedSrcSize);
103749 +/*! ZSTD_initCStream_usingDict() :
103750 + * This function is deprecated, and is equivalent to:
103751 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103752 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
103753 + *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
103755 + * Creates an internal CDict (incompatible with static CCtx), except if
103756 + * dict == NULL or dictSize < 8, in which case no dict is used.
103757 + * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
103758 + * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
103759 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103760 + */
103761 +ZSTDLIB_API size_t
103762 +ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
103763 +                     const void* dict, size_t dictSize,
103764 +                           int compressionLevel);
103766 +/*! ZSTD_initCStream_advanced() :
103767 + * This function is deprecated, and is approximately equivalent to:
103768 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103769 + *     // Pseudocode: Set each zstd parameter and leave the rest as-is.
103770 + *     for ((param, value) : params) {
103771 + *         ZSTD_CCtx_setParameter(zcs, param, value);
103772 + *     }
103773 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
103774 + *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
103776 + * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
103777 + * pledgedSrcSize must be correct.
103778 + * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
103779 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103780 + */
103781 +ZSTDLIB_API size_t
103782 +ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
103783 +                    const void* dict, size_t dictSize,
103784 +                          ZSTD_parameters params,
103785 +                          unsigned long long pledgedSrcSize);
103787 +/*! ZSTD_initCStream_usingCDict() :
103788 + * This function is deprecated, and equivalent to:
103789 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103790 + *     ZSTD_CCtx_refCDict(zcs, cdict);
103792 + * note : cdict will just be referenced, and must outlive the compression session
103793 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103794 + */
103795 +ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
103797 +/*! ZSTD_initCStream_usingCDict_advanced() :
103798 + *   This function is DEPRECATED, and is approximately equivalent to:
103799 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103800 + *     // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
103801 + *     for ((fParam, value) : fParams) {
103802 + *         ZSTD_CCtx_setParameter(zcs, fParam, value);
103803 + *     }
103804 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
103805 + *     ZSTD_CCtx_refCDict(zcs, cdict);
103807 + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
103808 + * pledgedSrcSize must be correct. If srcSize is not known at init time, use
103809 + * value ZSTD_CONTENTSIZE_UNKNOWN.
103810 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103811 + */
103812 +ZSTDLIB_API size_t
103813 +ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
103814 +                               const ZSTD_CDict* cdict,
103815 +                                     ZSTD_frameParameters fParams,
103816 +                                     unsigned long long pledgedSrcSize);
103818 +/*! ZSTD_resetCStream() :
103819 + * This function is deprecated, and is equivalent to:
103820 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
103821 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
103823 +  start a new frame, using the same parameters as the previous frame.
103824 +  This is typically useful to skip the dictionary loading stage, since the dictionary is re-used in place.
103825 +  Note that zcs must be initialized at least once before using ZSTD_resetCStream().
103826 + *  If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
103827 +  If pledgedSrcSize > 0, its value must be correct, as it will be written in the header, and checked at the end.
103828 + *  For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
103829 +  but it will change to mean "empty" in a future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
103830 + * @return : 0, or an error code (which can be tested using ZSTD_isError())
103831 + *  Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103832 + */
103833 +ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
103836 +typedef struct {
103837 +    unsigned long long ingested;   /* nb input bytes read and buffered */
103838 +    unsigned long long consumed;   /* nb input bytes actually compressed */
103839 +    unsigned long long produced;   /* nb of compressed bytes generated and buffered */
103840 +    unsigned long long flushed;    /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
103841 +    unsigned currentJobID;         /* MT only : latest started job nb */
103842 +    unsigned nbActiveWorkers;      /* MT only : nb of workers actively compressing at probe time */
103843 +} ZSTD_frameProgression;
103845 +/* ZSTD_getFrameProgression() :
103846 + * tells how much data has been ingested (read from input),
103847 + * consumed (input actually compressed), and produced (output) for the current frame.
103848 + * Note : (ingested - consumed) is the amount of input data buffered internally, not yet compressed.
103849 + * Aggregates progression inside active worker threads.
103850 + */
103851 +ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
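A hedged sketch of polling these counters from a monitoring thread (`cctx` is assumed to be running a, possibly multi-threaded, streaming compression; printf stands in for any reporting):

    ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
    unsigned long long const buffered = fp.ingested - fp.consumed;  /* input not yet compressed */
    printf("job #%u, %u active workers, %llu bytes buffered, %llu produced\n",
           fp.currentJobID, fp.nbActiveWorkers, buffered, fp.produced);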
103853 +/*! ZSTD_toFlushNow() :
103854 + *  Tell how many bytes are ready to be flushed immediately.
103855 + *  Useful for multithreading scenarios (nbWorkers >= 1).
103856 + *  Probe the oldest active job, defined as the oldest job not yet entirely flushed,
103857 + *  and check its output buffer.
103858 + * @return : amount of data stored in oldest job and ready to be flushed immediately.
103859 + *  if @return == 0, it means either :
103860 + *  + there is no active job (could be checked with ZSTD_frameProgression()), or
103861 + *  + the oldest job is still actively compressing data,
103862 + *    but everything it has produced has also been flushed so far,
103863 + *    therefore flush speed is limited by the production speed of the oldest job
103864 + *    irrespective of the speed of concurrent (and newer) jobs.
103865 + */
103866 +ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
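For example (sketch), a caller driving a multi-threaded session could prioritize draining before feeding more input:

    size_t const ready = ZSTD_toFlushNow(cctx);
    if (ready > 0) {
        /* the oldest job already has `ready` bytes waiting : drain them,
         * e.g. via ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_flush) */
    }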
103869 +/*=====   Advanced Streaming decompression functions  =====*/
103872 +/*! ZSTD_initDStream_usingDict() : This function is deprecated, and is equivalent to:
103874 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
103875 + *     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
103877 + * note: no dictionary will be used if dict == NULL or dictSize < 8
103878 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103879 + */
103880 +ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
103883 +/*! ZSTD_initDStream_usingDDict() : This function is deprecated, and is equivalent to:
103885 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
103886 + *     ZSTD_DCtx_refDDict(zds, ddict);
103888 + * note : ddict is referenced, it must outlive decompression session
103889 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103890 + */
103891 +ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
103894 +/*! ZSTD_resetDStream() : This function is deprecated, and is equivalent to:
103896 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
103898 + * re-use decompression parameters from previous init; saves dictionary loading
103899 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
103900 + */
103901 +ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
103904 +/*********************************************************************
103905 +*  Buffer-less and synchronous inner streaming functions
103907 +*  This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
103908 +*  But it's also a complex one, with several restrictions, documented below.
103909 +*  Prefer the normal streaming API for an easier experience.
103910 +********************************************************************* */
103913 +  Buffer-less streaming compression (synchronous mode)
103915 +  A ZSTD_CCtx object is required to track streaming operations.
103916 +  Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resources.
103917 +  ZSTD_CCtx object can be re-used multiple times within successive compression operations.
103919 +  Start by initializing a context.
103920 +  Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
103921 +  or ZSTD_compressBegin_advanced(), for finer parameter control.
103922 +  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx().
103924 +  Then, consume your input using ZSTD_compressContinue().
103925 +  There are some important considerations to keep in mind when using this advanced function :
103926 +  - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
103927 +  - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
103928 +  - Caller must ensure there is enough space in `dst` to store compressed data under the worst-case scenario.
103929 +    The worst-case evaluation is provided by ZSTD_compressBound().
103930 +    ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
103931 +  - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to the maximum distance size, see WindowLog).
103932 +    It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
103933 +  - ZSTD_compressContinue() detects that prior input has been overwritten when the `src` buffer overlaps it.
103934 +    In that case, it will "discard" the relevant memory section from its history.
103936 +  Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
103937 +  It's possible to use srcSize==0, in which case it will write a final empty block to end the frame.
103938 +  Without a last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
103940 +  `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
103943 +/*=====   Buffer-less streaming compression functions  =====*/
103944 +ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
103945 +ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
103946 +ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
103947 +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
103948 +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
103949 +ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
103951 +ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
103952 +ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
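Putting the above together, a minimal single-pass sketch (error paths shortened; `dst` is assumed to have been sized with ZSTD_compressBound(srcSize)):

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t pos = 0;
    size_t r = ZSTD_compressBegin(cctx, 3);          /* level 3, no dictionary */
    if (!ZSTD_isError(r)) {
        r = ZSTD_compressContinue(cctx, dst, dstCapacity, src, srcSize);
        if (!ZSTD_isError(r)) pos += r;
        r = ZSTD_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos, NULL, 0);  /* last block + epilogue */
        if (!ZSTD_isError(r)) pos += r;              /* pos == total compressed size */
    }
    ZSTD_freeCCtx(cctx);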
103956 +  Buffer-less streaming decompression (synchronous mode)
103958 +  A ZSTD_DCtx object is required to track streaming operations.
103959 +  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
103960 +  A ZSTD_DCtx object can be re-used multiple times.
103962 +  First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
103963 +  The frame header is extracted from the beginning of the compressed frame, so providing only the frame's beginning is enough.
103964 +  The data fragment must be large enough to ensure successful decoding.
103965 + `ZSTD_frameHeaderSize_max` bytes is always guaranteed to be large enough.
103966 +  @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
103967 +           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
103968 +           or an error code, which can be tested using ZSTD_isError().
103970 +  It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
103971 +  such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
103972 +  Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
103973 +  As a consequence, check that values remain within valid application range.
103974 +  For example, do not allocate memory blindly : check that `windowSize` is within expectation.
103975 +  Each application can set its own limits, depending on local restrictions.
103976 +  For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
103978 +  ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
103979 +  ZSTD_decompressContinue() is very sensitive to contiguity :
103980 +  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
103981 +  or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
103982 +  There are multiple ways to guarantee this condition.
103984 +  The most memory-efficient way is to use a round buffer of sufficient size.
103985 +  Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
103986 +  which can @return an error code if the required value is too large for the current system (in 32-bit mode).
103987 +  In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one,
103988 +  up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
103989 +  whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
103990 +  At that point, decoding can resume from the beginning of the buffer.
103991 +  Note that already decoded data stored in the buffer should be flushed before being overwritten.
103993 +  There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
103995 +  Finally, if you control the compression process, you can also ignore all buffer size rules,
103996 +  as long as the encoder and decoder progress in "lock-step",
103997 +  aka use exactly the same buffer sizes, break contiguity at the same place, etc.
103999 +  Once buffers are set up, start decompression with ZSTD_decompressBegin().
104000 +  If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
104002 +  Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
104003 +  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
104004 +  ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
104006 + @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
104007 +  It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
104008 +  It can also be an error code, which can be tested with ZSTD_isError().
104010 +  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
104011 +  Context can then be reset to start a new decompression.
104013 +  Note : it's possible to know if the next input to present is a header or a block, using ZSTD_nextInputType().
104014 +  This information is not required to properly decode a frame.
104016 +  == Special case : skippable frames ==
104018 +  Skippable frames allow integration of user-defined data into a flow of concatenated frames.
104019 +  Skippable frames will be ignored (skipped) by the decompressor.
104020 +  The format of skippable frames is as follows :
104021 +  a) Skippable frame ID - 4 bytes, little-endian format, any value from 0x184D2A50 to 0x184D2A5F
104022 +  b) Frame Size - 4 bytes, little-endian format, unsigned 32-bit
104023 +  c) Frame Content - any content (User Data) of length equal to Frame Size
104024 +  For skippable frames, ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
104025 +  For skippable frames, ZSTD_decompressContinue() always returns 0 : it only skips the content.
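A sketch of emitting a 4-byte user payload as a skippable frame, following the layout above (assumes a little-endian host, so the two fields can be copied raw):

    unsigned char frame[12];
    unsigned const magic = 0x184D2A50;   /* any ID in 0x184D2A50..0x184D2A5F */
    unsigned const fsize = 4;            /* Frame Size = payload length */
    memcpy(frame + 0, &magic, 4);
    memcpy(frame + 4, &fsize, 4);
    memcpy(frame + 8, "DATA", 4);        /* Frame Content : skipped by decoders */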
104028 +/*=====   Buffer-less streaming decompression functions  =====*/
104029 +typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
104030 +typedef struct {
104031 +    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
104032 +    unsigned long long windowSize;       /* can be very large, up to frameContentSize */
104033 +    unsigned blockSizeMax;
104034 +    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
104035 +    unsigned headerSize;
104036 +    unsigned dictID;
104037 +    unsigned checksumFlag;
104038 +} ZSTD_frameHeader;
104040 +/*! ZSTD_getFrameHeader() :
104041 + *  decodes the Frame Header, or requests a larger `srcSize`.
104042 + * @return : 0, `zfhPtr` is correctly filled,
104043 + *          >0, `srcSize` is too small, the value is the wanted `srcSize` amount,
104044 + *           or an error code, which can be tested using ZSTD_isError() */
104045 +ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
104046 +/*! ZSTD_getFrameHeader_advanced() :
104047 + *  same as ZSTD_getFrameHeader(),
104048 + *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
104049 +ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
104050 +ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
104052 +ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
104053 +ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
104054 +ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
104056 +ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
104057 +ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
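A condensed decode-loop sketch for a frame held fully in memory, with `dst` assumed large enough for the whole regenerated content (round-buffer management elided):

    ZSTD_frameHeader zfh;
    if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
        { /* >0 : header incomplete, supply more input; or an error code */ }
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    ZSTD_decompressBegin(dctx);
    {   const char* ip = (const char*)src;
        char* op = (char*)dst;
        size_t next;
        while ((next = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
            size_t const r = ZSTD_decompressContinue(dctx, op,
                                 dstCapacity - (size_t)(op - (char*)dst), ip, next);
            if (ZSTD_isError(r)) break;
            ip += next;   /* exactly `next` bytes are consumed */
            op += r;      /* r may be 0 when a metadata item was decoded */
        }
    }
    ZSTD_freeDCtx(dctx);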
104059 +/* misc */
104060 +ZSTDLIB_API void   ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
104061 +typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
104062 +ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
104067 +/* ============================ */
104068 +/**       Block level API       */
104069 +/* ============================ */
104072 +    Block functions produce and decode raw zstd blocks, without frame metadata.
104073 +    Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
104074 +    But users will have to take charge of the needed metadata to regenerate data, such as compressed and content sizes.
104076 +    A few rules to respect :
104077 +    - Compressing and decompressing require a context structure
104078 +      + Use ZSTD_createCCtx() and ZSTD_createDCtx()
104079 +    - It is necessary to initialize the context before starting
104080 +      + compression : any ZSTD_compressBegin*() variant, including with dictionary
104081 +      + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
104082 +      + copyCCtx() and copyDCtx() can be used too
104083 +    - Block size is limited : it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
104084 +      + If input is larger than a block size, it's necessary to split input data into multiple blocks
104085 +      + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
104086 +        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
104087 +    - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
104088 +      ===> In which case, nothing is produced into `dst` !
104089 +      + User __must__ test for such outcome and deal directly with uncompressed data
104090 +      + A block cannot be declared incompressible if the ZSTD_compressBlock() return value was != 0.
104091 +        Doing so would mess up the statistics history, leading to potential data corruption.
104092 +      + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
104093 +      + In case of multiple successive blocks, should some of them be uncompressed,
104094 +        the decoder must be informed of their existence in order to follow proper history.
104095 +        Use ZSTD_insertBlock() for such a case.
104098 +/*=====   Raw zstd block functions  =====*/
104099 +ZSTDLIB_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
104100 +ZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
104101 +ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
104102 +ZSTDLIB_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
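A sketch of the incompressible-block rule above (`cctx`/`dctx` are assumed already initialized via ZSTD_compressBegin()/ZSTD_decompressBegin(), and blkSize <= ZSTD_getBlockSize(cctx)):

    size_t const csize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, blkSize);
    if (ZSTD_isError(csize)) return csize;
    if (csize == 0) {
        /* not compressible : store/transmit src raw and flag it as such; on
         * the decoding side, register it so history stays consistent --
         * never feed raw data to ZSTD_decompressBlock() */
        ZSTD_insertBlock(dctx, src, blkSize);
    } else {
        ZSTD_decompressBlock(dctx, out, outCapacity, dst, csize);
    }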
104105 +#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
104106 diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
104107 index 167ca8c8424f..2fe4019b749f 100644
104108 --- a/include/media/v4l2-ctrls.h
104109 +++ b/include/media/v4l2-ctrls.h
104110 @@ -301,12 +301,14 @@ struct v4l2_ctrl {
104111   *             the control has been applied. This prevents applying controls
104112   *             from a cluster with multiple controls twice (when the first
104113   *             control of a cluster is applied, they all are).
104114 - * @req:       If set, this refers to another request that sets this control.
104115 + * @valid_p_req: If set, then p_req contains the control value for the request.
104116   * @p_req:     If the control handler containing this control reference
104117   *             is bound to a media request, then this points to the
104118 - *             value of the control that should be applied when the request
104119 + *             value of the control that must be applied when the request
104120   *             is executed, or to the value of the control at the time
104121 - *             that the request was completed.
104122 + *             that the request was completed. If @valid_p_req is false,
104123 + *             then this control was never set for this request and the
104124 + *             control will not be updated when this request is applied.
104125   *
104126   * Each control handler has a list of these refs. The list_head is used to
104127   * keep a sorted-by-control-ID list of all controls, while the next pointer
104128 @@ -319,7 +321,7 @@ struct v4l2_ctrl_ref {
104129         struct v4l2_ctrl_helper *helper;
104130         bool from_other_dev;
104131         bool req_done;
104132 -       struct v4l2_ctrl_ref *req;
104133 +       bool valid_p_req;
104134         union v4l2_ctrl_ptr p_req;
104137 @@ -346,7 +348,7 @@ struct v4l2_ctrl_ref {
104138   * @error:     The error code of the first failed control addition.
104139   * @request_is_queued: True if the request was queued.
104140   * @requests:  List to keep track of open control handler request objects.
104141 - *             For the parent control handler (@req_obj.req == NULL) this
104142 + *             For the parent control handler (@req_obj.ops == NULL) this
104143   *             is the list header. When the parent control handler is
104144   *             removed, it has to unbind and put all these requests since
104145   *             they refer to the parent.
104146 diff --git a/include/net/addrconf.h b/include/net/addrconf.h
104147 index 18f783dcd55f..78ea3e332688 100644
104148 --- a/include/net/addrconf.h
104149 +++ b/include/net/addrconf.h
104150 @@ -233,7 +233,6 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
104151  void ipv6_mc_remap(struct inet6_dev *idev);
104152  void ipv6_mc_init_dev(struct inet6_dev *idev);
104153  void ipv6_mc_destroy_dev(struct inet6_dev *idev);
104154 -int ipv6_mc_check_icmpv6(struct sk_buff *skb);
104155  int ipv6_mc_check_mld(struct sk_buff *skb);
104156  void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
104158 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
104159 index ebdd4afe30d2..ca4ac6603b9a 100644
104160 --- a/include/net/bluetooth/hci_core.h
104161 +++ b/include/net/bluetooth/hci_core.h
104162 @@ -704,6 +704,7 @@ struct hci_chan {
104163         struct sk_buff_head data_q;
104164         unsigned int    sent;
104165         __u8            state;
104166 +       bool            amp;
104169  struct hci_conn_params {
104170 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
104171 index 3c8c59471bc1..2cdc5a0709fe 100644
104172 --- a/include/net/inet_connection_sock.h
104173 +++ b/include/net/inet_connection_sock.h
104174 @@ -134,8 +134,9 @@ struct inet_connection_sock {
104175         u32                       icsk_probes_tstamp;
104176         u32                       icsk_user_timeout;
104178 -       u64                       icsk_ca_priv[104 / sizeof(u64)];
104179 -#define ICSK_CA_PRIV_SIZE      (13 * sizeof(u64))
104180 +/* XXX inflated by temporary internal debugging info */
104181 +#define ICSK_CA_PRIV_SIZE      (216)
104182 +       u64                       icsk_ca_priv[ICSK_CA_PRIV_SIZE / sizeof(u64)];
104185  #define ICSK_TIME_RETRANS      1       /* Retransmit timer */
104186 diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
104187 index 1d34fe154fe0..434a6158852f 100644
104188 --- a/include/net/netfilter/nf_tables_offload.h
104189 +++ b/include/net/netfilter/nf_tables_offload.h
104190 @@ -4,11 +4,16 @@
104191  #include <net/flow_offload.h>
104192  #include <net/netfilter/nf_tables.h>
104194 +enum nft_offload_reg_flags {
104195 +       NFT_OFFLOAD_F_NETWORK2HOST      = (1 << 0),
104198  struct nft_offload_reg {
104199         u32             key;
104200         u32             len;
104201         u32             base_offset;
104202         u32             offset;
104203 +       u32             flags;
104204         struct nft_data data;
104205         struct nft_data mask;
104207 @@ -45,6 +50,7 @@ struct nft_flow_key {
104208         struct flow_dissector_key_ports                 tp;
104209         struct flow_dissector_key_ip                    ip;
104210         struct flow_dissector_key_vlan                  vlan;
104211 +       struct flow_dissector_key_vlan                  cvlan;
104212         struct flow_dissector_key_eth_addrs             eth_addrs;
104213         struct flow_dissector_key_meta                  meta;
104214  } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
104215 @@ -71,13 +77,17 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rul
104216  void nft_flow_rule_destroy(struct nft_flow_rule *flow);
104217  int nft_flow_rule_offload_commit(struct net *net);
104219 -#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)                \
104220 +#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags) \
104221         (__reg)->base_offset    =                                       \
104222                 offsetof(struct nft_flow_key, __base);                  \
104223         (__reg)->offset         =                                       \
104224                 offsetof(struct nft_flow_key, __base.__field);          \
104225         (__reg)->len            = __len;                                \
104226         (__reg)->key            = __key;                                \
104227 +       (__reg)->flags          = __flags;
104229 +#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)                \
104230 +       NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
104232  #define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg)  \
104233         NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)         \
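For context, a hedged sketch of how a call site might use the new flag (modeled on the VLAN-id case this series addresses; the actual call sites live outside this hunk):

    /* match a 16-bit on-wire field that the offload path must convert to host order */
    NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan, vlan_tci,
                            sizeof(__be16), reg, NFT_OFFLOAD_F_NETWORK2HOST);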
104234 diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
104235 index 43c9c5d2bedb..33979017b782 100644
104236 --- a/include/net/nfc/nci_core.h
104237 +++ b/include/net/nfc/nci_core.h
104238 @@ -298,6 +298,7 @@ int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
104239                       struct sk_buff **resp);
104241  struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
104242 +void nci_hci_deallocate(struct nci_dev *ndev);
104243  int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
104244                        const u8 *param, size_t param_len);
104245  int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate,
104246 diff --git a/include/net/page_pool.h b/include/net/page_pool.h
104247 index b5b195305346..e05744b9a1bc 100644
104248 --- a/include/net/page_pool.h
104249 +++ b/include/net/page_pool.h
104250 @@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
104252  static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
104254 -       return page->dma_addr;
104255 +       dma_addr_t ret = page->dma_addr[0];
104256 +       if (sizeof(dma_addr_t) > sizeof(unsigned long))
104257 +               ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
104258 +       return ret;
104261 +static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
104263 +       page->dma_addr[0] = addr;
104264 +       if (sizeof(dma_addr_t) > sizeof(unsigned long))
104265 +               page->dma_addr[1] = upper_32_bits(addr);
104268  static inline bool is_page_pool_compiled_in(void)
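A note on the hunk above: the split `<< 16 << 16` (rather than a single `<< 32`) keeps the expression legal when dma_addr_t is only 32 bits wide -- the branch is dead code then, but a literal 32-bit shift of a 32-bit type would still draw a shift-count-overflow warning. The kernel's upper_32_bits() helper uses the mirrored `>> 16 >> 16` for the same reason.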
104269 diff --git a/include/net/tcp.h b/include/net/tcp.h
104270 index 963cd86d12dd..5a86fa1d2ff1 100644
104271 --- a/include/net/tcp.h
104272 +++ b/include/net/tcp.h
104273 @@ -799,6 +799,11 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
104274         return max_t(s64, t1 - t0, 0);
104277 +static inline u32 tcp_stamp32_us_delta(u32 t1, u32 t0)
104279 +       return max_t(s32, t1 - t0, 0);
104282  static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
104284         return tcp_ns_to_ts(skb->skb_mstamp_ns);
104285 @@ -866,16 +871,22 @@ struct tcp_skb_cb {
104286         __u32           ack_seq;        /* Sequence number ACK'd        */
104287         union {
104288                 struct {
104289 +#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
104290                         /* There is space for up to 24 bytes */
104291 -                       __u32 in_flight:30,/* Bytes in flight at transmit */
104292 -                             is_app_limited:1, /* cwnd not fully used? */
104293 -                             unused:1;
104294 +                       __u32 is_app_limited:1, /* cwnd not fully used? */
104295 +                             delivered_ce:20,
104296 +                             unused:11;
104297                         /* pkts S/ACKed so far upon tx of skb, incl retrans: */
104298                         __u32 delivered;
104299                         /* start of send pipeline phase */
104300 -                       u64 first_tx_mstamp;
104301 +                       u32 first_tx_mstamp;
104302                         /* when we reached the "delivered" count */
104303 -                       u64 delivered_mstamp;
104304 +                       u32 delivered_mstamp;
104305 +#define TCPCB_IN_FLIGHT_BITS 20
104306 +#define TCPCB_IN_FLIGHT_MAX ((1U << TCPCB_IN_FLIGHT_BITS) - 1)
104307 +                       u32 in_flight:20,   /* packets in flight at transmit */
104308 +                           unused2:12;
104309 +                       u32 lost;       /* packets lost so far upon tx of skb */
104310                 } tx;   /* only used for outgoing skbs */
104311                 union {
104312                         struct inet_skb_parm    h4;
104313 @@ -1025,7 +1036,11 @@ enum tcp_ca_ack_event_flags {
104314  #define TCP_CONG_NON_RESTRICTED 0x1
104315  /* Requires ECN/ECT set on all packets */
104316  #define TCP_CONG_NEEDS_ECN     0x2
104317 -#define TCP_CONG_MASK  (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
104318 +/* Wants notification of CE events (CA_EVENT_ECN_IS_CE, CA_EVENT_ECN_NO_CE). */
104319 +#define TCP_CONG_WANTS_CE_EVENTS       0x4
104320 +#define TCP_CONG_MASK  (TCP_CONG_NON_RESTRICTED | \
104321 +                        TCP_CONG_NEEDS_ECN | \
104322 +                        TCP_CONG_WANTS_CE_EVENTS)
104324  union tcp_cc_info;
104326 @@ -1045,8 +1060,13 @@ struct ack_sample {
104327   */
104328  struct rate_sample {
104329         u64  prior_mstamp; /* starting timestamp for interval */
104330 +       u32  prior_lost;        /* tp->lost at "prior_mstamp" */
104331         u32  prior_delivered;   /* tp->delivered at "prior_mstamp" */
104332 +       u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
104333 +       u32 tx_in_flight;       /* packets in flight at starting timestamp */
104334 +       s32  lost;              /* number of packets lost over interval */
104335         s32  delivered;         /* number of packets delivered over interval */
104336 +       s32  delivered_ce;      /* packets delivered w/ CE mark over interval */
104337         long interval_us;       /* time for tp->delivered to incr "delivered" */
104338         u32 snd_interval_us;    /* snd interval for delivered packets */
104339         u32 rcv_interval_us;    /* rcv interval for delivered packets */
104340 @@ -1057,6 +1077,7 @@ struct rate_sample {
104341         bool is_app_limited;    /* is sample from packet with bubble in pipe? */
104342         bool is_retrans;        /* is sample from retransmission? */
104343         bool is_ack_delayed;    /* is this (likely) a delayed ACK? */
104344 +       bool is_ece;            /* did this ACK have ECN marked? */
104347  struct tcp_congestion_ops {
104348 @@ -1083,10 +1104,12 @@ struct tcp_congestion_ops {
104349         u32  (*undo_cwnd)(struct sock *sk);
104350         /* hook for packet ack accounting (optional) */
104351         void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
104352 -       /* override sysctl_tcp_min_tso_segs */
104353 -       u32 (*min_tso_segs)(struct sock *sk);
104354 +       /* pick target number of segments per TSO/GSO skb (optional): */
104355 +       u32 (*tso_segs)(struct sock *sk, unsigned int mss_now);
104356         /* returns the multiplier used in tcp_sndbuf_expand (optional) */
104357         u32 (*sndbuf_expand)(struct sock *sk);
104358 +       /* react to a specific lost skb (optional) */
104359 +       void (*skb_marked_lost)(struct sock *sk, const struct sk_buff *skb);
104360         /* call when packets are delivered to update cwnd and pacing rate,
104361          * after all the ca_state processing. (optional)
104362          */
104363 @@ -1132,6 +1155,14 @@ static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
104365  #endif
104367 +static inline bool tcp_ca_wants_ce_events(const struct sock *sk)
104369 +       const struct inet_connection_sock *icsk = inet_csk(sk);
104371 +       return icsk->icsk_ca_ops->flags & (TCP_CONG_NEEDS_ECN |
104372 +                                          TCP_CONG_WANTS_CE_EVENTS);
104375  static inline bool tcp_ca_needs_ecn(const struct sock *sk)
104377         const struct inet_connection_sock *icsk = inet_csk(sk);
104378 @@ -1157,6 +1188,7 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
104381  /* From tcp_rate.c */
104382 +void tcp_set_tx_in_flight(struct sock *sk, struct sk_buff *skb);
104383  void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
104384  void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
104385                             struct rate_sample *rs);
104386 diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
104387 index 2568cb0627ec..fac8e89aed81 100644
104388 --- a/include/scsi/libfcoe.h
104389 +++ b/include/scsi/libfcoe.h
104390 @@ -249,7 +249,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
104391                          struct fc_frame *);
104393  /* libfcoe funcs */
104394 -u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
104395 +u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int);
104396  int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
104397                       const struct libfc_function_template *, int init_fcp);
104398  u32 fcoe_fc_crc(struct fc_frame *fp);
104399 diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
104400 index 036eb1f5c133..2f01314de73a 100644
104401 --- a/include/trace/events/sunrpc.h
104402 +++ b/include/trace/events/sunrpc.h
104403 @@ -1141,7 +1141,6 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
104405  DEFINE_WRITELOCK_EVENT(reserve_xprt);
104406  DEFINE_WRITELOCK_EVENT(release_xprt);
104407 -DEFINE_WRITELOCK_EVENT(transmit_queued);
104409  DECLARE_EVENT_CLASS(xprt_cong_event,
104410         TP_PROTO(
104411 diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
104412 index ce58cff99b66..2778da551846 100644
104413 --- a/include/uapi/asm-generic/unistd.h
104414 +++ b/include/uapi/asm-generic/unistd.h
104415 @@ -864,8 +864,20 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
104416  #define __NR_mount_setattr 442
104417  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
104419 +#define __NR_futex_wait 443
104420 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
104422 +#define __NR_futex_wake 444
104423 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
104425 +#define __NR_futex_waitv 445
104426 +__SC_COMP(__NR_futex_waitv, sys_futex_waitv, compat_sys_futex_waitv)
104428 +#define __NR_futex_requeue 446
104429 +__SC_COMP(__NR_futex_requeue, sys_futex_requeue, compat_sys_futex_requeue)
104431  #undef __NR_syscalls
104432 -#define __NR_syscalls 443
104433 +#define __NR_syscalls 447
104436   * 32 bit systems traditionally used different
104437 diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
104438 index a89eb0accd5e..afc3245e5728 100644
104439 --- a/include/uapi/linux/futex.h
104440 +++ b/include/uapi/linux/futex.h
104441 @@ -21,6 +21,7 @@
104442  #define FUTEX_WAKE_BITSET      10
104443  #define FUTEX_WAIT_REQUEUE_PI  11
104444  #define FUTEX_CMP_REQUEUE_PI   12
104445 +#define FUTEX_WAIT_MULTIPLE    31
104447  #define FUTEX_PRIVATE_FLAG     128
104448  #define FUTEX_CLOCK_REALTIME   256
104449 @@ -40,6 +41,39 @@
104450                                          FUTEX_PRIVATE_FLAG)
104451  #define FUTEX_CMP_REQUEUE_PI_PRIVATE   (FUTEX_CMP_REQUEUE_PI | \
104452                                          FUTEX_PRIVATE_FLAG)
104453 +#define FUTEX_WAIT_MULTIPLE_PRIVATE    (FUTEX_WAIT_MULTIPLE | \
104454 +                                        FUTEX_PRIVATE_FLAG)
104456 +/* Size argument to futex2 syscall */
104457 +#define FUTEX_32       2
104459 +#define FUTEX_SIZE_MASK        0x3
104461 +#define FUTEX_SHARED_FLAG 8
104463 +#define FUTEX_WAITV_MAX 128
104466 + * struct futex_waitv - A waiter for vectorized wait
104467 + * @uaddr: User address to wait on
104468 + * @val:   Expected value at uaddr
104469 + * @flags: Flags for this waiter
104470 + */
104471 +struct futex_waitv {
104472 +       void __user *uaddr;
104473 +       unsigned int val;
104474 +       unsigned int flags;
104478 + * struct futex_requeue - Define an address and its flags for requeue operation
104479 + * @uaddr: User address of one of the requeue arguments
104480 + * @flags: Flags for this address
104481 + */
104482 +struct futex_requeue {
104483 +       void __user *uaddr;
104484 +       unsigned int flags;
104488   * Support for robust futexes: the kernel cleans up held futexes at
104489 @@ -150,4 +184,21 @@ struct robust_list_head {
104490    (((op & 0xf) << 28) | ((cmp & 0xf) << 24)            \
104491     | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
104494 + * Maximum number of multiple futexes to wait for
104495 + */
104496 +#define FUTEX_MULTIPLE_MAX_COUNT       128
104499 + * struct futex_wait_block - Block of futexes to be waited for
104500 + * @uaddr:     User address of the futex
104501 + * @val:       Futex value expected by userspace
104502 + * @bitset:    Bitset for the optional bitmasked wakeup
104503 + */
104504 +struct futex_wait_block {
104505 +       __u32 __user *uaddr;
104506 +       __u32 val;
104507 +       __u32 bitset;
104510  #endif /* _UAPI_LINUX_FUTEX_H */
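A heavily hedged userspace sketch (this futex2 ABI is experimental and the exact syscall argument list may differ from what the patch series ultimately ships): waiting on either of two 32-bit futexes with futex_waitv; `futex_a` and `futex_b` are hypothetical unsigned ints.

    struct futex_waitv waiters[2] = {
        { .uaddr = &futex_a, .val = 0, .flags = FUTEX_32 },  /* block while *uaddr == val */
        { .uaddr = &futex_b, .val = 0, .flags = FUTEX_32 },
    };
    /* something like: syscall(__NR_futex_waitv, waiters, 2, 0, &timeout);
     * expected to return the index of the woken entry, or -1 with errno set */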
104511 diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
104512 index 20ee93f0f876..96d52dd9c48a 100644
104513 --- a/include/uapi/linux/inet_diag.h
104514 +++ b/include/uapi/linux/inet_diag.h
104515 @@ -231,9 +231,42 @@ struct tcp_bbr_info {
104516         __u32   bbr_cwnd_gain;          /* cwnd gain shifted left 8 bits */
104519 +/* Phase as reported in netlink/ss stats. */
104520 +enum tcp_bbr2_phase {
104521 +       BBR2_PHASE_INVALID              = 0,
104522 +       BBR2_PHASE_STARTUP              = 1,
104523 +       BBR2_PHASE_DRAIN                = 2,
104524 +       BBR2_PHASE_PROBE_RTT            = 3,
104525 +       BBR2_PHASE_PROBE_BW_UP          = 4,
104526 +       BBR2_PHASE_PROBE_BW_DOWN        = 5,
104527 +       BBR2_PHASE_PROBE_BW_CRUISE      = 6,
104528 +       BBR2_PHASE_PROBE_BW_REFILL      = 7
104531 +struct tcp_bbr2_info {
104532 +       /* u64 bw: bandwidth (app throughput) estimate in Byte per sec: */
104533 +       __u32   bbr_bw_lsb;             /* lower 32 bits of bw */
104534 +       __u32   bbr_bw_msb;             /* upper 32 bits of bw */
104535 +       __u32   bbr_min_rtt;            /* min-filtered RTT in uSec */
104536 +       __u32   bbr_pacing_gain;        /* pacing gain shifted left 8 bits */
104537 +       __u32   bbr_cwnd_gain;          /* cwnd gain shifted left 8 bits */
104538 +       __u32   bbr_bw_hi_lsb;          /* lower 32 bits of bw_hi */
104539 +       __u32   bbr_bw_hi_msb;          /* upper 32 bits of bw_hi */
104540 +       __u32   bbr_bw_lo_lsb;          /* lower 32 bits of bw_lo */
104541 +       __u32   bbr_bw_lo_msb;          /* upper 32 bits of bw_lo */
104542 +       __u8    bbr_mode;               /* current bbr_mode in state machine */
104543 +       __u8    bbr_phase;              /* current state machine phase */
104544 +       __u8    unused1;                /* alignment padding; not used yet */
104545 +       __u8    bbr_version;            /* MUST be at this offset in struct */
104546 +       __u32   bbr_inflight_lo;        /* lower/short-term data volume bound */
104547 +       __u32   bbr_inflight_hi;        /* higher/long-term data volume bound */
104548 +       __u32   bbr_extra_acked;        /* max excess packets ACKed in epoch */
104551  union tcp_cc_info {
104552         struct tcpvegas_info    vegas;
104553         struct tcp_dctcp_info   dctcp;
104554         struct tcp_bbr_info     bbr;
104555 +       struct tcp_bbr2_info    bbr2;
104557  #endif /* _UAPI_INET_DIAG_H_ */
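For illustration, a sketch of reading these stats from userspace (assumes the socket's congestion control is bbr2, and the usual <netinet/tcp.h>/<sys/socket.h> headers):

    union tcp_cc_info info;
    socklen_t len = sizeof(info);
    if (getsockopt(fd, IPPROTO_TCP, TCP_CC_INFO, &info, &len) == 0)
        printf("bbr2 phase=%u inflight_lo=%u inflight_hi=%u\n",
               info.bbr2.bbr_phase, info.bbr2.bbr_inflight_lo, info.bbr2.bbr_inflight_hi);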
104558 diff --git a/include/uapi/linux/netfilter/xt_SECMARK.h b/include/uapi/linux/netfilter/xt_SECMARK.h
104559 index 1f2a708413f5..beb2cadba8a9 100644
104560 --- a/include/uapi/linux/netfilter/xt_SECMARK.h
104561 +++ b/include/uapi/linux/netfilter/xt_SECMARK.h
104562 @@ -20,4 +20,10 @@ struct xt_secmark_target_info {
104563         char secctx[SECMARK_SECCTX_MAX];
104566 +struct xt_secmark_target_info_v1 {
104567 +       __u8 mode;
104568 +       char secctx[SECMARK_SECCTX_MAX];
104569 +       __u32 secid;
104572  #endif /*_XT_SECMARK_H_target */
104573 diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
104574 index 900a32e63424..6a3ac496a56c 100644
104575 --- a/include/uapi/linux/tty_flags.h
104576 +++ b/include/uapi/linux/tty_flags.h
104577 @@ -39,7 +39,7 @@
104578   * WARNING: These flags are no longer used and have been superseded by the
104579   *         TTY_PORT_ flags in the iflags field (and not userspace-visible)
104580   */
104581 -#ifndef _KERNEL_
104582 +#ifndef __KERNEL__
104583  #define ASYNCB_INITIALIZED     31 /* Serial port was initialized */
104584  #define ASYNCB_SUSPENDED       30 /* Serial port is suspended */
104585  #define ASYNCB_NORMAL_ACTIVE   29 /* Normal device is active */
104586 @@ -81,7 +81,7 @@
104587  #define ASYNC_SPD_WARP         (ASYNC_SPD_HI|ASYNC_SPD_SHI)
104588  #define ASYNC_SPD_MASK         (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
104590 -#ifndef _KERNEL_
104591 +#ifndef __KERNEL__
104592  /* These flags are no longer used (and were always masked from userspace) */
104593  #define ASYNC_INITIALIZED      (1U << ASYNCB_INITIALIZED)
104594  #define ASYNC_NORMAL_ACTIVE    (1U << ASYNCB_NORMAL_ACTIVE)
104595 diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
104596 index d854cb19c42c..bfdae12cdacf 100644
104597 --- a/include/uapi/linux/usb/video.h
104598 +++ b/include/uapi/linux/usb/video.h
104599 @@ -302,9 +302,10 @@ struct uvc_processing_unit_descriptor {
104600         __u8   bControlSize;
104601         __u8   bmControls[2];
104602         __u8   iProcessing;
104603 +       __u8   bmVideoStandards;
104604  } __attribute__((__packed__));
104606 -#define UVC_DT_PROCESSING_UNIT_SIZE(n)                 (9+(n))
104607 +#define UVC_DT_PROCESSING_UNIT_SIZE(n)                 (10+(n))
104609  /* 3.7.2.6. Extension Unit Descriptor */
104610  struct uvc_extension_unit_descriptor {
104611 diff --git a/init/Kconfig b/init/Kconfig
104612 index 5f5c776ef192..f49c69d8a8b0 100644
104613 --- a/init/Kconfig
104614 +++ b/init/Kconfig
104615 @@ -830,6 +830,17 @@ config UCLAMP_BUCKETS_COUNT
104617  endmenu
104619 +config CACULE_SCHED
104620 +       bool "CacULE CPU scheduler"
104621 +       default y
104622 +       help
104623 +         The CacULE CPU scheduler is based on an interactivity score mechanism.
104624 +         The interactivity score is inspired by the ULE scheduler (the FreeBSD
104625 +         scheduler).
104627 +         If unsure, say Y here.
104631  # For architectures that want to enable the support for NUMA-affine scheduler
104632  # balancing logic:
104633 @@ -1220,6 +1231,18 @@ config SCHED_AUTOGROUP
104634           desktop applications.  Task group autogeneration is currently based
104635           upon task session.
104637 +config SCHED_AUTOGROUP_DEFAULT_ENABLED
104638 +       bool "Enable automatic process group scheduling feature"
104639 +       default y
104640 +       depends on SCHED_AUTOGROUP
104641 +       help
104642 +         If set, automatic process group scheduling will be enabled by
104643 +         default, but can be disabled by passing autogroup=0 on the
104644 +         kernel command line during boot, or by writing 0 to the file
104645 +         /proc/sys/kernel/sched_autogroup_enabled.
104647 +         If unsure say Y.
104649  config SYSFS_DEPRECATED
104650         bool "Enable deprecated sysfs features to support old userspace tools"
104651         depends on SYSFS
104652 @@ -1316,7 +1339,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
104654  config CC_OPTIMIZE_FOR_PERFORMANCE_O3
104655         bool "Optimize more for performance (-O3)"
104656 -       depends on ARC
104657         help
104658           Choosing this option will pass "-O3" to your compiler to optimize
104659           the kernel yet more for performance.
104660 @@ -1537,6 +1559,13 @@ config FUTEX
104661           support for "fast userspace mutexes".  The resulting kernel may not
104662           run glibc-based applications correctly.
104664 +config FUTEX2
104665 +       bool "Enable futex2 support" if EXPERT
104666 +       depends on FUTEX
104667 +       default y
104668 +       help
104669 +         Support for the futex2 interface.
104671  config FUTEX_PI
104672         bool
104673         depends on FUTEX && RT_MUTEXES
104674 @@ -2217,8 +2246,8 @@ config MODULE_COMPRESS
104675         bool "Compress modules on installation"
104676         help
104678 -         Compresses kernel modules when 'make modules_install' is run; gzip or
104679 -         xz depending on "Compression algorithm" below.
104680 +         Compresses kernel modules when 'make modules_install' is run; gzip,
104681 +         xz, or zstd depending on "Compression algorithm" below.
104683           module-init-tools MAY support gzip, and kmod MAY support gzip and xz.
104685 @@ -2240,7 +2269,7 @@ choice
104686           This determines which sort of compression will be used during
104687           'make modules_install'.
104689 -         GZIP (default) and XZ are supported.
104690 +         GZIP (default), XZ, and ZSTD are supported.
104692  config MODULE_COMPRESS_GZIP
104693         bool "GZIP"
104694 @@ -2248,6 +2277,9 @@ config MODULE_COMPRESS_GZIP
104695  config MODULE_COMPRESS_XZ
104696         bool "XZ"
104698 +config MODULE_COMPRESS_ZSTD
104699 +       bool "ZSTD"
104701  endchoice
104703  config MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
104704 diff --git a/init/init_task.c b/init/init_task.c
104705 index 3711cdaafed2..8b08c2e19cbb 100644
104706 --- a/init/init_task.c
104707 +++ b/init/init_task.c
104708 @@ -210,7 +210,7 @@ struct task_struct init_task
104709  #ifdef CONFIG_SECURITY
104710         .security       = NULL,
104711  #endif
104712 -#ifdef CONFIG_SECCOMP
104713 +#ifdef CONFIG_SECCOMP_FILTER
104714         .seccomp        = { .filter_count = ATOMIC_INIT(0) },
104715  #endif
104717 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
104718 index 8031464ed4ae..4e4e61111500 100644
104719 --- a/ipc/mqueue.c
104720 +++ b/ipc/mqueue.c
104721 @@ -1004,12 +1004,14 @@ static inline void __pipelined_op(struct wake_q_head *wake_q,
104722                                   struct mqueue_inode_info *info,
104723                                   struct ext_wait_queue *this)
104725 +       struct task_struct *task;
104727         list_del(&this->list);
104728 -       get_task_struct(this->task);
104729 +       task = get_task_struct(this->task);
104731         /* see MQ_BARRIER for purpose/pairing */
104732         smp_store_release(&this->state, STATE_READY);
104733 -       wake_q_add_safe(wake_q, this->task);
104734 +       wake_q_add_safe(wake_q, task);
104737  /* pipelined_send() - send a message directly to the task waiting in
104738 diff --git a/ipc/msg.c b/ipc/msg.c
104739 index acd1bc7af55a..6e6c8e0c9380 100644
104740 --- a/ipc/msg.c
104741 +++ b/ipc/msg.c
104742 @@ -251,11 +251,13 @@ static void expunge_all(struct msg_queue *msq, int res,
104743         struct msg_receiver *msr, *t;
104745         list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
104746 -               get_task_struct(msr->r_tsk);
104747 +               struct task_struct *r_tsk;
104749 +               r_tsk = get_task_struct(msr->r_tsk);
104751                 /* see MSG_BARRIER for purpose/pairing */
104752                 smp_store_release(&msr->r_msg, ERR_PTR(res));
104753 -               wake_q_add_safe(wake_q, msr->r_tsk);
104754 +               wake_q_add_safe(wake_q, r_tsk);
104755         }
104758 diff --git a/ipc/namespace.c b/ipc/namespace.c
104759 index 7bd0766ddc3b..2bb05b2dacd1 100644
104760 --- a/ipc/namespace.c
104761 +++ b/ipc/namespace.c
104762 @@ -172,6 +172,23 @@ void put_ipc_ns(struct ipc_namespace *ns)
104763                         schedule_work(&free_ipc_work);
104764         }
104766 +EXPORT_SYMBOL(put_ipc_ns);
104768 +struct ipc_namespace *get_ipc_ns_exported(struct ipc_namespace *ns)
104770 +       return get_ipc_ns(ns);
104772 +EXPORT_SYMBOL(get_ipc_ns_exported);
104774 +struct ipc_namespace *show_init_ipc_ns(void)
104776 +#if defined(CONFIG_IPC_NS)
104777 +       return &init_ipc_ns;
104778 +#else
104779 +       return NULL;
104780 +#endif
104782 +EXPORT_SYMBOL(show_init_ipc_ns);
104784  static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
104786 diff --git a/ipc/sem.c b/ipc/sem.c
104787 index f6c30a85dadf..7d9c06b0ad6e 100644
104788 --- a/ipc/sem.c
104789 +++ b/ipc/sem.c
104790 @@ -784,12 +784,14 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
104791  static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
104792                                              struct wake_q_head *wake_q)
104794 -       get_task_struct(q->sleeper);
104795 +       struct task_struct *sleeper;
104797 +       sleeper = get_task_struct(q->sleeper);
104799         /* see SEM_BARRIER_2 for purpuse/pairing */
104800         smp_store_release(&q->status, error);
104802 -       wake_q_add_safe(wake_q, q->sleeper);
104803 +       wake_q_add_safe(wake_q, sleeper);
104806  static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
104807 diff --git a/kernel/.gitignore b/kernel/.gitignore
104808 index 78701ea37c97..5518835ac35c 100644
104809 --- a/kernel/.gitignore
104810 +++ b/kernel/.gitignore
104811 @@ -1,4 +1,5 @@
104812  # SPDX-License-Identifier: GPL-2.0-only
104813 +/config_data
104814  kheaders.md5
104815  timeconst.h
104816  hz.bc
104817 diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
104818 index 38ef6d06888e..b4a1995149d0 100644
104819 --- a/kernel/Kconfig.hz
104820 +++ b/kernel/Kconfig.hz
104821 @@ -5,7 +5,7 @@
104823  choice
104824         prompt "Timer frequency"
104825 -       default HZ_250
104826 +       default HZ_500
104827         help
104828          Allows the configuration of the timer frequency. It is customary
104829          to have the timer interrupt run at 1000 Hz but 100 Hz may be more
104830 @@ -40,6 +40,13 @@ choice
104831          on SMP and NUMA systems and exactly dividing by both PAL and
104832          NTSC frame rates for video and multimedia work.
104834 +       config HZ_500
104835 +               bool "500 HZ"
104836 +       help
104837 +        500 Hz is a balanced timer frequency. It provides fast interactivity
104838 +        on desktops and great smoothness without increasing CPU power
104839 +        consumption or sacrificing battery life on laptops.
104841         config HZ_1000
104842                 bool "1000 HZ"
104843         help
104844 @@ -53,6 +60,7 @@ config HZ
104845         default 100 if HZ_100
104846         default 250 if HZ_250
104847         default 300 if HZ_300
104848 +       default 500 if HZ_500
104849         default 1000 if HZ_1000
104851  config SCHED_HRTICK
104852 diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
104853 index 416017301660..293725c44cbb 100644
104854 --- a/kernel/Kconfig.preempt
104855 +++ b/kernel/Kconfig.preempt
104856 @@ -2,7 +2,7 @@
104858  choice
104859         prompt "Preemption Model"
104860 -       default PREEMPT_NONE
104861 +       default PREEMPT
104863  config PREEMPT_NONE
104864         bool "No Forced Preemption (Server)"
104865 diff --git a/kernel/Makefile b/kernel/Makefile
104866 index 320f1f3941b7..caf7fca27b62 100644
104867 --- a/kernel/Makefile
104868 +++ b/kernel/Makefile
104869 @@ -57,6 +57,7 @@ obj-$(CONFIG_PROFILING) += profile.o
104870  obj-$(CONFIG_STACKTRACE) += stacktrace.o
104871  obj-y += time/
104872  obj-$(CONFIG_FUTEX) += futex.o
104873 +obj-$(CONFIG_FUTEX2) += futex2.o
104874  obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
104875  obj-$(CONFIG_SMP) += smp.o
104876  ifneq ($(CONFIG_SMP),y)
104877 @@ -138,10 +139,15 @@ obj-$(CONFIG_SCF_TORTURE_TEST) += scftorture.o
104879  $(obj)/configs.o: $(obj)/config_data.gz
104881 -targets += config_data.gz
104882 -$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
104883 +targets += config_data config_data.gz
104884 +$(obj)/config_data.gz: $(obj)/config_data FORCE
104885         $(call if_changed,gzip)
104887 +filechk_cat = cat $<
104889 +$(obj)/config_data: $(KCONFIG_CONFIG) FORCE
104890 +       $(call filechk,cat)
104892  $(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
104894  quiet_cmd_genikh = CHK     $(obj)/kheaders_data.tar.xz
104895 diff --git a/kernel/bounds.c b/kernel/bounds.c
104896 index 9795d75b09b2..a8cbf2d0b11a 100644
104897 --- a/kernel/bounds.c
104898 +++ b/kernel/bounds.c
104899 @@ -22,6 +22,12 @@ int main(void)
104900         DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
104901  #endif
104902         DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
104903 +#ifdef CONFIG_LRU_GEN
104904 +       /* bits needed to represent internal values stored in page->flags */
104905 +       DEFINE(LRU_GEN_WIDTH, order_base_2(CONFIG_NR_LRU_GENS + 1));
104906 +       /* bits needed to represent normalized values for external uses */
104907 +       DEFINE(LRU_GEN_SHIFT, order_base_2(CONFIG_NR_LRU_GENS));
104908 +#endif
104909         /* End of constants */
104911         return 0;
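
The two LRU_GEN constants above are derived with order_base_2(), i.e. the
number of bits needed to represent the generation counters in page->flags.
As a worked example (CONFIG_NR_LRU_GENS=4 is assumed here purely for
illustration), LRU_GEN_WIDTH = order_base_2(4 + 1) = 3 bits and
LRU_GEN_SHIFT = order_base_2(4) = 2 bits. A minimal sketch of the helper:

    #include <stdio.h>

    /* order_base_2(n): smallest k such that 2^k >= n, for n >= 1. */
    static int order_base_2(unsigned int n)
    {
            int k = 0;

            while ((1u << k) < n)
                    k++;
            return k;
    }

    int main(void)
    {
            unsigned int nr_gens = 4; /* assumed CONFIG_NR_LRU_GENS */

            printf("LRU_GEN_WIDTH = %d\n", order_base_2(nr_gens + 1)); /* 3 */
            printf("LRU_GEN_SHIFT = %d\n", order_base_2(nr_gens));     /* 2 */
            return 0;
    }
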
104912 diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
104913 index f25b719ac786..84b3b35fc0d0 100644
104914 --- a/kernel/bpf/ringbuf.c
104915 +++ b/kernel/bpf/ringbuf.c
104916 @@ -221,25 +221,20 @@ static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
104917         return -ENOTSUPP;
104920 -static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb)
104922 -       size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT;
104924 -       /* consumer page + producer page + 2 x data pages */
104925 -       return RINGBUF_POS_PAGES + 2 * data_pages;
104928  static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
104930         struct bpf_ringbuf_map *rb_map;
104931 -       size_t mmap_sz;
104933         rb_map = container_of(map, struct bpf_ringbuf_map, map);
104934 -       mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT;
104936 -       if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz)
104937 -               return -EINVAL;
104939 +       if (vma->vm_flags & VM_WRITE) {
104940 +               /* allow writable mapping for the consumer_pos only */
104941 +               if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
104942 +                       return -EPERM;
104943 +       } else {
104944 +               vma->vm_flags &= ~VM_MAYWRITE;
104945 +       }
104946 +       /* remap_vmalloc_range() checks size and offset constraints */
104947         return remap_vmalloc_range(vma, rb_map->rb,
104948                                    vma->vm_pgoff + RINGBUF_PGOFF);
104950 @@ -315,6 +310,9 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
104951                 return NULL;
104953         len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
104954 +       if (len > rb->mask + 1)
104955 +               return NULL;
104957         cons_pos = smp_load_acquire(&rb->consumer_pos);
104959         if (in_nmi()) {
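
The ringbuf_map_mmap() change above means user space may map only the
consumer-position page (offset 0) writable; the producer page and the data
pages must be mapped read-only, or the kernel now returns -EPERM. A hedged
C sketch of a conforming consumer mapping, mirroring what libbpf does
(map_fd and data_sz are assumed to be supplied by the caller):

    #include <sys/mman.h>
    #include <unistd.h>

    /* Layout: page 0 = consumer_pos (writable by user space),
     * page 1 = producer_pos, then the data pages, which the kernel
     * maps twice back-to-back so records may wrap seamlessly.
     */
    static int ringbuf_mmap(int map_fd, size_t data_sz,
                            void **consumer, void **producer)
    {
            long page = sysconf(_SC_PAGESIZE);

            /* Only the consumer page may be mapped writable (offset 0). */
            *consumer = mmap(NULL, page, PROT_READ | PROT_WRITE,
                             MAP_SHARED, map_fd, 0);
            if (*consumer == MAP_FAILED)
                    return -1;

            /* Producer page + twice the data area, strictly read-only. */
            *producer = mmap(NULL, page + 2 * data_sz, PROT_READ,
                             MAP_SHARED, map_fd, page);
            if (*producer == MAP_FAILED) {
                    munmap(*consumer, page);
                    return -1;
            }
            return 0;
    }
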
104960 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
104961 index 0399ac092b36..21247e49fe82 100644
104962 --- a/kernel/bpf/verifier.c
104963 +++ b/kernel/bpf/verifier.c
104964 @@ -1362,9 +1362,7 @@ static bool __reg64_bound_s32(s64 a)
104966  static bool __reg64_bound_u32(u64 a)
104968 -       if (a > U32_MIN && a < U32_MAX)
104969 -               return true;
104970 -       return false;
104971 +       return a > U32_MIN && a < U32_MAX;
104974  static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
104975 @@ -1375,10 +1373,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
104976                 reg->s32_min_value = (s32)reg->smin_value;
104977                 reg->s32_max_value = (s32)reg->smax_value;
104978         }
104979 -       if (__reg64_bound_u32(reg->umin_value))
104980 +       if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
104981                 reg->u32_min_value = (u32)reg->umin_value;
104982 -       if (__reg64_bound_u32(reg->umax_value))
104983                 reg->u32_max_value = (u32)reg->umax_value;
104984 +       }
104986         /* Intersecting with the old var_off might have improved our bounds
104987          * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
104988 @@ -5865,18 +5863,10 @@ enum {
104991  static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
104992 -                             const struct bpf_reg_state *off_reg,
104993 -                             u32 *alu_limit, u8 opcode)
104994 +                             u32 *alu_limit, bool mask_to_left)
104996 -       bool off_is_neg = off_reg->smin_value < 0;
104997 -       bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
104998 -                           (opcode == BPF_SUB && !off_is_neg);
104999         u32 max = 0, ptr_limit = 0;
105001 -       if (!tnum_is_const(off_reg->var_off) &&
105002 -           (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
105003 -               return REASON_BOUNDS;
105005         switch (ptr_reg->type) {
105006         case PTR_TO_STACK:
105007                 /* Offset 0 is out-of-bounds, but acceptable start for the
105008 @@ -5942,16 +5932,22 @@ static bool sanitize_needed(u8 opcode)
105009         return opcode == BPF_ADD || opcode == BPF_SUB;
105012 +struct bpf_sanitize_info {
105013 +       struct bpf_insn_aux_data aux;
105014 +       bool mask_to_left;
105017  static int sanitize_ptr_alu(struct bpf_verifier_env *env,
105018                             struct bpf_insn *insn,
105019                             const struct bpf_reg_state *ptr_reg,
105020                             const struct bpf_reg_state *off_reg,
105021                             struct bpf_reg_state *dst_reg,
105022 -                           struct bpf_insn_aux_data *tmp_aux,
105023 +                           struct bpf_sanitize_info *info,
105024                             const bool commit_window)
105026 -       struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
105027 +       struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
105028         struct bpf_verifier_state *vstate = env->cur_state;
105029 +       bool off_is_imm = tnum_is_const(off_reg->var_off);
105030         bool off_is_neg = off_reg->smin_value < 0;
105031         bool ptr_is_dst_reg = ptr_reg == dst_reg;
105032         u8 opcode = BPF_OP(insn->code);
105033 @@ -5970,7 +5966,16 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
105034         if (vstate->speculative)
105035                 goto do_sim;
105037 -       err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
105038 +       if (!commit_window) {
105039 +               if (!tnum_is_const(off_reg->var_off) &&
105040 +                   (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
105041 +                       return REASON_BOUNDS;
105043 +               info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
105044 +                                    (opcode == BPF_SUB && !off_is_neg);
105045 +       }
105047 +       err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
105048         if (err < 0)
105049                 return err;
105051 @@ -5978,10 +5983,11 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
105052                 /* In commit phase we narrow the masking window based on
105053                  * the observed pointer move after the simulated operation.
105054                  */
105055 -               alu_state = tmp_aux->alu_state;
105056 -               alu_limit = abs(tmp_aux->alu_limit - alu_limit);
105057 +               alu_state = info->aux.alu_state;
105058 +               alu_limit = abs(info->aux.alu_limit - alu_limit);
105059         } else {
105060                 alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
105061 +               alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
105062                 alu_state |= ptr_is_dst_reg ?
105063                              BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
105064         }
105065 @@ -5993,8 +5999,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
105066         /* If we're in commit phase, we're done here given we already
105067          * pushed the truncated dst_reg into the speculative verification
105068          * stack.
105069 +        *
105070 +        * Also, when register is a known constant, we rewrite register-based
105071 +        * operation to immediate-based, and thus do not need masking (and as
105072 +        * a consequence, do not need to simulate the zero-truncation either).
105073          */
105074 -       if (commit_window)
105075 +       if (commit_window || off_is_imm)
105076                 return 0;
105078         /* Simulate and find potential out-of-bounds access under
105079 @@ -6139,7 +6149,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
105080             smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
105081         u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
105082             umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
105083 -       struct bpf_insn_aux_data tmp_aux = {};
105084 +       struct bpf_sanitize_info info = {};
105085         u8 opcode = BPF_OP(insn->code);
105086         u32 dst = insn->dst_reg;
105087         int ret;
105088 @@ -6208,7 +6218,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
105090         if (sanitize_needed(opcode)) {
105091                 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
105092 -                                      &tmp_aux, false);
105093 +                                      &info, false);
105094                 if (ret < 0)
105095                         return sanitize_err(env, insn, ret, off_reg, dst_reg);
105096         }
105097 @@ -6349,7 +6359,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
105098                 return -EACCES;
105099         if (sanitize_needed(opcode)) {
105100                 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
105101 -                                      &tmp_aux, true);
105102 +                                      &info, true);
105103                 if (ret < 0)
105104                         return sanitize_err(env, insn, ret, off_reg, dst_reg);
105105         }
105106 @@ -6538,11 +6548,10 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
105107         s32 smin_val = src_reg->s32_min_value;
105108         u32 umax_val = src_reg->u32_max_value;
105110 -       /* Assuming scalar64_min_max_and will be called so its safe
105111 -        * to skip updating register for known 32-bit case.
105112 -        */
105113 -       if (src_known && dst_known)
105114 +       if (src_known && dst_known) {
105115 +               __mark_reg32_known(dst_reg, var32_off.value);
105116                 return;
105117 +       }
105119         /* We get our minimum from the var_off, since that's inherently
105120          * bitwise.  Our maximum is the minimum of the operands' maxima.
105121 @@ -6562,7 +6571,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
105122                 dst_reg->s32_min_value = dst_reg->u32_min_value;
105123                 dst_reg->s32_max_value = dst_reg->u32_max_value;
105124         }
105128  static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
105129 @@ -6609,11 +6617,10 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
105130         s32 smin_val = src_reg->s32_min_value;
105131         u32 umin_val = src_reg->u32_min_value;
105133 -       /* Assuming scalar64_min_max_or will be called so it is safe
105134 -        * to skip updating register for known case.
105135 -        */
105136 -       if (src_known && dst_known)
105137 +       if (src_known && dst_known) {
105138 +               __mark_reg32_known(dst_reg, var32_off.value);
105139                 return;
105140 +       }
105142         /* We get our maximum from the var_off, and our minimum is the
105143          * maximum of the operands' minima
105144 @@ -6678,11 +6685,10 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
105145         struct tnum var32_off = tnum_subreg(dst_reg->var_off);
105146         s32 smin_val = src_reg->s32_min_value;
105148 -       /* Assuming scalar64_min_max_xor will be called so it is safe
105149 -        * to skip updating register for known case.
105150 -        */
105151 -       if (src_known && dst_known)
105152 +       if (src_known && dst_known) {
105153 +               __mark_reg32_known(dst_reg, var32_off.value);
105154                 return;
105155 +       }
105157         /* We get both minimum and maximum from the var32_off. */
105158         dst_reg->u32_min_value = var32_off.value;
105159 @@ -11740,7 +11746,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
105160                         const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
105161                         struct bpf_insn insn_buf[16];
105162                         struct bpf_insn *patch = &insn_buf[0];
105163 -                       bool issrc, isneg;
105164 +                       bool issrc, isneg, isimm;
105165                         u32 off_reg;
105167                         aux = &env->insn_aux_data[i + delta];
105168 @@ -11751,28 +11757,29 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
105169                         isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
105170                         issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
105171                                 BPF_ALU_SANITIZE_SRC;
105172 +                       isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
105174                         off_reg = issrc ? insn->src_reg : insn->dst_reg;
105175 -                       if (isneg)
105176 -                               *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
105177 -                       *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
105178 -                       *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
105179 -                       *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
105180 -                       *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
105181 -                       *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
105182 -                       if (issrc) {
105183 -                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
105184 -                                                        off_reg);
105185 -                               insn->src_reg = BPF_REG_AX;
105186 +                       if (isimm) {
105187 +                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
105188                         } else {
105189 -                               *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
105190 -                                                        BPF_REG_AX);
105191 +                               if (isneg)
105192 +                                       *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
105193 +                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
105194 +                               *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
105195 +                               *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
105196 +                               *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
105197 +                               *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
105198 +                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
105199                         }
105200 +                       if (!issrc)
105201 +                               *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
105202 +                       insn->src_reg = BPF_REG_AX;
105203                         if (isneg)
105204                                 insn->code = insn->code == code_add ?
105205                                              code_sub : code_add;
105206                         *patch++ = *insn;
105207 -                       if (issrc && isneg)
105208 +                       if (issrc && isneg && !isimm)
105209                                 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
105210                         cnt = patch - insn_buf;
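
For readers tracking the sanitization rewrite above: in the non-immediate
case the verifier still emits the classic Spectre-v1 masking sequence
(SUB, OR, NEG, ARSH 63, AND), which replaces the offset register with 0
whenever it is negative or larger than alu_limit. A stand-alone C sketch
of the same arithmetic (illustrative only; an arithmetic right shift on
signed values is assumed, as the BPF ALU guarantees):

    #include <stdint.h>
    #include <stdio.h>

    static int64_t sanitize_off(int64_t off, int64_t alu_limit)
    {
            int64_t ax = alu_limit;

            ax -= off;          /* sign bit set iff off > alu_limit     */
            ax |= off;          /* sign bit set iff off out of bounds   */
            ax = -ax;
            ax >>= 63;          /* mask: ~0 when in bounds, 0 when not  */
            return ax & off;    /* off if 0 <= off <= alu_limit, else 0 */
    }

    int main(void)
    {
            printf("%lld\n", (long long)sanitize_off(8, 16));  /* 8 */
            printf("%lld\n", (long long)sanitize_off(32, 16)); /* 0 */
            printf("%lld\n", (long long)sanitize_off(-4, 16)); /* 0 */
            return 0;
    }

In the degenerate off == 0 case the mask is 0 as well, but the result
still equals off, so the clamp holds for every input.
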
105212 diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
105213 index c10e855a03bc..fe4c01c14ab2 100644
105214 --- a/kernel/dma/swiotlb.c
105215 +++ b/kernel/dma/swiotlb.c
105216 @@ -608,7 +608,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
105217                 enum dma_data_direction dir, unsigned long attrs)
105219         unsigned int offset = swiotlb_align_offset(dev, orig_addr);
105220 -       unsigned int index, i;
105221 +       unsigned int i;
105222 +       int index;
105223         phys_addr_t tlb_addr;
105225         if (no_iotlb_memory)
105226 diff --git a/kernel/events/core.c b/kernel/events/core.c
105227 index 03db40f6cba9..c24ea952e7ae 100644
105228 --- a/kernel/events/core.c
105229 +++ b/kernel/events/core.c
105230 @@ -2204,6 +2204,26 @@ static void perf_group_detach(struct perf_event *event)
105231         perf_event__header_size(leader);
105234 +static void sync_child_event(struct perf_event *child_event);
105236 +static void perf_child_detach(struct perf_event *event)
105238 +       struct perf_event *parent_event = event->parent;
105240 +       if (!(event->attach_state & PERF_ATTACH_CHILD))
105241 +               return;
105243 +       event->attach_state &= ~PERF_ATTACH_CHILD;
105245 +       if (WARN_ON_ONCE(!parent_event))
105246 +               return;
105248 +       lockdep_assert_held(&parent_event->child_mutex);
105250 +       sync_child_event(event);
105251 +       list_del_init(&event->child_list);
105254  static bool is_orphaned_event(struct perf_event *event)
105256         return event->state == PERF_EVENT_STATE_DEAD;
105257 @@ -2311,6 +2331,7 @@ group_sched_out(struct perf_event *group_event,
105260  #define DETACH_GROUP   0x01UL
105261 +#define DETACH_CHILD   0x02UL
105264   * Cross CPU call to remove a performance event
105265 @@ -2334,6 +2355,8 @@ __perf_remove_from_context(struct perf_event *event,
105266         event_sched_out(event, cpuctx, ctx);
105267         if (flags & DETACH_GROUP)
105268                 perf_group_detach(event);
105269 +       if (flags & DETACH_CHILD)
105270 +               perf_child_detach(event);
105271         list_del_event(event, ctx);
105273         if (!ctx->nr_events && ctx->is_active) {
105274 @@ -2362,25 +2385,21 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
105276         lockdep_assert_held(&ctx->mutex);
105278 -       event_function_call(event, __perf_remove_from_context, (void *)flags);
105280         /*
105281 -        * The above event_function_call() can NO-OP when it hits
105282 -        * TASK_TOMBSTONE. In that case we must already have been detached
105283 -        * from the context (by perf_event_exit_event()) but the grouping
105284 -        * might still be in-tact.
105285 +        * Because of perf_event_exit_task(), perf_remove_from_context() ought
105286 +        * to work in the face of TASK_TOMBSTONE, unlike every other
105287 +        * event_function_call() user.
105288          */
105289 -       WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
105290 -       if ((flags & DETACH_GROUP) &&
105291 -           (event->attach_state & PERF_ATTACH_GROUP)) {
105292 -               /*
105293 -                * Since in that case we cannot possibly be scheduled, simply
105294 -                * detach now.
105295 -                */
105296 -               raw_spin_lock_irq(&ctx->lock);
105297 -               perf_group_detach(event);
105298 +       raw_spin_lock_irq(&ctx->lock);
105299 +       if (!ctx->is_active) {
105300 +               __perf_remove_from_context(event, __get_cpu_context(ctx),
105301 +                                          ctx, (void *)flags);
105302                 raw_spin_unlock_irq(&ctx->lock);
105303 +               return;
105304         }
105305 +       raw_spin_unlock_irq(&ctx->lock);
105307 +       event_function_call(event, __perf_remove_from_context, (void *)flags);
105311 @@ -11829,12 +11848,12 @@ SYSCALL_DEFINE5(perf_event_open,
105312                         return err;
105313         }
105315 -       err = security_locked_down(LOCKDOWN_PERF);
105316 -       if (err && (attr.sample_type & PERF_SAMPLE_REGS_INTR))
105317 -               /* REGS_INTR can leak data, lockdown must prevent this */
105318 -               return err;
105320 -       err = 0;
105321 +       /* REGS_INTR can leak data, lockdown must prevent this */
105322 +       if (attr.sample_type & PERF_SAMPLE_REGS_INTR) {
105323 +               err = security_locked_down(LOCKDOWN_PERF);
105324 +               if (err)
105325 +                       return err;
105326 +       }
105328         /*
105329          * In cgroup mode, the pid argument is used to pass the fd
105330 @@ -12373,14 +12392,17 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
105332  EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
105334 -static void sync_child_event(struct perf_event *child_event,
105335 -                              struct task_struct *child)
105336 +static void sync_child_event(struct perf_event *child_event)
105338         struct perf_event *parent_event = child_event->parent;
105339         u64 child_val;
105341 -       if (child_event->attr.inherit_stat)
105342 -               perf_event_read_event(child_event, child);
105343 +       if (child_event->attr.inherit_stat) {
105344 +               struct task_struct *task = child_event->ctx->task;
105346 +               if (task && task != TASK_TOMBSTONE)
105347 +                       perf_event_read_event(child_event, task);
105348 +       }
105350         child_val = perf_event_count(child_event);
105352 @@ -12395,60 +12417,53 @@ static void sync_child_event(struct perf_event *child_event,
105355  static void
105356 -perf_event_exit_event(struct perf_event *child_event,
105357 -                     struct perf_event_context *child_ctx,
105358 -                     struct task_struct *child)
105359 +perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
105361 -       struct perf_event *parent_event = child_event->parent;
105362 +       struct perf_event *parent_event = event->parent;
105363 +       unsigned long detach_flags = 0;
105365 -       /*
105366 -        * Do not destroy the 'original' grouping; because of the context
105367 -        * switch optimization the original events could've ended up in a
105368 -        * random child task.
105369 -        *
105370 -        * If we were to destroy the original group, all group related
105371 -        * operations would cease to function properly after this random
105372 -        * child dies.
105373 -        *
105374 -        * Do destroy all inherited groups, we don't care about those
105375 -        * and being thorough is better.
105376 -        */
105377 -       raw_spin_lock_irq(&child_ctx->lock);
105378 -       WARN_ON_ONCE(child_ctx->is_active);
105379 +       if (parent_event) {
105380 +               /*
105381 +                * Do not destroy the 'original' grouping; because of the
105382 +                * context switch optimization the original events could've
105383 +                * ended up in a random child task.
105384 +                *
105385 +                * If we were to destroy the original group, all group related
105386 +                * operations would cease to function properly after this
105387 +                * random child dies.
105388 +                *
105389 +                * Do destroy all inherited groups, we don't care about those
105390 +                * and being thorough is better.
105391 +                */
105392 +               detach_flags = DETACH_GROUP | DETACH_CHILD;
105393 +               mutex_lock(&parent_event->child_mutex);
105394 +       }
105396 -       if (parent_event)
105397 -               perf_group_detach(child_event);
105398 -       list_del_event(child_event, child_ctx);
105399 -       perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
105400 -       raw_spin_unlock_irq(&child_ctx->lock);
105401 +       perf_remove_from_context(event, detach_flags);
105403 +       raw_spin_lock_irq(&ctx->lock);
105404 +       if (event->state > PERF_EVENT_STATE_EXIT)
105405 +               perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
105406 +       raw_spin_unlock_irq(&ctx->lock);
105408         /*
105409 -        * Parent events are governed by their filedesc, retain them.
105410 +        * Child events can be freed.
105411          */
105412 -       if (!parent_event) {
105413 -               perf_event_wakeup(child_event);
105414 +       if (parent_event) {
105415 +               mutex_unlock(&parent_event->child_mutex);
105416 +               /*
105417 +                * Kick perf_poll() for is_event_hup().
105418 +                */
105419 +               perf_event_wakeup(parent_event);
105420 +               free_event(event);
105421 +               put_event(parent_event);
105422                 return;
105423         }
105424 -       /*
105425 -        * Child events can be cleaned up.
105426 -        */
105428 -       sync_child_event(child_event, child);
105430         /*
105431 -        * Remove this event from the parent's list
105432 -        */
105433 -       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
105434 -       mutex_lock(&parent_event->child_mutex);
105435 -       list_del_init(&child_event->child_list);
105436 -       mutex_unlock(&parent_event->child_mutex);
105438 -       /*
105439 -        * Kick perf_poll() for is_event_hup().
105440 +        * Parent events are governed by their filedesc, retain them.
105441          */
105442 -       perf_event_wakeup(parent_event);
105443 -       free_event(child_event);
105444 -       put_event(parent_event);
105445 +       perf_event_wakeup(event);
105448  static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
105449 @@ -12505,7 +12520,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
105450         perf_event_task(child, child_ctx, 0);
105452         list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
105453 -               perf_event_exit_event(child_event, child_ctx, child);
105454 +               perf_event_exit_event(child_event, child_ctx);
105456         mutex_unlock(&child_ctx->mutex);
105458 @@ -12765,6 +12780,7 @@ inherit_event(struct perf_event *parent_event,
105459          */
105460         raw_spin_lock_irqsave(&child_ctx->lock, flags);
105461         add_event_to_ctx(child_event, child_ctx);
105462 +       child_event->attach_state |= PERF_ATTACH_CHILD;
105463         raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
105465         /*
105466 diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
105467 index 6addc9780319..4e93e5602723 100644
105468 --- a/kernel/events/uprobes.c
105469 +++ b/kernel/events/uprobes.c
105470 @@ -184,7 +184,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
105471         if (new_page) {
105472                 get_page(new_page);
105473                 page_add_new_anon_rmap(new_page, vma, addr, false);
105474 -               lru_cache_add_inactive_or_unevictable(new_page, vma);
105475 +               lru_cache_add_page_vma(new_page, vma, false);
105476         } else
105477                 /* no new page, just dec_mm_counter for old_page */
105478                 dec_mm_counter(mm, MM_ANONPAGES);
105479 diff --git a/kernel/exit.c b/kernel/exit.c
105480 index 04029e35e69a..e4292717ce37 100644
105481 --- a/kernel/exit.c
105482 +++ b/kernel/exit.c
105483 @@ -422,6 +422,7 @@ void mm_update_next_owner(struct mm_struct *mm)
105484                 goto retry;
105485         }
105486         WRITE_ONCE(mm->owner, c);
105487 +       lru_gen_migrate_mm(mm);
105488         task_unlock(c);
105489         put_task_struct(c);
105491 diff --git a/kernel/fork.c b/kernel/fork.c
105492 index 426cd0c51f9e..c54400f24fb2 100644
105493 --- a/kernel/fork.c
105494 +++ b/kernel/fork.c
105495 @@ -107,6 +107,11 @@
105497  #define CREATE_TRACE_POINTS
105498  #include <trace/events/task.h>
105499 +#ifdef CONFIG_USER_NS
105500 +extern int unprivileged_userns_clone;
105501 +#else
105502 +#define unprivileged_userns_clone 0
105503 +#endif
105506   * Minimum number of threads to boot the kernel
105507 @@ -665,6 +670,7 @@ static void check_mm(struct mm_struct *mm)
105508  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
105509         VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
105510  #endif
105511 +       VM_BUG_ON_MM(lru_gen_mm_is_active(mm), mm);
105514  #define allocate_mm()  (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
105515 @@ -1055,6 +1061,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
105516                 goto fail_nocontext;
105518         mm->user_ns = get_user_ns(user_ns);
105519 +       lru_gen_init_mm(mm);
105520         return mm;
105522  fail_nocontext:
105523 @@ -1097,6 +1104,7 @@ static inline void __mmput(struct mm_struct *mm)
105524         }
105525         if (mm->binfmt)
105526                 module_put(mm->binfmt->module);
105527 +       lru_gen_del_mm(mm);
105528         mmdrop(mm);
105531 @@ -1128,6 +1136,7 @@ void mmput_async(struct mm_struct *mm)
105532                 schedule_work(&mm->async_put_work);
105533         }
105535 +EXPORT_SYMBOL(mmput_async);
105536  #endif
105538  /**
105539 @@ -1316,6 +1325,8 @@ static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
105540                         put_user(0, tsk->clear_child_tid);
105541                         do_futex(tsk->clear_child_tid, FUTEX_WAKE,
105542                                         1, NULL, NULL, 0, 0);
105543 +                       ksys_futex_wake(tsk->clear_child_tid, 1,
105544 +                                       FUTEX_32 | FUTEX_SHARED_FLAG);
105545                 }
105546                 tsk->clear_child_tid = NULL;
105547         }
105548 @@ -1872,6 +1883,10 @@ static __latent_entropy struct task_struct *copy_process(
105549         if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
105550                 return ERR_PTR(-EINVAL);
105552 +       if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
105553 +               if (!capable(CAP_SYS_ADMIN))
105554 +                       return ERR_PTR(-EPERM);
105556         /*
105557          * Thread groups must share signals as well, and detached threads
105558          * can only be started up within the thread group.
105559 @@ -2521,6 +2536,13 @@ pid_t kernel_clone(struct kernel_clone_args *args)
105560                 get_task_struct(p);
105561         }
105563 +       if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
105564 +               /* lock the task to synchronize with memcg migration */
105565 +               task_lock(p);
105566 +               lru_gen_add_mm(p->mm);
105567 +               task_unlock(p);
105568 +       }
105570         wake_up_new_task(p);
105572         /* forking complete and child started to run, tell ptracer */
105573 @@ -2971,6 +2993,12 @@ int ksys_unshare(unsigned long unshare_flags)
105574         if (unshare_flags & CLONE_NEWNS)
105575                 unshare_flags |= CLONE_FS;
105577 +       if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
105578 +               err = -EPERM;
105579 +               if (!capable(CAP_SYS_ADMIN))
105580 +                       goto bad_unshare_out;
105581 +       }
105583         err = check_unshare_flags(unshare_flags);
105584         if (err)
105585                 goto bad_unshare_out;
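
With the gate above in place, an unprivileged task can no longer create a
user namespace via clone() or unshare() unless the (out-of-hunk)
kernel.unprivileged_userns_clone sysctl is enabled. A hedged user-space
probe of the behaviour:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            /* Fails with EPERM when unprivileged_userns_clone is 0 and
             * the caller lacks CAP_SYS_ADMIN; succeeds otherwise.
             */
            if (unshare(CLONE_NEWUSER) != 0)
                    perror("unshare(CLONE_NEWUSER)");
            else
                    puts("user namespace created");
            return 0;
    }
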
105586 diff --git a/kernel/futex.c b/kernel/futex.c
105587 index 00febd6dea9c..f923d2da4b40 100644
105588 --- a/kernel/futex.c
105589 +++ b/kernel/futex.c
105590 @@ -198,6 +198,8 @@ struct futex_pi_state {
105591   * @rt_waiter:         rt_waiter storage for use with requeue_pi
105592   * @requeue_pi_key:    the requeue_pi target futex key
105593   * @bitset:            bitset for the optional bitmasked wakeup
105594 + * @uaddr:             userspace address of futex
105595 + * @uval:              expected futex's value
105596   *
105597   * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
105598   * we can wake only the relevant ones (hashed queues may be shared).
105599 @@ -220,6 +222,8 @@ struct futex_q {
105600         struct rt_mutex_waiter *rt_waiter;
105601         union futex_key *requeue_pi_key;
105602         u32 bitset;
105603 +       u32 __user *uaddr;
105604 +       u32 uval;
105605  } __randomize_layout;
105607  static const struct futex_q futex_q_init = {
105608 @@ -2313,6 +2317,29 @@ static int unqueue_me(struct futex_q *q)
105609         return ret;
105613 + * unqueue_multiple() - Remove several futexes from their futex_hash_bucket
105614 + * @q: The list of futexes to unqueue
105615 + * @count: Number of futexes in the list
105617 + * Helper to unqueue a list of futexes. This can't fail.
105619 + * Return:
105620 + *  - >=0 - Index of the last futex that was awoken;
105621 + *  - -1  - If no futex was awoken
105622 + */
105623 +static int unqueue_multiple(struct futex_q *q, int count)
105625 +       int ret = -1;
105626 +       int i;
105628 +       for (i = 0; i < count; i++) {
105629 +               if (!unqueue_me(&q[i]))
105630 +                       ret = i;
105631 +       }
105632 +       return ret;
105636   * PI futexes can not be requeued and must remove themself from the
105637   * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
105638 @@ -2680,6 +2707,205 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
105639         return ret;
105643 + * futex_wait_multiple_setup() - Prepare to wait and enqueue multiple futexes
105644 + * @qs:                The corresponding futex list
105645 + * @count:     The size of the lists
105646 + * @flags:     Futex flags (FLAGS_SHARED, etc.)
105647 + * @awaken:    Index of the last awoken futex
105649 + * Prepare multiple futexes in a single step and enqueue them. This may fail if
105650 + * the futex list is invalid or if any futex was already awoken. On success the
105651 + * task is ready for interruptible sleep.
105653 + * Return:
105654 + *  -  1 - One of the futexes was awoken by another thread
105655 + *  -  0 - Success
105656 + *  - <0 - -EFAULT, -EWOULDBLOCK or -EINVAL
105657 + */
105658 +static int futex_wait_multiple_setup(struct futex_q *qs, int count,
105659 +                                    unsigned int flags, int *awaken)
105661 +       struct futex_hash_bucket *hb;
105662 +       int ret, i;
105663 +       u32 uval;
105665 +       /*
105666 +        * Enqueuing multiple futexes is tricky, because we need to
105667 +        * enqueue each futex in the list before dealing with the next
105668 +        * one to avoid deadlocking on the hash bucket.  But, before
105669 +        * enqueuing, we need to make sure that current->state is
105670 +        * TASK_INTERRUPTIBLE, so we don't absorb any wake events; that
105671 +        * cannot be done before the get_futex_key of the next key,
105672 +        * because it calls get_user_pages, which can sleep.  Thus, we
105673 +        * fetch the list of futex keys in two steps: first pin the user
105674 +        * memory behind every key, and only then read each value and
105675 +        * queue the corresponding futex.
105676 +        */
105677 +retry:
105678 +       for (i = 0; i < count; i++) {
105679 +               qs[i].key = FUTEX_KEY_INIT;
105680 +               ret = get_futex_key(qs[i].uaddr, flags & FLAGS_SHARED,
105681 +                                   &qs[i].key, FUTEX_READ);
105682 +               if (unlikely(ret)) {
105683 +                       return ret;
105684 +               }
105685 +       }
105687 +       set_current_state(TASK_INTERRUPTIBLE);
105689 +       for (i = 0; i < count; i++) {
105690 +               struct futex_q *q = &qs[i];
105692 +               hb = queue_lock(q);
105694 +               ret = get_futex_value_locked(&uval, q->uaddr);
105695 +               if (ret) {
105696 +                       /*
105697 +                        * We need to try to handle the fault, which
105698 +                        * cannot be done without sleeping, so we need to
105699 +                        * undo all the work already done, to make sure
105700 +                        * we don't miss any wake ups.  Therefore, clean
105701 +                        * up, handle the fault and retry from the
105702 +                        * beginning.
105703 +                        */
105704 +                       queue_unlock(hb);
105706 +                       /*
105707 +                        * Keys 0..(i-1) are implicitly put
105708 +                        * on unqueue_multiple.
105709 +                        */
105710 +                       *awaken = unqueue_multiple(qs, i);
105712 +                       __set_current_state(TASK_RUNNING);
105714 +                       /*
105715 +                        * On a real fault, prioritize the error even if
105716 +                        * some other futex was awoken.  Userspace gave
105717 +                        * us a bad address, -EFAULT them.
105718 +                        */
105719 +                       ret = get_user(uval, q->uaddr);
105720 +                       if (ret)
105721 +                               return ret;
105723 +                       /*
105724 +                        * Even if the page fault was handled, if
105725 +                        * something was already awoken, we can safely
105726 +                        * give up and succeed, giving userspace a hint
105727 +                        * to acquire the right futex faster.
105728 +                        */
105729 +                       if (*awaken >= 0)
105730 +                               return 1;
105732 +                       goto retry;
105733 +               }
105735 +               if (uval != q->uval) {
105736 +                       queue_unlock(hb);
105738 +                       /*
105739 +                        * If something was already awoken, we can
105740 +                        * safely ignore the error and succeed.
105741 +                        */
105742 +                       *awaken = unqueue_multiple(qs, i);
105743 +                       __set_current_state(TASK_RUNNING);
105744 +                       if (*awaken >= 0)
105745 +                               return 1;
105747 +                       return -EWOULDBLOCK;
105748 +               }
105750 +               /*
105751 +                * The bucket lock can't be held while dealing with the
105752 +                * next futex. Queue each futex at this moment so hb can
105753 +                * be unlocked.
105754 +                */
105755 +               queue_me(&qs[i], hb);
105756 +       }
105757 +       return 0;
105761 + * futex_wait_multiple() - Prepare to wait on and enqueue several futexes
105762 + * @qs:                The list of futexes to wait on
105763 + * @op:                Operation code from the futex syscall
105764 + * @count:     The number of objects
105765 + * @abs_time:  Timeout before giving up and returning to userspace
105767 + * Entry point for the FUTEX_WAIT_MULTIPLE futex operation: this function
105768 + * sleeps on a group of futexes and returns when the first futex is
105769 + * woken, or after the timeout has elapsed.
105771 + * Return:
105772 + *  - >=0 - Hint to the futex that was awoken
105773 + *  - <0  - On error
105774 + */
105775 +static int futex_wait_multiple(struct futex_q *qs, int op,
105776 +                              u32 count, ktime_t *abs_time)
105778 +       struct hrtimer_sleeper timeout, *to;
105779 +       int ret, flags = 0, hint = 0;
105780 +       unsigned int i;
105782 +       if (!(op & FUTEX_PRIVATE_FLAG))
105783 +               flags |= FLAGS_SHARED;
105785 +       if (op & FUTEX_CLOCK_REALTIME)
105786 +               flags |= FLAGS_CLOCKRT;
105788 +       to = futex_setup_timer(abs_time, &timeout, flags, 0);
105789 +       while (1) {
105790 +               ret = futex_wait_multiple_setup(qs, count, flags, &hint);
105791 +               if (ret) {
105792 +                       if (ret > 0) {
105793 +                               /* A futex was awoken during setup */
105794 +                               ret = hint;
105795 +                       }
105796 +                       break;
105797 +               }
105799 +               if (to)
105800 +                       hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
105802 +               /*
105803 +                * Avoid sleeping if another thread already tried to
105804 +                * wake us.
105805 +                */
105806 +               for (i = 0; i < count; i++) {
105807 +                       if (plist_node_empty(&qs[i].list))
105808 +                               break;
105809 +               }
105811 +               if (i == count && (!to || to->task))
105812 +                       freezable_schedule();
105814 +               ret = unqueue_multiple(qs, count);
105816 +               __set_current_state(TASK_RUNNING);
105818 +               if (ret >= 0)
105819 +                       break;
105820 +               if (to && !to->task) {
105821 +                       ret = -ETIMEDOUT;
105822 +                       break;
105823 +               } else if (signal_pending(current)) {
105824 +                       ret = -ERESTARTSYS;
105825 +                       break;
105826 +               }
105827 +               /*
105828 +                * The final case is a spurious wakeup;
105829 +                * just retry.
105830 +                */
105831 +       }
105833 +       if (to) {
105834 +               hrtimer_cancel(&to->timer);
105835 +               destroy_hrtimer_on_stack(&to->timer);
105836 +       }
105838 +       return ret;
105841  static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
105842                       ktime_t *abs_time, u32 bitset)
105844 @@ -3711,8 +3937,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
105846         if (op & FUTEX_CLOCK_REALTIME) {
105847                 flags |= FLAGS_CLOCKRT;
105848 -               if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
105849 -                   cmd != FUTEX_WAIT_REQUEUE_PI)
105850 +               if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
105851                         return -ENOSYS;
105852         }
105854 @@ -3759,6 +3984,43 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
105855         return -ENOSYS;
105859 + * futex_read_wait_block - Read an array of futex_wait_block from userspace
105860 + * @uaddr:     Userspace address of the block
105861 + * @count:     Number of blocks to be read
105863 + * This function allocates an array of futex_q (zeroed to initialize the
105864 + * fields) and then, for each futex_wait_block element from userspace,
105865 + * fills a futex_q element with the proper values.
105866 + */
105867 +inline struct futex_q *futex_read_wait_block(u32 __user *uaddr, u32 count)
105869 +       unsigned int i;
105870 +       struct futex_q *qs;
105871 +       struct futex_wait_block fwb;
105872 +       struct futex_wait_block __user *entry =
105873 +               (struct futex_wait_block __user *)uaddr;
105875 +       if (!count || count > FUTEX_MULTIPLE_MAX_COUNT)
105876 +               return ERR_PTR(-EINVAL);
105878 +       qs = kcalloc(count, sizeof(*qs), GFP_KERNEL);
105879 +       if (!qs)
105880 +               return ERR_PTR(-ENOMEM);
105882 +       for (i = 0; i < count; i++) {
105883 +               if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) {
105884 +                       kfree(qs);
105885 +                       return ERR_PTR(-EFAULT);
105886 +               }
105888 +               qs[i].uaddr = fwb.uaddr;
105889 +               qs[i].uval = fwb.val;
105890 +               qs[i].bitset = fwb.bitset;
105891 +       }
105893 +       return qs;
105896  SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
105897                 const struct __kernel_timespec __user *, utime,
105898 @@ -3771,7 +4033,8 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
105900         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
105901                       cmd == FUTEX_WAIT_BITSET ||
105902 -                     cmd == FUTEX_WAIT_REQUEUE_PI)) {
105903 +                     cmd == FUTEX_WAIT_REQUEUE_PI ||
105904 +                     cmd == FUTEX_WAIT_MULTIPLE)) {
105905                 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
105906                         return -EFAULT;
105907                 if (get_timespec64(&ts, utime))
105908 @@ -3780,9 +4043,9 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
105909                         return -EINVAL;
105911                 t = timespec64_to_ktime(ts);
105912 -               if (cmd == FUTEX_WAIT)
105913 +               if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE)
105914                         t = ktime_add_safe(ktime_get(), t);
105915 -               else if (!(op & FUTEX_CLOCK_REALTIME))
105916 +               else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
105917                         t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
105918                 tp = &t;
105919         }
105920 @@ -3794,6 +4057,25 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
105921             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
105922                 val2 = (u32) (unsigned long) utime;
105924 +       if (cmd == FUTEX_WAIT_MULTIPLE) {
105925 +               int ret;
105926 +               struct futex_q *qs;
105928 +#ifdef CONFIG_X86_X32
105929 +               if (unlikely(in_x32_syscall()))
105930 +                       return -ENOSYS;
105931 +#endif
105932 +               qs = futex_read_wait_block(uaddr, val);
105934 +               if (IS_ERR(qs))
105935 +                       return PTR_ERR(qs);
105937 +               ret = futex_wait_multiple(qs, op, val, tp);
105938 +               kfree(qs);
105940 +               return ret;
105941 +       }
105943         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
105946 @@ -3956,6 +4238,58 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
105947  #endif /* CONFIG_COMPAT */
105949  #ifdef CONFIG_COMPAT_32BIT_TIME
105951 + * struct compat_futex_wait_block - Block of futexes to be waited for
105952 + * @uaddr:     User address of the futex (compatible pointer)
105953 + * @val:       Futex value expected by userspace
105954 + * @bitset:    Bitset for the optional bitmasked wakeup
105955 + */
105956 +struct compat_futex_wait_block {
105957 +       compat_uptr_t   uaddr;
105958 +       __u32 pad;
105959 +       __u32 val;
105960 +       __u32 bitset;
105964 + * compat_futex_read_wait_block - Read an array of futex_wait_block from
105965 + * userspace
105966 + * @uaddr:     Userspace address of the block
105967 + * @count:     Number of blocks to be read
105969 + * This function does the same as futex_read_wait_block(), except that it
105970 + * converts the pointer to the futex from the compat version to the regular one.
105971 + */
105972 +inline struct futex_q *compat_futex_read_wait_block(u32 __user *uaddr,
105973 +                                                   u32 count)
105975 +       unsigned int i;
105976 +       struct futex_q *qs;
105977 +       struct compat_futex_wait_block fwb;
105978 +       struct compat_futex_wait_block __user *entry =
105979 +               (struct compat_futex_wait_block __user *)uaddr;
105981 +       if (!count || count > FUTEX_MULTIPLE_MAX_COUNT)
105982 +               return ERR_PTR(-EINVAL);
105984 +       qs = kcalloc(count, sizeof(*qs), GFP_KERNEL);
105985 +       if (!qs)
105986 +               return ERR_PTR(-ENOMEM);
105988 +       for (i = 0; i < count; i++) {
105989 +               if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) {
105990 +                       kfree(qs);
105991 +                       return ERR_PTR(-EFAULT);
105992 +               }
105994 +               qs[i].uaddr = compat_ptr(fwb.uaddr);
105995 +               qs[i].uval = fwb.val;
105996 +               qs[i].bitset = fwb.bitset;
105997 +       }
105999 +       return qs;
106002  SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
106003                 const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
106004                 u32, val3)
106005 @@ -3967,16 +4301,17 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
106007         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
106008                       cmd == FUTEX_WAIT_BITSET ||
106009 -                     cmd == FUTEX_WAIT_REQUEUE_PI)) {
106010 +                     cmd == FUTEX_WAIT_REQUEUE_PI ||
106011 +                     cmd == FUTEX_WAIT_MULTIPLE)) {
106012                 if (get_old_timespec32(&ts, utime))
106013                         return -EFAULT;
106014                 if (!timespec64_valid(&ts))
106015                         return -EINVAL;
106017                 t = timespec64_to_ktime(ts);
106018 -               if (cmd == FUTEX_WAIT)
106019 +               if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE)
106020                         t = ktime_add_safe(ktime_get(), t);
106021 -               else if (!(op & FUTEX_CLOCK_REALTIME))
106022 +               else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
106023                         t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
106024                 tp = &t;
106025         }
106026 @@ -3984,6 +4319,19 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
106027             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
106028                 val2 = (int) (unsigned long) utime;
106030 +       if (cmd == FUTEX_WAIT_MULTIPLE) {
106031 +               int ret;
106032 +               struct futex_q *qs = compat_futex_read_wait_block(uaddr, val);
106034 +               if (IS_ERR(qs))
106035 +                       return PTR_ERR(qs);
106037 +               ret = futex_wait_multiple(qs, op, val, tp);
106038 +               kfree(qs);
106040 +               return ret;
106041 +       }
106043         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
106045  #endif /* CONFIG_COMPAT_32BIT_TIME */
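
Taken together, the futex.c changes above expose a vectored wait: uaddr
points at an array of futex_wait_block, val carries the element count, and
the return value is the index of the futex that woke us. A hedged
user-space sketch (the FUTEX_WAIT_MULTIPLE opcode value and the uapi
struct layout live in this series' header changes, outside this section,
so both are redeclared here as assumptions):

    #include <linux/futex.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    #ifndef FUTEX_WAIT_MULTIPLE
    #define FUTEX_WAIT_MULTIPLE 31      /* assumed, per this series */
    #endif

    /* Mirrors the uapi struct read by futex_read_wait_block() above. */
    struct futex_wait_block {
            uint32_t *uaddr;            /* futex word to wait on */
            uint32_t val;               /* expected value        */
            uint32_t bitset;            /* wakeup bitset         */
    };

    static uint32_t word_a, word_b;

    int main(void)
    {
            struct futex_wait_block blocks[2] = {
                    { &word_a, 0, FUTEX_BITSET_MATCH_ANY },
                    { &word_b, 0, FUTEX_BITSET_MATCH_ANY },
            };
            struct timespec timeout = { .tv_sec = 1 }; /* relative */

            /* Returns the index of the woken futex, or -1 with errno
             * set (e.g. ETIMEDOUT).
             */
            long ret = syscall(SYS_futex, blocks, FUTEX_WAIT_MULTIPLE,
                               2 /* count */, &timeout, NULL, 0);
            printf("ret=%ld\n", ret);
            return 0;
    }
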
106046 diff --git a/kernel/futex2.c b/kernel/futex2.c
106047 new file mode 100644
106048 index 000000000000..dd6f54ae0220
106049 --- /dev/null
106050 +++ b/kernel/futex2.c
106051 @@ -0,0 +1,1239 @@
106052 +// SPDX-License-Identifier: GPL-2.0-or-later
106054 + * futex2 system call interface by André Almeida <andrealmeid@collabora.com>
106056 + * Copyright 2021 Collabora Ltd.
106058 + * Based on original futex implementation by:
106059 + *  (C) 2002 Rusty Russell, IBM
106060 + *  (C) 2003, 2006 Ingo Molnar, Red Hat Inc.
106061 + *  (C) 2003, 2004 Jamie Lokier
106062 + *  (C) 2006 Thomas Gleixner, Timesys Corp.
106063 + *  (C) 2007 Eric Dumazet
106064 + *  (C) 2009 Darren Hart, IBM
106065 + */
106067 +#include <linux/freezer.h>
106068 +#include <linux/hugetlb.h>
106069 +#include <linux/jhash.h>
106070 +#include <linux/memblock.h>
106071 +#include <linux/pagemap.h>
106072 +#include <linux/sched/wake_q.h>
106073 +#include <linux/spinlock.h>
106074 +#include <linux/syscalls.h>
106075 +#include <uapi/linux/futex.h>
106077 +#ifdef CONFIG_X86_64
106078 +#include <linux/compat.h>
106079 +#endif
106082 + * struct futex_key - Components to build unique key for a futex
106083 + * @pointer: Pointer to current->mm or inode's UUID for file backed futexes
106084 + * @index: Start address of the page containing futex or index of the page
106085 + * @offset: Address offset of uaddr in a page
106086 + */
106087 +struct futex_key {
106088 +       u64 pointer;
106089 +       unsigned long index;
106090 +       unsigned long offset;
106094 + * struct futex_waiter - List entry for a waiter
106095 + * @uaddr:        Virtual address of userspace futex
106096 + * @key:          Information that uniquely identifies a futex
106097 + * @list:         List node struct
106098 + * @val:          Expected value for this waiter
106099 + * @flags:        Flags
106100 + * @bucket:       Pointer to the bucket for this waiter
106101 + * @index:        Index of waiter in futexv list
106102 + */
106103 +struct futex_waiter {
106104 +       void __user *uaddr;
106105 +       struct futex_key key;
106106 +       struct list_head list;
106107 +       unsigned int val;
106108 +       unsigned int flags;
106109 +       struct futex_bucket *bucket;
106110 +       unsigned int index;
106114 + * struct futex_waiter_head - List of futexes to be waited on
106115 + * @task:    Task to be awoken
106116 + * @hint:    Was someone on this list awakened?
106117 + * @objects: List of futexes
106118 + */
106119 +struct futex_waiter_head {
106120 +       struct task_struct *task;
106121 +       bool hint;
106122 +       struct futex_waiter objects[0];
106126 + * struct futex_bucket - A bucket of futex's hash table
106127 + * @waiters: Number of waiters in the bucket
106128 + * @lock:    Bucket lock
106129 + * @list:    List of waiters on this bucket
106130 + */
106131 +struct futex_bucket {
106132 +       atomic_t waiters;
106133 +       spinlock_t lock;
106134 +       struct list_head list;
106137 +/* Mask for futex2 flag operations */
106138 +#define FUTEX2_MASK (FUTEX_SIZE_MASK | FUTEX_CLOCK_REALTIME | FUTEX_SHARED_FLAG)
106140 +/* Mask for sys_futex_waitv flag */
106141 +#define FUTEXV_MASK (FUTEX_CLOCK_REALTIME)
106143 +/* Mask for each futex in futex_waitv list */
106144 +#define FUTEXV_WAITER_MASK (FUTEX_SIZE_MASK | FUTEX_SHARED_FLAG)
106146 +#define is_object_shared ((futexv->objects[i].flags & FUTEX_SHARED_FLAG) ? true : false)
106148 +#define FUT_OFF_INODE    1 /* We set bit 0 if key has a reference on inode */
106149 +#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */
106151 +static struct futex_bucket *futex_table;
106152 +static unsigned int futex2_hashsize;
106155 + * Reflects a new waiter being added to the waitqueue.
106156 + */
106157 +static inline void bucket_inc_waiters(struct futex_bucket *bucket)
106159 +#ifdef CONFIG_SMP
106160 +       atomic_inc(&bucket->waiters);
106161 +       /*
106162 +        * Issue a barrier after adding so futex_wake() will see that the
106163 +        * value had increased
106164 +        */
106165 +       smp_mb__after_atomic();
106166 +#endif
106170 + * Reflects a waiter being removed from the waitqueue by wakeup
106171 + * paths.
106172 + */
106173 +static inline void bucket_dec_waiters(struct futex_bucket *bucket)
106175 +#ifdef CONFIG_SMP
106176 +       atomic_dec(&bucket->waiters);
106177 +#endif
106181 + * Get the number of waiters in a bucket
106182 + */
106183 +static inline int bucket_get_waiters(struct futex_bucket *bucket)
106185 +#ifdef CONFIG_SMP
106186 +       /*
106187 +        * Issue a barrier before reading so we get an updated value from
106188 +        * futex_wait()
106189 +        */
106190 +       smp_mb();
106191 +       return atomic_read(&bucket->waiters);
106192 +#else
106193 +       return 1;
106194 +#endif
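
The inc/dec/read helpers above implement the standard futex
waiter-accounting handshake: the waiter's smp_mb__after_atomic() pairs
with the smp_mb() in bucket_get_waiters(), so at least one side observes
the other's write and a wakeup can never be lost. A sketch of the
protocol (the futex_wake()-side caller is outside this excerpt, and
wake_waiters() is a stand-in name):

    /*
     * Waiter (futex_wait)                Waker (futex_wake)
     * -------------------                ------------------
     * bucket_inc_waiters(b);             *uaddr = new_value;
     * smp_mb__after_atomic();            smp_mb();  // bucket_get_waiters()
     * if (*uaddr != expected)            if (bucket_get_waiters(b))
     *         return -EAGAIN;                    wake_waiters(b);
     * sleep();
     *
     * Either the waker observes waiters > 0 and issues the wakeup, or
     * the waiter observes the new value and never sleeps.
     */
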
106198 + * futex_get_inode_uuid - Gets a UUID for an inode
106199 + * @inode: inode to get UUID
106201 + * Generate a machine wide unique identifier for this inode.
106203 + * This relies on u64 not wrapping in the lifetime of the machine, which with
106204 + * 1ns resolution means almost 585 years.
106206 + * This further relies on the fact that a well formed program will not unmap
106207 + * the file while it has a (shared) futex waiting on it. This mapping will have
106208 + * a file reference which pins the mount and inode.
106210 + * If for some reason an inode gets evicted and read back in again, it will get
106211 + * a new sequence number and will _NOT_ match, even though it is the exact same
106212 + * file.
106214 + * It is important that match_futex() will never have a false-positive,
106215 + * especially for PI futexes that can mess up the state. The above argues
106216 + * that false-negatives are only possible for malformed programs.
106218 + * Returns: UUID for the given inode
106219 + */
106220 +static u64 futex_get_inode_uuid(struct inode *inode)
106222 +       static atomic64_t i_seq;
106223 +       u64 old;
106225 +       /* Does the inode already have a sequence number? */
106226 +       old = atomic64_read(&inode->i_sequence2);
106228 +       if (likely(old))
106229 +               return old;
106231 +       for (;;) {
106232 +               u64 new = atomic64_add_return(1, &i_seq);
106234 +               if (WARN_ON_ONCE(!new))
106235 +                       continue;
106237 +               old = atomic64_cmpxchg_relaxed(&inode->i_sequence2, 0, new);
106238 +               if (old)
106239 +                       return old;
106240 +               return new;
106241 +       }
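
The loop above is a lazy assign-once pattern: a global counter hands out candidate IDs, and a cmpxchg against zero guarantees that all racing callers settle on the same value. A self-contained userspace analog using C11 atomics (a sketch under the same "u64 never wraps" assumption; the names are made up and not part of the patch):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t id_seq;

    /* Lazily assign a unique, stable ID to *slot; 0 means "unassigned". */
    static uint64_t get_unique_id(_Atomic uint64_t *slot)
    {
            uint64_t old = atomic_load(slot);

            if (old)                /* fast path: already assigned */
                    return old;

            for (;;) {
                    uint64_t id = atomic_fetch_add(&id_seq, 1) + 1;
                    uint64_t expected = 0;

                    if (!id)        /* skip 0, the "unassigned" marker */
                            continue;
                    if (atomic_compare_exchange_strong(slot, &expected, id))
                            return id;       /* we won the race */
                    return expected;         /* someone else won; use theirs */
            }
    }
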
106245 + * futex_get_shared_key - Get a key for a shared futex
106246 + * @address: Futex memory address
106247 + * @mm:      Current process mm_struct pointer
106248 + * @key:     Key struct to be filled
106250 + * Returns: 0 on success, error code otherwise
106251 + */
106252 +static int futex_get_shared_key(uintptr_t address, struct mm_struct *mm,
106253 +                               struct futex_key *key)
106255 +       int ret;
106256 +       struct page *page, *tail;
106257 +       struct address_space *mapping;
106259 +again:
106260 +       ret = get_user_pages_fast(address, 1, 0, &page);
106261 +       if (ret < 0)
106262 +               return ret;
106264 +       /*
106265 +        * The treatment of mapping from this point on is critical. The page
106266 +        * lock protects many things but in this context the page lock
106267 +        * stabilizes mapping, prevents inode freeing in the shared
106268 +        * file-backed region case and guards against movement to swap cache.
106269 +        *
106270 +        * Strictly speaking the page lock is not needed in all cases being
106271 +        * considered here and the page lock forces unnecessary serialization.
106272 +        * From this point on, mapping will be re-verified if necessary and
106273 +        * the page lock will be acquired only if it is unavoidable.
106274 +        *
106275 +        * Mapping checks require the head page for any compound page so the
106276 +        * head page and mapping are looked up now. For anonymous pages, it
106277 +        * does not matter if the page splits in the future as the key is
106278 +        * based on the address. For filesystem-backed pages, the tail is
106279 +        * required as the index of the page determines the key. For
106280 +        * base pages, there is no tail page and tail == page.
106281 +        */
106282 +       tail = page;
106283 +       page = compound_head(page);
106284 +       mapping = READ_ONCE(page->mapping);
106286 +       /*
106287 +        * If page->mapping is NULL, then it cannot be a PageAnon
106288 +        * page; but it might be the ZERO_PAGE or in the gate area or
106289 +        * in a special mapping (all cases which we are happy to fail);
106290 +        * or it may have been a good file page when get_user_pages_fast
106291 +        * found it, but truncated or holepunched or subjected to
106292 +        * invalidate_complete_page2 before we got the page lock (also
106293 +        * cases which we are happy to fail).  And we hold a reference,
106294 +        * so refcount care in invalidate_complete_page's remove_mapping
106295 +        * prevents drop_caches from setting mapping to NULL beneath us.
106296 +        *
106297 +        * The case we do have to guard against is when memory pressure made
106298 +        * shmem_writepage move it from filecache to swapcache beneath us:
106299 +        * an unlikely race, but we do need to retry for page->mapping.
106300 +        */
106301 +       if (unlikely(!mapping)) {
106302 +               int shmem_swizzled;
106304 +               /*
106305 +                * Page lock is required to identify which special case above
106306 +                * applies. If this is really a shmem page then the page lock
106307 +                * will prevent unexpected transitions.
106308 +                */
106309 +               lock_page(page);
106310 +               shmem_swizzled = PageSwapCache(page) || page->mapping;
106311 +               unlock_page(page);
106312 +               put_page(page);
106314 +               if (shmem_swizzled)
106315 +                       goto again;
106317 +               return -EFAULT;
106318 +       }
106320 +       /*
106321 +        * Private mappings are handled in a simple way.
106322 +        *
106323 +        * If the futex key is stored on an anonymous page, then the associated
106324 +        * object is the mm which is implicitly pinned by the calling process.
106325 +        *
106326 +        * NOTE: When userspace waits on a MAP_SHARED mapping, even if
106327 +        * it's a read-only handle, it's expected that futexes attach to
106328 + * the object, not the particular process.
106329 +        */
106330 +       if (PageAnon(page)) {
106331 +               key->offset |= FUT_OFF_MMSHARED;
106332 +       } else {
106333 +               struct inode *inode;
106335 +               /*
106336 +                * The associated futex object in this case is the inode and
106337 +                * the page->mapping must be traversed. Ordinarily this should
106338 +                * be stabilised under page lock but it's not strictly
106339 +                * necessary in this case as we just want to pin the inode, not
106340 +                * update the radix tree or anything like that.
106341 +                *
106342 +                * The RCU read lock is taken as the inode is finally freed
106343 +                * under RCU. If the mapping still matches expectations then the
106344 +                * mapping->host can be safely accessed as being a valid inode.
106345 +                */
106346 +               rcu_read_lock();
106348 +               if (READ_ONCE(page->mapping) != mapping) {
106349 +                       rcu_read_unlock();
106350 +                       put_page(page);
106352 +                       goto again;
106353 +               }
106355 +               inode = READ_ONCE(mapping->host);
106356 +               if (!inode) {
106357 +                       rcu_read_unlock();
106358 +                       put_page(page);
106360 +                       goto again;
106361 +               }
106363 +               key->pointer = futex_get_inode_uuid(inode);
106364 +               key->index = (unsigned long)basepage_index(tail);
106365 +               key->offset |= FUT_OFF_INODE;
106367 +               rcu_read_unlock();
106368 +       }
106370 +       put_page(page);
106372 +       return 0;
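
Putting it together, the resulting futex_key distinguishes the three backing cases roughly as follows (a summary of this function combined with futex_get_bucket() below; not literal code):

    private:        .pointer = page-aligned uaddr, .index = (unsigned long)mm,
                    .offset  = offset within the page
    shared anon:    same as private, with FUT_OFF_MMSHARED or'ed into .offset
    shared file:    .pointer = inode UUID, .index = page index,
                    .offset  = offset within the page | FUT_OFF_INODE
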
106376 + * futex_get_bucket - Check if the user address is valid, prepare internal
106377 + *                    data and calculate the hash
106378 + * @uaddr:   futex user address
106379 + * @key:     data that uniquely identifies a futex
106380 + * @shared:  is this a shared futex?
106382 + * For private futexes, each uaddr will be unique for a given mm_struct, and it
106383 + * won't be freed for the lifetime of the process. For shared futexes, check
106384 + * futex_get_shared_key().
106386 + * Return: address of bucket on success, error code otherwise
106387 + */
106388 +static struct futex_bucket *futex_get_bucket(void __user *uaddr,
106389 +                                            struct futex_key *key,
106390 +                                            bool shared)
106392 +       uintptr_t address = (uintptr_t)uaddr;
106393 +       u32 hash_key;
106395 +       /* Checking if uaddr is valid and accessible */
106396 +       if (unlikely(!IS_ALIGNED(address, sizeof(u32))))
106397 +               return ERR_PTR(-EINVAL);
106398 +       if (unlikely(!access_ok(uaddr, sizeof(u32))))
106399 +               return ERR_PTR(-EFAULT);
106401 +       key->offset = address % PAGE_SIZE;
106402 +       address -= key->offset;
106403 +       key->pointer = (u64)address;
106404 +       key->index = (unsigned long)current->mm;
106406 +       if (shared)
106407 +               futex_get_shared_key(address, current->mm, key);
106409 +       /* Generate the hash key for this futex from its futex_key contents */
106410 +       hash_key = jhash2((u32 *)key, sizeof(*key) / sizeof(u32), 0);
106412 +       /* Since futex2_hashsize is 2^n, subtracting 1 makes a perfect bit mask */
106413 +       return &futex_table[hash_key & (futex2_hashsize - 1)];
106417 + * futex_get_user - Get the userspace value on this address
106418 + * @uval:  variable to store the value
106419 + * @uaddr: userspace address
106421 + * Check the comment at futex_enqueue() for more information.
106422 + */
106423 +static int futex_get_user(u32 *uval, u32 __user *uaddr)
106425 +       int ret;
106427 +       pagefault_disable();
106428 +       ret = __get_user(*uval, uaddr);
106429 +       pagefault_enable();
106431 +       return ret;
106435 + * futex_setup_time - Prepare the timeout mechanism and start it.
106436 + * @timo:    Timeout value from userspace
106437 + * @timeout: Pointer to hrtimer handler
106438 + * @flags: Flags from userspace, to decide which clockid to use
106440 + * Return: 0 on success, error code otherwise
106441 + */
106442 +static int futex_setup_time(struct __kernel_timespec __user *timo,
106443 +                           struct hrtimer_sleeper *timeout,
106444 +                           unsigned int flags)
106446 +       ktime_t time;
106447 +       struct timespec64 ts;
106448 +       clockid_t clockid = (flags & FUTEX_CLOCK_REALTIME) ?
106449 +                           CLOCK_REALTIME : CLOCK_MONOTONIC;
106451 +       if (get_timespec64(&ts, timo))
106452 +               return -EFAULT;
106454 +       if (!timespec64_valid(&ts))
106455 +               return -EINVAL;
106457 +       time = timespec64_to_ktime(ts);
106459 +       hrtimer_init_sleeper(timeout, clockid, HRTIMER_MODE_ABS);
106461 +       hrtimer_set_expires(&timeout->timer, time);
106463 +       hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);
106465 +       return 0;
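
Note the HRTIMER_MODE_ABS: the sleeper is armed with an absolute expiry, so userspace passes a point in time rather than a duration. A sketch of building one (the clock must be CLOCK_REALTIME when FUTEX_CLOCK_REALTIME is set, CLOCK_MONOTONIC otherwise):

    #include <time.h>

    struct timespec timo;

    clock_gettime(CLOCK_MONOTONIC, &timo);  /* or CLOCK_REALTIME with the flag */
    timo.tv_sec += 2;                       /* expire two seconds from now */
    /* pass &timo as the timo argument of futex_wait()/futex_waitv() */
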
106469 + * futex_dequeue_multiple - Remove multiple futexes from hash table
106470 + * @futexv: list of waiters
106471 + * @nr:     number of futexes to be removed
106473 + * This function is used if (a) something went wrong while enqueuing, and we
106474 + * need to undo our work (then nr <= nr_futexes), or (b) we woke up, and thus
106475 + * need to remove every waiter, check whether some were indeed woken, and return.
106476 + * Before removing a waiter, we check if it's still on the list, since we have
106477 + * no clue which waiters have already been woken.
106479 + * Return:
106480 + *  * -1   - No futex was woken during the removal
106481 + *  * >= 0 - At least one futex was found woken; index of the last one
106482 + */
106483 +static int futex_dequeue_multiple(struct futex_waiter_head *futexv, unsigned int nr)
106485 +       int i, ret = -1;
106487 +       for (i = 0; i < nr; i++) {
106488 +               spin_lock(&futexv->objects[i].bucket->lock);
106489 +               if (!list_empty(&futexv->objects[i].list)) {
106490 +                       list_del_init(&futexv->objects[i].list);
106491 +                       bucket_dec_waiters(futexv->objects[i].bucket);
106492 +               } else {
106493 +                       ret = i;
106494 +               }
106495 +               spin_unlock(&futexv->objects[i].bucket->lock);
106496 +       }
106498 +       return ret;
106502 + * futex_enqueue - Check the value and enqueue a futex on a wait list
106504 + * @futexv:     List of futexes
106505 + * @nr_futexes: Number of futexes in the list
106506 + * @awakened:  If a futex was awakened during enqueueing, store the index here
106508 + * Get the value from the userspace address and compare it with the expected one.
106510 + * Getting the value from the user futex address:
106512 + * Since we are in a hurry, we use a spin lock and thus we can't sleep.
106513 + * Try to get the value with page faults disabled (when enabled, we
106514 + * might sleep).
106516 + * If we fail, we can't tell whether the address is invalid or whether it
106517 + * just needs a page fault. So, release the lock (so we can sleep) and try
106518 + * to get the value with page faults enabled. In order to trigger page
106519 + * fault handling, we just call __get_user() again. If we sleep with
106520 + * enqueued futexes, we might miss a wake, so dequeue everything before sleeping.
106522 + * If __get_user() succeeds, this means that the address is valid and we
106523 + * do the work again. Since we just handled the page fault, the page is
106524 + * likely resident in memory and we should be luckier this time and be
106525 + * able to get the value. If we fail anyway, we will try again.
106527 + * If we get an error even with page faults enabled, this means that the
106528 + * address is not valid and we return from the syscall.
106530 + * If we got an unexpected value, or while handling a page fault we realize
106531 + * that a futex was awakened, we can prioritize that and return success.
106533 + * On success, the futex is enqueued in the correct bucket.
106535 + * Return:
106536 + * * 1   - We were woken in the process and nothing is enqueued
106537 + * * 0   - Everything is enqueued and we are ready to sleep
106538 + * * < 0 - Something went wrong, nothing is enqueued; return error code
106539 + */
106540 +static int futex_enqueue(struct futex_waiter_head *futexv, unsigned int nr_futexes,
106541 +                        int *awakened)
106543 +       int i, ret;
106544 +       u32 uval, val;
106545 +       u32 __user *uaddr;
106546 +       bool retry = false;
106547 +       struct futex_bucket *bucket;
106549 +retry:
106550 +       set_current_state(TASK_INTERRUPTIBLE);
106552 +       for (i = 0; i < nr_futexes; i++) {
106553 +               uaddr = (u32 __user *)futexv->objects[i].uaddr;
106554 +               val = (u32)futexv->objects[i].val;
106556 +               if (is_object_shared && retry) {
106557 +                       struct futex_bucket *tmp =
106558 +                               futex_get_bucket((void __user *)uaddr,
106559 +                                                &futexv->objects[i].key, true);
106560 +                       if (IS_ERR(tmp)) {
106561 +                               __set_current_state(TASK_RUNNING);
106562 +                               futex_dequeue_multiple(futexv, i);
106563 +                               return PTR_ERR(tmp);
106564 +                       }
106565 +                       futexv->objects[i].bucket = tmp;
106566 +               }
106568 +               bucket = futexv->objects[i].bucket;
106570 +               bucket_inc_waiters(bucket);
106571 +               spin_lock(&bucket->lock);
106573 +               ret = futex_get_user(&uval, uaddr);
106575 +               if (unlikely(ret)) {
106576 +                       spin_unlock(&bucket->lock);
106578 +                       bucket_dec_waiters(bucket);
106579 +                       __set_current_state(TASK_RUNNING);
106580 +                       *awakened = futex_dequeue_multiple(futexv, i);
106582 +                       if (*awakened >= 0)
106583 +                               return 1;
106585 +                       if (__get_user(uval, uaddr))
106586 +                               return -EFAULT;
106588 +                       retry = true;
106589 +                       goto retry;
106590 +               }
106592 +               if (uval != val) {
106593 +                       spin_unlock(&bucket->lock);
106595 +                       bucket_dec_waiters(bucket);
106596 +                       __set_current_state(TASK_RUNNING);
106597 +                       *awakened = futex_dequeue_multiple(futexv, i);
106599 +                       if (*awakened >= 0)
106600 +                               return 1;
106602 +                       return -EAGAIN;
106603 +               }
106605 +               list_add_tail(&futexv->objects[i].list, &bucket->list);
106606 +               spin_unlock(&bucket->lock);
106607 +       }
106609 +       return 0;
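
Distilled, the fault handling above reduces to this pattern for every futex value read taken under the bucket spinlock (a restatement of the code above, not an additional API):

    pagefault_disable();
    ret = __get_user(uval, uaddr);          /* cannot sleep; may fail */
    pagefault_enable();

    if (ret) {
            /* drop the lock and dequeue everything, then we may sleep: */
            if (__get_user(uval, uaddr))    /* faults the page in */
                    return -EFAULT;         /* truly bad address */
            goto retry;                     /* page is resident now */
    }
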
106613 + * __futex_waitv - Enqueue the list of futexes and wait to be woken
106614 + * @futexv: List of futexes to wait
106615 + * @nr_futexes: Length of futexv
106616 + * @timo:      Timeout
106617 + * @flags:     Timeout flags
106619 + * Return:
106620 + * * >= 0 - Hint of which futex woke us
106621 + * * < 0  - Error code
106622 + */
106623 +static int __futex_waitv(struct futex_waiter_head *futexv, unsigned int nr_futexes,
106624 +                        struct __kernel_timespec __user *timo,
106625 +                        unsigned int flags)
106627 +       int ret;
106628 +       struct hrtimer_sleeper timeout;
106630 +       if (timo) {
106631 +               ret = futex_setup_time(timo, &timeout, flags);
106632 +               if (ret)
106633 +                       return ret;
106634 +       }
106636 +       while (1) {
106637 +               int awakened = -1;
106639 +               ret = futex_enqueue(futexv, nr_futexes, &awakened);
106641 +               if (ret) {
106642 +                       if (awakened >= 0)
106643 +                               ret = awakened;
106644 +                       break;
106645 +               }
106647 +               /* Before sleeping, check if someone was woken */
106648 +               if (!futexv->hint && (!timo || timeout.task))
106649 +                       freezable_schedule();
106651 +               __set_current_state(TASK_RUNNING);
106653 +               /*
106654 +                * One of these things triggered this wake:
106655 +                *
106656 +                * * We have been removed from the bucket. futex_wake() woke
106657 +                *   us. We just need to dequeue and return 0 to userspace.
106658 +                *
106659 +                * However, if no futex was dequeued by a futex_wake():
106660 +                *
106661 +                * * If there's a timeout and it has expired,
106662 +                *   return -ETIMEDOUT.
106663 +                *
106664 +                * * If there is a signal pending, something wants to kill our
106665 +                *   thread, return -ERESTARTSYS.
106666 +                *
106667 +                * * If there's no signal pending, it was a spurious wake
106668 +                *   (scheduler gave us a chance to do some work, even if we
106669 +                *   don't want to). We need to remove ourselves from the
106670 +                *   bucket and add again, to prevent losing wakeups in the
106671 +                *   meantime.
106672 +                */
106674 +               ret = futex_dequeue_multiple(futexv, nr_futexes);
106676 +               /* Normal wake */
106677 +               if (ret >= 0)
106678 +                       break;
106680 +               if (timo && !timeout.task) {
106681 +                       ret = -ETIMEDOUT;
106682 +                       break;
106683 +               }
106685 +               if (signal_pending(current)) {
106686 +                       ret = -ERESTARTSYS;
106687 +                       break;
106688 +               }
106690 +               /* Spurious wake, do everything again */
106691 +       }
106693 +       if (timo)
106694 +               hrtimer_cancel(&timeout.timer);
106696 +       return ret;
106700 + * sys_futex_wait - Wait on a futex address if (*uaddr) == val
106701 + * @uaddr: User address of futex
106702 + * @val:   Expected value of futex
106703 + * @flags: Specify the size of futex and the clockid
106704 + * @timo:  Optional absolute timeout.
106706 + * The user thread is put to sleep, waiting for a futex_wake() at uaddr, if the
106707 + * value at *uaddr is the same as val (otherwise, the syscall returns
106708 + * immediately with -EAGAIN).
106710 + * Returns 0 on success, error code otherwise.
106711 + */
106712 +SYSCALL_DEFINE4(futex_wait, void __user *, uaddr, unsigned int, val,
106713 +               unsigned int, flags, struct __kernel_timespec __user *, timo)
106715 +       bool shared = (flags & FUTEX_SHARED_FLAG) ? true : false;
106716 +       unsigned int size = flags & FUTEX_SIZE_MASK;
106717 +       struct futex_waiter *waiter;
106718 +       struct futex_waiter_head *futexv;
106720 +       /* Wrapper for a futex_waiter_head with one element */
106721 +       struct {
106722 +               struct futex_waiter_head futexv;
106723 +               struct futex_waiter waiter;
106724 +       } __packed wait_single;
106726 +       if (flags & ~FUTEX2_MASK)
106727 +               return -EINVAL;
106729 +       if (size != FUTEX_32)
106730 +               return -EINVAL;
106732 +       futexv = &wait_single.futexv;
106733 +       futexv->task = current;
106734 +       futexv->hint = false;
106736 +       waiter = &wait_single.waiter;
106737 +       waiter->index = 0;
106738 +       waiter->val = val;
106739 +       waiter->uaddr = uaddr;
106740 +       memset(&wait_single.waiter.key, 0, sizeof(struct futex_key));
106742 +       INIT_LIST_HEAD(&waiter->list);
106744 +       /* Get an unlocked hash bucket */
106745 +       waiter->bucket = futex_get_bucket(uaddr, &waiter->key, shared);
106746 +       if (IS_ERR(waiter->bucket))
106747 +               return PTR_ERR(waiter->bucket);
106749 +       return __futex_waitv(futexv, 1, timo, flags);
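
A minimal userspace sketch of calling this syscall (hedged: __NR_futex_wait is whatever number this tree assigns, and FUTEX_32 comes from the patched uapi headers; both are assumptions, since none of this exists in mainline 5.12):

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static uint32_t fword;  /* the 32-bit futex word */

    /* Sleep while fword == 0; returns 0 when woken, -1/errno otherwise. */
    static long wait_on_fword(void)
    {
            return syscall(__NR_futex_wait, &fword, 0u, FUTEX_32, NULL);
    }

An errno of EAGAIN means the value had already changed; ETIMEDOUT and ERESTARTSYS map to the cases documented in __futex_waitv() above.
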
106752 +#ifdef CONFIG_COMPAT
106754 + * compat_futex_parse_waitv - Parse a waitv array from userspace
106755 + * @futexv:    Kernel side list of waiters to be filled
106756 + * @uwaitv:     Userspace list to be parsed
106757 + * @nr_futexes: Length of futexv
106759 + * Return: 0 on success, error code on failure
106760 + */
106761 +static int compat_futex_parse_waitv(struct futex_waiter_head *futexv,
106762 +                                   struct compat_futex_waitv __user *uwaitv,
106763 +                                   unsigned int nr_futexes)
106765 +       struct futex_bucket *bucket;
106766 +       struct compat_futex_waitv waitv;
106767 +       unsigned int i;
106769 +       for (i = 0; i < nr_futexes; i++) {
106770 +               if (copy_from_user(&waitv, &uwaitv[i], sizeof(waitv)))
106771 +                       return -EFAULT;
106773 +               if ((waitv.flags & ~FUTEXV_WAITER_MASK) ||
106774 +                   (waitv.flags & FUTEX_SIZE_MASK) != FUTEX_32)
106775 +                       return -EINVAL;
106777 +               futexv->objects[i].key.pointer = 0;
106778 +               futexv->objects[i].flags  = waitv.flags;
106779 +               futexv->objects[i].uaddr  = compat_ptr(waitv.uaddr);
106780 +               futexv->objects[i].val    = waitv.val;
106781 +               futexv->objects[i].index  = i;
106783 +               bucket = futex_get_bucket(compat_ptr(waitv.uaddr),
106784 +                                         &futexv->objects[i].key,
106785 +                                         is_object_shared);
106787 +               if (IS_ERR(bucket))
106788 +                       return PTR_ERR(bucket);
106790 +               futexv->objects[i].bucket = bucket;
106792 +               INIT_LIST_HEAD(&futexv->objects[i].list);
106793 +       }
106795 +       return 0;
106798 +COMPAT_SYSCALL_DEFINE4(futex_waitv, struct compat_futex_waitv __user *, waiters,
106799 +                      unsigned int, nr_futexes, unsigned int, flags,
106800 +                      struct __kernel_timespec __user *, timo)
106802 +       struct futex_waiter_head *futexv;
106803 +       int ret;
106805 +       if (flags & ~FUTEXV_MASK)
106806 +               return -EINVAL;
106808 +       if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)
106809 +               return -EINVAL;
106811 +       futexv = kmalloc((sizeof(struct futex_waiter) * nr_futexes) +
106812 +                        sizeof(*futexv), GFP_KERNEL);
106813 +       if (!futexv)
106814 +               return -ENOMEM;
106816 +       futexv->hint = false;
106817 +       futexv->task = current;
106819 +       ret = compat_futex_parse_waitv(futexv, waiters, nr_futexes);
106821 +       if (!ret)
106822 +               ret = __futex_waitv(futexv, nr_futexes, timo, flags);
106824 +       kfree(futexv);
106826 +       return ret;
106828 +#endif
106831 + * futex_parse_waitv - Parse a waitv array from userspace
106832 + * @futexv:    Kernel side list of waiters to be filled
106833 + * @uwaitv:     Userspace list to be parsed
106834 + * @nr_futexes: Length of futexv
106836 + * Return: 0 on success, error code on failure
106837 + */
106838 +static int futex_parse_waitv(struct futex_waiter_head *futexv,
106839 +                            struct futex_waitv __user *uwaitv,
106840 +                            unsigned int nr_futexes)
106842 +       struct futex_bucket *bucket;
106843 +       struct futex_waitv waitv;
106844 +       unsigned int i;
106846 +       for (i = 0; i < nr_futexes; i++) {
106847 +               if (copy_from_user(&waitv, &uwaitv[i], sizeof(waitv)))
106848 +                       return -EFAULT;
106850 +               if ((waitv.flags & ~FUTEXV_WAITER_MASK) ||
106851 +                   (waitv.flags & FUTEX_SIZE_MASK) != FUTEX_32)
106852 +                       return -EINVAL;
106854 +               futexv->objects[i].key.pointer = 0;
106855 +               futexv->objects[i].flags  = waitv.flags;
106856 +               futexv->objects[i].uaddr  = waitv.uaddr;
106857 +               futexv->objects[i].val    = waitv.val;
106858 +               futexv->objects[i].index  = i;
106860 +               bucket = futex_get_bucket(waitv.uaddr, &futexv->objects[i].key,
106861 +                                         is_object_shared);
106863 +               if (IS_ERR(bucket))
106864 +                       return PTR_ERR(bucket);
106866 +               futexv->objects[i].bucket = bucket;
106868 +               INIT_LIST_HEAD(&futexv->objects[i].list);
106869 +       }
106871 +       return 0;
106875 + * sys_futex_waitv - Wait on a list of futexes
106876 + * @waiters:    List of futexes to wait on
106877 + * @nr_futexes: Length of futexv
106878 + * @flags:      Flag for timeout (monotonic/realtime)
106879 + * @timo:      Optional absolute timeout.
106881 + * Given an array of `struct futex_waitv`, wait on each uaddr. The thread wakes
106882 + * if a futex_wake() is performed at any uaddr. The syscall returns immediately
106883 + * if any waiter has *uaddr != val. *timo is an optional timeout value for the
106884 + * operation. Each waiter has individual flags. The `flags` argument for the
106885 + * syscall should be used solely for specifying the timeout as realtime, if
106886 + * needed. Flags for shared futexes, sizes, etc. should be used on the
106887 + * individual flags of each waiter.
106889 + * Returns the array index of one of the awakened futexes. There is no
106890 + * information about how many futexes were awakened, nor any particular
106891 + * attribute of this one (whether it was the first awakened, the one with the smallest index, ...).
106892 + */
106893 +SYSCALL_DEFINE4(futex_waitv, struct futex_waitv __user *, waiters,
106894 +               unsigned int, nr_futexes, unsigned int, flags,
106895 +               struct __kernel_timespec __user *, timo)
106897 +       struct futex_waiter_head *futexv;
106898 +       int ret;
106900 +       if (flags & ~FUTEXV_MASK)
106901 +               return -EINVAL;
106903 +       if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)
106904 +               return -EINVAL;
106906 +       futexv = kmalloc((sizeof(struct futex_waiter) * nr_futexes) +
106907 +                        sizeof(*futexv), GFP_KERNEL);
106908 +       if (!futexv)
106909 +               return -ENOMEM;
106911 +       futexv->hint = false;
106912 +       futexv->task = current;
106914 +#ifdef CONFIG_X86_X32_ABI
106915 +       if (in_x32_syscall()) {
106916 +               ret = compat_futex_parse_waitv(futexv, (struct compat_futex_waitv *)waiters,
106917 +                                              nr_futexes);
106918 +       } else
106919 +#endif
106920 +       {
106921 +               ret = futex_parse_waitv(futexv, waiters, nr_futexes);
106922 +       }
106924 +       if (!ret)
106925 +               ret = __futex_waitv(futexv, nr_futexes, timo, flags);
106927 +       kfree(futexv);
106929 +       return ret;
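
Continuing the userspace sketch from sys_futex_wait() above, waiting on two words at once would look roughly like this (struct futex_waitv layout as defined by this patch's uapi; __NR_futex_waitv is an assumed syscall number):

    struct futex_waitv waiters[2] = {
            { .uaddr = &word_a, .val = 0, .flags = FUTEX_32 },
            { .uaddr = &word_b, .val = 0, .flags = FUTEX_32 },
    };
    long idx = syscall(__NR_futex_waitv, waiters, 2, 0, NULL);
    /* idx >= 0: index of a woken entry; idx < 0: -1 with errno set */
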
106933 + * futex_get_parent - For a given futex in a futexv list, get a pointer to the futexv
106934 + * @waiter: Address of futex in the list
106935 + * @index: Index of futex in the list
106937 + * Return: A pointer to its futexv struct
106938 + */
106939 +static inline struct futex_waiter_head *futex_get_parent(uintptr_t waiter,
106940 +                                                        unsigned int index)
106942 +       uintptr_t parent = waiter - sizeof(struct futex_waiter_head)
106943 +                          - (uintptr_t)(index * sizeof(struct futex_waiter));
106945 +       return (struct futex_waiter_head *)parent;
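
This is container-of arithmetic over the flexible array: callers allocate the head and its objects[] contiguously, so element i lives at (char *)head + sizeof(head) + i * sizeof(waiter), and the subtraction above inverts that. In sketch form:

    struct futex_waiter_head *h;    /* kmalloc(sizeof(*h) + n * sizeof(h->objects[0])) */
    struct futex_waiter *w = &h->objects[i];

    /* then: futex_get_parent((uintptr_t)w, i) == h */
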
106949 + * futex_mark_wake - Find the task to be woken and add it to the wake queue
106950 + * @waiter: Waiter to be woken
106951 + * @bucket: Bucket to be decremented
106952 + * @wake_q: Wake queue to insert the task
106953 + */
106954 +static void futex_mark_wake(struct futex_waiter *waiter,
106955 +                           struct futex_bucket *bucket,
106956 +                           struct wake_q_head *wake_q)
106958 +       struct task_struct *task;
106959 +       struct futex_waiter_head *parent = futex_get_parent((uintptr_t)waiter,
106960 +                                                           waiter->index);
106962 +       lockdep_assert_held(&bucket->lock);
106963 +       parent->hint = true;
106964 +       task = parent->task;
106965 +       get_task_struct(task);
106966 +       list_del_init(&waiter->list);
106967 +       wake_q_add_safe(wake_q, task);
106968 +       bucket_dec_waiters(bucket);
106971 +static inline bool futex_match(struct futex_key key1, struct futex_key key2)
106973 +       return (key1.index == key2.index &&
106974 +               key1.pointer == key2.pointer &&
106975 +               key1.offset == key2.offset);
106978 +long ksys_futex_wake(void __user *uaddr, unsigned long nr_wake,
106979 +                    unsigned int flags)
106981 +       bool shared = (flags & FUTEX_SHARED_FLAG) ? true : false;
106982 +       unsigned int size = flags & FUTEX_SIZE_MASK;
106983 +       struct futex_waiter waiter, *aux, *tmp;
106984 +       struct futex_bucket *bucket;
106985 +       DEFINE_WAKE_Q(wake_q);
106986 +       int ret = 0;
106988 +       if (flags & ~FUTEX2_MASK)
106989 +               return -EINVAL;
106991 +       if (size != FUTEX_32)
106992 +               return -EINVAL;
106994 +       bucket = futex_get_bucket(uaddr, &waiter.key, shared);
106995 +       if (IS_ERR(bucket))
106996 +               return PTR_ERR(bucket);
106998 +       if (!bucket_get_waiters(bucket) || !nr_wake)
106999 +               return 0;
107001 +       spin_lock(&bucket->lock);
107002 +       list_for_each_entry_safe(aux, tmp, &bucket->list, list) {
107003 +               if (futex_match(waiter.key, aux->key)) {
107004 +                       futex_mark_wake(aux, bucket, &wake_q);
107005 +                       if (++ret >= nr_wake)
107006 +                               break;
107007 +               }
107008 +       }
107009 +       spin_unlock(&bucket->lock);
107011 +       wake_up_q(&wake_q);
107013 +       return ret;
107017 + * sys_futex_wake - Wake a number of futexes waiting on an address
107018 + * @uaddr:   Address of the futex whose waiters are to be woken up
107019 + * @nr_wake: Maximum number of waiters at uaddr to be woken up
107020 + * @flags:   Flags for size and shared
107022 + * Wake `nr_wake` threads waiting at uaddr.
107024 + * Returns the number of woken threads on success, error code otherwise.
107025 + */
107026 +SYSCALL_DEFINE3(futex_wake, void __user *, uaddr, unsigned int, nr_wake,
107027 +               unsigned int, flags)
107029 +       return ksys_futex_wake(uaddr, nr_wake, flags);
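
The matching wake side for the wait sketch above (again, __NR_futex_wake is an assumed syscall number):

    fword = 1;                                       /* publish the new value */
    syscall(__NR_futex_wake, &fword, 1, FUTEX_32);   /* wake at most one waiter */
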
107032 +static void futex_double_unlock(struct futex_bucket *b1, struct futex_bucket *b2)
107034 +       spin_unlock(&b1->lock);
107035 +       if (b1 != b2)
107036 +               spin_unlock(&b2->lock);
107039 +static inline int __futex_requeue(struct futex_requeue rq1,
107040 +                                 struct futex_requeue rq2, unsigned int nr_wake,
107041 +                                 unsigned int nr_requeue, unsigned int cmpval,
107042 +                                 bool shared1, bool shared2)
107044 +       struct futex_waiter w1, w2, *aux, *tmp;
107045 +       bool retry = false;
107046 +       struct futex_bucket *b1, *b2;
107047 +       DEFINE_WAKE_Q(wake_q);
107048 +       u32 uval;
107049 +       int ret;
107051 +       b1 = futex_get_bucket(rq1.uaddr, &w1.key, shared1);
107052 +       if (IS_ERR(b1))
107053 +               return PTR_ERR(b1);
107055 +       b2 = futex_get_bucket(rq2.uaddr, &w2.key, shared2);
107056 +       if (IS_ERR(b2))
107057 +               return PTR_ERR(b2);
107059 +retry:
107060 +       if (shared1 && retry) {
107061 +               b1 = futex_get_bucket(rq1.uaddr, &w1.key, shared1);
107062 +               if (IS_ERR(b1))
107063 +                       return PTR_ERR(b1);
107064 +       }
107066 +       if (shared2 && retry) {
107067 +               b2 = futex_get_bucket(rq2.uaddr, &w2.key, shared2);
107068 +               if (IS_ERR(b2))
107069 +                       return PTR_ERR(b2);
107070 +       }
107072 +       bucket_inc_waiters(b2);
107073 +       /*
107074 +        * To ensure the locks are taken in the same order for all threads (and
107075 +        * thereby avoiding deadlocks), take the "smaller" one first
107076 +        */
107077 +       if (b1 <= b2) {
107078 +               spin_lock(&b1->lock);
107079 +               if (b1 < b2)
107080 +                       spin_lock_nested(&b2->lock, SINGLE_DEPTH_NESTING);
107081 +       } else {
107082 +               spin_lock(&b2->lock);
107083 +               spin_lock_nested(&b1->lock, SINGLE_DEPTH_NESTING);
107084 +       }
107086 +       ret = futex_get_user(&uval, rq1.uaddr);
107088 +       if (unlikely(ret)) {
107089 +               futex_double_unlock(b1, b2);
107090 +               if (__get_user(uval, (u32 __user *)rq1.uaddr))
107091 +                       return -EFAULT;
107093 +               bucket_dec_waiters(b2);
107094 +               retry = true;
107095 +               goto retry;
107096 +       }
107098 +       if (uval != cmpval) {
107099 +               futex_double_unlock(b1, b2);
107101 +               bucket_dec_waiters(b2);
107102 +               return -EAGAIN;
107103 +       }
107105 +       list_for_each_entry_safe(aux, tmp, &b1->list, list) {
107106 +               if (futex_match(w1.key, aux->key)) {
107107 +                       if (ret < nr_wake) {
107108 +                               futex_mark_wake(aux, b1, &wake_q);
107109 +                               ret++;
107110 +                               continue;
107111 +                       }
107113 +                       if (ret >= nr_wake + nr_requeue)
107114 +                               break;
107116 +                       aux->key.pointer = w2.key.pointer;
107117 +                       aux->key.index = w2.key.index;
107118 +                       aux->key.offset = w2.key.offset;
107120 +                       if (b1 != b2) {
107121 +                               list_del_init(&aux->list);
107122 +                               bucket_dec_waiters(b1);
107124 +                               list_add_tail(&aux->list, &b2->list);
107125 +                               bucket_inc_waiters(b2);
107126 +                       }
107127 +                       ret++;
107128 +               }
107129 +       }
107131 +       futex_double_unlock(b1, b2);
107132 +       wake_up_q(&wake_q);
107133 +       bucket_dec_waiters(b2);
107135 +       return ret;
107138 +#ifdef CONFIG_COMPAT
107139 +static int compat_futex_parse_requeue(struct futex_requeue *rq,
107140 +                                     struct compat_futex_requeue __user *uaddr,
107141 +                                     bool *shared)
107143 +       struct compat_futex_requeue tmp;
107145 +       if (copy_from_user(&tmp, uaddr, sizeof(tmp)))
107146 +               return -EFAULT;
107148 +       if (tmp.flags & ~FUTEXV_WAITER_MASK ||
107149 +           (tmp.flags & FUTEX_SIZE_MASK) != FUTEX_32)
107150 +               return -EINVAL;
107152 +       *shared = (tmp.flags & FUTEX_SHARED_FLAG) ? true : false;
107154 +       rq->uaddr = compat_ptr(tmp.uaddr);
107155 +       rq->flags = tmp.flags;
107157 +       return 0;
107160 +COMPAT_SYSCALL_DEFINE6(futex_requeue, struct compat_futex_requeue __user *, uaddr1,
107161 +                      struct compat_futex_requeue __user *, uaddr2,
107162 +                      unsigned int, nr_wake, unsigned int, nr_requeue,
107163 +                      unsigned int, cmpval, unsigned int, flags)
107165 +       struct futex_requeue rq1, rq2;
107166 +       bool shared1, shared2;
107167 +       int ret;
107169 +       if (flags)
107170 +               return -EINVAL;
107172 +       ret = compat_futex_parse_requeue(&rq1, uaddr1, &shared1);
107173 +       if (ret)
107174 +               return ret;
107176 +       ret = compat_futex_parse_requeue(&rq2, uaddr2, &shared2);
107177 +       if (ret)
107178 +               return ret;
107180 +       return __futex_requeue(rq1, rq2, nr_wake, nr_requeue, cmpval, shared1, shared2);
107182 +#endif
107185 + * futex_parse_requeue - Copy a user struct futex_requeue and check its flags
107186 + * @rq:    Kernel struct
107187 + * @uaddr: Address of user struct
107188 + * @shared: Out parameter, defines if this is a shared futex
107190 + * Return: 0 on success, error code otherwise
107191 + */
107192 +static int futex_parse_requeue(struct futex_requeue *rq,
107193 +                              struct futex_requeue __user *uaddr, bool *shared)
107195 +       if (copy_from_user(rq, uaddr, sizeof(*rq)))
107196 +               return -EFAULT;
107198 +       if (rq->flags & ~FUTEXV_WAITER_MASK ||
107199 +           (rq->flags & FUTEX_SIZE_MASK) != FUTEX_32)
107200 +               return -EINVAL;
107202 +       *shared = (rq->flags & FUTEX_SHARED_FLAG) ? true : false;
107204 +       return 0;
107208 + * sys_futex_requeue - Wake futexes at uaddr1 and requeue from uaddr1 to uaddr2
107209 + * @uaddr1:    Address of futexes to be woken/dequeued
107210 + * @uaddr2:    Address for the futexes to be enqueued
107211 + * @nr_wake:   Number of futexes waiting in uaddr1 to be woken up
107212 + * @nr_requeue: Number of futexes to be requeued from uaddr1 to uaddr2
107213 + * @cmpval:    Expected value at uaddr1
107214 + * @flags:     Reserved flags arg for requeue operation expansion. Must be 0.
107216 + * If (*uaddr1->uaddr == cmpval), wake up to nr_wake waiters at
107217 + * uaddr1->uaddr, then remove up to nr_requeue waiters from uaddr1->uaddr
107218 + * and add them to the uaddr2->uaddr list. Each uaddr has its own set of
107219 + * flags, which must be defined in struct futex_requeue (such as size, shared, NUMA).
107221 + * Returns the number of woken futexes plus the number of requeued ones on
107222 + * success, error code otherwise.
107223 + */
107224 +SYSCALL_DEFINE6(futex_requeue, struct futex_requeue __user *, uaddr1,
107225 +               struct futex_requeue __user *, uaddr2,
107226 +               unsigned int, nr_wake, unsigned int, nr_requeue,
107227 +               unsigned int, cmpval, unsigned int, flags)
107229 +       struct futex_requeue rq1, rq2;
107230 +       bool shared1, shared2;
107231 +       int ret;
107233 +       if (flags)
107234 +               return -EINVAL;
107236 +#ifdef CONFIG_X86_X32_ABI
107237 +       if (in_x32_syscall()) {
107238 +               ret = compat_futex_parse_requeue(&rq1, (struct compat_futex_requeue *)uaddr1,
107239 +                                                &shared1);
107240 +               if (ret)
107241 +                       return ret;
107243 +               ret = compat_futex_parse_requeue(&rq2, (struct compat_futex_requeue *)uaddr2,
107244 +                                                &shared2);
107245 +               if (ret)
107246 +                       return ret;
107247 +       } else
107248 +#endif
107249 +       {
107250 +               ret = futex_parse_requeue(&rq1, uaddr1, &shared1);
107251 +               if (ret)
107252 +                       return ret;
107254 +               ret = futex_parse_requeue(&rq2, uaddr2, &shared2);
107255 +               if (ret)
107256 +                       return ret;
107257 +       }
107259 +       return __futex_requeue(rq1, rq2, nr_wake, nr_requeue, cmpval, shared1, shared2);
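
In userspace this would look roughly like the following (struct futex_requeue layout as defined by this patch's uapi; __NR_futex_requeue and the use of INT_MAX from <limits.h> are assumptions of the sketch):

    struct futex_requeue rq1 = { .uaddr = &word_a, .flags = FUTEX_32 };
    struct futex_requeue rq2 = { .uaddr = &word_b, .flags = FUTEX_32 };

    /* If word_a == 0: wake one waiter of word_a, move the rest to word_b. */
    syscall(__NR_futex_requeue, &rq1, &rq2, 1, INT_MAX, 0, 0);
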
107262 +static int __init futex2_init(void)
107264 +       int i;
107265 +       unsigned int futex_shift;
107267 +#if CONFIG_BASE_SMALL
107268 +       futex2_hashsize = 16;
107269 +#else
107270 +       futex2_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
107271 +#endif
107273 +       futex_table = alloc_large_system_hash("futex2", sizeof(struct futex_bucket),
107274 +                                             futex2_hashsize, 0,
107275 +                                             futex2_hashsize < 256 ? HASH_SMALL : 0,
107276 +                                             &futex_shift, NULL,
107277 +                                             futex2_hashsize, futex2_hashsize);
107278 +       futex2_hashsize = 1UL << futex_shift;
107280 +       BUG_ON(!is_power_of_2(futex2_hashsize));
107282 +       for (i = 0; i < futex2_hashsize; i++) {
107283 +               INIT_LIST_HEAD(&futex_table[i].list);
107284 +               spin_lock_init(&futex_table[i].lock);
107285 +               atomic_set(&futex_table[i].waiters, 0);
107286 +       }
107288 +       return 0;
107290 +core_initcall(futex2_init);
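
For scale: on a machine with 8 possible CPUs this requests roundup_pow_of_two(256 * 8) = 2048 buckets, and the power-of-two guarantee (asserted by the BUG_ON above) is what lets futex_get_bucket() reduce the jhash with the cheap mask hash_key & (futex2_hashsize - 1) instead of a modulo.
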
107291 diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
107292 index 651a4ad6d711..8e586858bcf4 100644
107293 --- a/kernel/irq/matrix.c
107294 +++ b/kernel/irq/matrix.c
107295 @@ -423,7 +423,9 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
107296         if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
107297                 return;
107299 -       clear_bit(bit, cm->alloc_map);
107300 +       if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
107301 +               return;
107303         cm->allocated--;
107304         if(managed)
107305                 cm->managed_allocated--;
107306 diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
107307 index 3bf98db9c702..23e7acb5c667 100644
107308 --- a/kernel/kcsan/core.c
107309 +++ b/kernel/kcsan/core.c
107310 @@ -639,8 +639,6 @@ void __init kcsan_init(void)
107312         BUG_ON(!in_task());
107314 -       kcsan_debugfs_init();
107316         for_each_possible_cpu(cpu)
107317                 per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
107319 diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
107320 index 3c8093a371b1..62a52be8f6ba 100644
107321 --- a/kernel/kcsan/debugfs.c
107322 +++ b/kernel/kcsan/debugfs.c
107323 @@ -261,7 +261,10 @@ static const struct file_operations debugfs_ops =
107324         .release = single_release
107327 -void __init kcsan_debugfs_init(void)
107328 +static int __init kcsan_debugfs_init(void)
107330         debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
107331 +       return 0;
107334 +late_initcall(kcsan_debugfs_init);
107335 diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
107336 index 8d4bf3431b3c..87ccdb3b051f 100644
107337 --- a/kernel/kcsan/kcsan.h
107338 +++ b/kernel/kcsan/kcsan.h
107339 @@ -30,11 +30,6 @@ extern bool kcsan_enabled;
107340  void kcsan_save_irqtrace(struct task_struct *task);
107341  void kcsan_restore_irqtrace(struct task_struct *task);
107344 - * Initialize debugfs file.
107345 - */
107346 -void kcsan_debugfs_init(void);
107349   * Statistics counters displayed via debugfs; should only be modified in
107350   * slow-paths.
107351 diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
107352 index 5c3447cf7ad5..33400ff051a8 100644
107353 --- a/kernel/kexec_file.c
107354 +++ b/kernel/kexec_file.c
107355 @@ -740,8 +740,10 @@ static int kexec_calculate_store_digests(struct kimage *image)
107357         sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
107358         sha_regions = vzalloc(sha_region_sz);
107359 -       if (!sha_regions)
107360 +       if (!sha_regions) {
107361 +               ret = -ENOMEM;
107362                 goto out_free_desc;
107363 +       }
107365         desc->tfm   = tfm;
107367 diff --git a/kernel/kthread.c b/kernel/kthread.c
107368 index 1578973c5740..3b8dfbc24a22 100644
107369 --- a/kernel/kthread.c
107370 +++ b/kernel/kthread.c
107371 @@ -84,6 +84,25 @@ static inline struct kthread *to_kthread(struct task_struct *k)
107372         return (__force void *)k->set_child_tid;
107376 + * Variant of to_kthread() that doesn't assume @p is a kthread.
107378 + * Per construction; when:
107380 + *   (p->flags & PF_KTHREAD) && p->set_child_tid
107382 + * the task is both a kthread and struct kthread is persistent. However
107383 + * PF_KTHREAD on its own is not, since kernel_thread() can exec() (see umh.c and
107384 + * begin_new_exec()).
107385 + */
107386 +static inline struct kthread *__to_kthread(struct task_struct *p)
107388 +       void *kthread = (__force void *)p->set_child_tid;
107389 +       if (kthread && !(p->flags & PF_KTHREAD))
107390 +               kthread = NULL;
107391 +       return kthread;
107394  void free_kthread_struct(struct task_struct *k)
107396         struct kthread *kthread;
107397 @@ -168,8 +187,9 @@ EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
107398   */
107399  void *kthread_func(struct task_struct *task)
107401 -       if (task->flags & PF_KTHREAD)
107402 -               return to_kthread(task)->threadfn;
107403 +       struct kthread *kthread = __to_kthread(task);
107404 +       if (kthread)
107405 +               return kthread->threadfn;
107406         return NULL;
107408  EXPORT_SYMBOL_GPL(kthread_func);
107409 @@ -199,10 +219,11 @@ EXPORT_SYMBOL_GPL(kthread_data);
107410   */
107411  void *kthread_probe_data(struct task_struct *task)
107413 -       struct kthread *kthread = to_kthread(task);
107414 +       struct kthread *kthread = __to_kthread(task);
107415         void *data = NULL;
107417 -       copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
107418 +       if (kthread)
107419 +               copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
107420         return data;
107423 @@ -514,9 +535,9 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
107424         set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
107427 -bool kthread_is_per_cpu(struct task_struct *k)
107428 +bool kthread_is_per_cpu(struct task_struct *p)
107430 -       struct kthread *kthread = to_kthread(k);
107431 +       struct kthread *kthread = __to_kthread(p);
107432         if (!kthread)
107433                 return false;
107435 @@ -1303,6 +1324,7 @@ void kthread_use_mm(struct mm_struct *mm)
107436         tsk->mm = mm;
107437         membarrier_update_current_mm(mm);
107438         switch_mm_irqs_off(active_mm, mm, tsk);
107439 +       lru_gen_switch_mm(active_mm, mm);
107440         local_irq_enable();
107441         task_unlock(tsk);
107442  #ifdef finish_arch_post_lock_switch
107443 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
107444 index f160f1c97ca1..f39c383c7180 100644
107445 --- a/kernel/locking/lockdep.c
107446 +++ b/kernel/locking/lockdep.c
107447 @@ -5731,7 +5731,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
107449         unsigned long flags;
107451 -       trace_lock_acquired(lock, ip);
107452 +       trace_lock_contended(lock, ip);
107454         if (unlikely(!lock_stat || !lockdep_enabled()))
107455                 return;
107456 @@ -5749,7 +5749,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
107458         unsigned long flags;
107460 -       trace_lock_contended(lock, ip);
107461 +       trace_lock_acquired(lock, ip);
107463         if (unlikely(!lock_stat || !lockdep_enabled()))
107464                 return;
107465 diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
107466 index a7276aaf2abc..db9301591e3f 100644
107467 --- a/kernel/locking/mutex-debug.c
107468 +++ b/kernel/locking/mutex-debug.c
107469 @@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107470         task->blocked_on = waiter;
107473 -void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107474 +void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107475                          struct task_struct *task)
107477         DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
107478 @@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107479         DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
107480         task->blocked_on = NULL;
107482 -       list_del_init(&waiter->list);
107483 +       INIT_LIST_HEAD(&waiter->list);
107484         waiter->task = NULL;
107487 diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
107488 index 1edd3f45a4ec..53e631e1d76d 100644
107489 --- a/kernel/locking/mutex-debug.h
107490 +++ b/kernel/locking/mutex-debug.h
107491 @@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
107492  extern void debug_mutex_add_waiter(struct mutex *lock,
107493                                    struct mutex_waiter *waiter,
107494                                    struct task_struct *task);
107495 -extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107496 +extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107497                                 struct task_struct *task);
107498  extern void debug_mutex_unlock(struct mutex *lock);
107499  extern void debug_mutex_init(struct mutex *lock, const char *name,
107500 diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
107501 index 622ebdfcd083..3899157c13b1 100644
107502 --- a/kernel/locking/mutex.c
107503 +++ b/kernel/locking/mutex.c
107504 @@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
107505   * Add @waiter to a given location in the lock wait_list and set the
107506   * FLAG_WAITERS flag if it's the first waiter.
107507   */
107508 -static void __sched
107509 +static void
107510  __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107511                    struct list_head *list)
107513 @@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
107514                 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
107517 +static void
107518 +__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
107520 +       list_del(&waiter->list);
107521 +       if (likely(list_empty(&lock->wait_list)))
107522 +               __mutex_clear_flag(lock, MUTEX_FLAGS);
107524 +       debug_mutex_remove_waiter(lock, waiter, current);
107528   * Give up ownership to a specific task, when @task = NULL, this is equivalent
107529   * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
107530 @@ -1061,9 +1071,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
107531                         __ww_mutex_check_waiters(lock, ww_ctx);
107532         }
107534 -       mutex_remove_waiter(lock, &waiter, current);
107535 -       if (likely(list_empty(&lock->wait_list)))
107536 -               __mutex_clear_flag(lock, MUTEX_FLAGS);
107537 +       __mutex_remove_waiter(lock, &waiter);
107539         debug_mutex_free_waiter(&waiter);
107541 @@ -1080,7 +1088,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
107543  err:
107544         __set_current_state(TASK_RUNNING);
107545 -       mutex_remove_waiter(lock, &waiter, current);
107546 +       __mutex_remove_waiter(lock, &waiter);
107547  err_early_kill:
107548         spin_unlock(&lock->wait_lock);
107549         debug_mutex_free_waiter(&waiter);
107550 diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
107551 index 1c2287d3fa71..f0c710b1d192 100644
107552 --- a/kernel/locking/mutex.h
107553 +++ b/kernel/locking/mutex.h
107554 @@ -10,12 +10,10 @@
107555   * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
107556   */
107558 -#define mutex_remove_waiter(lock, waiter, task) \
107559 -               __list_del((waiter)->list.prev, (waiter)->list.next)
107561  #define debug_mutex_wake_waiter(lock, waiter)          do { } while (0)
107562  #define debug_mutex_free_waiter(waiter)                        do { } while (0)
107563  #define debug_mutex_add_waiter(lock, waiter, ti)       do { } while (0)
107564 +#define debug_mutex_remove_waiter(lock, waiter, ti)     do { } while (0)
107565  #define debug_mutex_unlock(lock)                       do { } while (0)
107566  #define debug_mutex_init(lock, name, key)              do { } while (0)
107568 diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
107569 index abba5df50006..b9fab2d55b93 100644
107570 --- a/kernel/locking/rwsem.c
107571 +++ b/kernel/locking/rwsem.c
107572 @@ -668,6 +668,7 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
107573         struct task_struct *new, *owner;
107574         unsigned long flags, new_flags;
107575         enum owner_state state;
107576 +       int i = 0;
107578         owner = rwsem_owner_flags(sem, &flags);
107579         state = rwsem_owner_state(owner, flags);
107580 @@ -701,7 +702,8 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
107581                         break;
107582                 }
107584 -               cpu_relax();
107585 +               if (i++ > 1000)
107586 +                       cpu_relax();
107587         }
107588         rcu_read_unlock();
107590 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
107591 index 575a34b88936..77ae2704e979 100644
107592 --- a/kernel/printk/printk.c
107593 +++ b/kernel/printk/printk.c
107594 @@ -1494,6 +1494,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
107595         struct printk_info info;
107596         unsigned int line_count;
107597         struct printk_record r;
107598 +       u64 max_seq;
107599         char *text;
107600         int len = 0;
107601         u64 seq;
107602 @@ -1512,9 +1513,15 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
107603         prb_for_each_info(clear_seq, prb, seq, &info, &line_count)
107604                 len += get_record_print_text_size(&info, line_count, true, time);
107606 +       /*
107607 +        * Set an upper bound for the next loop to avoid subtracting lengths
107608 +        * that were never added.
107609 +        */
107610 +       max_seq = seq;
107612         /* move first record forward until length fits into the buffer */
107613         prb_for_each_info(clear_seq, prb, seq, &info, &line_count) {
107614 -               if (len <= size)
107615 +               if (len <= size || info.seq >= max_seq)
107616                         break;
107617                 len -= get_record_print_text_size(&info, line_count, true, time);
107618         }
107619 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
107620 index 61db50f7ca86..5f50fdd1d855 100644
107621 --- a/kernel/ptrace.c
107622 +++ b/kernel/ptrace.c
107623 @@ -169,6 +169,21 @@ void __ptrace_unlink(struct task_struct *child)
107624         spin_unlock(&child->sighand->siglock);
107627 +static bool looks_like_a_spurious_pid(struct task_struct *task)
107629 +       if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
107630 +               return false;
107632 +       if (task_pid_vnr(task) == task->ptrace_message)
107633 +               return false;
107634 +       /*
107635 +        * The tracee changed its pid but the PTRACE_EVENT_EXEC event
107636 +        * was not wait()'ed; most probably the debugger targets the old
107637 +        * leader, which was destroyed in de_thread().
107638 +        */
107639 +       return true;
107642  /* Ensure that nothing can wake it up, even SIGKILL */
107643  static bool ptrace_freeze_traced(struct task_struct *task)
107645 @@ -179,7 +194,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
107646                 return ret;
107648         spin_lock_irq(&task->sighand->siglock);
107649 -       if (task_is_traced(task) && !__fatal_signal_pending(task)) {
107650 +       if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
107651 +           !__fatal_signal_pending(task)) {
107652                 task->state = __TASK_TRACED;
107653                 ret = true;
107654         }
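The case being filtered out: a non-leader thread exec'ed, de_thread() reaped the old leader, and the execing thread took over its pid, so task_pid_vnr() no longer matches the ptrace_message saved at PTRACE_EVENT_EXEC time. Until the tracer consumes that event via wait(), ptrace_freeze_traced() now declines to treat the thread as stopped, so operations against the stale pid fail instead of acting on the wrong task.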
107655 diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
107656 index 3128b7cf8e1f..abfae9afbdc8 100644
107657 --- a/kernel/rcu/Kconfig
107658 +++ b/kernel/rcu/Kconfig
107659 @@ -189,8 +189,8 @@ config RCU_FAST_NO_HZ
107661  config RCU_BOOST
107662         bool "Enable RCU priority boosting"
107663 -       depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT
107664 -       default y if PREEMPT_RT
107665 +       depends on (RT_MUTEXES && PREEMPT_RCU) || PREEMPT_RT
107666 +       default y
107667         help
107668           This option boosts the priority of preempted RCU readers that
107669           block the current preemptible RCU grace period for too long.
107670 @@ -204,7 +204,7 @@ config RCU_BOOST_DELAY
107671         int "Milliseconds to delay boosting after RCU grace-period start"
107672         range 0 3000
107673         depends on RCU_BOOST
107674 -       default 500
107675 +       default 0
107676         help
107677           This option specifies the time to wait after the beginning of
107678           a given grace period before priority-boosting preempted RCU
107679 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
107680 index da6f5213fb74..7356764e49a0 100644
107681 --- a/kernel/rcu/tree.c
107682 +++ b/kernel/rcu/tree.c
107683 @@ -1077,7 +1077,6 @@ noinstr void rcu_nmi_enter(void)
107684         } else if (!in_nmi()) {
107685                 instrumentation_begin();
107686                 rcu_irq_enter_check_tick();
107687 -               instrumentation_end();
107688         } else  {
107689                 instrumentation_begin();
107690         }
107691 @@ -3464,7 +3463,7 @@ static void fill_page_cache_func(struct work_struct *work)
107693         for (i = 0; i < rcu_min_cached_objs; i++) {
107694                 bnode = (struct kvfree_rcu_bulk_data *)
107695 -                       __get_free_page(GFP_KERNEL | __GFP_NOWARN);
107696 +                       __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
107698                 if (bnode) {
107699                         raw_spin_lock_irqsave(&krcp->lock, flags);
107700 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
107701 index 2d603771c7dc..0796a75b6e0e 100644
107702 --- a/kernel/rcu/tree_plugin.h
107703 +++ b/kernel/rcu/tree_plugin.h
107704 @@ -1646,7 +1646,11 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
107705                 rcu_nocb_unlock_irqrestore(rdp, flags);
107706                 return false;
107707         }
107708 -       del_timer(&rdp->nocb_timer);
107710 +       if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
107711 +               WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
107712 +               del_timer(&rdp->nocb_timer);
107713 +       }
107714         rcu_nocb_unlock_irqrestore(rdp, flags);
107715         raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
107716         if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
107717 @@ -2265,7 +2269,6 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
107718                 return false;
107719         }
107720         ndw = READ_ONCE(rdp->nocb_defer_wakeup);
107721 -       WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
107722         ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
107723         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
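Taken together, these two hunks move the clearing of nocb_defer_wakeup out of do_nocb_deferred_wakeup_common() and into wake_nocb_gp(), where it happens under nocb_lock and only when a deferred wakeup is actually pending, keeping the flag and the nocb_timer cancellation in sync.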
107725 diff --git a/kernel/resource.c b/kernel/resource.c
107726 index 627e61b0c124..16e0c7e8ed24 100644
107727 --- a/kernel/resource.c
107728 +++ b/kernel/resource.c
107729 @@ -457,7 +457,7 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
107731         unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
107733 -       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
107734 +       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
107735                                      arg, func);
107738 @@ -470,7 +470,7 @@ int walk_mem_res(u64 start, u64 end, void *arg,
107740         unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
107742 -       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
107743 +       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
107744                                      arg, func);
107747 diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
107748 index 2067080bb235..573b313efe4c 100644
107749 --- a/kernel/sched/autogroup.c
107750 +++ b/kernel/sched/autogroup.c
107751 @@ -5,7 +5,8 @@
107752  #include <linux/nospec.h>
107753  #include "sched.h"
107755 -unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
107756 +unsigned int __read_mostly sysctl_sched_autogroup_enabled =
107757 +               IS_ENABLED(CONFIG_SCHED_AUTOGROUP_DEFAULT_ENABLED) ? 1 : 0;
107758  static struct autogroup autogroup_default;
107759  static atomic_t autogroup_seq_nr;
107761 @@ -197,11 +198,12 @@ void sched_autogroup_exit(struct signal_struct *sig)
107763  static int __init setup_autogroup(char *str)
107765 -       sysctl_sched_autogroup_enabled = 0;
107767 +       unsigned long enabled;
107768 +       if (!kstrtoul(str, 0, &enabled))
107769 +               sysctl_sched_autogroup_enabled = enabled ? 1 : 0;
107770         return 1;
107772 -__setup("noautogroup", setup_autogroup);
107773 +__setup("autogroup=", setup_autogroup);
107775  #ifdef CONFIG_PROC_FS
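With this change the boot parameter takes a value: autogroup=0 disables task autogrouping and autogroup=1 enables it, while the build-time default comes from CONFIG_SCHED_AUTOGROUP_DEFAULT_ENABLED; the old valueless noautogroup flag is replaced.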
107777 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
107778 index 98191218d891..98bcafbe10d9 100644
107779 --- a/kernel/sched/core.c
107780 +++ b/kernel/sched/core.c
107781 @@ -928,7 +928,7 @@ DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
107783  static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
107785 -       return clamp_value / UCLAMP_BUCKET_DELTA;
107786 +       return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
107789  static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
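The min_t() guard matters when the bucket count does not divide SCHED_CAPACITY_SCALE evenly. Assuming the tree's usual definition UCLAMP_BUCKET_DELTA = DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS): with 20 buckets the delta is DIV_ROUND_CLOSEST(1024, 20) = 51, so a clamp value of 1024 yields 1024 / 51 = 20, one past the last valid index 19; the min_t() pins it back to UCLAMP_BUCKETS - 1.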
107790 @@ -3554,7 +3554,13 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
107791         p->se.sum_exec_runtime          = 0;
107792         p->se.prev_sum_exec_runtime     = 0;
107793         p->se.nr_migrations             = 0;
107795 +#ifdef CONFIG_CACULE_SCHED
107796 +       p->se.cacule_node.vruntime      = 0;
107797 +#else
107798         p->se.vruntime                  = 0;
107799 +#endif
107801         INIT_LIST_HEAD(&p->se.group_node);
107803  #ifdef CONFIG_FAIR_GROUP_SCHED
107804 @@ -3840,6 +3846,10 @@ void wake_up_new_task(struct task_struct *p)
107805         update_rq_clock(rq);
107806         post_init_entity_util_avg(p);
107808 +#ifdef CONFIG_CACULE_SCHED
107809 +       p->se.cacule_node.cacule_start_time = sched_clock();
107810 +#endif
107812         activate_task(rq, p, ENQUEUE_NOCLOCK);
107813         trace_sched_wakeup_new(p);
107814         check_preempt_curr(rq, p, WF_FORK);
107815 @@ -4306,6 +4316,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
107816                  * finish_task_switch()'s mmdrop().
107817                  */
107818                 switch_mm_irqs_off(prev->active_mm, next->mm, next);
107819 +               lru_gen_switch_mm(prev->active_mm, next->mm);
107821                 if (!prev->mm) {                        // from kernel
107822                         /* will mmdrop() in finish_task_switch(). */
107823 @@ -5765,6 +5776,7 @@ int can_nice(const struct task_struct *p, const int nice)
107824         return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
107825                 capable(CAP_SYS_NICE));
107827 +EXPORT_SYMBOL(can_nice);
107829  #ifdef __ARCH_WANT_SYS_NICE
107831 @@ -7597,6 +7609,7 @@ void idle_task_exit(void)
107833         if (mm != &init_mm) {
107834                 switch_mm(mm, &init_mm, current);
107835 +               lru_gen_switch_mm(mm, &init_mm);
107836                 finish_arch_post_lock_switch();
107837         }
107839 @@ -7652,7 +7665,7 @@ static void balance_push(struct rq *rq)
107840          * histerical raisins.
107841          */
107842         if (rq->idle == push_task ||
107843 -           ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
107844 +           kthread_is_per_cpu(push_task) ||
107845             is_migration_disabled(push_task)) {
107847                 /*
107848 @@ -8094,6 +8107,10 @@ void __init sched_init(void)
107849         BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
107850  #endif
107852 +#ifdef CONFIG_CACULE_SCHED
107853 +       printk(KERN_INFO "CacULE CPU scheduler v5.12 by Hamad Al Marri.");
107854 +#endif
107856         wait_bit_init();
107858  #ifdef CONFIG_FAIR_GROUP_SCHED
107859 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
107860 index 486f403a778b..6542bd142365 100644
107861 --- a/kernel/sched/debug.c
107862 +++ b/kernel/sched/debug.c
107863 @@ -8,8 +8,6 @@
107864   */
107865  #include "sched.h"
107867 -static DEFINE_SPINLOCK(sched_debug_lock);
107870   * This allows printing both to /proc/sched_debug and
107871   * to the console
107872 @@ -470,16 +468,37 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
107873  #endif
107875  #ifdef CONFIG_CGROUP_SCHED
107876 +static DEFINE_SPINLOCK(sched_debug_lock);
107877  static char group_path[PATH_MAX];
107879 -static char *task_group_path(struct task_group *tg)
107880 +static void task_group_path(struct task_group *tg, char *path, int plen)
107882 -       if (autogroup_path(tg, group_path, PATH_MAX))
107883 -               return group_path;
107884 +       if (autogroup_path(tg, path, plen))
107885 +               return;
107887 -       cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
107888 +       cgroup_path(tg->css.cgroup, path, plen);
107891 -       return group_path;
107893 + * Only 1 SEQ_printf_task_group_path() caller can use the full length
107894 + * group_path[] for cgroup path. Other simultaneous callers will have
107895 + * to use a shorter stack buffer. A "..." suffix is appended at the end
107896 + * of the stack buffer so that it will show up in case the output length
107897 + * matches the given buffer size to indicate possible path name truncation.
107898 + */
107899 +#define SEQ_printf_task_group_path(m, tg, fmt...)                      \
107900 +{                                                                      \
107901 +       if (spin_trylock(&sched_debug_lock)) {                          \
107902 +               task_group_path(tg, group_path, sizeof(group_path));    \
107903 +               SEQ_printf(m, fmt, group_path);                         \
107904 +               spin_unlock(&sched_debug_lock);                         \
107905 +       } else {                                                        \
107906 +               char buf[128];                                          \
107907 +               char *bufend = buf + sizeof(buf) - 3;                   \
107908 +               task_group_path(tg, buf, bufend - buf);                 \
107909 +               strcpy(bufend - 1, "...");                              \
107910 +               SEQ_printf(m, fmt, buf);                                \
107911 +       }                                                               \
107913  #endif
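In the fallback path the arithmetic guarantees the marker is visible: bufend points 3 bytes before the end of the 128-byte stack buffer, task_group_path() gets bufend - buf = 125 bytes to fill, and strcpy(bufend - 1, "...") writes bytes 124..127, so any path that used the whole window ends in "..." instead of truncating silently.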
107915 @@ -506,7 +525,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
107916         SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
107917  #endif
107918  #ifdef CONFIG_CGROUP_SCHED
107919 -       SEQ_printf(m, " %s", task_group_path(task_group(p)));
107920 +       SEQ_printf_task_group_path(m, task_group(p), " %s")
107921  #endif
107923         SEQ_printf(m, "\n");
107924 @@ -535,15 +554,18 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
107926  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
107928 -       s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
107929 -               spread, rq0_min_vruntime, spread0;
107930 +       s64 MIN_vruntime = -1, max_vruntime = -1,
107931 +#if !defined(CONFIG_CACULE_SCHED)
107932 +       min_vruntime, rq0_min_vruntime, spread0,
107933 +#endif
107934 +       spread;
107935         struct rq *rq = cpu_rq(cpu);
107936         struct sched_entity *last;
107937         unsigned long flags;
107939  #ifdef CONFIG_FAIR_GROUP_SCHED
107940         SEQ_printf(m, "\n");
107941 -       SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
107942 +       SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
107943  #else
107944         SEQ_printf(m, "\n");
107945         SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
107946 @@ -557,21 +579,27 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
107947         last = __pick_last_entity(cfs_rq);
107948         if (last)
107949                 max_vruntime = last->vruntime;
107950 +#if !defined(CONFIG_CACULE_SCHED)
107951         min_vruntime = cfs_rq->min_vruntime;
107952         rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
107953 +#endif
107954         raw_spin_unlock_irqrestore(&rq->lock, flags);
107955         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
107956                         SPLIT_NS(MIN_vruntime));
107957 +#if !defined(CONFIG_CACULE_SCHED)
107958         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
107959                         SPLIT_NS(min_vruntime));
107960 +#endif
107961         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
107962                         SPLIT_NS(max_vruntime));
107963         spread = max_vruntime - MIN_vruntime;
107964         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
107965                         SPLIT_NS(spread));
107966 +#if !defined(CONFIG_CACULE_SCHED)
107967         spread0 = min_vruntime - rq0_min_vruntime;
107968         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
107969                         SPLIT_NS(spread0));
107970 +#endif
107971         SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
107972                         cfs_rq->nr_spread_over);
107973         SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
107974 @@ -614,7 +642,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
107976  #ifdef CONFIG_RT_GROUP_SCHED
107977         SEQ_printf(m, "\n");
107978 -       SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
107979 +       SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
107980  #else
107981         SEQ_printf(m, "\n");
107982         SEQ_printf(m, "rt_rq[%d]:\n", cpu);
107983 @@ -666,7 +694,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
107984  static void print_cpu(struct seq_file *m, int cpu)
107986         struct rq *rq = cpu_rq(cpu);
107987 -       unsigned long flags;
107989  #ifdef CONFIG_X86
107990         {
107991 @@ -717,13 +744,11 @@ do {                                                                      \
107992         }
107993  #undef P
107995 -       spin_lock_irqsave(&sched_debug_lock, flags);
107996         print_cfs_stats(m, cpu);
107997         print_rt_stats(m, cpu);
107998         print_dl_stats(m, cpu);
108000         print_rq(m, rq, cpu);
108001 -       spin_unlock_irqrestore(&sched_debug_lock, flags);
108002         SEQ_printf(m, "\n");
108005 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
108006 index 794c2cb945f8..39bad9ca2f49 100644
108007 --- a/kernel/sched/fair.c
108008 +++ b/kernel/sched/fair.c
108009 @@ -19,9 +19,18 @@
108010   *
108011   *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
108012   *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
108014 + *  CacULE enhancements: a CPU scheduler based on
108015 + *  Interactivity Score.
108016 + *  (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com>
108017   */
108018  #include "sched.h"
108020 +#ifdef CONFIG_CACULE_SCHED
108021 +/* XanMod's default CacULE latency: 2ms * (1 + ilog(ncpus)) */
108022 +unsigned int sysctl_sched_latency                      = 2000000ULL;
108023 +static unsigned int normalized_sysctl_sched_latency    = 2000000ULL;
108024 +#else
108026   * Targeted preemption latency for CPU-bound tasks:
108027   *
108028 @@ -37,6 +46,7 @@
108029   */
108030  unsigned int sysctl_sched_latency                      = 6000000ULL;
108031  static unsigned int normalized_sysctl_sched_latency    = 6000000ULL;
108032 +#endif
108035   * The initial- and re-scaling of tunables is configurable
108036 @@ -51,6 +61,11 @@ static unsigned int normalized_sysctl_sched_latency  = 6000000ULL;
108037   */
108038  enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
108040 +#ifdef CONFIG_CACULE_SCHED
108041 +/* XanMod's default CacULE granularity: 0.25 ms * (1 + ilog(ncpus)) */
108042 +unsigned int sysctl_sched_min_granularity                      = 250000ULL;
108043 +static unsigned int normalized_sysctl_sched_min_granularity    = 250000ULL;
108044 +#else
108046   * Minimal preemption granularity for CPU-bound tasks:
108047   *
108048 @@ -58,6 +73,7 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
108049   */
108050  unsigned int sysctl_sched_min_granularity                      = 750000ULL;
108051  static unsigned int normalized_sysctl_sched_min_granularity    = 750000ULL;
108052 +#endif
108055   * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
108056 @@ -113,6 +129,11 @@ int __weak arch_asym_cpu_priority(int cpu)
108057   */
108058  #define fits_capacity(cap, max)        ((cap) * 1280 < (max) * 1024)
108060 +#endif
108061 +#ifdef CONFIG_CACULE_SCHED
108062 +int __read_mostly cacule_max_lifetime                  = 22000; // in ms
108063 +int __read_mostly interactivity_factor                 = 32768;
108064 +unsigned int __read_mostly interactivity_threshold     = 1000;
108065  #endif
108067  #ifdef CONFIG_CFS_BANDWIDTH
108068 @@ -253,6 +274,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
108070  const struct sched_class fair_sched_class;
108073 +#ifdef CONFIG_CACULE_SCHED
108074 +static inline struct sched_entity *se_of(struct cacule_node *cn)
108076 +       return container_of(cn, struct sched_entity, cacule_node);
108078 +#endif
108080  /**************************************************************
108081   * CFS operations on generic schedulable entities:
108082   */
108083 @@ -512,7 +541,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
108084  /**************************************************************
108085   * Scheduling class tree data structure manipulation methods:
108086   */
108088 +#if !defined(CONFIG_CACULE_SCHED)
108089  static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
108091         s64 delta = (s64)(vruntime - max_vruntime);
108092 @@ -575,7 +604,169 @@ static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
108094         return entity_before(__node_2_se(a), __node_2_se(b));
108096 +#endif /* CONFIG_CACULE_SCHED */
108098 +#ifdef CONFIG_CACULE_SCHED
108099 +static unsigned int
108100 +calc_interactivity(u64 now, struct cacule_node *se)
108102 +       u64 l_se, vr_se, sleep_se = 1ULL, u64_factor;
108103 +       unsigned int score_se;
108105 +       /*
108106 +        * in case vruntime == 0, the logical OR with 1 makes sure
108107 +        * that the least significant bit is set
108108 +        */
108109 +       l_se            = now - se->cacule_start_time;
108110 +       vr_se           = se->vruntime          | 1;
108111 +       u64_factor      = interactivity_factor;
108113 +       /* safety check */
108114 +       if (likely(l_se > vr_se))
108115 +               sleep_se = (l_se - vr_se) | 1;
108117 +       if (sleep_se >= vr_se)
108118 +               score_se = u64_factor / (sleep_se / vr_se);
108119 +       else
108120 +               score_se = (u64_factor << 1) - (u64_factor / (vr_se / sleep_se));
108122 +       return score_se;
108125 +static inline int is_interactive(struct cacule_node *cn)
108127 +       if (se_of(cn)->vruntime == 0)
108128 +               return 0;
108130 +       return calc_interactivity(sched_clock(), cn) < interactivity_threshold;
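/*
 * Worked example with the defaults above (interactivity_factor = 32768;
 * values are in ns, but only the ratio matters): a task alive 10ms that
 * has run 1ms gets sleep_se = 9ms >= vr_se, so score = 32768 / 9 = 3640;
 * a hog alive 10ms that has run 9ms gets sleep_se = 1ms < vr_se, so
 * score = 65536 - 32768 / 9 = 61896. Lower is more interactive, and
 * clearing interactivity_threshold = 1000 takes sleep roughly 33x the
 * runtime (32768 / 33 = 992).
 */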
108133 +static inline int
108134 +entity_before_cached(u64 now, unsigned int score_curr, struct cacule_node *se)
108136 +       unsigned int score_se;
108137 +       int diff;
108139 +       score_se        = calc_interactivity(now, se);
108140 +       diff            = score_se - score_curr;
108142 +       if (diff <= 0)
108143 +               return 1;
108145 +       return -1;
108149 + * Does se have a lower interactivity score (i.e. is it more interactive) than curr?
108150 + * If yes, return 1; otherwise return -1.
108151 + * se is before curr if se has the lower interactivity score:
108152 + * the lower the score, the more interactive the task.
108153 + */
108154 +static inline int
108155 +entity_before(u64 now, struct cacule_node *curr, struct cacule_node *se)
108157 +       unsigned int score_curr, score_se;
108158 +       int diff;
108160 +       score_curr      = calc_interactivity(now, curr);
108161 +       score_se        = calc_interactivity(now, se);
108163 +       diff            = score_se - score_curr;
108165 +       if (diff < 0)
108166 +               return 1;
108168 +       return -1;
108172 + * Enqueue an entity
108173 + */
108174 +static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
108176 +       struct cacule_node *se = &(_se->cacule_node);
108177 +       struct cacule_node *iter, *next = NULL;
108178 +       u64 now = sched_clock();
108179 +       unsigned int score_se = calc_interactivity(now, se);
108181 +       se->next = NULL;
108182 +       se->prev = NULL;
108184 +       if (likely(cfs_rq->head)) {
108186 +               // start from tail
108187 +               iter = cfs_rq->tail;
108189 +               // walk from the tail while se is more interactive (lower score) than iter
108190 +               while (iter && entity_before_cached(now, score_se, iter) == -1) {
108191 +                       next = iter;
108192 +                       iter = iter->prev;
108193 +               }
108195 +               // se in tail position
108196 +               if (iter == cfs_rq->tail) {
108197 +                       cfs_rq->tail->next      = se;
108198 +                       se->prev                = cfs_rq->tail;
108200 +                       cfs_rq->tail            = se;
108201 +               }
108202 +               // else if neither head nor tail, insert se after iter
108203 +               else if (iter) {
108204 +                       se->next        = next;
108205 +                       se->prev        = iter;
108207 +                       iter->next      = se;
108208 +                       next->prev      = se;
108209 +               }
108210 +               // insert se at head
108211 +               else {
108212 +                       se->next                = cfs_rq->head;
108213 +                       cfs_rq->head->prev      = se;
108215 +                       // lastly reset the head
108216 +                       cfs_rq->head            = se;
108217 +               }
108218 +       } else {
108219 +               // if empty rq
108220 +               cfs_rq->head = se;
108221 +               cfs_rq->tail = se;
108222 +       }
108225 +static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
108227 +       struct cacule_node *se = &(_se->cacule_node);
108229 +       // if only one se in rq
108230 +       if (cfs_rq->head == cfs_rq->tail) {
108231 +               cfs_rq->head = NULL;
108232 +               cfs_rq->tail = NULL;
108234 +       } else if (se == cfs_rq->head) {
108235 +               // if it is the head
108236 +               cfs_rq->head            = cfs_rq->head->next;
108237 +               cfs_rq->head->prev      = NULL;
108238 +       } else if (se == cfs_rq->tail) {
108239 +               // if it is the tail
108240 +               cfs_rq->tail            = cfs_rq->tail->prev;
108241 +               cfs_rq->tail->next      = NULL;
108242 +       } else {
108243 +               // if in the middle
108244 +               struct cacule_node *prev = se->prev;
108245 +               struct cacule_node *next = se->next;
108247 +               prev->next = next;
108249 +               if (next)
108250 +                       next->prev = prev;
108251 +       }
108254 +struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
108256 +       return se_of(cfs_rq->head);
108258 +#else
108260   * Enqueue an entity into the rb-tree:
108261   */
108262 @@ -608,16 +799,24 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
108264         return __node_2_se(next);
108266 +#endif /* CONFIG_CACULE_SCHED */
108268  #ifdef CONFIG_SCHED_DEBUG
108269  struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
108271 +#ifdef CONFIG_CACULE_SCHED
108272 +       if (!cfs_rq->tail)
108273 +               return NULL;
108275 +       return se_of(cfs_rq->tail);
108276 +#else
108277         struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
108279         if (!last)
108280                 return NULL;
108282         return __node_2_se(last);
108283 +#endif /* CONFIG_CACULE_SCHED */
108286  /**************************************************************
108287 @@ -682,7 +881,13 @@ static u64 __sched_period(unsigned long nr_running)
108288   */
108289  static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
108291 -       u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
108292 +       unsigned int nr_running = cfs_rq->nr_running;
108293 +       u64 slice;
108295 +       if (sched_feat(ALT_PERIOD))
108296 +               nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
108298 +       slice = __sched_period(nr_running + !se->on_rq);
108300         for_each_sched_entity(se) {
108301                 struct load_weight *load;
108302 @@ -699,9 +904,14 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
108303                 }
108304                 slice = __calc_delta(slice, se->load.weight, load);
108305         }
108307 +       if (sched_feat(BASE_SLICE))
108308 +               slice = max(slice, (u64)sysctl_sched_min_granularity);
108310         return slice;
108313 +#if !defined(CONFIG_CACULE_SCHED)
108315   * We calculate the vruntime slice of a to-be-inserted task.
108316   *
108317 @@ -711,6 +921,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
108319         return calc_delta_fair(sched_slice(cfs_rq, se), se);
108321 +#endif /* CONFIG_CACULE_SCHED */
108323  #include "pelt.h"
108324  #ifdef CONFIG_SMP
108325 @@ -818,14 +1029,51 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
108327  #endif /* CONFIG_SMP */
108329 +#ifdef CONFIG_CACULE_SCHED
108330 +static void normalize_lifetime(u64 now, struct sched_entity *se)
108332 +       struct cacule_node *cn = &se->cacule_node;
108333 +       u64 max_life_ns, life_time;
108334 +       s64 diff;
108336 +       /*
108337 +        * A left shift by 20 bits approximates multiplying by 1,000,000
108338 +        * (ms to ns); we don't need precise life times.
108339 +        * E.g. for 30s, the shifted value is 31.457s.
108340 +        */
108341 +       max_life_ns     = ((u64) cacule_max_lifetime) << 20;
108342 +       life_time       = now - cn->cacule_start_time;
108343 +       diff            = life_time - max_life_ns;
108345 +       if (diff > 0) {
108346 +               // multiply life_time by 1024 for more precision
108347 +               u64 old_hrrn_x  = (life_time << 7) / ((cn->vruntime >> 3) | 1);
108349 +               // reset life to half max_life (~11.5s with the 22000ms default)
108350 +               cn->cacule_start_time = now - (max_life_ns >> 1);
108352 +               // avoid division by zero
108353 +               if (old_hrrn_x == 0) old_hrrn_x = 1;
108355 +               // reset vruntime based on old hrrn ratio
108356 +               cn->vruntime = (max_life_ns << 9) / old_hrrn_x;
108357 +       }
108359 +#endif /* CONFIG_CACULE_SCHED */
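/*
 * The rescaling preserves the runtime-to-lifetime ratio across the
 * reset: old_hrrn_x ~= 1024 * life_time / vruntime, so the new
 * vruntime = (max_life_ns << 9) / old_hrrn_x
 *          ~= (max_life_ns / 2) * vruntime / life_time,
 * i.e. the old vruntime/life_time ratio applied to the new half
 * lifetime. With the 22000ms default, max_life_ns = 22000 << 20
 * ~= 23.1s, so a long-running task restarts its history from ~11.5s.
 */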
108362   * Update the current task's runtime statistics.
108363   */
108364  static void update_curr(struct cfs_rq *cfs_rq)
108366         struct sched_entity *curr = cfs_rq->curr;
108367 +#ifdef CONFIG_CACULE_SCHED
108368 +       u64 now = sched_clock();
108369 +       u64 delta_exec, delta_fair;
108370 +#else
108371         u64 now = rq_clock_task(rq_of(cfs_rq));
108372         u64 delta_exec;
108373 +#endif
108375         if (unlikely(!curr))
108376                 return;
108377 @@ -842,8 +1090,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
108378         curr->sum_exec_runtime += delta_exec;
108379         schedstat_add(cfs_rq->exec_clock, delta_exec);
108381 +#ifdef CONFIG_CACULE_SCHED
108382 +       delta_fair = calc_delta_fair(delta_exec, curr);
108383 +       curr->vruntime += delta_fair;
108384 +       curr->cacule_node.vruntime += delta_fair;
108385 +       normalize_lifetime(now, curr);
108386 +#else
108387         curr->vruntime += calc_delta_fair(delta_exec, curr);
108388         update_min_vruntime(cfs_rq);
108389 +#endif
108391         if (entity_is_task(curr)) {
108392                 struct task_struct *curtask = task_of(curr);
108393 @@ -1011,7 +1266,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
108394  static inline void
108395  update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
108398         if (!schedstat_enabled())
108399                 return;
108401 @@ -1043,7 +1297,11 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
108402         /*
108403          * We are starting a new run period:
108404          */
108405 +#ifdef CONFIG_CACULE_SCHED
108406 +       se->exec_start = sched_clock();
108407 +#else
108408         se->exec_start = rq_clock_task(rq_of(cfs_rq));
108409 +#endif
108412  /**************************************************
108413 @@ -3941,6 +4199,8 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
108414         trace_sched_util_est_cfs_tp(cfs_rq);
108417 +#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
108420   * Check if a (signed) value is within a specified (unsigned) margin,
108421   * based on the observation that:
108422 @@ -3958,7 +4218,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
108423                                    struct task_struct *p,
108424                                    bool task_sleep)
108426 -       long last_ewma_diff;
108427 +       long last_ewma_diff, last_enqueued_diff;
108428         struct util_est ue;
108430         if (!sched_feat(UTIL_EST))
108431 @@ -3979,6 +4239,8 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
108432         if (ue.enqueued & UTIL_AVG_UNCHANGED)
108433                 return;
108435 +       last_enqueued_diff = ue.enqueued;
108437         /*
108438          * Reset EWMA on utilization increases, the moving average is used only
108439          * to smooth utilization decreases.
108440 @@ -3992,12 +4254,17 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
108441         }
108443         /*
108444 -        * Skip update of task's estimated utilization when its EWMA is
108445 +        * Skip update of task's estimated utilization when its members are
108446          * already ~1% close to its last activation value.
108447          */
108448         last_ewma_diff = ue.enqueued - ue.ewma;
108449 -       if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
108450 +       last_enqueued_diff -= ue.enqueued;
108451 +       if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
108452 +               if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
108453 +                       goto done;
108455                 return;
108456 +       }
108458         /*
108459          * To avoid overestimation of actual task utilization, skip updates if
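With the usual SCHED_CAPACITY_SCALE of 1024, UTIL_EST_MARGIN works out to 1024 / 100 = 10, about 1% of capacity. The reworked test skips the update entirely only when both the EWMA and the previously enqueued value sit within that margin of the new sample; when the enqueued value has drifted further, the goto done path still writes the new estimate back while leaving the EWMA untouched.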
108460 @@ -4097,7 +4364,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
108462  static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
108464 -#ifdef CONFIG_SCHED_DEBUG
108465 +#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED)
108466         s64 d = se->vruntime - cfs_rq->min_vruntime;
108468         if (d < 0)
108469 @@ -4108,6 +4375,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
108470  #endif
108473 +#if !defined(CONFIG_CACULE_SCHED)
108474  static void
108475  place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
108477 @@ -4139,6 +4407,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
108478         /* ensure we never gain time by being placed backwards. */
108479         se->vruntime = max_vruntime(se->vruntime, vruntime);
108481 +#endif /* CONFIG_CACULE_SCHED */
108483  static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
108485 @@ -4197,18 +4466,23 @@ static inline bool cfs_bandwidth_used(void);
108486  static void
108487  enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
108489 +#if !defined(CONFIG_CACULE_SCHED)
108490         bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
108491 +#endif
108492         bool curr = cfs_rq->curr == se;
108494 +#if !defined(CONFIG_CACULE_SCHED)
108495         /*
108496          * If we're the current task, we must renormalise before calling
108497          * update_curr().
108498          */
108499         if (renorm && curr)
108500                 se->vruntime += cfs_rq->min_vruntime;
108501 +#endif
108503         update_curr(cfs_rq);
108505 +#if !defined(CONFIG_CACULE_SCHED)
108506         /*
108507          * Otherwise, renormalise after, such that we're placed at the current
108508          * moment in time, instead of some random moment in the past. Being
108509 @@ -4217,6 +4491,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
108510          */
108511         if (renorm && !curr)
108512                 se->vruntime += cfs_rq->min_vruntime;
108513 +#endif
108515         /*
108516          * When enqueuing a sched_entity, we must:
108517 @@ -4231,8 +4506,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
108518         update_cfs_group(se);
108519         account_entity_enqueue(cfs_rq, se);
108521 +#if !defined(CONFIG_CACULE_SCHED)
108522         if (flags & ENQUEUE_WAKEUP)
108523                 place_entity(cfs_rq, se, 0);
108524 +#endif
108526         check_schedstat_required();
108527         update_stats_enqueue(cfs_rq, se, flags);
108528 @@ -4253,6 +4530,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
108529                 check_enqueue_throttle(cfs_rq);
108532 +#if !defined(CONFIG_CACULE_SCHED)
108533  static void __clear_buddies_last(struct sched_entity *se)
108535         for_each_sched_entity(se) {
108536 @@ -4297,6 +4575,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
108537         if (cfs_rq->skip == se)
108538                 __clear_buddies_skip(se);
108540 +#endif /* !CONFIG_CACULE_SCHED */
108542  static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
108544 @@ -4321,13 +4600,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
108546         update_stats_dequeue(cfs_rq, se, flags);
108548 +#if !defined(CONFIG_CACULE_SCHED)
108549         clear_buddies(cfs_rq, se);
108550 +#endif
108552         if (se != cfs_rq->curr)
108553                 __dequeue_entity(cfs_rq, se);
108554         se->on_rq = 0;
108555         account_entity_dequeue(cfs_rq, se);
108557 +#if !defined(CONFIG_CACULE_SCHED)
108558         /*
108559          * Normalize after update_curr(); which will also have moved
108560          * min_vruntime if @se is the one holding it back. But before doing
108561 @@ -4336,12 +4618,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
108562          */
108563         if (!(flags & DEQUEUE_SLEEP))
108564                 se->vruntime -= cfs_rq->min_vruntime;
108565 +#endif
108567         /* return excess runtime on last dequeue */
108568         return_cfs_rq_runtime(cfs_rq);
108570         update_cfs_group(se);
108572 +#if !defined(CONFIG_CACULE_SCHED)
108573         /*
108574          * Now advance min_vruntime if @se was the entity holding it back,
108575          * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
108576 @@ -4350,8 +4634,21 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
108577          */
108578         if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
108579                 update_min_vruntime(cfs_rq);
108580 +#endif
108583 +#ifdef CONFIG_CACULE_SCHED
108585 + * Preempt the current task with a newly woken task if needed:
108586 + */
108587 +static void
108588 +check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
108590 +       // resched if the head is more interactive (lower score) than curr
108591 +       if (entity_before(sched_clock(), &curr->cacule_node, cfs_rq->head) == 1)
108592 +               resched_curr(rq_of(cfs_rq));
108594 +#else
108596   * Preempt the current task with a newly woken task if needed:
108597   */
108598 @@ -4391,6 +4688,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
108599         if (delta > ideal_runtime)
108600                 resched_curr(rq_of(cfs_rq));
108602 +#endif /* CONFIG_CACULE_SCHED */
108604  static void
108605  set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
108606 @@ -4425,6 +4723,21 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
108607         se->prev_sum_exec_runtime = se->sum_exec_runtime;
108610 +#ifdef CONFIG_CACULE_SCHED
108611 +static struct sched_entity *
108612 +pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
108614 +       struct cacule_node *se = cfs_rq->head;
108616 +       if (unlikely(!se))
108617 +               se = &curr->cacule_node;
108618 +       else if (unlikely(curr
108619 +                       && entity_before(sched_clock(), se, &curr->cacule_node) == 1))
108620 +               se = &curr->cacule_node;
108622 +       return se_of(se);
108624 +#else
108625  static int
108626  wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
108628 @@ -4485,6 +4798,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
108630         return se;
108632 +#endif /* CONFIG_CACULE_SCHED */
108634  static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
108636 @@ -5587,7 +5901,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
108637         hrtick_update(rq);
108640 +#if !defined(CONFIG_CACULE_SCHED)
108641  static void set_next_buddy(struct sched_entity *se);
108642 +#endif
108645   * The dequeue_task method is called before nr_running is
108646 @@ -5619,12 +5935,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
108647                 if (cfs_rq->load.weight) {
108648                         /* Avoid re-evaluating load for this entity: */
108649                         se = parent_entity(se);
108650 +#if !defined(CONFIG_CACULE_SCHED)
108651                         /*
108652                          * Bias pick_next to pick a task from this cfs_rq, as
108653                          * p is sleeping when it is within its sched_slice.
108654                          */
108655                         if (task_sleep && se && !throttled_hierarchy(cfs_rq))
108656                                 set_next_buddy(se);
108657 +#endif
108658                         break;
108659                 }
108660                 flags |= DEQUEUE_SLEEP;
108661 @@ -5740,6 +6058,7 @@ static unsigned long capacity_of(int cpu)
108662         return cpu_rq(cpu)->cpu_capacity;
108665 +#if !defined(CONFIG_CACULE_SCHED)
108666  static void record_wakee(struct task_struct *p)
108668         /*
108669 @@ -5786,6 +6105,7 @@ static int wake_wide(struct task_struct *p)
108670                 return 0;
108671         return 1;
108673 +#endif /* CONFIG_CACULE_SCHED */
108676   * The purpose of wake_affine() is to quickly determine on which CPU we can run
108677 @@ -6098,6 +6418,24 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
108678         return -1;
108682 + * Scan the local SMT mask for idle CPUs.
108683 + */
108684 +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
108686 +       int cpu;
108688 +       for_each_cpu(cpu, cpu_smt_mask(target)) {
108689 +               if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
108690 +                   !cpumask_test_cpu(cpu, sched_domain_span(sd)))
108691 +                       continue;
108692 +               if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
108693 +                       return cpu;
108694 +       }
108696 +       return -1;
108699  #else /* CONFIG_SCHED_SMT */
108701  static inline void set_idle_cores(int cpu, int val)
108702 @@ -6114,6 +6452,11 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
108703         return __select_idle_cpu(core);
108706 +static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
108708 +       return -1;
108711  #endif /* CONFIG_SCHED_SMT */
108714 @@ -6121,11 +6464,10 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
108715   * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
108716   * average idle time for this rq (as found in rq->avg_idle).
108717   */
108718 -static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
108719 +static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
108721         struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
108722         int i, cpu, idle_cpu = -1, nr = INT_MAX;
108723 -       bool smt = test_idle_cores(target, false);
108724         int this = smp_processor_id();
108725         struct sched_domain *this_sd;
108726         u64 time;
108727 @@ -6136,7 +6478,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
108729         cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
108731 -       if (sched_feat(SIS_PROP) && !smt) {
108732 +       if (sched_feat(SIS_PROP) && !has_idle_core) {
108733                 u64 avg_cost, avg_idle, span_avg;
108735                 /*
108736 @@ -6156,7 +6498,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
108737         }
108739         for_each_cpu_wrap(cpu, cpus, target) {
108740 -               if (smt) {
108741 +               if (has_idle_core) {
108742                         i = select_idle_core(p, cpu, cpus, &idle_cpu);
108743                         if ((unsigned int)i < nr_cpumask_bits)
108744                                 return i;
108745 @@ -6170,10 +6512,10 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
108746                 }
108747         }
108749 -       if (smt)
108750 -               set_idle_cores(this, false);
108751 +       if (has_idle_core)
108752 +               set_idle_cores(target, false);
108754 -       if (sched_feat(SIS_PROP) && !smt) {
108755 +       if (sched_feat(SIS_PROP) && !has_idle_core) {
108756                 time = cpu_clock(this) - time;
108757                 update_avg(&this_sd->avg_scan_cost, time);
108758         }
108759 @@ -6228,6 +6570,7 @@ static inline bool asym_fits_capacity(int task_util, int cpu)
108760   */
108761  static int select_idle_sibling(struct task_struct *p, int prev, int target)
108763 +       bool has_idle_core = false;
108764         struct sched_domain *sd;
108765         unsigned long task_util;
108766         int i, recent_used_cpu;
108767 @@ -6307,7 +6650,17 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
108768         if (!sd)
108769                 return target;
108771 -       i = select_idle_cpu(p, sd, target);
108772 +       if (sched_smt_active()) {
108773 +               has_idle_core = test_idle_cores(target, false);
108775 +               if (!has_idle_core && cpus_share_cache(prev, target)) {
108776 +                       i = select_idle_smt(p, sd, prev);
108777 +                       if ((unsigned int)i < nr_cpumask_bits)
108778 +                               return i;
108779 +               }
108780 +       }
108782 +       i = select_idle_cpu(p, sd, has_idle_core, target);
108783         if ((unsigned)i < nr_cpumask_bits)
108784                 return i;
108786 @@ -6455,6 +6808,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
108787         return min_t(unsigned long, util, capacity_orig_of(cpu));
108790 +#if !defined(CONFIG_CACULE_SCHED)
108792   * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
108793   * to @dst_cpu.
108794 @@ -6518,8 +6872,24 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
108795          * its pd list and will not be accounted by compute_energy().
108796          */
108797         for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
108798 -               unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu);
108799 -               struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
108800 +               unsigned long util_freq = cpu_util_next(cpu, p, dst_cpu);
108801 +               unsigned long cpu_util, util_running = util_freq;
108802 +               struct task_struct *tsk = NULL;
108804 +               /*
108805 +                * When @p is placed on @cpu:
108806 +                *
108807 +                * util_running = max(cpu_util, cpu_util_est) +
108808 +                *                max(task_util, _task_util_est)
108809 +                *
108810 +                * while cpu_util_next is: max(cpu_util + task_util,
108811 +                *                             cpu_util_est + _task_util_est)
108812 +                */
108813 +               if (cpu == dst_cpu) {
108814 +                       tsk = p;
108815 +                       util_running =
108816 +                               cpu_util_next(cpu, p, -1) + task_util_est(p);
108817 +               }
108819                 /*
108820                  * Busy time computation: utilization clamping is not
108821 @@ -6527,7 +6897,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
108822                  * is already enough to scale the EM reported power
108823                  * consumption at the (eventually clamped) cpu_capacity.
108824                  */
108825 -               sum_util += effective_cpu_util(cpu, util_cfs, cpu_cap,
108826 +               sum_util += effective_cpu_util(cpu, util_running, cpu_cap,
108827                                                ENERGY_UTIL, NULL);
108829                 /*
108830 @@ -6537,7 +6907,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
108831                  * NOTE: in case RT tasks are running, by default the
108832                  * FREQUENCY_UTIL's utilization can be max OPP.
108833                  */
108834 -               cpu_util = effective_cpu_util(cpu, util_cfs, cpu_cap,
108835 +               cpu_util = effective_cpu_util(cpu, util_freq, cpu_cap,
108836                                               FREQUENCY_UTIL, tsk);
108837                 max_util = max(max_util, cpu_util);
108838         }
108839 @@ -6688,6 +7058,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
108841         return -1;
108843 +#endif /* CONFIG_CACULE_SCHED */
108845 +#ifdef CONFIG_CACULE_SCHED
108846 +static int
108847 +find_least_IS_cpu(struct task_struct *p)
108849 +       struct cfs_rq *cfs_rq;
108850 +       unsigned int max_IS = 0;
108851 +       unsigned int IS, IS_c, IS_h;
108852 +       struct sched_entity *curr_se;
108853 +       struct cacule_node *cn, *head;
108854 +       int cpu_i;
108855 +       int new_cpu = -1;
108857 +       for_each_online_cpu(cpu_i) {
108858 +               if (!cpumask_test_cpu(cpu_i, p->cpus_ptr))
108859 +                       continue;
108861 +               cn = NULL;
108862 +               cfs_rq = &cpu_rq(cpu_i)->cfs;
108864 +               curr_se = cfs_rq->curr;
108865 +               head = cfs_rq->head;
108867 +               if (!curr_se && head)
108868 +                       cn = head;
108869 +               else if (curr_se && !head)
108870 +                       cn = &curr_se->cacule_node;
108871 +               else if (curr_se && head) {
108872 +                       IS_c = calc_interactivity(sched_clock(), &curr_se->cacule_node);
108873 +                       IS_h = calc_interactivity(sched_clock(), head);
108875 +                       IS = IS_c > IS_h ? IS_c : IS_h;
108876 +                       goto compare;
108877 +               }
108879 +               if (!cn)
108880 +                       return cpu_i;
108882 +               IS = calc_interactivity(sched_clock(), cn);
108884 +compare:
108885 +               if (IS > max_IS) {
108886 +                       max_IS = IS;
108887 +                       new_cpu = cpu_i;
108888 +               }
108889 +       }
108891 +       return new_cpu;
108893 +#endif
108896   * select_task_rq_fair: Select target runqueue for the waking task in domains
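Despite its name, find_least_IS_cpu() looks for the CPU whose relevant competition (the running entity and/or the queue head, whichever scores higher) has the maximal interactivity score, i.e. the least interactive competition for the waking interactive task; a CPU whose cfs_rq is completely empty is returned immediately.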
108897 @@ -6712,6 +7133,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
108898         /* SD_flags and WF_flags share the first nibble */
108899         int sd_flag = wake_flags & 0xF;
108901 +#ifdef CONFIG_CACULE_SCHED
108902 +       struct sched_entity *se = &p->se;
108904 +       if (!is_interactive(&se->cacule_node))
108905 +               goto cfs_way;
108907 +       // check first whether the previous cpu
108908 +       // has no queued tasks
108909 +       if (cpumask_test_cpu(prev_cpu, p->cpus_ptr) &&
108910 +           cpu_rq(prev_cpu)->cfs.nr_running == 0)
108911 +               return prev_cpu;
108913 +       new_cpu = find_least_IS_cpu(p);
108915 +       if (new_cpu != -1)
108916 +               return new_cpu;
108918 +       new_cpu = prev_cpu;
108919 +cfs_way:
108920 +#else
108921         if (wake_flags & WF_TTWU) {
108922                 record_wakee(p);
108924 @@ -6724,6 +7165,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
108926                 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
108927         }
108928 +#endif /* CONFIG_CACULE_SCHED */
108930         rcu_read_lock();
108931         for_each_domain(cpu, tmp) {
108932 @@ -6770,6 +7212,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
108933   */
108934  static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
108936 +#if !defined(CONFIG_CACULE_SCHED)
108937         /*
108938          * As blocked tasks retain absolute vruntime the migration needs to
108939          * deal with this by subtracting the old and adding the new
108940 @@ -6795,6 +7238,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
108942                 se->vruntime -= min_vruntime;
108943         }
108944 +#endif /* CONFIG_CACULE_SCHED */
108946         if (p->on_rq == TASK_ON_RQ_MIGRATING) {
108947                 /*
108948 @@ -6840,6 +7284,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
108950  #endif /* CONFIG_SMP */
108952 +#if !defined(CONFIG_CACULE_SCHED)
108953  static unsigned long wakeup_gran(struct sched_entity *se)
108955         unsigned long gran = sysctl_sched_wakeup_granularity;
108956 @@ -6918,6 +7363,7 @@ static void set_skip_buddy(struct sched_entity *se)
108957         for_each_sched_entity(se)
108958                 cfs_rq_of(se)->skip = se;
108960 +#endif /* CONFIG_CACULE_SCHED */
108963   * Preempt the current task with a newly woken task if needed:
108964 @@ -6926,9 +7372,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
108966         struct task_struct *curr = rq->curr;
108967         struct sched_entity *se = &curr->se, *pse = &p->se;
108969 +#if !defined(CONFIG_CACULE_SCHED)
108970         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
108971         int scale = cfs_rq->nr_running >= sched_nr_latency;
108972         int next_buddy_marked = 0;
108973 +#endif /* CONFIG_CACULE_SCHED */
108975         if (unlikely(se == pse))
108976                 return;
108977 @@ -6942,10 +7391,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
108978         if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
108979                 return;
108981 +#if !defined(CONFIG_CACULE_SCHED)
108982         if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
108983                 set_next_buddy(pse);
108984                 next_buddy_marked = 1;
108985         }
108986 +#endif /* CONFIG_CACULE_SCHED */
108988         /*
108989          * We can come here with TIF_NEED_RESCHED already set from new task
108990 @@ -6975,6 +7426,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
108991         find_matching_se(&se, &pse);
108992         update_curr(cfs_rq_of(se));
108993         BUG_ON(!pse);
108995 +#ifdef CONFIG_CACULE_SCHED
108996 +       if (entity_before(sched_clock(), &se->cacule_node, &pse->cacule_node) == 1)
108997 +               goto preempt;
108998 +#else
108999         if (wakeup_preempt_entity(se, pse) == 1) {
109000                 /*
109001                  * Bias pick_next to pick the sched entity that is
109002 @@ -6984,11 +7440,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
109003                         set_next_buddy(pse);
109004                 goto preempt;
109005         }
109006 +#endif /* CONFIG_CACULE_SCHED */
109008         return;
109010  preempt:
109011         resched_curr(rq);
109013 +#if !defined(CONFIG_CACULE_SCHED)
109014         /*
109015          * Only set the backward buddy when the current task is still
109016          * on the rq. This can happen when a wakeup gets interleaved
109017 @@ -7003,6 +7462,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
109019         if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
109020                 set_last_buddy(se);
109021 +#endif /* CONFIG_CACULE_SCHED */
109024  struct task_struct *
109025 @@ -7177,7 +7637,10 @@ static void yield_task_fair(struct rq *rq)
109027         struct task_struct *curr = rq->curr;
109028         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
109030 +#if !defined(CONFIG_CACULE_SCHED)
109031         struct sched_entity *se = &curr->se;
109032 +#endif
109034         /*
109035          * Are we the only task in the tree?
109036 @@ -7185,7 +7648,9 @@ static void yield_task_fair(struct rq *rq)
109037         if (unlikely(rq->nr_running == 1))
109038                 return;
109040 +#if !defined(CONFIG_CACULE_SCHED)
109041         clear_buddies(cfs_rq, se);
109042 +#endif
109044         if (curr->policy != SCHED_BATCH) {
109045                 update_rq_clock(rq);
109046 @@ -7201,7 +7666,9 @@ static void yield_task_fair(struct rq *rq)
109047                 rq_clock_skip_update(rq);
109048         }
109050 +#if !defined(CONFIG_CACULE_SCHED)
109051         set_skip_buddy(se);
109052 +#endif
109055  static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
109056 @@ -7212,8 +7679,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
109057         if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
109058                 return false;
109060 +#if !defined(CONFIG_CACULE_SCHED)
109061         /* Tell the scheduler that we'd really like pse to run next. */
109062         set_next_buddy(se);
109063 +#endif
109065         yield_task_fair(rq);
109067 @@ -7441,6 +7910,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
109068         if (env->sd->flags & SD_SHARE_CPUCAPACITY)
109069                 return 0;
109071 +#if !defined(CONFIG_CACULE_SCHED)
109072         /*
109073          * Buddy candidates are cache hot:
109074          */
109075 @@ -7448,6 +7918,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
109076                         (&p->se == cfs_rq_of(&p->se)->next ||
109077                          &p->se == cfs_rq_of(&p->se)->last))
109078                 return 1;
109079 +#endif
109081         if (sysctl_sched_migration_cost == -1)
109082                 return 1;
109083 @@ -7539,6 +8010,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
109084         if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
109085                 return 0;
109087 +       /* Disregard pcpu kthreads; they are where they need to be. */
109088 +       if (kthread_is_per_cpu(p))
109089 +               return 0;
109091         if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
109092                 int cpu;
109094 @@ -7708,8 +8183,7 @@ static int detach_tasks(struct lb_env *env)
109095                          * scheduler fails to find a good waiting task to
109096                          * migrate.
109097                          */
109099 -                       if ((load >> env->sd->nr_balance_failed) > env->imbalance)
109100 +                       if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
109101                                 goto next;
109103                         env->imbalance -= load;
109104 @@ -10746,11 +11220,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
109105         update_overutilized_status(task_rq(curr));
109108 +#ifdef CONFIG_CACULE_SCHED
109110   * called on fork with the child task as argument from the parent's context
109111   *  - child not yet on the tasklist
109112   *  - preemption disabled
109113   */
109114 +static void task_fork_fair(struct task_struct *p)
109116 +       struct cfs_rq *cfs_rq;
109117 +       struct sched_entity *curr;
109118 +       struct rq *rq = this_rq();
109119 +       struct rq_flags rf;
109121 +       rq_lock(rq, &rf);
109122 +       update_rq_clock(rq);
109124 +       cfs_rq = task_cfs_rq(current);
109125 +       curr = cfs_rq->curr;
109126 +       if (curr)
109127 +               update_curr(cfs_rq);
109129 +       rq_unlock(rq, &rf);
109131 +#else
109132  static void task_fork_fair(struct task_struct *p)
109134         struct cfs_rq *cfs_rq;
109135 @@ -10781,6 +11274,7 @@ static void task_fork_fair(struct task_struct *p)
109136         se->vruntime -= cfs_rq->min_vruntime;
109137         rq_unlock(rq, &rf);
109139 +#endif /* CONFIG_CACULE_SCHED */
109142   * Priority of the task has changed. Check to see if we preempt
109143 @@ -10844,16 +11338,22 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
109145         struct cfs_rq *cfs_rq;
109147 +       list_add_leaf_cfs_rq(cfs_rq_of(se));
109149         /* Start to propagate at parent */
109150         se = se->parent;
109152         for_each_sched_entity(se) {
109153                 cfs_rq = cfs_rq_of(se);
109155 -               if (cfs_rq_throttled(cfs_rq))
109156 -                       break;
109157 +               if (!cfs_rq_throttled(cfs_rq)) {
109158 +                       update_load_avg(cfs_rq, se, UPDATE_TG);
109159 +                       list_add_leaf_cfs_rq(cfs_rq);
109160 +                       continue;
109161 +               }
109163 -               update_load_avg(cfs_rq, se, UPDATE_TG);
109164 +               if (list_add_leaf_cfs_rq(cfs_rq))
109165 +                       break;
109166         }
109168  #else
109169 @@ -10893,6 +11393,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
109170  static void detach_task_cfs_rq(struct task_struct *p)
109172         struct sched_entity *se = &p->se;
109174 +#if !defined(CONFIG_CACULE_SCHED)
109175         struct cfs_rq *cfs_rq = cfs_rq_of(se);
109177         if (!vruntime_normalized(p)) {
109178 @@ -10903,6 +11405,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
109179                 place_entity(cfs_rq, se, 0);
109180                 se->vruntime -= cfs_rq->min_vruntime;
109181         }
109182 +#endif
109184         detach_entity_cfs_rq(se);
109186 @@ -10910,12 +11413,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
109187  static void attach_task_cfs_rq(struct task_struct *p)
109189         struct sched_entity *se = &p->se;
109191 +#if !defined(CONFIG_CACULE_SCHED)
109192         struct cfs_rq *cfs_rq = cfs_rq_of(se);
109193 +#endif
109195         attach_entity_cfs_rq(se);
109197 +#if !defined(CONFIG_CACULE_SCHED)
109198         if (!vruntime_normalized(p))
109199                 se->vruntime += cfs_rq->min_vruntime;
109200 +#endif
109203  static void switched_from_fair(struct rq *rq, struct task_struct *p)
109204 @@ -10971,13 +11479,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
109205  void init_cfs_rq(struct cfs_rq *cfs_rq)
109207         cfs_rq->tasks_timeline = RB_ROOT_CACHED;
109209 +#if !defined(CONFIG_CACULE_SCHED)
109210         cfs_rq->min_vruntime = (u64)(-(1LL << 20));
109211  #ifndef CONFIG_64BIT
109212         cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
109213  #endif
109214 +#endif /* CONFIG_CACULE_SCHED */
109216  #ifdef CONFIG_SMP
109217         raw_spin_lock_init(&cfs_rq->removed.lock);
109218  #endif
109220 +#ifdef CONFIG_CACULE_SCHED
109221 +       cfs_rq->head = NULL;
109222 +       cfs_rq->tail = NULL;
109223 +#endif
109226  #ifdef CONFIG_FAIR_GROUP_SCHED
109227 diff --git a/kernel/sched/features.h b/kernel/sched/features.h
109228 index 1bc2b158fc51..e911111df83a 100644
109229 --- a/kernel/sched/features.h
109230 +++ b/kernel/sched/features.h
109231 @@ -90,3 +90,6 @@ SCHED_FEAT(WA_BIAS, true)
109232   */
109233  SCHED_FEAT(UTIL_EST, true)
109234  SCHED_FEAT(UTIL_EST_FASTUP, true)
109236 +SCHED_FEAT(ALT_PERIOD, true)
109237 +SCHED_FEAT(BASE_SLICE, true)
109238 diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
109239 index 967732c0766c..651218ded981 100644
109240 --- a/kernel/sched/psi.c
109241 +++ b/kernel/sched/psi.c
109242 @@ -711,14 +711,15 @@ static void psi_group_change(struct psi_group *group, int cpu,
109243         for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
109244                 if (!(m & (1 << t)))
109245                         continue;
109246 -               if (groupc->tasks[t] == 0 && !psi_bug) {
109247 +               if (groupc->tasks[t]) {
109248 +                       groupc->tasks[t]--;
109249 +               } else if (!psi_bug) {
109250                         printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
109251                                         cpu, t, groupc->tasks[0],
109252                                         groupc->tasks[1], groupc->tasks[2],
109253                                         groupc->tasks[3], clear, set);
109254                         psi_bug = 1;
109255                 }
109256 -               groupc->tasks[t]--;
109257         }
109259         for (t = 0; set; set &= ~(1 << t), t++)
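The reordering above is the usual guarded-decrement pattern: test before decrementing so an unsigned counter can never wrap to UINT_MAX, and warn only once. A minimal standalone sketch (function name and message are illustrative, not from the patch):

#include <stdio.h>

/* Decrement only when provably non-zero; report underflow once. */
static void dec_task_count(unsigned int *count, int *warned)
{
        if (*count) {
                (*count)--;
        } else if (!*warned) {
                fprintf(stderr, "task count underflow\n");
                *warned = 1;
        }
}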
109260 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
109261 index 10a1522b1e30..0eb4fca83ffe 100644
109262 --- a/kernel/sched/sched.h
109263 +++ b/kernel/sched/sched.h
109264 @@ -204,6 +204,13 @@ static inline void update_avg(u64 *avg, u64 sample)
109265         *avg += diff / 8;
109269 + * Shifting a value by an exponent greater than *or equal to* the size of said value
109270 + * is UB; cap at size-1.
109271 + */
109272 +#define shr_bound(val, shift)                                                  \
109273 +       (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
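A quick standalone illustration of why the clamp matters (a userspace sketch, not part of the patch): in C, shifting a value by its full bit width or more is undefined behaviour, and nr_balance_failed can exceed 63 on a long-unbalanced domain.

/* Sketch: shr_bound() caps the shift so the expression stays defined. */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_TYPE(t)  (sizeof(t) * 8)
#define shr_bound(val, shift) \
        ((val) >> ((shift) < BITS_PER_TYPE(typeof(val)) - 1 \
                   ? (shift) : BITS_PER_TYPE(typeof(val)) - 1))

int main(void)
{
        uint64_t load = 1ULL << 40;

        /* load >> 70 would be UB; the clamp turns it into load >> 63. */
        printf("%llu\n", (unsigned long long)shr_bound(load, 70u)); /* 0 */
        return 0;
}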
109276   * !! For sched_setattr_nocheck() (kernel) only !!
109277   *
109278 @@ -516,10 +523,13 @@ struct cfs_rq {
109279         unsigned int            idle_h_nr_running; /* SCHED_IDLE */
109281         u64                     exec_clock;
109283 +#if !defined(CONFIG_CACULE_SCHED)
109284         u64                     min_vruntime;
109285  #ifndef CONFIG_64BIT
109286         u64                     min_vruntime_copy;
109287  #endif
109288 +#endif /* CONFIG_CACULE_SCHED */
109290         struct rb_root_cached   tasks_timeline;
109292 @@ -528,9 +538,15 @@ struct cfs_rq {
109293          * It is set to NULL otherwise (i.e when none are currently running).
109294          */
109295         struct sched_entity     *curr;
109296 +#ifdef CONFIG_CACULE_SCHED
109297 +       struct cacule_node      *head;
109298 +       struct cacule_node      *tail;
109300 +#else
109301         struct sched_entity     *next;
109302         struct sched_entity     *last;
109303         struct sched_entity     *skip;
109304 +#endif // CONFIG_CACULE_SCHED
109306  #ifdef CONFIG_SCHED_DEBUG
109307         unsigned int            nr_spread_over;
109308 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
109309 index 09d35044bd88..12f80587e127 100644
109310 --- a/kernel/sched/topology.c
109311 +++ b/kernel/sched/topology.c
109312 @@ -723,35 +723,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
109313         for (tmp = sd; tmp; tmp = tmp->parent)
109314                 numa_distance += !!(tmp->flags & SD_NUMA);
109316 -       /*
109317 -        * FIXME: Diameter >=3 is misrepresented.
109318 -        *
109319 -        * Smallest diameter=3 topology is:
109320 -        *
109321 -        *   node   0   1   2   3
109322 -        *     0:  10  20  30  40
109323 -        *     1:  20  10  20  30
109324 -        *     2:  30  20  10  20
109325 -        *     3:  40  30  20  10
109326 -        *
109327 -        *   0 --- 1 --- 2 --- 3
109328 -        *
109329 -        * NUMA-3       0-3             N/A             N/A             0-3
109330 -        *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
109331 -        *
109332 -        * NUMA-2       0-2             0-3             0-3             1-3
109333 -        *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
109334 -        *
109335 -        * NUMA-1       0-1             0-2             1-3             2-3
109336 -        *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
109337 -        *
109338 -        * NUMA-0       0               1               2               3
109339 -        *
109340 -        * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
109341 -        * group span isn't a subset of the domain span.
109342 -        */
109343 -       WARN_ONCE(numa_distance > 2, "Shortest NUMA path spans too many nodes\n");
109345         sched_domain_debug(sd, cpu);
109347         rq_attach_root(rq, rd);
109348 @@ -982,6 +953,31 @@ static void init_overlap_sched_group(struct sched_domain *sd,
109349         sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
109352 +static struct sched_domain *
109353 +find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
109355 +       /*
109356 +        * The proper descendant would be the one whose child won't span out
109357 +        * of sd
109358 +        */
109359 +       while (sibling->child &&
109360 +              !cpumask_subset(sched_domain_span(sibling->child),
109361 +                              sched_domain_span(sd)))
109362 +               sibling = sibling->child;
109364 +       /*
109365 +        * As we are referencing sgc across different topology levels, we need
109366 +        * to go down to skip those sched_domains which don't contribute to
109367 +        * scheduling because they will be degenerated in cpu_attach_domain
109368 +        */
109369 +       while (sibling->child &&
109370 +              cpumask_equal(sched_domain_span(sibling->child),
109371 +                            sched_domain_span(sibling)))
109372 +               sibling = sibling->child;
109374 +       return sibling;
109377  static int
109378  build_overlap_sched_groups(struct sched_domain *sd, int cpu)
109380 @@ -1015,6 +1011,41 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
109381                 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
109382                         continue;
109384 +               /*
109385 +                * Usually we build sched_group by sibling's child sched_domain
109386 +                * But for machines whose NUMA diameter is 3 or above, we move
109387 +                * to build sched_group by sibling's proper descendant's child
109388 +                * domain because sibling's child sched_domain will span out of
109389 +                * the sched_domain being built as below.
109390 +                *
109391 +                * Smallest diameter=3 topology is:
109392 +                *
109393 +                *   node   0   1   2   3
109394 +                *     0:  10  20  30  40
109395 +                *     1:  20  10  20  30
109396 +                *     2:  30  20  10  20
109397 +                *     3:  40  30  20  10
109398 +                *
109399 +                *   0 --- 1 --- 2 --- 3
109400 +                *
109401 +                * NUMA-3       0-3             N/A             N/A             0-3
109402 +                *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
109403 +                *
109404 +                * NUMA-2       0-2             0-3             0-3             1-3
109405 +                *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
109406 +                *
109407 +                * NUMA-1       0-1             0-2             1-3             2-3
109408 +                *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
109409 +                *
109410 +                * NUMA-0       0               1               2               3
109411 +                *
109412 +                * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
109413 +                * group span isn't a subset of the domain span.
109414 +                */
109415 +               if (sibling->child &&
109416 +                   !cpumask_subset(sched_domain_span(sibling->child), span))
109417 +                       sibling = find_descended_sibling(sd, sibling);
109419                 sg = build_group_from_child_sched_domain(sibling, cpu);
109420                 if (!sg)
109421                         goto fail;
109422 @@ -1022,7 +1053,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
109423                 sg_span = sched_group_span(sg);
109424                 cpumask_or(covered, covered, sg_span);
109426 -               init_overlap_sched_group(sd, sg);
109427 +               init_overlap_sched_group(sibling, sg);
109429                 if (!first)
109430                         first = sg;
109431 diff --git a/kernel/smp.c b/kernel/smp.c
109432 index aeb0adfa0606..c678589fbb76 100644
109433 --- a/kernel/smp.c
109434 +++ b/kernel/smp.c
109435 @@ -110,7 +110,7 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
109436  static atomic_t csd_bug_count = ATOMIC_INIT(0);
109438  /* Record current CSD work for current CPU, NULL to erase. */
109439 -static void csd_lock_record(call_single_data_t *csd)
109440 +static void csd_lock_record(struct __call_single_data *csd)
109442         if (!csd) {
109443                 smp_mb(); /* NULL cur_csd after unlock. */
109444 @@ -125,7 +125,7 @@ static void csd_lock_record(call_single_data_t *csd)
109445                   /* Or before unlock, as the case may be. */
109448 -static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
109449 +static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
109451         unsigned int csd_type;
109453 @@ -140,7 +140,7 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
109454   * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
109455   * so waiting on other types gets much less information.
109456   */
109457 -static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
109458 +static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
109460         int cpu = -1;
109461         int cpux;
109462 @@ -204,7 +204,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
109463   * previous function call. For multi-cpu calls its even more interesting
109464   * as we'll have to ensure no other cpu is observing our csd.
109465   */
109466 -static __always_inline void csd_lock_wait(call_single_data_t *csd)
109467 +static __always_inline void csd_lock_wait(struct __call_single_data *csd)
109469         int bug_id = 0;
109470         u64 ts0, ts1;
109471 @@ -219,17 +219,17 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
109474  #else
109475 -static void csd_lock_record(call_single_data_t *csd)
109476 +static void csd_lock_record(struct __call_single_data *csd)
109480 -static __always_inline void csd_lock_wait(call_single_data_t *csd)
109481 +static __always_inline void csd_lock_wait(struct __call_single_data *csd)
109483         smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
109485  #endif
109487 -static __always_inline void csd_lock(call_single_data_t *csd)
109488 +static __always_inline void csd_lock(struct __call_single_data *csd)
109490         csd_lock_wait(csd);
109491         csd->node.u_flags |= CSD_FLAG_LOCK;
109492 @@ -242,7 +242,7 @@ static __always_inline void csd_lock(call_single_data_t *csd)
109493         smp_wmb();
109496 -static __always_inline void csd_unlock(call_single_data_t *csd)
109497 +static __always_inline void csd_unlock(struct __call_single_data *csd)
109499         WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
109501 @@ -276,7 +276,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
109502   * for execution on the given CPU. data must already have
109503   * ->func, ->info, and ->flags set.
109504   */
109505 -static int generic_exec_single(int cpu, call_single_data_t *csd)
109506 +static int generic_exec_single(int cpu, struct __call_single_data *csd)
109508         if (cpu == smp_processor_id()) {
109509                 smp_call_func_t func = csd->func;
109510 @@ -542,7 +542,7 @@ EXPORT_SYMBOL(smp_call_function_single);
109511   * NOTE: Be careful, there is unfortunately no current debugging facility to
109512   * validate the correctness of this serialization.
109513   */
109514 -int smp_call_function_single_async(int cpu, call_single_data_t *csd)
109515 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
109517         int err = 0;
109519 diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
109520 index 19aa806890d5..1750dfc416d8 100644
109521 --- a/kernel/sys_ni.c
109522 +++ b/kernel/sys_ni.c
109523 @@ -150,6 +150,12 @@ COND_SYSCALL_COMPAT(set_robust_list);
109524  COND_SYSCALL(get_robust_list);
109525  COND_SYSCALL_COMPAT(get_robust_list);
109527 +/* kernel/futex2.c */
109528 +COND_SYSCALL(futex_wait);
109529 +COND_SYSCALL(futex_wake);
109530 +COND_SYSCALL(futex_waitv);
109531 +COND_SYSCALL(futex_requeue);
109533  /* kernel/hrtimer.c */
109535  /* kernel/itimer.c */
109536 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
109537 index 62fbd09b5dc1..c3fe3ddde393 100644
109538 --- a/kernel/sysctl.c
109539 +++ b/kernel/sysctl.c
109540 @@ -120,9 +120,9 @@ static unsigned long long_max = LONG_MAX;
109541  static int one_hundred = 100;
109542  static int two_hundred = 200;
109543  static int one_thousand = 1000;
109544 -#ifdef CONFIG_PRINTK
109545  static int ten_thousand = 10000;
109546 -#endif
109547 +extern int hrtimer_granularity_us;
109548 +extern int hrtimeout_min_us;
109549  #ifdef CONFIG_PERF_EVENTS
109550  static int six_hundred_forty_kb = 640 * 1024;
109551  #endif
109552 @@ -200,6 +200,10 @@ static int min_extfrag_threshold;
109553  static int max_extfrag_threshold = 1000;
109554  #endif
109556 +#ifdef CONFIG_USER_NS
109557 +extern int unprivileged_userns_clone;
109558 +#endif
109560  #endif /* CONFIG_SYSCTL */
109562  #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
109563 @@ -1652,6 +1656,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
109566  static struct ctl_table kern_table[] = {
109567 +       {
109568 +               .procname       = "hrtimer_granularity_us",
109569 +               .data           = &hrtimer_granularity_us,
109570 +               .maxlen         = sizeof(int),
109571 +               .mode           = 0644,
109572 +               .proc_handler   = &proc_dointvec_minmax,
109573 +               .extra1         = SYSCTL_ONE,
109574 +               .extra2         = &ten_thousand,
109575 +       },
109576 +       {
109577 +               .procname       = "hrtimeout_min_us",
109578 +               .data           = &hrtimeout_min_us,
109579 +               .maxlen         = sizeof(int),
109580 +               .mode           = 0644,
109581 +               .proc_handler   = &proc_dointvec_minmax,
109582 +               .extra1         = SYSCTL_ONE,
109583 +               .extra2         = &ten_thousand,
109584 +       },
109585         {
109586                 .procname       = "sched_child_runs_first",
109587                 .data           = &sysctl_sched_child_runs_first,
109588 @@ -1659,6 +1681,29 @@ static struct ctl_table kern_table[] = {
109589                 .mode           = 0644,
109590                 .proc_handler   = proc_dointvec,
109591         },
109592 +#ifdef CONFIG_CACULE_SCHED
109593 +       {
109594 +               .procname       = "sched_interactivity_factor",
109595 +               .data           = &interactivity_factor,
109596 +               .maxlen         = sizeof(int),
109597 +               .mode           = 0644,
109598 +               .proc_handler   = proc_dointvec,
109599 +       },
109600 +       {
109601 +               .procname       = "sched_interactivity_threshold",
109602 +               .data           = &interactivity_threshold,
109603 +               .maxlen         = sizeof(unsigned int),
109604 +               .mode           = 0644,
109605 +               .proc_handler   = proc_dointvec,
109606 +       },
109607 +       {
109608 +               .procname       = "sched_max_lifetime_ms",
109609 +               .data           = &cacule_max_lifetime,
109610 +               .maxlen         = sizeof(int),
109611 +               .mode           = 0644,
109612 +               .proc_handler   = proc_dointvec,
109613 +       },
109614 +#endif
109615  #ifdef CONFIG_SCHED_DEBUG
109616         {
109617                 .procname       = "sched_min_granularity_ns",
109618 @@ -1902,6 +1947,15 @@ static struct ctl_table kern_table[] = {
109619                 .proc_handler   = proc_dointvec,
109620         },
109621  #endif
109622 +#ifdef CONFIG_USER_NS
109623 +       {
109624 +               .procname       = "unprivileged_userns_clone",
109625 +               .data           = &unprivileged_userns_clone,
109626 +               .maxlen         = sizeof(int),
109627 +               .mode           = 0644,
109628 +               .proc_handler   = proc_dointvec,
109629 +       },
109630 +#endif
109631  #ifdef CONFIG_PROC_SYSCTL
109632         {
109633                 .procname       = "tainted",
109634 @@ -3093,6 +3147,20 @@ static struct ctl_table vm_table[] = {
109635                 .extra2         = SYSCTL_ONE,
109636         },
109637  #endif
109638 +       {
109639 +               .procname       = "clean_low_kbytes",
109640 +               .data           = &sysctl_clean_low_kbytes,
109641 +               .maxlen         = sizeof(sysctl_clean_low_kbytes),
109642 +               .mode           = 0644,
109643 +               .proc_handler   = proc_doulongvec_minmax,
109644 +       },
109645 +       {
109646 +               .procname       = "clean_min_kbytes",
109647 +               .data           = &sysctl_clean_min_kbytes,
109648 +               .maxlen         = sizeof(sysctl_clean_min_kbytes),
109649 +               .mode           = 0644,
109650 +               .proc_handler   = proc_doulongvec_minmax,
109651 +       },
109652         {
109653                 .procname       = "user_reserve_kbytes",
109654                 .data           = &sysctl_user_reserve_kbytes,
109655 diff --git a/kernel/task_work.c b/kernel/task_work.c
109656 index 9cde961875c0..5c8dea45d4f8 100644
109657 --- a/kernel/task_work.c
109658 +++ b/kernel/task_work.c
109659 @@ -57,6 +57,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
109661         return 0;
109663 +EXPORT_SYMBOL(task_work_add);
109665  /**
109666   * task_work_cancel - cancel a pending work added by task_work_add()
109667 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
109668 index 4d94e2b5499d..a7924fedf479 100644
109669 --- a/kernel/time/alarmtimer.c
109670 +++ b/kernel/time/alarmtimer.c
109671 @@ -92,7 +92,7 @@ static int alarmtimer_rtc_add_device(struct device *dev,
109672         if (rtcdev)
109673                 return -EBUSY;
109675 -       if (!rtc->ops->set_alarm)
109676 +       if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
109677                 return -1;
109678         if (!device_may_wakeup(rtc->dev.parent))
109679                 return -1;
109680 diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
109681 index f5490222e134..23db3c39e07a 100644
109682 --- a/kernel/time/clockevents.c
109683 +++ b/kernel/time/clockevents.c
109684 @@ -190,8 +190,9 @@ int clockevents_tick_resume(struct clock_event_device *dev)
109686  #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
109688 +int __read_mostly hrtimer_granularity_us = 100;
109689  /* Limit min_delta to a jiffie */
109690 -#define MIN_DELTA_LIMIT                (NSEC_PER_SEC / HZ)
109691 +#define MIN_DELTA_LIMIT                (hrtimer_granularity_us * NSEC_PER_USEC)
109693  /**
109694   * clockevents_increase_min_delta - raise minimum delta of a clock event device
109695 diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
109696 index 5c9d968187ae..7a3d640dc13a 100644
109697 --- a/kernel/time/hrtimer.c
109698 +++ b/kernel/time/hrtimer.c
109699 @@ -2236,3 +2236,113 @@ int __sched schedule_hrtimeout(ktime_t *expires,
109700         return schedule_hrtimeout_range(expires, 0, mode);
109702  EXPORT_SYMBOL_GPL(schedule_hrtimeout);
109705 + * As per schedule_hrtimeout but takes a millisecond value and returns how
109706 + * many milliseconds are left.
109707 + */
109708 +long __sched schedule_msec_hrtimeout(long timeout)
109710 +       struct hrtimer_sleeper t;
109711 +       int delta, jiffs;
109712 +       ktime_t expires;
109714 +       if (!timeout) {
109715 +               __set_current_state(TASK_RUNNING);
109716 +               return 0;
109717 +       }
109719 +       jiffs = msecs_to_jiffies(timeout);
109720 +       /*
109721 +        * If regular timer resolution is adequate or hrtimer resolution is not
109722 +        * (yet) better than Hz, as would occur during startup, use regular
109723 +        * timers.
109724 +        */
109725 +       if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
109726 +               return schedule_timeout(jiffs);
109728 +       delta = (timeout % 1000) * NSEC_PER_MSEC;
109729 +       expires = ktime_set(0, delta);
109731 +       hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
109732 +       hrtimer_set_expires_range_ns(&t.timer, expires, delta);
109734 +       hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
109736 +       if (likely(t.task))
109737 +               schedule();
109739 +       hrtimer_cancel(&t.timer);
109740 +       destroy_hrtimer_on_stack(&t.timer);
109742 +       __set_current_state(TASK_RUNNING);
109744 +       expires = hrtimer_expires_remaining(&t.timer);
109745 +       timeout = ktime_to_ms(expires);
109746 +       return timeout < 0 ? 0 : timeout;
109749 +EXPORT_SYMBOL(schedule_msec_hrtimeout);
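To make the fallback condition concrete, a worked example assuming HZ=100 (10 ms ticks) with highres timers active:

/* msecs_to_jiffies(40) == 4  -> jiffs <= 4, hrtimer path, ~40 ms sleep
 * msecs_to_jiffies(50) == 5  -> jiffs > 4, schedule_timeout() path
 *
 * Only short sleeps, where the 10 ms tick granularity dominates the
 * requested delay, pay for programming a high resolution timer. */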
109751 +#define USECS_PER_SEC 1000000
109752 +extern int hrtimer_granularity_us;
109754 +static inline long schedule_usec_hrtimeout(long timeout)
109756 +       struct hrtimer_sleeper t;
109757 +       ktime_t expires;
109758 +       int delta;
109760 +       if (!timeout) {
109761 +               __set_current_state(TASK_RUNNING);
109762 +               return 0;
109763 +       }
109765 +       if (hrtimer_resolution >= NSEC_PER_SEC / HZ)
109766 +               return schedule_timeout(usecs_to_jiffies(timeout));
109768 +       if (timeout < hrtimer_granularity_us)
109769 +               timeout = hrtimer_granularity_us;
109770 +       delta = (timeout % USECS_PER_SEC) * NSEC_PER_USEC;
109771 +       expires = ktime_set(0, delta);
109773 +       hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
109774 +       hrtimer_set_expires_range_ns(&t.timer, expires, delta);
109776 +       hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
109778 +       if (likely(t.task))
109779 +               schedule();
109781 +       hrtimer_cancel(&t.timer);
109782 +       destroy_hrtimer_on_stack(&t.timer);
109784 +       __set_current_state(TASK_RUNNING);
109786 +       expires = hrtimer_expires_remaining(&t.timer);
109787 +       timeout = ktime_to_us(expires);
109788 +       return timeout < 0 ? 0 : timeout;
109791 +int __read_mostly hrtimeout_min_us = 500;
109793 +long __sched schedule_min_hrtimeout(void)
109795 +       return usecs_to_jiffies(schedule_usec_hrtimeout(hrtimeout_min_us));
109798 +EXPORT_SYMBOL(schedule_min_hrtimeout);
109800 +long __sched schedule_msec_hrtimeout_interruptible(long timeout)
109802 +       __set_current_state(TASK_INTERRUPTIBLE);
109803 +       return schedule_msec_hrtimeout(timeout);
109805 +EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
109807 +long __sched schedule_msec_hrtimeout_uninterruptible(long timeout)
109809 +       __set_current_state(TASK_UNINTERRUPTIBLE);
109810 +       return schedule_msec_hrtimeout(timeout);
109812 +EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
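A hedged usage sketch of the exported helpers (my_dev and device_ready() are illustrative, not from the patch): they behave like msleep_interruptible(), except each 2 ms sleep is not rounded up to a full tick on low-Hz builds.

static int wait_for_ready(struct my_dev *dev)
{
        int tries = 100;

        while (!device_ready(dev)) {
                if (!tries--)
                        return -ETIMEDOUT;
                /* A non-zero return means we were woken early, here
                 * by a signal. */
                if (schedule_msec_hrtimeout_interruptible(2))
                        return -EINTR;
        }
        return 0;
}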
109813 diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
109814 index bf540f5a4115..dd5697d7347b 100644
109815 --- a/kernel/time/posix-timers.c
109816 +++ b/kernel/time/posix-timers.c
109817 @@ -1191,8 +1191,8 @@ SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
109819         err = do_clock_adjtime(which_clock, &ktx);
109821 -       if (err >= 0)
109822 -               err = put_old_timex32(utp, &ktx);
109823 +       if (err >= 0 && put_old_timex32(utp, &ktx))
109824 +               return -EFAULT;
109826         return err;
109828 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
109829 index f475f1a027c8..8d82fe9f6fbb 100644
109830 --- a/kernel/time/timer.c
109831 +++ b/kernel/time/timer.c
109832 @@ -44,6 +44,7 @@
109833  #include <linux/slab.h>
109834  #include <linux/compat.h>
109835  #include <linux/random.h>
109836 +#include <linux/freezer.h>
109838  #include <linux/uaccess.h>
109839  #include <asm/unistd.h>
109840 @@ -1886,6 +1887,18 @@ signed long __sched schedule_timeout(signed long timeout)
109842         expire = timeout + jiffies;
109844 +#ifdef CONFIG_HIGH_RES_TIMERS
109845 +       if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
109846 +               /*
109847 +                * Special case 1 as being a request for the minimum timeout
109848 +                * and use highres timers to time out after 1ms to work around
109849 +                * the granularity of low Hz tick timers.
109850 +                */
109851 +               if (!schedule_min_hrtimeout())
109852 +                       return 0;
109853 +               goto out_timeout;
109854 +       }
109855 +#endif
109856         timer.task = current;
109857         timer_setup_on_stack(&timer.timer, process_timeout, 0);
109858         __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
109859 @@ -1894,10 +1907,10 @@ signed long __sched schedule_timeout(signed long timeout)
109861         /* Remove the timer from the object tracker */
109862         destroy_timer_on_stack(&timer.timer);
109864 +out_timeout:
109865         timeout = expire - jiffies;
109867 - out:
109868 +out:
109869         return timeout < 0 ? 0 : timeout;
109871  EXPORT_SYMBOL(schedule_timeout);
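For callers nothing changes at the API level; the common minimum-delay idiom simply gets faster (sketch, assuming CONFIG_HIGH_RES_TIMERS on an HZ=100 build):

/* Before: blocks for up to one jiffy, i.e. 10 ms at HZ=100.
 * After:  returns after ~500 us, the hrtimeout_min_us default. */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);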
109872 @@ -2040,7 +2053,19 @@ void __init init_timers(void)
109873   */
109874  void msleep(unsigned int msecs)
109876 -       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
109877 +       int jiffs = msecs_to_jiffies(msecs);
109878 +       unsigned long timeout;
109880 +       /*
109881 +        * Use high resolution timers where the resolution of tick based
109882 +        * timers is inadequate.
109883 +        */
109884 +       if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
109885 +               while (msecs)
109886 +                       msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
109887 +               return;
109888 +       }
109889 +       timeout = jiffs + 1;
109891         while (timeout)
109892                 timeout = schedule_timeout_uninterruptible(timeout);
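A worked example of the new branch, assuming HZ=250 (4 ms ticks) with highres timers active (hrtimer_resolution is then far below 4 ms):

/* msleep(1)  -> jiffs = 1 (< 5) -> hrtimer loop, sleeps ~1 ms
 * msleep(16) -> jiffs = 4 (< 5) -> hrtimer loop
 * msleep(20) -> jiffs = 5       -> legacy path, sleeps >= 24 ms
 *
 * The legacy path always adds one jiffy, so short msleep() calls
 * could previously oversleep several-fold on low Hz kernels. */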
109893 @@ -2054,7 +2079,15 @@ EXPORT_SYMBOL(msleep);
109894   */
109895  unsigned long msleep_interruptible(unsigned int msecs)
109897 -       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
109898 +       int jiffs = msecs_to_jiffies(msecs);
109899 +       unsigned long timeout;
109901 +       if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
109902 +               while (msecs && !signal_pending(current))
109903 +                       msecs = schedule_msec_hrtimeout_interruptible(msecs);
109904 +               return msecs;
109905 +       }
109906 +       timeout = jiffs + 1;
109908         while (timeout && !signal_pending(current))
109909                 timeout = schedule_timeout_interruptible(timeout);
109910 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
109911 index 3ba52d4e1314..826b88b727a6 100644
109912 --- a/kernel/trace/ftrace.c
109913 +++ b/kernel/trace/ftrace.c
109914 @@ -5631,7 +5631,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
109916         parser = &iter->parser;
109917         if (trace_parser_loaded(parser)) {
109918 -               ftrace_match_records(iter->hash, parser->buffer, parser->idx);
109919 +               int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
109921 +               ftrace_process_regex(iter, parser->buffer,
109922 +                                    parser->idx, enable);
109923         }
109925         trace_parser_put(parser);
109926 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
109927 index c0c9aa5cd8e2..67c01dc5cdeb 100644
109928 --- a/kernel/trace/trace.c
109929 +++ b/kernel/trace/trace.c
109930 @@ -2390,14 +2390,13 @@ static void tracing_stop_tr(struct trace_array *tr)
109932  static int trace_save_cmdline(struct task_struct *tsk)
109934 -       unsigned pid, idx;
109935 +       unsigned tpid, idx;
109937         /* treat recording of idle task as a success */
109938         if (!tsk->pid)
109939                 return 1;
109941 -       if (unlikely(tsk->pid > PID_MAX_DEFAULT))
109942 -               return 0;
109943 +       tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
109945         /*
109946          * It's not the end of the world if we don't get
109947 @@ -2408,26 +2407,15 @@ static int trace_save_cmdline(struct task_struct *tsk)
109948         if (!arch_spin_trylock(&trace_cmdline_lock))
109949                 return 0;
109951 -       idx = savedcmd->map_pid_to_cmdline[tsk->pid];
109952 +       idx = savedcmd->map_pid_to_cmdline[tpid];
109953         if (idx == NO_CMDLINE_MAP) {
109954                 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
109956 -               /*
109957 -                * Check whether the cmdline buffer at idx has a pid
109958 -                * mapped. We are going to overwrite that entry so we
109959 -                * need to clear the map_pid_to_cmdline. Otherwise we
109960 -                * would read the new comm for the old pid.
109961 -                */
109962 -               pid = savedcmd->map_cmdline_to_pid[idx];
109963 -               if (pid != NO_CMDLINE_MAP)
109964 -                       savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
109966 -               savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
109967 -               savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
109969 +               savedcmd->map_pid_to_cmdline[tpid] = idx;
109970                 savedcmd->cmdline_idx = idx;
109971         }
109973 +       savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
109974         set_cmdline(idx, tsk->comm);
109976         arch_spin_unlock(&trace_cmdline_lock);
109977 @@ -2438,6 +2426,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
109978  static void __trace_find_cmdline(int pid, char comm[])
109980         unsigned map;
109981 +       int tpid;
109983         if (!pid) {
109984                 strcpy(comm, "<idle>");
109985 @@ -2449,16 +2438,16 @@ static void __trace_find_cmdline(int pid, char comm[])
109986                 return;
109987         }
109989 -       if (pid > PID_MAX_DEFAULT) {
109990 -               strcpy(comm, "<...>");
109991 -               return;
109992 +       tpid = pid & (PID_MAX_DEFAULT - 1);
109993 +       map = savedcmd->map_pid_to_cmdline[tpid];
109994 +       if (map != NO_CMDLINE_MAP) {
109995 +               tpid = savedcmd->map_cmdline_to_pid[map];
109996 +               if (tpid == pid) {
109997 +                       strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
109998 +                       return;
109999 +               }
110000         }
110002 -       map = savedcmd->map_pid_to_cmdline[pid];
110003 -       if (map != NO_CMDLINE_MAP)
110004 -               strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
110005 -       else
110006 -               strcpy(comm, "<...>");
110007 +       strcpy(comm, "<...>");
110010  void trace_find_cmdline(int pid, char comm[])
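The masking turns the pid-indexed array into a small hash table; a condensed sketch of the resulting lookup (a restatement of the hunk above, not new logic):

unsigned tpid = pid & (PID_MAX_DEFAULT - 1);    /* fold high pids */
unsigned map  = savedcmd->map_pid_to_cmdline[tpid];

/* map_cmdline_to_pid records the slot's current owner, so a pid that
 * merely collides with another task's slot still reads as "<...>"
 * instead of showing the wrong task's comm. */
if (map != NO_CMDLINE_MAP && savedcmd->map_cmdline_to_pid[map] == pid)
        strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
else
        strcpy(comm, "<...>");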
110011 diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
110012 index aaf6793ededa..c1637f90c8a3 100644
110013 --- a/kernel/trace/trace_clock.c
110014 +++ b/kernel/trace/trace_clock.c
110015 @@ -95,33 +95,49 @@ u64 notrace trace_clock_global(void)
110017         unsigned long flags;
110018         int this_cpu;
110019 -       u64 now;
110020 +       u64 now, prev_time;
110022         raw_local_irq_save(flags);
110024         this_cpu = raw_smp_processor_id();
110025 -       now = sched_clock_cpu(this_cpu);
110027         /*
110028 -        * If in an NMI context then dont risk lockups and return the
110029 -        * cpu_clock() time:
110030 +        * The global clock "guarantees" that the events are ordered
110031 +        * between CPUs. But if two events on two different CPUS call
110032 +        * trace_clock_global at roughly the same time, it really does
110033 +        * not matter which one gets the earlier time. Just make sure
110034 +        * that the same CPU will always show a monotonic clock.
110035 +        *
110036 +        * Use a read memory barrier to get the latest written
110037 +        * time that was recorded.
110038          */
110039 -       if (unlikely(in_nmi()))
110040 -               goto out;
110041 +       smp_rmb();
110042 +       prev_time = READ_ONCE(trace_clock_struct.prev_time);
110043 +       now = sched_clock_cpu(this_cpu);
110045 -       arch_spin_lock(&trace_clock_struct.lock);
110046 +       /* Make sure that now is always greater than prev_time */
110047 +       if ((s64)(now - prev_time) < 0)
110048 +               now = prev_time + 1;
110050         /*
110051 -        * TODO: if this happens often then maybe we should reset
110052 -        * my_scd->clock to prev_time+1, to make sure
110053 -        * we start ticking with the local clock from now on?
110054 +        * If in an NMI context then don't risk lockups and simply return
110055 +        * the current time.
110056          */
110057 -       if ((s64)(now - trace_clock_struct.prev_time) < 0)
110058 -               now = trace_clock_struct.prev_time + 1;
110059 +       if (unlikely(in_nmi()))
110060 +               goto out;
110062 -       trace_clock_struct.prev_time = now;
110063 +       /* Tracing can cause strange recursion, always use a try lock */
110064 +       if (arch_spin_trylock(&trace_clock_struct.lock)) {
110065 +               /* Reread prev_time in case it was already updated */
110066 +               prev_time = READ_ONCE(trace_clock_struct.prev_time);
110067 +               if ((s64)(now - prev_time) < 0)
110068 +                       now = prev_time + 1;
110070 -       arch_spin_unlock(&trace_clock_struct.lock);
110071 +               trace_clock_struct.prev_time = now;
110073 +               /* The unlock acts as the wmb for the above rmb */
110074 +               arch_spin_unlock(&trace_clock_struct.lock);
110075 +       }
110076   out:
110077         raw_local_irq_restore(flags);
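The rework boils down to an opportunistic monotonic clamp; a condensed sketch of the ordering (variable names shortened, comments are interpretation):

smp_rmb();                              /* see the latest published time */
prev = READ_ONCE(prev_time);
now  = sched_clock_cpu(cpu);
if ((s64)(now - prev) < 0)              /* local clock lagging? */
        now = prev + 1;                 /* clamp: never go backwards */

if (arch_spin_trylock(&lock)) {         /* NMIs and recursion just skip */
        prev = READ_ONCE(prev_time);    /* may have advanced meanwhile */
        if ((s64)(now - prev) < 0)
                now = prev + 1;
        prev_time = now;
        arch_spin_unlock(&lock);        /* release pairs with the rmb */
}
return now;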
110079 diff --git a/kernel/up.c b/kernel/up.c
110080 index c6f323dcd45b..4edd5493eba2 100644
110081 --- a/kernel/up.c
110082 +++ b/kernel/up.c
110083 @@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
110085  EXPORT_SYMBOL(smp_call_function_single);
110087 -int smp_call_function_single_async(int cpu, call_single_data_t *csd)
110088 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
110090         unsigned long flags;
110092 diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
110093 index 9a4b980d695b..0475d15b1c66 100644
110094 --- a/kernel/user_namespace.c
110095 +++ b/kernel/user_namespace.c
110096 @@ -21,6 +21,9 @@
110097  #include <linux/bsearch.h>
110098  #include <linux/sort.h>
110100 +/* sysctl */
110101 +int unprivileged_userns_clone = 1;
110103  static struct kmem_cache *user_ns_cachep __read_mostly;
110104  static DEFINE_MUTEX(userns_state_mutex);
110106 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
110107 index 107bc38b1945..8cf0678378d2 100644
110108 --- a/kernel/watchdog.c
110109 +++ b/kernel/watchdog.c
110110 @@ -154,7 +154,11 @@ static void lockup_detector_update_enable(void)
110112  #ifdef CONFIG_SOFTLOCKUP_DETECTOR
110114 -#define SOFTLOCKUP_RESET       ULONG_MAX
110116 + * Delay the softlockup report when running known slow code.
110117 + * It does _not_ affect the timestamp of the last successful reschedule.
110118 + */
110119 +#define SOFTLOCKUP_DELAY_REPORT        ULONG_MAX
110121  #ifdef CONFIG_SMP
110122  int __read_mostly sysctl_softlockup_all_cpu_backtrace;
110123 @@ -169,10 +173,12 @@ unsigned int __read_mostly softlockup_panic =
110124  static bool softlockup_initialized __read_mostly;
110125  static u64 __read_mostly sample_period;
110127 +/* Timestamp taken after the last successful reschedule. */
110128  static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
110129 +/* Timestamp of the last softlockup report. */
110130 +static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
110131  static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
110132  static DEFINE_PER_CPU(bool, softlockup_touch_sync);
110133 -static DEFINE_PER_CPU(bool, soft_watchdog_warn);
110134  static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
110135  static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
110136  static unsigned long soft_lockup_nmi_warn;
110137 @@ -235,10 +241,16 @@ static void set_sample_period(void)
110138         watchdog_update_hrtimer_threshold(sample_period);
110141 +static void update_report_ts(void)
110143 +       __this_cpu_write(watchdog_report_ts, get_timestamp());
110146  /* Commands for resetting the watchdog */
110147 -static void __touch_watchdog(void)
110148 +static void update_touch_ts(void)
110150         __this_cpu_write(watchdog_touch_ts, get_timestamp());
110151 +       update_report_ts();
110154  /**
110155 @@ -252,10 +264,10 @@ static void __touch_watchdog(void)
110156  notrace void touch_softlockup_watchdog_sched(void)
110158         /*
110159 -        * Preemption can be enabled.  It doesn't matter which CPU's timestamp
110160 -        * gets zeroed here, so use the raw_ operation.
110161 +        * Preemption can be enabled.  It doesn't matter which CPU's watchdog
110162 +        * report period gets restarted here, so use the raw_ operation.
110163          */
110164 -       raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
110165 +       raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
110168  notrace void touch_softlockup_watchdog(void)
110169 @@ -279,7 +291,7 @@ void touch_all_softlockup_watchdogs(void)
110170          * the softlockup check.
110171          */
110172         for_each_cpu(cpu, &watchdog_allowed_mask) {
110173 -               per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
110174 +               per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
110175                 wq_watchdog_touch(cpu);
110176         }
110178 @@ -287,16 +299,16 @@ void touch_all_softlockup_watchdogs(void)
110179  void touch_softlockup_watchdog_sync(void)
110181         __this_cpu_write(softlockup_touch_sync, true);
110182 -       __this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
110183 +       __this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
110186 -static int is_softlockup(unsigned long touch_ts)
110187 +static int is_softlockup(unsigned long touch_ts, unsigned long period_ts)
110189         unsigned long now = get_timestamp();
110191         if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
110192                 /* Warn about unreasonable delays. */
110193 -               if (time_after(now, touch_ts + get_softlockup_thresh()))
110194 +               if (time_after(now, period_ts + get_softlockup_thresh()))
110195                         return now - touch_ts;
110196         }
110197         return 0;
110198 @@ -332,7 +344,7 @@ static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
110199   */
110200  static int softlockup_fn(void *data)
110202 -       __touch_watchdog();
110203 +       update_touch_ts();
110204         complete(this_cpu_ptr(&softlockup_completion));
110206         return 0;
110207 @@ -342,6 +354,7 @@ static int softlockup_fn(void *data)
110208  static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
110210         unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
110211 +       unsigned long period_ts = __this_cpu_read(watchdog_report_ts);
110212         struct pt_regs *regs = get_irq_regs();
110213         int duration;
110214         int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
110215 @@ -363,7 +376,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
110216         /* .. and repeat */
110217         hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
110219 -       if (touch_ts == SOFTLOCKUP_RESET) {
110220 +       /* Reset the interval when touched externally by a known slow code. */
110221 +       if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
110222                 if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
110223                         /*
110224                          * If the time stamp was touched atomically
110225 @@ -375,7 +389,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
110227                 /* Clear the guest paused flag on watchdog reset */
110228                 kvm_check_and_clear_guest_paused();
110229 -               __touch_watchdog();
110230 +               update_report_ts();
110232                 return HRTIMER_RESTART;
110233         }
110235 @@ -385,7 +400,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
110236          * indicate it is getting cpu time.  If it hasn't then
110237          * this is a good indication some task is hogging the cpu
110238          */
110239 -       duration = is_softlockup(touch_ts);
110240 +       duration = is_softlockup(touch_ts, period_ts);
110241         if (unlikely(duration)) {
110242                 /*
110243                  * If a virtual machine is stopped by the host it can look to
110244 @@ -395,21 +410,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
110245                 if (kvm_check_and_clear_guest_paused())
110246                         return HRTIMER_RESTART;
110248 -               /* only warn once */
110249 -               if (__this_cpu_read(soft_watchdog_warn) == true)
110250 -                       return HRTIMER_RESTART;
110252 +               /*
110253 +                * Prevent multiple soft-lockup reports if one cpu is already
110254 +                * engaged in dumping all cpu back traces.
110255 +                */
110256                 if (softlockup_all_cpu_backtrace) {
110257 -                       /* Prevent multiple soft-lockup reports if one cpu is already
110258 -                        * engaged in dumping cpu back traces
110259 -                        */
110260 -                       if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
110261 -                               /* Someone else will report us. Let's give up */
110262 -                               __this_cpu_write(soft_watchdog_warn, true);
110263 +                       if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
110264                                 return HRTIMER_RESTART;
110265 -                       }
110266                 }
110268 +               /* Start period for the next softlockup warning. */
110269 +               update_report_ts();
110271                 pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
110272                         smp_processor_id(), duration,
110273                         current->comm, task_pid_nr(current));
110274 @@ -421,22 +433,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
110275                         dump_stack();
110277                 if (softlockup_all_cpu_backtrace) {
110278 -                       /* Avoid generating two back traces for current
110279 -                        * given that one is already made above
110280 -                        */
110281                         trigger_allbutself_cpu_backtrace();
110283 -                       clear_bit(0, &soft_lockup_nmi_warn);
110284 -                       /* Barrier to sync with other cpus */
110285 -                       smp_mb__after_atomic();
110286 +                       clear_bit_unlock(0, &soft_lockup_nmi_warn);
110287                 }
110289                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
110290                 if (softlockup_panic)
110291                         panic("softlockup: hung tasks");
110292 -               __this_cpu_write(soft_watchdog_warn, true);
110293 -       } else
110294 -               __this_cpu_write(soft_watchdog_warn, false);
110295 +       }
110297         return HRTIMER_RESTART;
110299 @@ -461,7 +465,7 @@ static void watchdog_enable(unsigned int cpu)
110300                       HRTIMER_MODE_REL_PINNED_HARD);
110302         /* Initialize timestamp */
110303 -       __touch_watchdog();
110304 +       update_touch_ts();
110305         /* Enable the perf event */
110306         if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
110307                 watchdog_nmi_enable(cpu);
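In short, one timestamp becomes two; a schematic of the resulting per-CPU state (report_softlockup() is a hypothetical stand-in for the pr_emerg block above):

/* watchdog_touch_ts  - last real reschedule; stall duration is still
 *                      measured from here, so reports stay honest.
 * watchdog_report_ts - start of the current report period; setting it
 *                      to SOFTLOCKUP_DELAY_REPORT silences the next
 *                      check without forging a reschedule. */
if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
        update_report_ts();             /* known-slow code: just delay */
} else if (time_after(now, period_ts + get_softlockup_thresh())) {
        update_report_ts();             /* re-arm the next report */
        report_softlockup(now - touch_ts);
}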
110308 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
110309 index 417c3d3e521b..03d75fe17edf 100644
110310 --- a/lib/Kconfig.debug
110311 +++ b/lib/Kconfig.debug
110312 @@ -179,7 +179,7 @@ config DYNAMIC_DEBUG_CORE
110314  config SYMBOLIC_ERRNAME
110315         bool "Support symbolic error names in printf"
110316 -       default y if PRINTK
110317 +       default n
110318         help
110319           If you say Y here, the kernel's printf implementation will
110320           be able to print symbolic error names such as ENOSPC instead
110321 @@ -189,7 +189,7 @@ config SYMBOLIC_ERRNAME
110322  config DEBUG_BUGVERBOSE
110323         bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
110324         depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
110325 -       default y
110326 +       default n
110327         help
110328           Say Y here to make BUG() panics output the file name and line number
110329           of the BUG call as well as the EIP and oops trace.  This aids
110330 diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
110331 index 78f50ccb3b45..e641add33947 100644
110332 --- a/lib/Kconfig.kfence
110333 +++ b/lib/Kconfig.kfence
110334 @@ -7,6 +7,7 @@ menuconfig KFENCE
110335         bool "KFENCE: low-overhead sampling-based memory safety error detector"
110336         depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
110337         select STACKTRACE
110338 +       select IRQ_WORK
110339         help
110340           KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
110341           access, use-after-free, and invalid-free errors. KFENCE is designed
110342 diff --git a/lib/bug.c b/lib/bug.c
110343 index 8f9d537bfb2a..b92da1f6e21b 100644
110344 --- a/lib/bug.c
110345 +++ b/lib/bug.c
110346 @@ -155,30 +155,27 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
110348         file = NULL;
110349         line = 0;
110350 -       warning = 0;
110352 -       if (bug) {
110353  #ifdef CONFIG_DEBUG_BUGVERBOSE
110354  #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
110355 -               file = bug->file;
110356 +       file = bug->file;
110357  #else
110358 -               file = (const char *)bug + bug->file_disp;
110359 +       file = (const char *)bug + bug->file_disp;
110360  #endif
110361 -               line = bug->line;
110362 +       line = bug->line;
110363  #endif
110364 -               warning = (bug->flags & BUGFLAG_WARNING) != 0;
110365 -               once = (bug->flags & BUGFLAG_ONCE) != 0;
110366 -               done = (bug->flags & BUGFLAG_DONE) != 0;
110368 -               if (warning && once) {
110369 -                       if (done)
110370 -                               return BUG_TRAP_TYPE_WARN;
110372 -                       /*
110373 -                        * Since this is the only store, concurrency is not an issue.
110374 -                        */
110375 -                       bug->flags |= BUGFLAG_DONE;
110376 -               }
110377 +       warning = (bug->flags & BUGFLAG_WARNING) != 0;
110378 +       once = (bug->flags & BUGFLAG_ONCE) != 0;
110379 +       done = (bug->flags & BUGFLAG_DONE) != 0;
110381 +       if (warning && once) {
110382 +               if (done)
110383 +                       return BUG_TRAP_TYPE_WARN;
110385 +               /*
110386 +                * Since this is the only store, concurrency is not an issue.
110387 +                */
110388 +               bug->flags |= BUGFLAG_DONE;
110389         }
110391         /*
110392 diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
110393 index 3cc77d94390b..7fb71845cc84 100644
110394 --- a/lib/crypto/poly1305-donna32.c
110395 +++ b/lib/crypto/poly1305-donna32.c
110396 @@ -10,7 +10,8 @@
110397  #include <asm/unaligned.h>
110398  #include <crypto/internal/poly1305.h>
110400 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
110401 +void poly1305_core_setkey(struct poly1305_core_key *key,
110402 +                         const u8 raw_key[POLY1305_BLOCK_SIZE])
110404         /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
110405         key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
110406 diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
110407 index 6ae181bb4345..d34cf4053668 100644
110408 --- a/lib/crypto/poly1305-donna64.c
110409 +++ b/lib/crypto/poly1305-donna64.c
110410 @@ -12,7 +12,8 @@
110412  typedef __uint128_t u128;
110414 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
110415 +void poly1305_core_setkey(struct poly1305_core_key *key,
110416 +                         const u8 raw_key[POLY1305_BLOCK_SIZE])
110418         u64 t0, t1;
110420 diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
110421 index 9d2d14df0fee..26d87fc3823e 100644
110422 --- a/lib/crypto/poly1305.c
110423 +++ b/lib/crypto/poly1305.c
110424 @@ -12,7 +12,8 @@
110425  #include <linux/module.h>
110426  #include <asm/unaligned.h>
110428 -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
110429 +void poly1305_init_generic(struct poly1305_desc_ctx *desc,
110430 +                          const u8 key[POLY1305_KEY_SIZE])
110432         poly1305_core_setkey(&desc->core_r, key);
110433         desc->s[0] = get_unaligned_le32(key + 16);
110434 diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
110435 index 790abc472f5b..6e5ecfba0a8d 100644
110436 --- a/lib/decompress_unzstd.c
110437 +++ b/lib/decompress_unzstd.c
110438 @@ -68,11 +68,7 @@
110439  #ifdef STATIC
110440  # define UNZSTD_PREBOOT
110441  # include "xxhash.c"
110442 -# include "zstd/entropy_common.c"
110443 -# include "zstd/fse_decompress.c"
110444 -# include "zstd/huf_decompress.c"
110445 -# include "zstd/zstd_common.c"
110446 -# include "zstd/decompress.c"
110447 +# include "zstd/decompress_sources.h"
110448  #endif
110450  #include <linux/decompress/mm.h>
110451 @@ -91,11 +87,15 @@
110453  static int INIT handle_zstd_error(size_t ret, void (*error)(char *x))
110455 -       const int err = ZSTD_getErrorCode(ret);
110456 +       const zstd_error_code err = zstd_get_error_code(ret);
110458 -       if (!ZSTD_isError(ret))
110459 +       if (!zstd_is_error(ret))
110460                 return 0;
110462 +       /*
110463 +        * zstd_get_error_name() cannot be used because error takes a char *,
110464 +        * not a const char *
110465 +        */
110466         switch (err) {
110467         case ZSTD_error_memory_allocation:
110468                 error("ZSTD decompressor ran out of memory");
110469 @@ -124,28 +124,28 @@ static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf,
110470                                   long out_len, long *in_pos,
110471                                   void (*error)(char *x))
110473 -       const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
110474 +       const size_t wksp_size = zstd_dctx_workspace_bound();
110475         void *wksp = large_malloc(wksp_size);
110476 -       ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size);
110477 +       zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
110478         int err;
110479         size_t ret;
110481         if (dctx == NULL) {
110482 -               error("Out of memory while allocating ZSTD_DCtx");
110483 +               error("Out of memory while allocating zstd_dctx");
110484                 err = -1;
110485                 goto out;
110486         }
110487         /*
110488          * Find out how large the frame actually is, there may be junk at
110489 -        * the end of the frame that ZSTD_decompressDCtx() can't handle.
110490 +        * the end of the frame that zstd_decompress_dctx() can't handle.
110491          */
110492 -       ret = ZSTD_findFrameCompressedSize(in_buf, in_len);
110493 +       ret = zstd_find_frame_compressed_size(in_buf, in_len);
110494         err = handle_zstd_error(ret, error);
110495         if (err)
110496                 goto out;
110497         in_len = (long)ret;
110499 -       ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len);
110500 +       ret = zstd_decompress_dctx(dctx, out_buf, out_len, in_buf, in_len);
110501         err = handle_zstd_error(ret, error);
110502         if (err)
110503                 goto out;
110504 @@ -167,14 +167,14 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
110505                          long *in_pos,
110506                          void (*error)(char *x))
110508 -       ZSTD_inBuffer in;
110509 -       ZSTD_outBuffer out;
110510 -       ZSTD_frameParams params;
110511 +       zstd_in_buffer in;
110512 +       zstd_out_buffer out;
110513 +       zstd_frame_header header;
110514         void *in_allocated = NULL;
110515         void *out_allocated = NULL;
110516         void *wksp = NULL;
110517         size_t wksp_size;
110518 -       ZSTD_DStream *dstream;
110519 +       zstd_dstream *dstream;
110520         int err;
110521         size_t ret;
110523 @@ -238,13 +238,13 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
110524         out.size = out_len;
110526         /*
110527 -        * We need to know the window size to allocate the ZSTD_DStream.
110528 +        * We need to know the window size to allocate the zstd_dstream.
110529          * Since we are streaming, we need to allocate a buffer for the sliding
110530          * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX
110531          * (8 MB), so it is important to use the actual value so as not to
110532          * waste memory when it is smaller.
110533          */
110534 -       ret = ZSTD_getFrameParams(&params, in.src, in.size);
110535 +       ret = zstd_get_frame_header(&header, in.src, in.size);
110536         err = handle_zstd_error(ret, error);
110537         if (err)
110538                 goto out;
110539 @@ -253,19 +253,19 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
110540                 err = -1;
110541                 goto out;
110542         }
110543 -       if (params.windowSize > ZSTD_WINDOWSIZE_MAX) {
110544 +       if (header.windowSize > ZSTD_WINDOWSIZE_MAX) {
110545                 error("ZSTD-compressed data has too large a window size");
110546                 err = -1;
110547                 goto out;
110548         }
110550         /*
110551 -        * Allocate the ZSTD_DStream now that we know how much memory is
110552 +        * Allocate the zstd_dstream now that we know how much memory is
110553          * required.
110554          */
110555 -       wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize);
110556 +       wksp_size = zstd_dstream_workspace_bound(header.windowSize);
110557         wksp = large_malloc(wksp_size);
110558 -       dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size);
110559 +       dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);
110560         if (dstream == NULL) {
110561                 error("Out of memory while allocating ZSTD_DStream");
110562                 err = -1;
110563 @@ -298,7 +298,7 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
110564                         in.size = in_len;
110565                 }
110566                 /* Returns zero when the frame is complete. */
110567 -               ret = ZSTD_decompressStream(dstream, &out, &in);
110568 +               ret = zstd_decompress_stream(dstream, &out, &in);
110569                 err = handle_zstd_error(ret, error);
110570                 if (err)
110571                         goto out;
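
All of the decompress_unzstd.c churn tracks the zstd library update that appears later in this patch: the upstream CamelCase API (ZSTD_initDCtx(), ZSTD_decompressDCtx(), ...) becomes kernel-style snake_case (zstd_init_dctx(), zstd_decompress_dctx(), ...) while keeping the caller-supplied-workspace model. A condensed sketch of the one-shot path as the file now writes it; large_malloc()/large_free() come from the preboot environment of <linux/decompress/mm.h>:

/* Sketch only; error reporting abbreviated relative to the file above. */
static int decode_one_frame(const u8 *in, size_t in_len,
			    u8 *out, size_t out_len)
{
	const size_t wksp_size = zstd_dctx_workspace_bound();
	void *wksp = large_malloc(wksp_size);
	zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
	size_t ret;
	int err = -1;

	if (!dctx)
		goto out;
	/* Decompress exactly one frame; the input may carry trailing junk. */
	ret = zstd_find_frame_compressed_size(in, in_len);
	if (zstd_is_error(ret))
		goto out;
	ret = zstd_decompress_dctx(dctx, out, out_len, in, ret);
	err = zstd_is_error(ret) ? -1 : 0;
out:
	if (wksp)
		large_free(wksp);
	return err;
}
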
110572 diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
110573 index c70d6347afa2..921d0a654243 100644
110574 --- a/lib/dynamic_debug.c
110575 +++ b/lib/dynamic_debug.c
110576 @@ -396,7 +396,7 @@ static int ddebug_parse_query(char *words[], int nwords,
110577                         /* tail :$info is function or line-range */
110578                         fline = strchr(query->filename, ':');
110579                         if (!fline)
110580 -                               break;
110581 +                               continue;
110582                         *fline++ = '\0';
110583                         if (isalpha(*fline) || *fline == '*' || *fline == '?') {
110584                                 /* take as function name */
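
The one-line change in ddebug_parse_query() matters because the parser walks keyword/value pairs in a loop: when a "file" value carries no ':' suffix there is simply no function or line-range to peel off, so the right move is to continue with the next pair rather than break out and silently ignore the rest of the query. A condensed, illustrative shape of that loop:

#include <linux/string.h>

/* Illustrative only; the real parser handles more keywords and errors. */
static int parse_pairs(char *words[], int nwords)
{
	int i;

	for (i = 0; i < nwords; i += 2) {
		if (!strcmp(words[i], "file")) {
			char *fline = strchr(words[i + 1], ':');

			if (!fline)
				continue; /* no :suffix; 'break' would drop later pairs */
			*fline++ = '\0';
			/* ... treat fline as function name or line range ... */
		}
		/* ... "func", "module", "format", "line" keywords ... */
	}
	return 0;
}
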
110585 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
110586 index 7998affa45d4..c87d5b6a8a55 100644
110587 --- a/lib/kobject_uevent.c
110588 +++ b/lib/kobject_uevent.c
110589 @@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
110591  static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
110593 +       int buffer_size = sizeof(env->buf) - env->buflen;
110594         int len;
110596 -       len = strlcpy(&env->buf[env->buflen], subsystem,
110597 -                     sizeof(env->buf) - env->buflen);
110598 -       if (len >= (sizeof(env->buf) - env->buflen)) {
110599 -               WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
110600 +       len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
110601 +       if (len >= buffer_size) {
110602 +               pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
110603 +                       buffer_size, len);
110604                 return -ENOMEM;
110605         }
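
init_uevent_argv() keeps the standard strlcpy() truncation idiom: strlcpy() returns the length of the source string, so a return value greater than or equal to the destination size means the copy was cut short. Hoisting the size into buffer_size avoids computing it twice and lets the new pr_warn() report how much space was actually needed. The idiom in isolation, as a sketch:

#include <linux/string.h>
#include <linux/printk.h>
#include <linux/errno.h>

static int copy_checked(char *dst, size_t dst_size, const char *src)
{
	size_t len = strlcpy(dst, src, dst_size);  /* returns strlen(src) */

	if (len >= dst_size) {
		pr_warn("buffer of %zu too small, needed %zu\n", dst_size, len);
		return -ENOMEM;
	}
	return 0;
}
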
110607 diff --git a/lib/nlattr.c b/lib/nlattr.c
110608 index 5b6116e81f9f..1d051ef66afe 100644
110609 --- a/lib/nlattr.c
110610 +++ b/lib/nlattr.c
110611 @@ -828,7 +828,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
110612         int attrlen = nla_len(nla);
110613         int d;
110615 -       if (attrlen > 0 && buf[attrlen - 1] == '\0')
110616 +       while (attrlen > 0 && buf[attrlen - 1] == '\0')
110617                 attrlen--;
110619         d = attrlen - len;
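
In nla_strcmp() the old if stripped at most one trailing NUL from the attribute before comparing lengths; turning it into a while tolerates attributes padded with several NUL bytes, which would otherwise make the compare fail even when the visible strings match. The helper logic on its own:

/* Illustrative: effective length of a possibly NUL-padded attribute. */
static int trimmed_len(const char *buf, int attrlen)
{
	while (attrlen > 0 && buf[attrlen - 1] == '\0')
		attrlen--;   /* drop every trailing NUL, not just the last one */
	return attrlen;
}
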
110620 diff --git a/lib/stackdepot.c b/lib/stackdepot.c
110621 index 49f67a0c6e5d..df9179f4f441 100644
110622 --- a/lib/stackdepot.c
110623 +++ b/lib/stackdepot.c
110624 @@ -71,7 +71,7 @@ static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
110625  static int depot_index;
110626  static int next_slab_inited;
110627  static size_t depot_offset;
110628 -static DEFINE_SPINLOCK(depot_lock);
110629 +static DEFINE_RAW_SPINLOCK(depot_lock);
110631  static bool init_stack_slab(void **prealloc)
110633 @@ -305,7 +305,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
110634                         prealloc = page_address(page);
110635         }
110637 -       spin_lock_irqsave(&depot_lock, flags);
110638 +       raw_spin_lock_irqsave(&depot_lock, flags);
110640         found = find_stack(*bucket, entries, nr_entries, hash);
110641         if (!found) {
110642 @@ -329,7 +329,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
110643                 WARN_ON(!init_stack_slab(&prealloc));
110644         }
110646 -       spin_unlock_irqrestore(&depot_lock, flags);
110647 +       raw_spin_unlock_irqrestore(&depot_lock, flags);
110648  exit:
110649         if (prealloc) {
110650                 /* Nobody used this memory, ok to free it. */
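
Converting depot_lock to a raw spinlock matters on PREEMPT_RT, where a plain spinlock_t becomes a sleeping lock: stack_depot_save() can be reached from contexts that must not sleep (with interrupts disabled, or nested under other raw locks), so the never-sleeping raw_spinlock_t is the appropriate type. The usage pattern, sketched:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(depot_lock);

static void depot_update(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&depot_lock, flags);
	/* short, non-sleeping critical section only */
	raw_spin_unlock_irqrestore(&depot_lock, flags);
}
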
110651 diff --git a/lib/test_kasan.c b/lib/test_kasan.c
110652 index e5647d147b35..be69c3aa615a 100644
110653 --- a/lib/test_kasan.c
110654 +++ b/lib/test_kasan.c
110655 @@ -646,8 +646,20 @@ static char global_array[10];
110657  static void kasan_global_oob(struct kunit *test)
110659 -       volatile int i = 3;
110660 -       char *p = &global_array[ARRAY_SIZE(global_array) + i];
110661 +       /*
110662 +        * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
110663 +        * from failing here and panicking the kernel, access the array via a
110664 +        * volatile pointer, which will prevent the compiler from being able to
110665 +        * determine the array bounds.
110666 +        *
110667 +        * This access uses a volatile pointer to char (char *volatile) rather
110668 +        * than the more conventional pointer to volatile char (volatile char *)
110669 +        * because we want to prevent the compiler from making inferences about
110670 +        * the pointer itself (i.e. its array bounds), not the data that it
110671 +        * refers to.
110672 +        */
110673 +       char *volatile array = global_array;
110674 +       char *p = &array[ARRAY_SIZE(global_array) + 3];
110676         /* Only generic mode instruments globals. */
110677         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
110678 @@ -695,8 +707,9 @@ static void ksize_uaf(struct kunit *test)
110679  static void kasan_stack_oob(struct kunit *test)
110681         char stack_array[10];
110682 -       volatile int i = OOB_TAG_OFF;
110683 -       char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
110684 +       /* See comment in kasan_global_oob. */
110685 +       char *volatile array = stack_array;
110686 +       char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
110688         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
110690 @@ -707,7 +720,9 @@ static void kasan_alloca_oob_left(struct kunit *test)
110692         volatile int i = 10;
110693         char alloca_array[i];
110694 -       char *p = alloca_array - 1;
110695 +       /* See comment in kasan_global_oob. */
110696 +       char *volatile array = alloca_array;
110697 +       char *p = array - 1;
110699         /* Only generic mode instruments dynamic allocas. */
110700         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
110701 @@ -720,7 +735,9 @@ static void kasan_alloca_oob_right(struct kunit *test)
110703         volatile int i = 10;
110704         char alloca_array[i];
110705 -       char *p = alloca_array + i;
110706 +       /* See comment in kasan_global_oob. */
110707 +       char *volatile array = alloca_array;
110708 +       char *p = array + i;
110710         /* Only generic mode instruments dynamic allocas. */
110711         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
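
Every test_kasan.c hunk applies the trick spelled out in the new comment: char *volatile makes the pointer itself volatile, so the compiler cannot prove the index is out of bounds, and CONFIG_UBSAN_LOCAL_BOUNDS never gets the chance to trap the access before KASAN observes it. The two placements of volatile side by side:

/* Illustrative: where 'volatile' binds decides what the compiler may assume. */
static char *oob_pointer(void)
{
	static char buf[10];
	volatile char *p1 = buf;   /* the pointed-to bytes are volatile */
	char *volatile p2 = buf;   /* the pointer itself is volatile */

	(void)p1;                  /* p1 still lets the compiler see buf's bounds */
	return &p2[10 + 3];        /* p2's value is opaque: no bounds inference */
}
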
110712 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
110713 index 41ddc353ebb8..39ef2e314da5 100644
110714 --- a/lib/vsprintf.c
110715 +++ b/lib/vsprintf.c
110716 @@ -3135,8 +3135,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
110717                         switch (*fmt) {
110718                         case 'S':
110719                         case 's':
110720 -                       case 'F':
110721 -                       case 'f':
110722                         case 'x':
110723                         case 'K':
110724                         case 'e':
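
Dropping 'F' and 'f' from bstr_printf()'s pointer-extension switch follows the earlier removal of the deprecated %pF/%pf specifiers; function addresses are printed with %pS/%ps these days, which the remaining cases still cover. For reference:

#include <linux/printk.h>

static void show_handler(void (*handler)(void))
{
	pr_info("handler: %pS\n", handler);  /* symbol+offset (replaces %pF) */
	pr_info("handler: %ps\n", handler);  /* bare symbol   (replaces %pf) */
}
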
110725 diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
110726 index f5d778e7e5c7..19485e3cc7c9 100644
110727 --- a/lib/zstd/Makefile
110728 +++ b/lib/zstd/Makefile
110729 @@ -1,10 +1,46 @@
110730  # SPDX-License-Identifier: GPL-2.0-only
110731 +# ################################################################
110732 +# Copyright (c) Facebook, Inc.
110733 +# All rights reserved.
110735 +# This source code is licensed under both the BSD-style license (found in the
110736 +# LICENSE file in the root directory of this source tree) and the GPLv2 (found
110737 +# in the COPYING file in the root directory of this source tree).
110738 +# You may select, at your option, one of the above-listed licenses.
110739 +# ################################################################
110740  obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
110741  obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
110743  ccflags-y += -O3
110745 -zstd_compress-y := fse_compress.o huf_compress.o compress.o \
110746 -                  entropy_common.o fse_decompress.o zstd_common.o
110747 -zstd_decompress-y := huf_decompress.o decompress.o \
110748 -                    entropy_common.o fse_decompress.o zstd_common.o
110749 +zstd_compress-y := \
110750 +               zstd_compress_module.o \
110751 +               common/debug.o \
110752 +               common/entropy_common.o \
110753 +               common/error_private.o \
110754 +               common/fse_decompress.o \
110755 +               common/zstd_common.o \
110756 +               compress/fse_compress.o \
110757 +               compress/hist.o \
110758 +               compress/huf_compress.o \
110759 +               compress/zstd_compress.o \
110760 +               compress/zstd_compress_literals.o \
110761 +               compress/zstd_compress_sequences.o \
110762 +               compress/zstd_compress_superblock.o \
110763 +               compress/zstd_double_fast.o \
110764 +               compress/zstd_fast.o \
110765 +               compress/zstd_lazy.o \
110766 +               compress/zstd_ldm.o \
110767 +               compress/zstd_opt.o \
110769 +zstd_decompress-y := \
110770 +               zstd_decompress_module.o \
110771 +               common/debug.o \
110772 +               common/entropy_common.o \
110773 +               common/error_private.o \
110774 +               common/fse_decompress.o \
110775 +               common/zstd_common.o \
110776 +               decompress/huf_decompress.o \
110777 +               decompress/zstd_ddict.o \
110778 +               decompress/zstd_decompress.o \
110779 +               decompress/zstd_decompress_block.o \
110780 diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h
110781 deleted file mode 100644
110782 index 5d6343c1a909..000000000000
110783 --- a/lib/zstd/bitstream.h
110784 +++ /dev/null
110785 @@ -1,380 +0,0 @@
110787 - * bitstream
110788 - * Part of FSE library
110789 - * header file (to include)
110790 - * Copyright (C) 2013-2016, Yann Collet.
110792 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
110794 - * Redistribution and use in source and binary forms, with or without
110795 - * modification, are permitted provided that the following conditions are
110796 - * met:
110798 - *   * Redistributions of source code must retain the above copyright
110799 - * notice, this list of conditions and the following disclaimer.
110800 - *   * Redistributions in binary form must reproduce the above
110801 - * copyright notice, this list of conditions and the following disclaimer
110802 - * in the documentation and/or other materials provided with the
110803 - * distribution.
110805 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
110806 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
110807 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
110808 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
110809 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
110810 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
110811 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
110812 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
110813 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
110814 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
110815 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
110817 - * This program is free software; you can redistribute it and/or modify it under
110818 - * the terms of the GNU General Public License version 2 as published by the
110819 - * Free Software Foundation. This program is dual-licensed; you may select
110820 - * either version 2 of the GNU General Public License ("GPL") or BSD license
110821 - * ("BSD").
110823 - * You can contact the author at :
110824 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
110825 - */
110826 -#ifndef BITSTREAM_H_MODULE
110827 -#define BITSTREAM_H_MODULE
110830 -*  This API consists of small unitary functions, which must be inlined for best performance.
110831 -*  Since link-time-optimization is not available for all compilers,
110832 -*  these functions are defined into a .h to be included.
110835 -/*-****************************************
110836 -*  Dependencies
110837 -******************************************/
110838 -#include "error_private.h" /* error codes and messages */
110839 -#include "mem.h"          /* unaligned access routines */
110841 -/*=========================================
110842 -*  Target specific
110843 -=========================================*/
110844 -#define STREAM_ACCUMULATOR_MIN_32 25
110845 -#define STREAM_ACCUMULATOR_MIN_64 57
110846 -#define STREAM_ACCUMULATOR_MIN ((U32)(ZSTD_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
110848 -/*-******************************************
110849 -*  bitStream encoding API (write forward)
110850 -********************************************/
110851 -/* bitStream can mix input from multiple sources.
110852 -*  A critical property of these streams is that they encode and decode in **reverse** direction.
110853 -*  So the first bit sequence you add will be the last to be read, like a LIFO stack.
110855 -typedef struct {
110856 -       size_t bitContainer;
110857 -       int bitPos;
110858 -       char *startPtr;
110859 -       char *ptr;
110860 -       char *endPtr;
110861 -} BIT_CStream_t;
110863 -ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *dstBuffer, size_t dstCapacity);
110864 -ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
110865 -ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC);
110866 -ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC);
110868 -/* Start with initCStream, providing the size of buffer to write into.
110869 -*  bitStream will never write outside of this buffer.
110870 -*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
110872 -*  bits are first added to a local register.
110873 -*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
110874 -*  Writing data into memory is an explicit operation, performed by the flushBits function.
110875 -*  Hence keep track how many bits are potentially stored into local register to avoid register overflow.
110876 -*  After a flushBits, a maximum of 7 bits might still be stored into local register.
110878 -*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
110880 -*  Last operation is to close the bitStream.
110881 -*  The function returns the final size of CStream in bytes.
110882 -*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
110885 -/*-********************************************
110886 -*  bitStream decoding API (read backward)
110887 -**********************************************/
110888 -typedef struct {
110889 -       size_t bitContainer;
110890 -       unsigned bitsConsumed;
110891 -       const char *ptr;
110892 -       const char *start;
110893 -} BIT_DStream_t;
110895 -typedef enum {
110896 -       BIT_DStream_unfinished = 0,
110897 -       BIT_DStream_endOfBuffer = 1,
110898 -       BIT_DStream_completed = 2,
110899 -       BIT_DStream_overflow = 3
110900 -} BIT_DStream_status; /* result of BIT_reloadDStream() */
110901 -/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
110903 -ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize);
110904 -ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, unsigned nbBits);
110905 -ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD);
110906 -ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *bitD);
110908 -/* Start by invoking BIT_initDStream().
110909 -*  A chunk of the bitStream is then stored into a local register.
110910 -*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
110911 -*  You can then retrieve bitFields stored into the local register, **in reverse order**.
110912 -*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
110913 -*  A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
110914 -*  Otherwise, it can be less than that, so proceed accordingly.
110915 -*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
110918 -/*-****************************************
110919 -*  unsafe API
110920 -******************************************/
110921 -ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
110922 -/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
110924 -ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC);
110925 -/* unsafe version; does not check buffer overflow */
110927 -ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, unsigned nbBits);
110928 -/* faster, but works only if nbBits >= 1 */
110930 -/*-**************************************************************
110931 -*  Internal functions
110932 -****************************************************************/
110933 -ZSTD_STATIC unsigned BIT_highbit32(register U32 val) { return 31 - __builtin_clz(val); }
110935 -/*=====    Local Constants   =====*/
110936 -static const unsigned BIT_mask[] = {0,       1,       3,       7,      0xF,      0x1F,     0x3F,     0x7F,      0xFF,
110937 -                                   0x1FF,   0x3FF,   0x7FF,   0xFFF,    0x1FFF,   0x3FFF,   0x7FFF,   0xFFFF,    0x1FFFF,
110938 -                                   0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF}; /* up to 26 bits */
110940 -/*-**************************************************************
110941 -*  bitStream encoding
110942 -****************************************************************/
110943 -/*! BIT_initCStream() :
110944 - *  `dstCapacity` must be > sizeof(void*)
110945 - *  @return : 0 if success,
110946 -                         otherwise an error code (can be tested using ERR_isError() ) */
110947 -ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *startPtr, size_t dstCapacity)
110949 -       bitC->bitContainer = 0;
110950 -       bitC->bitPos = 0;
110951 -       bitC->startPtr = (char *)startPtr;
110952 -       bitC->ptr = bitC->startPtr;
110953 -       bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr);
110954 -       if (dstCapacity <= sizeof(bitC->ptr))
110955 -               return ERROR(dstSize_tooSmall);
110956 -       return 0;
110959 -/*! BIT_addBits() :
110960 -       can add up to 26 bits into `bitC`.
110961 -       Does not check for register overflow ! */
110962 -ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
110964 -       bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
110965 -       bitC->bitPos += nbBits;
110968 -/*! BIT_addBitsFast() :
110969 - *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
110970 -ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
110972 -       bitC->bitContainer |= value << bitC->bitPos;
110973 -       bitC->bitPos += nbBits;
110976 -/*! BIT_flushBitsFast() :
110977 - *  unsafe version; does not check buffer overflow */
110978 -ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC)
110980 -       size_t const nbBytes = bitC->bitPos >> 3;
110981 -       ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
110982 -       bitC->ptr += nbBytes;
110983 -       bitC->bitPos &= 7;
110984 -       bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
110987 -/*! BIT_flushBits() :
110988 - *  safe version; check for buffer overflow, and prevents it.
110989 - *  note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */
110990 -ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC)
110992 -       size_t const nbBytes = bitC->bitPos >> 3;
110993 -       ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
110994 -       bitC->ptr += nbBytes;
110995 -       if (bitC->ptr > bitC->endPtr)
110996 -               bitC->ptr = bitC->endPtr;
110997 -       bitC->bitPos &= 7;
110998 -       bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
111001 -/*! BIT_closeCStream() :
111002 - *  @return : size of CStream, in bytes,
111003 -                         or 0 if it could not fit into dstBuffer */
111004 -ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC)
111006 -       BIT_addBitsFast(bitC, 1, 1); /* endMark */
111007 -       BIT_flushBits(bitC);
111009 -       if (bitC->ptr >= bitC->endPtr)
111010 -               return 0; /* doesn't fit within authorized budget : cancel */
111012 -       return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
111015 -/*-********************************************************
111016 -* bitStream decoding
111017 -**********************************************************/
111018 -/*! BIT_initDStream() :
111019 -*   Initialize a BIT_DStream_t.
111020 -*   `bitD` : a pointer to an already allocated BIT_DStream_t structure.
111021 -*   `srcSize` must be the *exact* size of the bitStream, in bytes.
111022 -*   @return : size of stream (== srcSize) or an errorCode if a problem is detected
111024 -ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize)
111026 -       if (srcSize < 1) {
111027 -               memset(bitD, 0, sizeof(*bitD));
111028 -               return ERROR(srcSize_wrong);
111029 -       }
111031 -       if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
111032 -               bitD->start = (const char *)srcBuffer;
111033 -               bitD->ptr = (const char *)srcBuffer + srcSize - sizeof(bitD->bitContainer);
111034 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
111035 -               {
111036 -                       BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
111037 -                       bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
111038 -                       if (lastByte == 0)
111039 -                               return ERROR(GENERIC); /* endMark not present */
111040 -               }
111041 -       } else {
111042 -               bitD->start = (const char *)srcBuffer;
111043 -               bitD->ptr = bitD->start;
111044 -               bitD->bitContainer = *(const BYTE *)(bitD->start);
111045 -               switch (srcSize) {
111046 -               case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16);
111047 -                       fallthrough;
111048 -               case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24);
111049 -                       fallthrough;
111050 -               case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32);
111051 -                       fallthrough;
111052 -               case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24;
111053 -                       fallthrough;
111054 -               case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16;
111055 -                       fallthrough;
111056 -               case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8;
111057 -                       fallthrough;
111058 -               default:;
111059 -               }
111060 -               {
111061 -                       BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
111062 -                       bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
111063 -                       if (lastByte == 0)
111064 -                               return ERROR(GENERIC); /* endMark not present */
111065 -               }
111066 -               bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize) * 8;
111067 -       }
111069 -       return srcSize;
111072 -ZSTD_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; }
111074 -ZSTD_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { return (bitContainer >> start) & BIT_mask[nbBits]; }
111076 -ZSTD_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { return bitContainer & BIT_mask[nbBits]; }
111078 -/*! BIT_lookBits() :
111079 - *  Provides next n bits from local register.
111080 - *  local register is not modified.
111081 - *  On 32-bits, maxNbBits==24.
111082 - *  On 64-bits, maxNbBits==56.
111083 - *  @return : value extracted
111084 - */
111085 -ZSTD_STATIC size_t BIT_lookBits(const BIT_DStream_t *bitD, U32 nbBits)
111087 -       U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
111088 -       return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask - nbBits) & bitMask);
111091 -/*! BIT_lookBitsFast() :
111092 -*   unsafe version; only works only if nbBits >= 1 */
111093 -ZSTD_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t *bitD, U32 nbBits)
111095 -       U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
111096 -       return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask + 1) - nbBits) & bitMask);
111099 -ZSTD_STATIC void BIT_skipBits(BIT_DStream_t *bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; }
111101 -/*! BIT_readBits() :
111102 - *  Read (consume) next n bits from local register and update.
111103 - *  Pay attention to not read more than nbBits contained into local register.
111104 - *  @return : extracted value.
111105 - */
111106 -ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, U32 nbBits)
111108 -       size_t const value = BIT_lookBits(bitD, nbBits);
111109 -       BIT_skipBits(bitD, nbBits);
111110 -       return value;
111113 -/*! BIT_readBitsFast() :
111114 -*   unsafe version; only works only if nbBits >= 1 */
111115 -ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, U32 nbBits)
111117 -       size_t const value = BIT_lookBitsFast(bitD, nbBits);
111118 -       BIT_skipBits(bitD, nbBits);
111119 -       return value;
111122 -/*! BIT_reloadDStream() :
111123 -*   Refill `bitD` from buffer previously set in BIT_initDStream() .
111124 -*   This function is safe, it guarantees it will not read beyond src buffer.
111125 -*   @return : status of `BIT_DStream_t` internal register.
111126 -                         if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
111127 -ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD)
111129 -       if (bitD->bitsConsumed > (sizeof(bitD->bitContainer) * 8)) /* should not happen => corruption detected */
111130 -               return BIT_DStream_overflow;
111132 -       if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
111133 -               bitD->ptr -= bitD->bitsConsumed >> 3;
111134 -               bitD->bitsConsumed &= 7;
111135 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
111136 -               return BIT_DStream_unfinished;
111137 -       }
111138 -       if (bitD->ptr == bitD->start) {
111139 -               if (bitD->bitsConsumed < sizeof(bitD->bitContainer) * 8)
111140 -                       return BIT_DStream_endOfBuffer;
111141 -               return BIT_DStream_completed;
111142 -       }
111143 -       {
111144 -               U32 nbBytes = bitD->bitsConsumed >> 3;
111145 -               BIT_DStream_status result = BIT_DStream_unfinished;
111146 -               if (bitD->ptr - nbBytes < bitD->start) {
111147 -                       nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
111148 -                       result = BIT_DStream_endOfBuffer;
111149 -               }
111150 -               bitD->ptr -= nbBytes;
111151 -               bitD->bitsConsumed -= nbBytes * 8;
111152 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
111153 -               return result;
111154 -       }
111157 -/*! BIT_endOfDStream() :
111158 -*   @return Tells if DStream has exactly reached its end (all bits consumed).
111160 -ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *DStream)
111162 -       return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer) * 8));
111165 -#endif /* BITSTREAM_H_MODULE */
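
The header deleted above (and re-added under common/ just below, refreshed from upstream zstd) implements FSE's backward bitstream: values are appended forward while encoding but consumed in reverse, LIFO-style, while decoding, which is what lets FSE decoders start from the last symbol's state. A freestanding toy of that property, independent of the zstd types:

#include <stdint.h>
#include <assert.h>

/* Toy backward bitstream: write values forward, read them back in reverse. */
int main(void)
{
	uint64_t container = 0;
	unsigned pos = 0;

	/* encode 5 (3 bits) then 9 (4 bits), like BIT_addBits() */
	container |= (uint64_t)(5 & 0x7) << pos; pos += 3;
	container |= (uint64_t)(9 & 0xF) << pos; pos += 4;

	/* decode in reverse: the last value written is the first one read */
	pos -= 4; assert(((container >> pos) & 0xF) == 9);
	pos -= 3; assert(((container >> pos) & 0x7) == 5);
	return 0;
}
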
111166 diff --git a/lib/zstd/common/bitstream.h b/lib/zstd/common/bitstream.h
111167 new file mode 100644
111168 index 000000000000..2d6c95b4f40c
111169 --- /dev/null
111170 +++ b/lib/zstd/common/bitstream.h
111171 @@ -0,0 +1,437 @@
111172 +/* ******************************************************************
111173 + * bitstream
111174 + * Part of FSE library
111175 + * Copyright (c) Yann Collet, Facebook, Inc.
111177 + * You can contact the author at :
111178 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
111180 + * This source code is licensed under both the BSD-style license (found in the
111181 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
111182 + * in the COPYING file in the root directory of this source tree).
111183 + * You may select, at your option, one of the above-listed licenses.
111184 +****************************************************************** */
111185 +#ifndef BITSTREAM_H_MODULE
111186 +#define BITSTREAM_H_MODULE
111189 +*  This API consists of small unitary functions, which must be inlined for best performance.
111190 +*  Since link-time-optimization is not available for all compilers,
111191 +*  these functions are defined into a .h to be included.
111194 +/*-****************************************
111195 +*  Dependencies
111196 +******************************************/
111197 +#include "mem.h"            /* unaligned access routines */
111198 +#include "compiler.h"       /* UNLIKELY() */
111199 +#include "debug.h"          /* assert(), DEBUGLOG(), RAWLOG() */
111200 +#include "error_private.h"  /* error codes and messages */
111203 +/*=========================================
111204 +*  Target specific
111205 +=========================================*/
111207 +#define STREAM_ACCUMULATOR_MIN_32  25
111208 +#define STREAM_ACCUMULATOR_MIN_64  57
111209 +#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
111212 +/*-******************************************
111213 +*  bitStream encoding API (write forward)
111214 +********************************************/
111215 +/* bitStream can mix input from multiple sources.
111216 + * A critical property of these streams is that they encode and decode in **reverse** direction.
111217 + * So the first bit sequence you add will be the last to be read, like a LIFO stack.
111218 + */
111219 +typedef struct {
111220 +    size_t bitContainer;
111221 +    unsigned bitPos;
111222 +    char*  startPtr;
111223 +    char*  ptr;
111224 +    char*  endPtr;
111225 +} BIT_CStream_t;
111227 +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
111228 +MEM_STATIC void   BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
111229 +MEM_STATIC void   BIT_flushBits(BIT_CStream_t* bitC);
111230 +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
111232 +/* Start with initCStream, providing the size of buffer to write into.
111233 +*  bitStream will never write outside of this buffer.
111234 +*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
111236 +*  bits are first added to a local register.
111237 +*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
111238 +*  Writing data into memory is an explicit operation, performed by the flushBits function.
111239 +*  Hence keep track of how many bits are potentially stored into local register to avoid register overflow.
111240 +*  After a flushBits, a maximum of 7 bits might still be stored into local register.
111242 +*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
111244 +*  Last operation is to close the bitStream.
111245 +*  The function returns the final size of CStream in bytes.
111246 +*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
111250 +/*-********************************************
111251 +*  bitStream decoding API (read backward)
111252 +**********************************************/
111253 +typedef struct {
111254 +    size_t   bitContainer;
111255 +    unsigned bitsConsumed;
111256 +    const char* ptr;
111257 +    const char* start;
111258 +    const char* limitPtr;
111259 +} BIT_DStream_t;
111261 +typedef enum { BIT_DStream_unfinished = 0,
111262 +               BIT_DStream_endOfBuffer = 1,
111263 +               BIT_DStream_completed = 2,
111264 +               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
111265 +               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
111267 +MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
111268 +MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
111269 +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
111270 +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
111273 +/* Start by invoking BIT_initDStream().
111274 +*  A chunk of the bitStream is then stored into a local register.
111275 +*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
111276 +*  You can then retrieve bitFields stored into the local register, **in reverse order**.
111277 +*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
111278 +*  A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
111279 +*  Otherwise, it can be less than that, so proceed accordingly.
111280 +*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
111284 +/*-****************************************
111285 +*  unsafe API
111286 +******************************************/
111287 +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
111288 +/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
111290 +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
111291 +/* unsafe version; does not check buffer overflow */
111293 +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
111294 +/* faster, but works only if nbBits >= 1 */
111298 +/*-**************************************************************
111299 +*  Internal functions
111300 +****************************************************************/
111301 +MEM_STATIC unsigned BIT_highbit32 (U32 val)
111303 +    assert(val != 0);
111304 +    {
111305 +#   if (__GNUC__ >= 3)   /* Use GCC Intrinsic */
111306 +        return __builtin_clz (val) ^ 31;
111307 +#   else   /* Software version */
111308 +        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
111309 +                                                 11, 14, 16, 18, 22, 25,  3, 30,
111310 +                                                  8, 12, 20, 28, 15, 17, 24,  7,
111311 +                                                 19, 27, 23,  6, 26,  5,  4, 31 };
111312 +        U32 v = val;
111313 +        v |= v >> 1;
111314 +        v |= v >> 2;
111315 +        v |= v >> 4;
111316 +        v |= v >> 8;
111317 +        v |= v >> 16;
111318 +        return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
111319 +#   endif
111320 +    }
111323 +/*=====    Local Constants   =====*/
111324 +static const unsigned BIT_mask[] = {
111325 +    0,          1,         3,         7,         0xF,       0x1F,
111326 +    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF,
111327 +    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF,
111328 +    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF,
111329 +    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
111330 +    0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
111331 +#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
111333 +/*-**************************************************************
111334 +*  bitStream encoding
111335 +****************************************************************/
111336 +/*! BIT_initCStream() :
111337 + *  `dstCapacity` must be > sizeof(size_t)
111338 + *  @return : 0 if success,
111339 + *            otherwise an error code (can be tested using ERR_isError()) */
111340 +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
111341 +                                  void* startPtr, size_t dstCapacity)
111343 +    bitC->bitContainer = 0;
111344 +    bitC->bitPos = 0;
111345 +    bitC->startPtr = (char*)startPtr;
111346 +    bitC->ptr = bitC->startPtr;
111347 +    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
111348 +    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
111349 +    return 0;
111352 +/*! BIT_addBits() :
111353 + *  can add up to 31 bits into `bitC`.
111354 + *  Note : does not check for register overflow ! */
111355 +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
111356 +                            size_t value, unsigned nbBits)
111358 +    DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
111359 +    assert(nbBits < BIT_MASK_SIZE);
111360 +    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
111361 +    bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
111362 +    bitC->bitPos += nbBits;
111365 +/*! BIT_addBitsFast() :
111366 + *  works only if `value` is _clean_,
111367 + *  meaning all high bits above nbBits are 0 */
111368 +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
111369 +                                size_t value, unsigned nbBits)
111371 +    assert((value>>nbBits) == 0);
111372 +    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
111373 +    bitC->bitContainer |= value << bitC->bitPos;
111374 +    bitC->bitPos += nbBits;
111377 +/*! BIT_flushBitsFast() :
111378 + *  assumption : bitContainer has not overflowed
111379 + *  unsafe version; does not check buffer overflow */
111380 +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
111382 +    size_t const nbBytes = bitC->bitPos >> 3;
111383 +    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
111384 +    assert(bitC->ptr <= bitC->endPtr);
111385 +    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
111386 +    bitC->ptr += nbBytes;
111387 +    bitC->bitPos &= 7;
111388 +    bitC->bitContainer >>= nbBytes*8;
111391 +/*! BIT_flushBits() :
111392 + *  assumption : bitContainer has not overflowed
111393 + *  safe version; check for buffer overflow, and prevents it.
111394 + *  note : does not signal buffer overflow.
111395 + *  overflow will be revealed later on using BIT_closeCStream() */
111396 +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
111398 +    size_t const nbBytes = bitC->bitPos >> 3;
111399 +    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
111400 +    assert(bitC->ptr <= bitC->endPtr);
111401 +    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
111402 +    bitC->ptr += nbBytes;
111403 +    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
111404 +    bitC->bitPos &= 7;
111405 +    bitC->bitContainer >>= nbBytes*8;
111408 +/*! BIT_closeCStream() :
111409 + *  @return : size of CStream, in bytes,
111410 + *            or 0 if it could not fit into dstBuffer */
111411 +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
111413 +    BIT_addBitsFast(bitC, 1, 1);   /* endMark */
111414 +    BIT_flushBits(bitC);
111415 +    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
111416 +    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
111420 +/*-********************************************************
111421 +*  bitStream decoding
111422 +**********************************************************/
111423 +/*! BIT_initDStream() :
111424 + *  Initialize a BIT_DStream_t.
111425 + * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
111426 + * `srcSize` must be the *exact* size of the bitStream, in bytes.
111427 + * @return : size of stream (== srcSize), or an errorCode if a problem is detected
111428 + */
111429 +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
111431 +    if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
111433 +    bitD->start = (const char*)srcBuffer;
111434 +    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
111436 +    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
111437 +        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
111438 +        bitD->bitContainer = MEM_readLEST(bitD->ptr);
111439 +        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
111440 +          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
111441 +          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
111442 +    } else {
111443 +        bitD->ptr   = bitD->start;
111444 +        bitD->bitContainer = *(const BYTE*)(bitD->start);
111445 +        switch(srcSize)
111446 +        {
111447 +        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
111448 +                /* fall-through */
111450 +        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
111451 +                /* fall-through */
111453 +        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
111454 +                /* fall-through */
111456 +        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
111457 +                /* fall-through */
111459 +        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
111460 +                /* fall-through */
111462 +        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
111463 +                /* fall-through */
111465 +        default: break;
111466 +        }
111467 +        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
111468 +            bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
111469 +            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */
111470 +        }
111471 +        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
111472 +    }
111474 +    return srcSize;
111477 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
111479 +    return bitContainer >> start;
111482 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
111484 +    U32 const regMask = sizeof(bitContainer)*8 - 1;
111485 +    /* if start > regMask, bitstream is corrupted, and result is undefined */
111486 +    assert(nbBits < BIT_MASK_SIZE);
111487 +    return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
111490 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
111492 +    assert(nbBits < BIT_MASK_SIZE);
111493 +    return bitContainer & BIT_mask[nbBits];
111496 +/*! BIT_lookBits() :
111497 + *  Provides next n bits from local register.
111498 + *  local register is not modified.
111499 + *  On 32-bits, maxNbBits==24.
111500 + *  On 64-bits, maxNbBits==56.
111501 + * @return : value extracted */
111502 +MEM_STATIC  FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t*  bitD, U32 nbBits)
111504 +    /* arbitrate between double-shift and shift+mask */
111505 +#if 1
111506 +    /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
111507 +     * bitstream is likely corrupted, and result is undefined */
111508 +    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
111509 +#else
111510 +    /* this code path is slower on my os-x laptop */
111511 +    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
111512 +    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
111513 +#endif
111516 +/*! BIT_lookBitsFast() :
111517 + *  unsafe version; only works if nbBits >= 1 */
111518 +MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
111520 +    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
111521 +    assert(nbBits >= 1);
111522 +    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
111525 +MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
111527 +    bitD->bitsConsumed += nbBits;
111530 +/*! BIT_readBits() :
111531 + *  Read (consume) next n bits from local register and update.
111532 + *  Pay attention to not read more than nbBits contained into local register.
111533 + * @return : extracted value. */
111534 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
111536 +    size_t const value = BIT_lookBits(bitD, nbBits);
111537 +    BIT_skipBits(bitD, nbBits);
111538 +    return value;
111541 +/*! BIT_readBitsFast() :
111542 + *  unsafe version; only works if nbBits >= 1 */
111543 +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
111545 +    size_t const value = BIT_lookBitsFast(bitD, nbBits);
111546 +    assert(nbBits >= 1);
111547 +    BIT_skipBits(bitD, nbBits);
111548 +    return value;
111551 +/*! BIT_reloadDStreamFast() :
111552 + *  Similar to BIT_reloadDStream(), but with two differences:
111553 + *  1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
111554 + *  2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
111555 + *     point you must use BIT_reloadDStream() to reload.
111556 + */
111557 +MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
111559 +    if (UNLIKELY(bitD->ptr < bitD->limitPtr))
111560 +        return BIT_DStream_overflow;
111561 +    assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
111562 +    bitD->ptr -= bitD->bitsConsumed >> 3;
111563 +    bitD->bitsConsumed &= 7;
111564 +    bitD->bitContainer = MEM_readLEST(bitD->ptr);
111565 +    return BIT_DStream_unfinished;
111568 +/*! BIT_reloadDStream() :
111569 + *  Refill `bitD` from buffer previously set in BIT_initDStream() .
111570 + *  This function is safe, it guarantees it will not read beyond src buffer.
111571 + * @return : status of `BIT_DStream_t` internal register.
111572 + *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
111573 +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
111575 +    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
111576 +        return BIT_DStream_overflow;
111578 +    if (bitD->ptr >= bitD->limitPtr) {
111579 +        return BIT_reloadDStreamFast(bitD);
111580 +    }
111581 +    if (bitD->ptr == bitD->start) {
111582 +        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
111583 +        return BIT_DStream_completed;
111584 +    }
111585 +    /* start < ptr < limitPtr */
111586 +    {   U32 nbBytes = bitD->bitsConsumed >> 3;
111587 +        BIT_DStream_status result = BIT_DStream_unfinished;
111588 +        if (bitD->ptr - nbBytes < bitD->start) {
111589 +            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
111590 +            result = BIT_DStream_endOfBuffer;
111591 +        }
111592 +        bitD->ptr -= nbBytes;
111593 +        bitD->bitsConsumed -= nbBytes*8;
111594 +        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
111595 +        return result;
111596 +    }
111599 +/*! BIT_endOfDStream() :
111600 + * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
111601 + */
111602 +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
111604 +    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
111608 +#endif /* BITSTREAM_H_MODULE */
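
One detail worth noting in the re-imported header: BIT_highbit32() now asserts val != 0 and keeps a portable fallback for compilers without __builtin_clz(). The fallback smears the top bit downward so the value becomes 2^k - 1, then uses a De Bruijn multiply as a perfect hash into a 32-entry table of bit indices. A standalone check of that fallback:

#include <stdint.h>
#include <assert.h>

/* De Bruijn fallback from the header: index of the highest set bit. */
static unsigned highbit32(uint32_t v)
{
	static const unsigned tbl[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
					 11, 14, 16, 18, 22, 25,  3, 30,
					  8, 12, 20, 28, 15, 17, 24,  7,
					 19, 27, 23,  6, 26,  5,  4, 31 };

	v |= v >> 1; v |= v >> 2; v |= v >> 4;   /* smear the MSB downward */
	v |= v >> 8; v |= v >> 16;               /* now v == 2^(k+1) - 1 */
	return tbl[(uint32_t)(v * 0x07C4ACDDU) >> 27];
}

int main(void)
{
	assert(highbit32(1) == 0);
	assert(highbit32(0x80000000u) == 31);
	assert(highbit32(1000) == 9);   /* 2^9 = 512 <= 1000 < 1024 */
	return 0;
}
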
111609 diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h
111610 new file mode 100644
111611 index 000000000000..9269b58a93e2
111612 --- /dev/null
111613 +++ b/lib/zstd/common/compiler.h
111614 @@ -0,0 +1,151 @@
111616 + * Copyright (c) Yann Collet, Facebook, Inc.
111617 + * All rights reserved.
111619 + * This source code is licensed under both the BSD-style license (found in the
111620 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
111621 + * in the COPYING file in the root directory of this source tree).
111622 + * You may select, at your option, one of the above-listed licenses.
111623 + */
111625 +#ifndef ZSTD_COMPILER_H
111626 +#define ZSTD_COMPILER_H
111628 +/*-*******************************************************
111629 +*  Compiler specifics
111630 +*********************************************************/
111631 +/* force inlining */
111633 +#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
111634 +#  define INLINE_KEYWORD inline
111635 +#else
111636 +#  define INLINE_KEYWORD
111637 +#endif
111639 +#define FORCE_INLINE_ATTR __attribute__((always_inline))
111643 +  On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
111644 +  On MSVC qsort requires that functions passed into it use the __cdecl calling convention (CC).
111645 +  This explicitly marks such functions as __cdecl so that the code will still compile
111647 +#define WIN_CDECL
111650 + * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
111651 + * parameters. They must be inlined for the compiler to eliminate the constant
111652 + * branches.
111653 + */
111654 +#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
111656 + * HINT_INLINE is used to help the compiler generate better code. It is *not*
111657 + * used for "templates", so it can be tweaked based on the compilers
111658 + * performance.
111660 + * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
111661 + * always_inline attribute.
111663 + * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
111664 + * attribute.
111665 + */
111666 +#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
111667 +#  define HINT_INLINE static INLINE_KEYWORD
111668 +#else
111669 +#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
111670 +#endif
111672 +/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
111673 +#define UNUSED_ATTR __attribute__((unused))
111675 +/* force no inlining */
111676 +#define FORCE_NOINLINE static __attribute__((__noinline__))
111679 +/* target attribute */
111680 +#ifndef __has_attribute
111681 +  #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
111682 +#endif
111683 +#define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
111685 +/* Enable runtime BMI2 dispatch based on the CPU.
111686 + * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
111687 + */
111688 +#ifndef DYNAMIC_BMI2
111689 +  #if ((defined(__clang__) && __has_attribute(__target__)) \
111690 +      || (defined(__GNUC__) \
111691 +          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
111692 +      && (defined(__x86_64__) || defined(_M_X86)) \
111693 +      && !defined(__BMI2__)
111694 +  #  define DYNAMIC_BMI2 1
111695 +  #else
111696 +  #  define DYNAMIC_BMI2 0
111697 +  #endif
111698 +#endif
111700 +/* prefetch
111701 + * can be disabled by declaring the NO_PREFETCH build macro */
111702 +#if ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
111703 +#  define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
111704 +#  define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
111705 +#elif defined(__aarch64__)
111706 +#  define PREFETCH_L1(ptr)  __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
111707 +#  define PREFETCH_L2(ptr)  __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
111708 +#else
111709 +#  define PREFETCH_L1(ptr) (void)(ptr)  /* disabled */
111710 +#  define PREFETCH_L2(ptr) (void)(ptr)  /* disabled */
111711 +#endif  /* NO_PREFETCH */
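A short usage sketch (hypothetical helper, relying on the PREFETCH_L1 macro defined above; on targets without support it expands to (void)(ptr)):

#include <stddef.h>

static unsigned gather(const unsigned* table, const unsigned short* idx, size_t n)
{
    unsigned sum = 0;
    size_t i;
    for (i = 0; i < n; i++) {
        if (i + 1 < n)
            PREFETCH_L1(table + idx[i + 1]);   /* warm the next dependent load */
        sum += table[idx[i]];
    }
    return sum;
}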
111713 +#define CACHELINE_SIZE 64
111715 +#define PREFETCH_AREA(p, s)  {            \
111716 +    const char* const _ptr = (const char*)(p);  \
111717 +    size_t const _size = (size_t)(s);     \
111718 +    size_t _pos;                          \
111719 +    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {  \
111720 +        PREFETCH_L2(_ptr + _pos);         \
111721 +    }                                     \
111724 +/* vectorization
111725 + * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
111726 +#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__)
111727 +#  if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
111728 +#    define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
111729 +#  else
111730 +#    define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
111731 +#  endif
111732 +#else
111733 +#  define DONT_VECTORIZE
111734 +#endif
111736 +/* Tell the compiler that a branch is likely or unlikely.
111737 + * Only use these macros if it causes the compiler to generate better code.
111738 + * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
111739 + * and clang, please do.
111740 + */
111741 +#define LIKELY(x) (__builtin_expect((x), 1))
111742 +#define UNLIKELY(x) (__builtin_expect((x), 0))
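For instance (hypothetical function), marking a truncation check as the cold path lets gcc/clang keep the common case on the fall-through:

static int read_byte(const unsigned char* p, const unsigned char* end, unsigned* out)
{
    if (UNLIKELY(p >= end)) return -1;   /* cold: truncated input */
    *out = *p;                           /* hot path, laid out as fall-through */
    return 0;
}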
111744 +/* disable warnings */
111746 +/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
111749 +/* compat. with non-clang compilers */
111750 +#ifndef __has_builtin
111751 +#  define __has_builtin(x) 0
111752 +#endif
111754 +/* compat. with non-clang compilers */
111755 +#ifndef __has_feature
111756 +#  define __has_feature(x) 0
111757 +#endif
111759 +/* detects whether we are being compiled under msan */
111762 +/* detects whether we are being compiled under asan */
111765 +#endif /* ZSTD_COMPILER_H */
111766 diff --git a/lib/zstd/common/cpu.h b/lib/zstd/common/cpu.h
111767 new file mode 100644
111768 index 000000000000..0202d94076a3
111769 --- /dev/null
111770 +++ b/lib/zstd/common/cpu.h
111771 @@ -0,0 +1,194 @@
111773 + * Copyright (c) Facebook, Inc.
111774 + * All rights reserved.
111776 + * This source code is licensed under both the BSD-style license (found in the
111777 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
111778 + * in the COPYING file in the root directory of this source tree).
111779 + * You may select, at your option, one of the above-listed licenses.
111780 + */
111782 +#ifndef ZSTD_COMMON_CPU_H
111783 +#define ZSTD_COMMON_CPU_H
111786 + * Implementation taken from folly/CpuId.h
111787 + * https://github.com/facebook/folly/blob/master/folly/CpuId.h
111788 + */
111790 +#include "mem.h"
111793 +typedef struct {
111794 +    U32 f1c;
111795 +    U32 f1d;
111796 +    U32 f7b;
111797 +    U32 f7c;
111798 +} ZSTD_cpuid_t;
111800 +MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
111801 +    U32 f1c = 0;
111802 +    U32 f1d = 0;
111803 +    U32 f7b = 0;
111804 +    U32 f7c = 0;
111805 +#if defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
111806 +    /* The following block is like the normal cpuid branch below, but gcc
111807 +     * reserves ebx for use as its PIC register, so we must specially
111808 +     * handle the save and restore to avoid clobbering the register
111809 +     */
111810 +    U32 n;
111811 +    __asm__(
111812 +        "pushl %%ebx\n\t"
111813 +        "cpuid\n\t"
111814 +        "popl %%ebx\n\t"
111815 +        : "=a"(n)
111816 +        : "a"(0)
111817 +        : "ecx", "edx");
111818 +    if (n >= 1) {
111819 +      U32 f1a;
111820 +      __asm__(
111821 +          "pushl %%ebx\n\t"
111822 +          "cpuid\n\t"
111823 +          "popl %%ebx\n\t"
111824 +          : "=a"(f1a), "=c"(f1c), "=d"(f1d)
111825 +          : "a"(1));
111826 +    }
111827 +    if (n >= 7) {
111828 +      __asm__(
111829 +          "pushl %%ebx\n\t"
111830 +          "cpuid\n\t"
111831 +          "movl %%ebx, %%eax\n\t"
111832 +          "popl %%ebx"
111833 +          : "=a"(f7b), "=c"(f7c)
111834 +          : "a"(7), "c"(0)
111835 +          : "edx");
111836 +    }
111837 +#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
111838 +    U32 n;
111839 +    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
111840 +    if (n >= 1) {
111841 +      U32 f1a;
111842 +      __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
111843 +    }
111844 +    if (n >= 7) {
111845 +      U32 f7a;
111846 +      __asm__("cpuid"
111847 +              : "=a"(f7a), "=b"(f7b), "=c"(f7c)
111848 +              : "a"(7), "c"(0)
111849 +              : "edx");
111850 +    }
111851 +#endif
111852 +    {
111853 +        ZSTD_cpuid_t cpuid;
111854 +        cpuid.f1c = f1c;
111855 +        cpuid.f1d = f1d;
111856 +        cpuid.f7b = f7b;
111857 +        cpuid.f7c = f7c;
111858 +        return cpuid;
111859 +    }
111862 +#define X(name, r, bit)                                                        \
111863 +  MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {                 \
111864 +    return ((cpuid.r) & (1U << bit)) != 0;                                     \
111865 +  }
111867 +/* cpuid(1): Processor Info and Feature Bits. */
111868 +#define C(name, bit) X(name, f1c, bit)
111869 +  C(sse3, 0)
111870 +  C(pclmuldq, 1)
111871 +  C(dtes64, 2)
111872 +  C(monitor, 3)
111873 +  C(dscpl, 4)
111874 +  C(vmx, 5)
111875 +  C(smx, 6)
111876 +  C(eist, 7)
111877 +  C(tm2, 8)
111878 +  C(ssse3, 9)
111879 +  C(cnxtid, 10)
111880 +  C(fma, 12)
111881 +  C(cx16, 13)
111882 +  C(xtpr, 14)
111883 +  C(pdcm, 15)
111884 +  C(pcid, 17)
111885 +  C(dca, 18)
111886 +  C(sse41, 19)
111887 +  C(sse42, 20)
111888 +  C(x2apic, 21)
111889 +  C(movbe, 22)
111890 +  C(popcnt, 23)
111891 +  C(tscdeadline, 24)
111892 +  C(aes, 25)
111893 +  C(xsave, 26)
111894 +  C(osxsave, 27)
111895 +  C(avx, 28)
111896 +  C(f16c, 29)
111897 +  C(rdrand, 30)
111898 +#undef C
111899 +#define D(name, bit) X(name, f1d, bit)
111900 +  D(fpu, 0)
111901 +  D(vme, 1)
111902 +  D(de, 2)
111903 +  D(pse, 3)
111904 +  D(tsc, 4)
111905 +  D(msr, 5)
111906 +  D(pae, 6)
111907 +  D(mce, 7)
111908 +  D(cx8, 8)
111909 +  D(apic, 9)
111910 +  D(sep, 11)
111911 +  D(mtrr, 12)
111912 +  D(pge, 13)
111913 +  D(mca, 14)
111914 +  D(cmov, 15)
111915 +  D(pat, 16)
111916 +  D(pse36, 17)
111917 +  D(psn, 18)
111918 +  D(clfsh, 19)
111919 +  D(ds, 21)
111920 +  D(acpi, 22)
111921 +  D(mmx, 23)
111922 +  D(fxsr, 24)
111923 +  D(sse, 25)
111924 +  D(sse2, 26)
111925 +  D(ss, 27)
111926 +  D(htt, 28)
111927 +  D(tm, 29)
111928 +  D(pbe, 31)
111929 +#undef D
111931 +/* cpuid(7): Extended Features. */
111932 +#define B(name, bit) X(name, f7b, bit)
111933 +  B(bmi1, 3)
111934 +  B(hle, 4)
111935 +  B(avx2, 5)
111936 +  B(smep, 7)
111937 +  B(bmi2, 8)
111938 +  B(erms, 9)
111939 +  B(invpcid, 10)
111940 +  B(rtm, 11)
111941 +  B(mpx, 14)
111942 +  B(avx512f, 16)
111943 +  B(avx512dq, 17)
111944 +  B(rdseed, 18)
111945 +  B(adx, 19)
111946 +  B(smap, 20)
111947 +  B(avx512ifma, 21)
111948 +  B(pcommit, 22)
111949 +  B(clflushopt, 23)
111950 +  B(clwb, 24)
111951 +  B(avx512pf, 26)
111952 +  B(avx512er, 27)
111953 +  B(avx512cd, 28)
111954 +  B(sha, 29)
111955 +  B(avx512bw, 30)
111956 +  B(avx512vl, 31)
111957 +#undef B
111958 +#define C(name, bit) X(name, f7c, bit)
111959 +  C(prefetchwt1, 0)
111960 +  C(avx512vbmi, 1)
111961 +#undef C
111963 +#undef X
111965 +#endif /* ZSTD_COMMON_CPU_H */
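A usage sketch (editorial illustration; the relative include path is an assumption): call ZSTD_cpuid() once, then consult the generated predicates.

#include "cpu.h"

static int zstd_has_bmi2(void)
{
    ZSTD_cpuid_t const cid = ZSTD_cpuid();   /* runs CPUID leaves 0, 1 and 7 */
    return ZSTD_cpuid_bmi2(cid);             /* tests cpuid(7).EBX bit 8 */
}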
111966 diff --git a/lib/zstd/common/debug.c b/lib/zstd/common/debug.c
111967 new file mode 100644
111968 index 000000000000..bb863c9ea616
111969 --- /dev/null
111970 +++ b/lib/zstd/common/debug.c
111971 @@ -0,0 +1,24 @@
111972 +/* ******************************************************************
111973 + * debug
111974 + * Part of FSE library
111975 + * Copyright (c) Yann Collet, Facebook, Inc.
111977 + * You can contact the author at :
111978 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
111980 + * This source code is licensed under both the BSD-style license (found in the
111981 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
111982 + * in the COPYING file in the root directory of this source tree).
111983 + * You may select, at your option, one of the above-listed licenses.
111984 +****************************************************************** */
111988 + * This module only hosts one global variable
111989 + * which can be used to dynamically influence the verbosity of traces,
111990 + * such as DEBUGLOG and RAWLOG
111991 + */
111993 +#include "debug.h"
111995 +int g_debuglevel = DEBUGLEVEL;
111996 diff --git a/lib/zstd/common/debug.h b/lib/zstd/common/debug.h
111997 new file mode 100644
111998 index 000000000000..6dd88d1fbd02
111999 --- /dev/null
112000 +++ b/lib/zstd/common/debug.h
112001 @@ -0,0 +1,101 @@
112002 +/* ******************************************************************
112003 + * debug
112004 + * Part of FSE library
112005 + * Copyright (c) Yann Collet, Facebook, Inc.
112007 + * You can contact the author at :
112008 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
112010 + * This source code is licensed under both the BSD-style license (found in the
112011 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
112012 + * in the COPYING file in the root directory of this source tree).
112013 + * You may select, at your option, one of the above-listed licenses.
112014 +****************************************************************** */
112018 + * The purpose of this header is to enable debug functions.
112019 + * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
112020 + * and DEBUG_STATIC_ASSERT() for compile-time.
112022 + * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
112024 + * Level 1 enables assert() only.
112025 + * Starting level 2, traces can be generated and pushed to stderr.
112026 + * The higher the level, the more verbose the traces.
112028 + * It's possible to dynamically adjust the level using the variable g_debuglevel,
112029 + * which is only declared if DEBUGLEVEL>=2,
112030 + * and is a global variable, not multi-thread protected (use with care)
112031 + */
112033 +#ifndef DEBUG_H_12987983217
112034 +#define DEBUG_H_12987983217
112038 +/* static assert is triggered at compile time, leaving no runtime artefact.
112039 + * static assert only works with compile-time constants.
112040 + * Also, this variant can only be used inside a function. */
112041 +#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
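For example, inside any function body (illustration only):

static void check_invariants(void)
{
    DEBUG_STATIC_ASSERT(sizeof(int) >= 2);         /* ok: expands to sizeof(char[1]) */
    /* DEBUG_STATIC_ASSERT(sizeof(char) == 2); */  /* would fail: char[-1] is a compile error */
}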
112044 +/* DEBUGLEVEL is expected to be defined externally,
112045 + * typically through compiler command line.
112046 + * Value must be a number. */
112047 +#ifndef DEBUGLEVEL
112048 +#  define DEBUGLEVEL 0
112049 +#endif
112052 +/* recommended values for DEBUGLEVEL :
112053 + * 0 : release mode, no debug, all run-time checks disabled
112054 + * 1 : enables assert() only, no display
112055 + * 2 : reserved, for currently active debug path
112056 + * 3 : events once per object lifetime (CCtx, CDict, etc.)
112057 + * 4 : events once per frame
112058 + * 5 : events once per block
112059 + * 6 : events once per sequence (verbose)
112060 + * 7+: events at every position (*very* verbose)
112062 + * It's generally inconvenient to output traces > 5.
112063 + * In which case, it's possible to selectively trigger high verbosity levels
112064 + * by modifying g_debug_level.
112065 + */
112067 +#if (DEBUGLEVEL>=1)
112068 +#  define ZSTD_DEPS_NEED_ASSERT
112069 +#  include "zstd_deps.h"
112070 +#else
112071 +#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
112072 +#    define assert(condition) ((void)0)   /* disable assert (default) */
112073 +#  endif
112074 +#endif
112076 +#if (DEBUGLEVEL>=2)
112077 +#  define ZSTD_DEPS_NEED_IO
112078 +#  include "zstd_deps.h"
112079 +extern int g_debuglevel; /* the variable is only declared,
112080 +                            it actually lives in debug.c,
112081 +                            and is shared by the whole process.
112082 +                            It's not thread-safe.
112083 +                            It's useful when enabling very verbose levels
112084 +                            on selective conditions (such as position in src) */
112086 +#  define RAWLOG(l, ...) {                                       \
112087 +                if (l<=g_debuglevel) {                           \
112088 +                    ZSTD_DEBUG_PRINT(__VA_ARGS__);               \
112089 +            }   }
112090 +#  define DEBUGLOG(l, ...) {                                     \
112091 +                if (l<=g_debuglevel) {                           \
112092 +                    ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
112093 +                    ZSTD_DEBUG_PRINT(" \n");                     \
112094 +            }   }
112095 +#else
112096 +#  define RAWLOG(l, ...)      {}    /* disabled */
112097 +#  define DEBUGLOG(l, ...)    {}    /* disabled */
112098 +#endif
112102 +#endif /* DEBUG_H_12987983217 */
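A call-site sketch following the level conventions above (hypothetical function): built with DEBUGLEVEL >= 2, the trace fires only once g_debuglevel is raised to 5 or higher at run time.

static void trace_block(unsigned blockNb, size_t blockSize)
{
    DEBUGLOG(5, "block %u: %u bytes", blockNb, (unsigned)blockSize);  /* once per block */
    RAWLOG(6, ".");                      /* bare progress marker at higher verbosity */
}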
112103 diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c
112104 new file mode 100644
112105 index 000000000000..53b47a2b52ff
112106 --- /dev/null
112107 +++ b/lib/zstd/common/entropy_common.c
112108 @@ -0,0 +1,357 @@
112109 +/* ******************************************************************
112110 + * Common functions of New Generation Entropy library
112111 + * Copyright (c) Yann Collet, Facebook, Inc.
112113 + *  You can contact the author at :
112114 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
112115 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
112117 + * This source code is licensed under both the BSD-style license (found in the
112118 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
112119 + * in the COPYING file in the root directory of this source tree).
112120 + * You may select, at your option, one of the above-listed licenses.
112121 +****************************************************************** */
112123 +/* *************************************
112124 +*  Dependencies
112125 +***************************************/
112126 +#include "mem.h"
112127 +#include "error_private.h"       /* ERR_*, ERROR */
112128 +#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
112129 +#include "fse.h"
112130 +#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
112131 +#include "huf.h"
112134 +/*===   Version   ===*/
112135 +unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
112138 +/*===   Error Management   ===*/
112139 +unsigned FSE_isError(size_t code) { return ERR_isError(code); }
112140 +const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
112142 +unsigned HUF_isError(size_t code) { return ERR_isError(code); }
112143 +const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
112146 +/*-**************************************************************
112147 +*  FSE NCount encoding-decoding
112148 +****************************************************************/
112149 +static U32 FSE_ctz(U32 val)
112151 +    assert(val != 0);
112152 +    {
112153 +#   if (__GNUC__ >= 3)   /* GCC Intrinsic */
112154 +        return __builtin_ctz(val);
112155 +#   else   /* Software version */
112156 +        U32 count = 0;
112157 +        while ((val & 1) == 0) {
112158 +            val >>= 1;
112159 +            ++count;
112160 +        }
112161 +        return count;
112162 +#   endif
112163 +    }
112166 +FORCE_INLINE_TEMPLATE
112167 +size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
112168 +                           const void* headerBuffer, size_t hbSize)
112170 +    const BYTE* const istart = (const BYTE*) headerBuffer;
112171 +    const BYTE* const iend = istart + hbSize;
112172 +    const BYTE* ip = istart;
112173 +    int nbBits;
112174 +    int remaining;
112175 +    int threshold;
112176 +    U32 bitStream;
112177 +    int bitCount;
112178 +    unsigned charnum = 0;
112179 +    unsigned const maxSV1 = *maxSVPtr + 1;
112180 +    int previous0 = 0;
112182 +    if (hbSize < 8) {
112183 +        /* This function only works when hbSize >= 8 */
112184 +        char buffer[8] = {0};
112185 +        ZSTD_memcpy(buffer, headerBuffer, hbSize);
112186 +        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
112187 +                                                    buffer, sizeof(buffer));
112188 +            if (FSE_isError(countSize)) return countSize;
112189 +            if (countSize > hbSize) return ERROR(corruption_detected);
112190 +            return countSize;
112191 +    }   }
112192 +    assert(hbSize >= 8);
112194 +    /* init */
112195 +    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
112196 +    bitStream = MEM_readLE32(ip);
112197 +    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
112198 +    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
112199 +    bitStream >>= 4;
112200 +    bitCount = 4;
112201 +    *tableLogPtr = nbBits;
112202 +    remaining = (1<<nbBits)+1;
112203 +    threshold = 1<<nbBits;
112204 +    nbBits++;
112206 +    for (;;) {
112207 +        if (previous0) {
112208 +            /* Count the number of repeats. Each time the
112209 +             * 2-bit repeat code is 0b11 there is another
112210 +             * repeat.
112211 +             * Avoid UB by setting the high bit to 1.
112212 +             */
112213 +            int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
112214 +            while (repeats >= 12) {
112215 +                charnum += 3 * 12;
112216 +                if (LIKELY(ip <= iend-7)) {
112217 +                    ip += 3;
112218 +                } else {
112219 +                    bitCount -= (int)(8 * (iend - 7 - ip));
112220 +                    bitCount &= 31;
112221 +                    ip = iend - 4;
112222 +                }
112223 +                bitStream = MEM_readLE32(ip) >> bitCount;
112224 +                repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
112225 +            }
112226 +            charnum += 3 * repeats;
112227 +            bitStream >>= 2 * repeats;
112228 +            bitCount += 2 * repeats;
112230 +            /* Add the final repeat which isn't 0b11. */
112231 +            assert((bitStream & 3) < 3);
112232 +            charnum += bitStream & 3;
112233 +            bitCount += 2;
112235 +            /* This is an error, but break and return an error
112236 +             * at the end, because returning out of a loop makes
112237 +             * it harder for the compiler to optimize.
112238 +             */
112239 +            if (charnum >= maxSV1) break;
112241 +            /* We don't need to set the normalized count to 0
112242 +             * because we already memset the whole buffer to 0.
112243 +             */
112245 +            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
112246 +                assert((bitCount >> 3) <= 3); /* For first condition to work */
112247 +                ip += bitCount>>3;
112248 +                bitCount &= 7;
112249 +            } else {
112250 +                bitCount -= (int)(8 * (iend - 4 - ip));
112251 +                bitCount &= 31;
112252 +                ip = iend - 4;
112253 +            }
112254 +            bitStream = MEM_readLE32(ip) >> bitCount;
112255 +        }
112256 +        {
112257 +            int const max = (2*threshold-1) - remaining;
112258 +            int count;
112260 +            if ((bitStream & (threshold-1)) < (U32)max) {
112261 +                count = bitStream & (threshold-1);
112262 +                bitCount += nbBits-1;
112263 +            } else {
112264 +                count = bitStream & (2*threshold-1);
112265 +                if (count >= threshold) count -= max;
112266 +                bitCount += nbBits;
112267 +            }
112269 +            count--;   /* extra accuracy */
112270 +            /* When it matters (small blocks), this is a
112271 +             * predictable branch, because we don't use -1.
112272 +             */
112273 +            if (count >= 0) {
112274 +                remaining -= count;
112275 +            } else {
112276 +                assert(count == -1);
112277 +                remaining += count;
112278 +            }
112279 +            normalizedCounter[charnum++] = (short)count;
112280 +            previous0 = !count;
112282 +            assert(threshold > 1);
112283 +            if (remaining < threshold) {
112284 +                /* This branch can be folded into the
112285 +                 * threshold update condition because we
112286 +                 * know that threshold > 1.
112287 +                 */
112288 +                if (remaining <= 1) break;
112289 +                nbBits = BIT_highbit32(remaining) + 1;
112290 +                threshold = 1 << (nbBits - 1);
112291 +            }
112292 +            if (charnum >= maxSV1) break;
112294 +            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
112295 +                ip += bitCount>>3;
112296 +                bitCount &= 7;
112297 +            } else {
112298 +                bitCount -= (int)(8 * (iend - 4 - ip));
112299 +                bitCount &= 31;
112300 +                ip = iend - 4;
112301 +            }
112302 +            bitStream = MEM_readLE32(ip) >> bitCount;
112303 +    }   }
112304 +    if (remaining != 1) return ERROR(corruption_detected);
112305 +    /* Only possible when there are too many zeros. */
112306 +    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
112307 +    if (bitCount > 32) return ERROR(corruption_detected);
112308 +    *maxSVPtr = charnum-1;
112310 +    ip += (bitCount+7)>>3;
112311 +    return ip-istart;
112314 +/* Avoids the FORCE_INLINE of the _body() function. */
112315 +static size_t FSE_readNCount_body_default(
112316 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
112317 +        const void* headerBuffer, size_t hbSize)
112319 +    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
112322 +#if DYNAMIC_BMI2
112323 +TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2(
112324 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
112325 +        const void* headerBuffer, size_t hbSize)
112327 +    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
112329 +#endif
112331 +size_t FSE_readNCount_bmi2(
112332 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
112333 +        const void* headerBuffer, size_t hbSize, int bmi2)
112335 +#if DYNAMIC_BMI2
112336 +    if (bmi2) {
112337 +        return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
112338 +    }
112339 +#endif
112340 +    (void)bmi2;
112341 +    return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
112344 +size_t FSE_readNCount(
112345 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
112346 +        const void* headerBuffer, size_t hbSize)
112348 +    return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
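Putting the pieces together (editorial sketch; ZSTD_cpuid()/ZSTD_cpuid_bmi2() come from cpu.h above): a caller computes the bmi2 flag once and threads it through, so the TARGET_ATTRIBUTE("bmi2") body only runs on CPUs that actually support it.

static size_t read_counts(short* norm, unsigned* maxSV, unsigned* tableLog,
                          const void* hdr, size_t hdrSize)
{
    int const bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());   /* detected here per call;
                                                         real callers cache it */
    return FSE_readNCount_bmi2(norm, maxSV, tableLog, hdr, hdrSize, bmi2);
}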
112352 +/*! HUF_readStats() :
112353 +    Read compact Huffman tree, saved by HUF_writeCTable().
112354 +    `huffWeight` is destination buffer.
112355 +    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX+1 U32.
112356 +    @return : size read from `src`, or an error code.
112357 +    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
112359 +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
112360 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
112361 +                     const void* src, size_t srcSize)
112363 +    U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
112364 +    return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
112367 +FORCE_INLINE_TEMPLATE size_t
112368 +HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
112369 +                   U32* nbSymbolsPtr, U32* tableLogPtr,
112370 +                   const void* src, size_t srcSize,
112371 +                   void* workSpace, size_t wkspSize,
112372 +                   int bmi2)
112374 +    U32 weightTotal;
112375 +    const BYTE* ip = (const BYTE*) src;
112376 +    size_t iSize;
112377 +    size_t oSize;
112379 +    if (!srcSize) return ERROR(srcSize_wrong);
112380 +    iSize = ip[0];
112381 +    /* ZSTD_memset(huffWeight, 0, hwSize);   *//* is not necessary, even though some analyzers complain ... */
112383 +    if (iSize >= 128) {  /* special header */
112384 +        oSize = iSize - 127;
112385 +        iSize = ((oSize+1)/2);
112386 +        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
112387 +        if (oSize >= hwSize) return ERROR(corruption_detected);
112388 +        ip += 1;
112389 +        {   U32 n;
112390 +            for (n=0; n<oSize; n+=2) {
112391 +                huffWeight[n]   = ip[n/2] >> 4;
112392 +                huffWeight[n+1] = ip[n/2] & 15;
112393 +    }   }   }
112394 +    else  {   /* header compressed with FSE (normal case) */
112395 +        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
112396 +        /* max (hwSize-1) values decoded, as last one is implied */
112397 +        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
112398 +        if (FSE_isError(oSize)) return oSize;
112399 +    }
112401 +    /* collect weight stats */
112402 +    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
112403 +    weightTotal = 0;
112404 +    {   U32 n; for (n=0; n<oSize; n++) {
112405 +            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
112406 +            rankStats[huffWeight[n]]++;
112407 +            weightTotal += (1 << huffWeight[n]) >> 1;
112408 +    }   }
112409 +    if (weightTotal == 0) return ERROR(corruption_detected);
112411 +    /* get last non-null symbol weight (implied, total must be 2^n) */
112412 +    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
112413 +        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
112414 +        *tableLogPtr = tableLog;
112415 +        /* determine last weight */
112416 +        {   U32 const total = 1 << tableLog;
112417 +            U32 const rest = total - weightTotal;
112418 +            U32 const verif = 1 << BIT_highbit32(rest);
112419 +            U32 const lastWeight = BIT_highbit32(rest) + 1;
112420 +            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
112421 +            huffWeight[oSize] = (BYTE)lastWeight;
112422 +            rankStats[lastWeight]++;
112423 +    }   }
112425 +    /* check tree construction validity */
112426 +    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
112428 +    /* results */
112429 +    *nbSymbolsPtr = (U32)(oSize+1);
112430 +    return iSize+1;
112433 +/* Avoids the FORCE_INLINE of the _body() function. */
112434 +static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
112435 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
112436 +                     const void* src, size_t srcSize,
112437 +                     void* workSpace, size_t wkspSize)
112439 +    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
112442 +#if DYNAMIC_BMI2
112443 +static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
112444 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
112445 +                     const void* src, size_t srcSize,
112446 +                     void* workSpace, size_t wkspSize)
112448 +    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
112450 +#endif
112452 +size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
112453 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
112454 +                     const void* src, size_t srcSize,
112455 +                     void* workSpace, size_t wkspSize,
112456 +                     int bmi2)
112458 +#if DYNAMIC_BMI2
112459 +    if (bmi2) {
112460 +        return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
112461 +    }
112462 +#endif
112463 +    (void)bmi2;
112464 +    return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
112466 diff --git a/lib/zstd/common/error_private.c b/lib/zstd/common/error_private.c
112467 new file mode 100644
112468 index 000000000000..6d1135f8c373
112469 --- /dev/null
112470 +++ b/lib/zstd/common/error_private.c
112471 @@ -0,0 +1,56 @@
112473 + * Copyright (c) Yann Collet, Facebook, Inc.
112474 + * All rights reserved.
112476 + * This source code is licensed under both the BSD-style license (found in the
112477 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
112478 + * in the COPYING file in the root directory of this source tree).
112479 + * You may select, at your option, one of the above-listed licenses.
112480 + */
112482 +/* The purpose of this file is to have a single list of error strings embedded in binary */
112484 +#include "error_private.h"
112486 +const char* ERR_getErrorString(ERR_enum code)
112488 +#ifdef ZSTD_STRIP_ERROR_STRINGS
112489 +    (void)code;
112490 +    return "Error strings stripped";
112491 +#else
112492 +    static const char* const notErrorCode = "Unspecified error code";
112493 +    switch( code )
112494 +    {
112495 +    case PREFIX(no_error): return "No error detected";
112496 +    case PREFIX(GENERIC):  return "Error (generic)";
112497 +    case PREFIX(prefix_unknown): return "Unknown frame descriptor";
112498 +    case PREFIX(version_unsupported): return "Version not supported";
112499 +    case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
112500 +    case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
112501 +    case PREFIX(corruption_detected): return "Corrupted block detected";
112502 +    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
112503 +    case PREFIX(parameter_unsupported): return "Unsupported parameter";
112504 +    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
112505 +    case PREFIX(init_missing): return "Context should be init first";
112506 +    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
112507 +    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
112508 +    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
112509 +    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
112510 +    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
112511 +    case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
112512 +    case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
112513 +    case PREFIX(dictionary_wrong): return "Dictionary mismatch";
112514 +    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
112515 +    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
112516 +    case PREFIX(srcSize_wrong): return "Src size is incorrect";
112517 +    case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
112518 +        /* following error codes are not stable and may be removed or changed in a future version */
112519 +    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
112520 +    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
112521 +    case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
112522 +    case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
112523 +    case PREFIX(maxCode):
112524 +    default: return notErrorCode;
112525 +    }
112526 +#endif
112528 diff --git a/lib/zstd/common/error_private.h b/lib/zstd/common/error_private.h
112529 new file mode 100644
112530 index 000000000000..d14e686adf95
112531 --- /dev/null
112532 +++ b/lib/zstd/common/error_private.h
112533 @@ -0,0 +1,66 @@
112535 + * Copyright (c) Yann Collet, Facebook, Inc.
112536 + * All rights reserved.
112538 + * This source code is licensed under both the BSD-style license (found in the
112539 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
112540 + * in the COPYING file in the root directory of this source tree).
112541 + * You may select, at your option, one of the above-listed licenses.
112542 + */
112544 +/* Note : this module is expected to remain private, do not expose it */
112546 +#ifndef ERROR_H_MODULE
112547 +#define ERROR_H_MODULE
112551 +/* ****************************************
112552 +*  Dependencies
112553 +******************************************/
112554 +#include "zstd_deps.h"    /* size_t */
112555 +#include <linux/zstd_errors.h>  /* enum list */
112558 +/* ****************************************
112559 +*  Compiler-specific
112560 +******************************************/
112561 +#define ERR_STATIC static __attribute__((unused))
112564 +/*-****************************************
112565 +*  Customization (error_public.h)
112566 +******************************************/
112567 +typedef ZSTD_ErrorCode ERR_enum;
112568 +#define PREFIX(name) ZSTD_error_##name
112571 +/*-****************************************
112572 +*  Error codes handling
112573 +******************************************/
112574 +#undef ERROR   /* already defined on Visual Studio */
112575 +#define ERROR(name) ZSTD_ERROR(name)
112576 +#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
112578 +ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
112580 +ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
112582 +/* check and forward error code */
112583 +#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
112584 +#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
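A sketch of the convention (parse_header() is a hypothetical sub-step): error codes travel as large size_t values, ERR_isError() recognizes them, and CHECK_F forwards them without nested if-ladders.

static size_t parse_header(const void* src, size_t srcSize);   /* hypothetical */

static size_t parse(const void* src, size_t srcSize)
{
    if (srcSize < 4) return ERROR(srcSize_wrong);   /* == (size_t)-ZSTD_error_srcSize_wrong */
    CHECK_F(parse_header(src, srcSize));            /* returns the error code early on failure */
    return 0;
}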
112587 +/*-****************************************
112588 +*  Error Strings
112589 +******************************************/
112591 +const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */
112593 +ERR_STATIC const char* ERR_getErrorName(size_t code)
112595 +    return ERR_getErrorString(ERR_getErrorCode(code));
112599 +#endif /* ERROR_H_MODULE */
112600 diff --git a/lib/zstd/common/fse.h b/lib/zstd/common/fse.h
112601 new file mode 100644
112602 index 000000000000..477e642ffb41
112603 --- /dev/null
112604 +++ b/lib/zstd/common/fse.h
112605 @@ -0,0 +1,708 @@
112606 +/* ******************************************************************
112607 + * FSE : Finite State Entropy codec
112608 + * Public Prototypes declaration
112609 + * Copyright (c) Yann Collet, Facebook, Inc.
112611 + * You can contact the author at :
112612 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
112614 + * This source code is licensed under both the BSD-style license (found in the
112615 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
112616 + * in the COPYING file in the root directory of this source tree).
112617 + * You may select, at your option, one of the above-listed licenses.
112618 +****************************************************************** */
112621 +#ifndef FSE_H
112622 +#define FSE_H
112625 +/*-*****************************************
112626 +*  Dependencies
112627 +******************************************/
112628 +#include "zstd_deps.h"    /* size_t, ptrdiff_t */
112631 +/*-*****************************************
112632 +*  FSE_PUBLIC_API : control library symbols visibility
112633 +******************************************/
112634 +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
112635 +#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
112636 +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
112637 +#  define FSE_PUBLIC_API __declspec(dllexport)
112638 +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
112639 +#  define FSE_PUBLIC_API __declspec(dllimport) /* Not required, but allows generating better code, saving a function pointer load from the IAT and an indirect jump. */
112640 +#else
112641 +#  define FSE_PUBLIC_API
112642 +#endif
112644 +/*------   Version   ------*/
112645 +#define FSE_VERSION_MAJOR    0
112646 +#define FSE_VERSION_MINOR    9
112647 +#define FSE_VERSION_RELEASE  0
112649 +#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
112650 +#define FSE_QUOTE(str) #str
112651 +#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
112652 +#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
112654 +#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
112655 +FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */
112658 +/*-****************************************
112659 +*  FSE simple functions
112660 +******************************************/
112661 +/*! FSE_compress() :
112662 +    Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
112663 +    'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
112664 +    @return : size of compressed data (<= dstCapacity).
112665 +    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
112666 +                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
112667 +                     if FSE_isError(return), compression failed (more details using FSE_getErrorName())
112669 +FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
112670 +                             const void* src, size_t srcSize);
112672 +/*! FSE_decompress():
112673 +    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
112674 +    into already allocated destination buffer 'dst', of size 'dstCapacity'.
112675 +    @return : size of regenerated data (<= dstCapacity),
112676 +              or an error code, which can be tested using FSE_isError() .
112678 +    ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
112679 +    Why ? : making this distinction requires a header.
112680 +    Header management is intentionally delegated to the user layer, which can better manage special cases.
112682 +FSE_PUBLIC_API size_t FSE_decompress(void* dst,  size_t dstCapacity,
112683 +                               const void* cSrc, size_t cSrcSize);
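A caller-side sketch of what these contracts imply (hypothetical wrapper): the 0 and 1 returns are signals to switch block type, not sizes to store blindly.

typedef enum { BLOCK_RAW, BLOCK_RLE, BLOCK_FSE } block_kind;

static size_t choose_block(void* dst, size_t dstCap,
                           const void* src, size_t srcSize, block_kind* kind)
{
    size_t const r = FSE_compress(dst, dstCap, src, srcSize);
    if (FSE_isError(r)) return r;
    if (r == 0) { *kind = BLOCK_RAW; return srcSize; }  /* incompressible: store src verbatim */
    if (r == 1) { *kind = BLOCK_RLE; return 1; }        /* one byte repeated srcSize times */
    *kind = BLOCK_FSE;
    return r;
}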
112686 +/*-*****************************************
112687 +*  Tool functions
112688 +******************************************/
112689 +FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */
112691 +/* Error Management */
112692 +FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
112693 +FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
112696 +/*-*****************************************
112697 +*  FSE advanced functions
112698 +******************************************/
112699 +/*! FSE_compress2() :
112700 +    Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
112701 +    Both parameters can be defined as '0' to mean : use default value
112702 +    @return : size of compressed data
112703 +    Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!!
112704 +                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
112705 +                     if FSE_isError(return), it's an error code.
112707 +FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
112710 +/*-*****************************************
112711 +*  FSE detailed API
112712 +******************************************/
112714 +FSE_compress() does the following:
112715 +1. count symbol occurrences from source[] into table count[] (see hist.h)
112716 +2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
112717 +3. save normalized counters to memory buffer using writeNCount()
112718 +4. build encoding table 'CTable' from normalized counters
112719 +5. encode the data stream using encoding table 'CTable'
112721 +FSE_decompress() does the following:
112722 +1. read normalized counters with readNCount()
112723 +2. build decoding table 'DTable' from normalized counters
112724 +3. decode the data stream using decoding table 'DTable'
112726 +The following API allows targeting specific sub-functions for advanced tasks.
112727 +For example, it's possible to compress several blocks using the same 'CTable',
112728 +or to save and provide normalized distribution using external method.
112731 +/* *** COMPRESSION *** */
112733 +/*! FSE_optimalTableLog():
112734 +    dynamically downsize 'tableLog' when conditions are met.
112735 +    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
112736 +    @return : recommended tableLog (necessarily <= 'maxTableLog') */
112737 +FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
112739 +/*! FSE_normalizeCount():
112740 +    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
112741 +    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
112742 +    useLowProbCount is a boolean parameter which trades off compressed size for
112743 +    faster header decoding. When it is set to 1, the compressed data will be slightly
112744 +    smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
112745 +    faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
112746 +    is a good default, since header deserialization makes a big speed difference.
112747 +    Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
112748 +    @return : tableLog,
112749 +              or an errorCode, which can be tested using FSE_isError() */
112750 +FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
112751 +                    const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);
112753 +/*! FSE_NCountWriteBound():
112754 +    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
112755 +    Typically useful for allocation purpose. */
112756 +FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
112758 +/*! FSE_writeNCount():
112759 +    Compactly save 'normalizedCounter' into 'buffer'.
112760 +    @return : size of the compressed table,
112761 +              or an errorCode, which can be tested using FSE_isError(). */
112762 +FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
112763 +                                 const short* normalizedCounter,
112764 +                                 unsigned maxSymbolValue, unsigned tableLog);
112766 +/*! Constructor and Destructor of FSE_CTable.
112767 +    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
112768 +typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
112769 +FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
112770 +FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);
112772 +/*! FSE_buildCTable():
112773 +    Builds `ct`, which must be already allocated, using FSE_createCTable().
112774 +    @return : 0, or an errorCode, which can be tested using FSE_isError() */
112775 +FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
112777 +/*! FSE_compress_usingCTable():
112778 +    Compress `src` using `ct` into `dst` which must be already allocated.
112779 +    @return : size of compressed data (<= `dstCapacity`),
112780 +              or 0 if compressed data could not fit into `dst`,
112781 +              or an errorCode, which can be tested using FSE_isError() */
112782 +FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
112785 +Tutorial :
112786 +----------
112787 +The first step is to count all symbols. FSE_count() does this job very fast.
112788 +Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
112789 +'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
112790 +maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
112791 +FSE_count() will return the number of occurrences of the most frequent symbol.
112792 +This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
112793 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
112795 +The next step is to normalize the frequencies.
112796 +FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
112797 +It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
112798 +You can use 'tableLog'==0 to mean "use default tableLog value".
112799 +If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
112800 +which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
112802 +The result of FSE_normalizeCount() will be saved into a table,
112803 +called 'normalizedCounter', which is a table of signed short.
112804 +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
112805 +The return value is tableLog if everything proceeded as expected.
112806 +It is 0 if there is a single symbol within distribution.
112807 +If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
112809 +'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
112810 +'buffer' must be already allocated.
112811 +For guaranteed success, buffer size must be at least FSE_headerBound().
112812 +The result of the function is the number of bytes written into 'buffer'.
112813 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
112815 +'normalizedCounter' can then be used to create the compression table 'CTable'.
112816 +The space required by 'CTable' must be already allocated, using FSE_createCTable().
112817 +You can then use FSE_buildCTable() to fill 'CTable'.
112818 +If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
112820 +'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
112821 +Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'.
112822 +The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
112823 +If it returns '0', compressed data could not fit into 'dst'.
112824 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
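The tutorial above, condensed into a sketch (editorial illustration, bounds checks abbreviated; HIST_count() from hist.h is assumed to fill the FSE_count() role the text describes, and ERROR() is the internal helper from error_private.h):

size_t fse_compress_block(void* dst, size_t dstCap,
                          const void* src, size_t srcSize)
{
    unsigned count[256];
    short    norm[256];
    unsigned maxSym = 255;
    unsigned tableLog;
    size_t   r, hSize, cSize;
    FSE_CTable* ct;

    /* 1. count symbol occurrences */
    r = HIST_count(count, &maxSym, src, srcSize);
    if (FSE_isError(r)) return r;
    if (r == srcSize) return 1;                 /* single symbol: caller should RLE */

    /* 2. normalize so frequencies sum to 2^tableLog */
    tableLog = FSE_optimalTableLog(0 /* default */, srcSize, maxSym);
    r = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSym, /* useLowProbCount */ 1);
    if (FSE_isError(r)) return r;

    /* 3. save normalized counters as the header */
    hSize = FSE_writeNCount(dst, dstCap, norm, maxSym, tableLog);
    if (FSE_isError(hSize)) return hSize;

    /* 4. build the encoding table */
    ct = FSE_createCTable(maxSym, tableLog);
    if (ct == NULL) return ERROR(memory_allocation);
    r = FSE_buildCTable(ct, norm, maxSym, tableLog);
    if (FSE_isError(r)) { FSE_freeCTable(ct); return r; }

    /* 5. encode the stream */
    cSize = FSE_compress_usingCTable((char*)dst + hSize, dstCap - hSize,
                                     src, srcSize, ct);
    FSE_freeCTable(ct);
    if (FSE_isError(cSize) || cSize == 0) return cSize;  /* 0: didn't fit in dst */
    return hSize + cSize;
}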
112828 +/* *** DECOMPRESSION *** */
112830 +/*! FSE_readNCount():
112831 +    Read compactly saved 'normalizedCounter' from 'rBuffer'.
112832 +    @return : size read from 'rBuffer',
112833 +              or an errorCode, which can be tested using FSE_isError().
112834 +              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
112835 +FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
112836 +                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
112837 +                           const void* rBuffer, size_t rBuffSize);
112839 +/*! FSE_readNCount_bmi2():
112840 + * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
112841 + */
112842 +FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
112843 +                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
112844 +                           const void* rBuffer, size_t rBuffSize, int bmi2);
112846 +/*! Constructor and Destructor of FSE_DTable.
112847 +    Note that its size depends on 'tableLog' */
112848 +typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
112849 +FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
112850 +FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);
112852 +/*! FSE_buildDTable():
112853 +    Builds 'dt', which must be already allocated, using FSE_createDTable().
112854 +    return : 0, or an errorCode, which can be tested using FSE_isError() */
112855 +FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
112857 +/*! FSE_decompress_usingDTable():
112858 +    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
112859 +    into `dst` which must be already allocated.
112860 +    @return : size of regenerated data (necessarily <= `dstCapacity`),
112861 +              or an errorCode, which can be tested using FSE_isError() */
112862 +FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
112865 +Tutorial :
112866 +----------
112867 +(Note : these functions only decompress FSE-compressed blocks.
112868 + If block is uncompressed, use memcpy() instead
112869 + If block is a single repeated byte, use memset() instead )
112871 +The first step is to obtain the normalized frequencies of symbols.
112872 +This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
112873 +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
112874 +In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
112875 +or size the table to handle worst case situations (typically 256).
112876 +FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
112877 +The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
112878 +Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
112879 +If there is an error, the function will return an error code, which can be tested using FSE_isError().
112881 +The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
112882 +This is performed by the function FSE_buildDTable().
112883 +The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
112884 +If there is an error, the function will return an error code, which can be tested using FSE_isError().
112886 +`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
112887 +`cSrcSize` must be strictly correct, otherwise decompression will fail.
112888 +FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
112889 +If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
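And the decompression tutorial as a matching sketch (same caveats as the compression sketch above):

size_t fse_decompress_block(void* dst, size_t dstCap,
                            const void* cSrc, size_t cSrcSize)
{
    short    norm[256];
    unsigned maxSym = 255;
    unsigned tableLog;
    size_t   r;
    FSE_DTable* dt;

    /* 1. read the normalized counters saved by FSE_writeNCount() */
    size_t const hSize = FSE_readNCount(norm, &maxSym, &tableLog, cSrc, cSrcSize);
    if (FSE_isError(hSize)) return hSize;

    /* 2. build the decoding table */
    dt = FSE_createDTable(tableLog);
    if (dt == NULL) return ERROR(memory_allocation);
    r = FSE_buildDTable(dt, norm, maxSym, tableLog);
    if (FSE_isError(r)) { FSE_freeDTable(dt); return r; }

    /* 3. decode; the remaining cSrcSize - hSize bytes must be exactly the payload */
    r = FSE_decompress_usingDTable(dst, dstCap,
                                   (const char*)cSrc + hSize, cSrcSize - hSize, dt);
    FSE_freeDTable(dt);
    return r;
}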
112892 +#endif  /* FSE_H */
112894 +#if !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
112895 +#define FSE_H_FSE_STATIC_LINKING_ONLY
112897 +/* *** Dependency *** */
112898 +#include "bitstream.h"
112901 +/* *****************************************
112902 +*  Static allocation
112903 +*******************************************/
112904 +/* FSE buffer bounds */
112905 +#define FSE_NCOUNTBOUND 512
112906 +#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
112907 +#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
112909 +/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
112910 +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
112911 +#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<(maxTableLog)))
112913 +/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
112914 +#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
112915 +#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
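For example, tables sized at compile time for tableLog 12 and a full byte alphabet (values chosen purely for illustration):

static FSE_CTable ctable_storage[FSE_CTABLE_SIZE_U32(12, 255)];
static FSE_DTable dtable_storage[FSE_DTABLE_SIZE_U32(12)];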
112918 +/* *****************************************
112919 + *  FSE advanced API
112920 + ***************************************** */
112922 +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
112923 +/**< same as FSE_optimalTableLog(), which uses `minus==2` */
112925 +/* FSE_compress_wksp() :
112926 + * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
112927 + * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
112928 + */
112929 +#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
112930 +size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
112932 +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
112933 +/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
112935 +size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
+/**< build a fake FSE_CTable, designed to always compress the same symbolValue */
112938 +/* FSE_buildCTable_wksp() :
112939 + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
112940 + * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
112941 + */
112942 +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2)))
112943 +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
112944 +size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
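+/* Example (a minimal sketch; `normalizedCounter` comes from FSE_normalizeCount()) :
+ *     unsigned   wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(255, 12)];
+ *     FSE_CTable ct[FSE_CTABLE_SIZE_U32(12, 255)];
+ *     size_t const err = FSE_buildCTable_wksp(ct, normalizedCounter, 255, 12, wksp, sizeof(wksp));
+ * test err with FSE_isError(). */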
112946 +#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
112947 +#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
112948 +FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+/**< Same as FSE_buildDTable(), using an externally allocated `workSpace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)` */
112951 +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
112952 +/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
112954 +size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
112955 +/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
112957 +#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
112958 +#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
112959 +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
112960 +/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
112962 +size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
112963 +/**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
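+/* Example (a minimal sketch, byte symbols, tables up to 12 bits) :
+ *     unsigned wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(12, 255)];
+ *     size_t const dSize = FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize,
+ *                                              12, wksp, sizeof(wksp));
+ * test dSize with FSE_isError(). */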
112965 +typedef enum {
112966 +   FSE_repeat_none,  /**< Cannot use the previous table */
112967 +   FSE_repeat_check, /**< Can use the previous table but it must be checked */
112968 +   FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
112969 + } FSE_repeat;
112971 +/* *****************************************
112972 +*  FSE symbol compression API
112973 +*******************************************/
+/*!
+   This API consists of small unitary functions, which highly benefit from being inlined.
+   Hence their bodies are included in the next section.
+*/
112978 +typedef struct {
112979 +    ptrdiff_t   value;
112980 +    const void* stateTable;
112981 +    const void* symbolTT;
112982 +    unsigned    stateLog;
112983 +} FSE_CState_t;
112985 +static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
112987 +static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
112989 +static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
112991 +/**<
112992 +These functions are inner components of FSE_compress_usingCTable().
112993 +They allow the creation of custom streams, mixing multiple tables and bit sources.
112995 +A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
112996 +So the first symbol you will encode is the last you will decode, like a LIFO stack.
112998 +You will need a few variables to track your CStream. They are :
113000 +FSE_CTable    ct;         // Provided by FSE_buildCTable()
113001 +BIT_CStream_t bitStream;  // bitStream tracking structure
113002 +FSE_CState_t  state;      // State tracking structure (can have several)
113005 +The first thing to do is to init bitStream and state.
113006 +    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
113007 +    FSE_initCState(&state, ct);
+Note that BIT_initCStream() can produce an error code, so its result should be tested using FSE_isError().
113010 +You can then encode your input data, byte after byte.
113011 +FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
113012 +Remember decoding will be done in reverse direction.
+    FSE_encodeSymbol(&bitStream, &state, symbol);
113015 +At any time, you can also add any bit sequence.
+Note : maximum allowed nbBits is 25, for compatibility with 32-bit decoders
113017 +    BIT_addBits(&bitStream, bitField, nbBits);
+The above methods don't commit data to memory, they just store it into a local register, for speed.
+The local register size is 64 bits on 64-bit systems, 32 bits on 32-bit systems (size_t).
113021 +Writing data to memory is a manual operation, performed by the flushBits function.
113022 +    BIT_flushBits(&bitStream);
113024 +Your last FSE encoding operation shall be to flush your last state value(s).
+    FSE_flushCState(&bitStream, &state);
113027 +Finally, you must close the bitStream.
113028 +The function returns the size of CStream in bytes.
+If the data couldn't fit into dstBuffer, it returns 0 (== not compressible).
113030 +If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
113031 +    size_t size = BIT_closeCStream(&bitStream);
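+
+Putting it together, a minimal encoding sketch (assumes `ct` was built with
+FSE_buildCTable(); input is encoded backwards, so that decoding regenerates it
+forward; flushing after every symbol is safe, merely not optimal; checks elided) :
+    BIT_CStream_t bitStream;
+    FSE_CState_t  state;
+    const unsigned char* ip = (const unsigned char*)src + srcSize;
+    BIT_initCStream(&bitStream, dstBuffer, maxDstSize);   /* test result in real code */
+    FSE_initCState(&state, ct);
+    while (ip > (const unsigned char*)src) {
+        FSE_encodeSymbol(&bitStream, &state, *--ip);
+        BIT_flushBits(&bitStream);                        /* commit register to memory */
+    }
+    FSE_flushCState(&bitStream, &state);
+    {   size_t const cSize = BIT_closeCStream(&bitStream);
+        /* cSize==0 : not compressible; FSE_isError(cSize) : error */
+    }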
113035 +/* *****************************************
113036 +*  FSE symbol decompression API
113037 +*******************************************/
113038 +typedef struct {
113039 +    size_t      state;
113040 +    const void* table;   /* precise table may vary, depending on U16 */
113041 +} FSE_DState_t;
113044 +static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
113046 +static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
113048 +static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
113050 +/**<
113051 +Let's now decompose FSE_decompress_usingDTable() into its unitary components.
113052 +You will decode FSE-encoded symbols from the bitStream,
113053 +and also any other bitFields you put in, **in reverse order**.
113055 +You will need a few variables to track your bitStream. They are :
113057 +BIT_DStream_t DStream;    // Stream context
113058 +FSE_DState_t  DState;     // State context. Multiple ones are possible
113059 +FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
113061 +The first thing to do is to init the bitStream.
113062 +    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
113064 +You should then retrieve your initial state(s)
113065 +(in reverse flushing order if you have several ones) :
113066 +    errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
113068 +You can then decode your data, symbol after symbol.
+For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
113070 +Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
113071 +    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
113073 +You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
+Note : maximum allowed nbBits is 25, for 32-bit compatibility
113075 +    size_t bitField = BIT_readBits(&DStream, nbBits);
+All above operations only read from the local register (whose size depends on size_t).
113078 +Refueling the register from memory is manually performed by the reload method.
+    endSignal = BIT_reloadDStream(&DStream);
+BIT_reloadDStream() result tells if there is still more data to read from DStream.
+BIT_DStream_unfinished : there is still some data left in the DStream.
+BIT_DStream_endOfBuffer : DStream reached the end of its buffer. Its container may no longer be completely filled.
+BIT_DStream_completed : DStream reached its exact end, generally corresponding to a completed decompression.
+BIT_DStream_tooFar : DStream went too far. The decompression result is corrupted.
113087 +When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
113088 +to properly detect the exact end of stream.
113089 +After each decoded symbol, check if DStream is fully consumed using this simple test :
113090 +    BIT_reloadDStream(&DStream) >= BIT_DStream_completed
113092 +When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
113093 +Checking if DStream has reached its end is performed by :
113094 +    BIT_endOfDStream(&DStream);
+Also check the states : some symbols may remain there, if any high-probability symbol (>50%) is possible.
113096 +    FSE_endOfDState(&DState);
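+
+Putting it together, a minimal decoding sketch (assumes `dt` was built with
+FSE_buildDTable() and the regenerated size `dstSize` is known exactly; checks elided) :
+    BIT_DStream_t DStream;
+    FSE_DState_t  DState;
+    unsigned char* op = (unsigned char*)dst;
+    unsigned char* const oend = op + dstSize;
+    BIT_initDStream(&DStream, srcBuffer, srcSize);        /* test result in real code */
+    FSE_initDState(&DState, &DStream, dt);
+    while (op < oend) {
+        *op++ = FSE_decodeSymbol(&DState, &DStream);
+        BIT_reloadDStream(&DStream);                      /* refill register from memory */
+    }
+    /* on success, both the stream and the state are now fully consumed : */
+    assert(BIT_endOfDStream(&DStream) && FSE_endOfDState(&DState));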
113100 +/* *****************************************
113101 +*  FSE unsafe API
113102 +*******************************************/
113103 +static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
113104 +/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
113107 +/* *****************************************
113108 +*  Implementation of inlined functions
113109 +*******************************************/
113110 +typedef struct {
113111 +    int deltaFindState;
113112 +    U32 deltaNbBits;
113113 +} FSE_symbolCompressionTransform; /* total 8 bytes */
113115 +MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
113117 +    const void* ptr = ct;
113118 +    const U16* u16ptr = (const U16*) ptr;
113119 +    const U32 tableLog = MEM_read16(ptr);
113120 +    statePtr->value = (ptrdiff_t)1<<tableLog;
113121 +    statePtr->stateTable = u16ptr+2;
113122 +    statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
113123 +    statePtr->stateLog = tableLog;
113127 +/*! FSE_initCState2() :
113128 +*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
113129 +*   uses the smallest state value possible, saving the cost of this symbol */
113130 +MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
113132 +    FSE_initCState(statePtr, ct);
113133 +    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
113134 +        const U16* stateTable = (const U16*)(statePtr->stateTable);
113135 +        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
113136 +        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
113137 +        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
113138 +    }
113141 +MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
113143 +    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
113144 +    const U16* const stateTable = (const U16*)(statePtr->stateTable);
113145 +    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
113146 +    BIT_addBits(bitC, statePtr->value, nbBitsOut);
113147 +    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
113150 +MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
113152 +    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
113153 +    BIT_flushBits(bitC);
113157 +/* FSE_getMaxNbBits() :
113158 + * Approximate maximum cost of a symbol, in bits.
+ * Fractional costs get rounded up (e.g. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
113160 + * note 1 : assume symbolValue is valid (<= maxSymbolValue)
113161 + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
113162 +MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
113164 +    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
113165 +    return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
113168 +/* FSE_bitCost() :
113169 + * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
113170 + * note 1 : assume symbolValue is valid (<= maxSymbolValue)
113171 + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
113172 +MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
113174 +    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
113175 +    U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
113176 +    U32 const threshold = (minNbBits+1) << 16;
113177 +    assert(tableLog < 16);
113178 +    assert(accuracyLog < 31-tableLog);  /* ensure enough room for renormalization double shift */
113179 +    {   U32 const tableSize = 1 << tableLog;
113180 +        U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
113181 +        U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog;   /* linear interpolation (very approximate) */
113182 +        U32 const bitMultiplier = 1 << accuracyLog;
113183 +        assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
113184 +        assert(normalizedDeltaFromThreshold <= bitMultiplier);
113185 +        return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
113186 +    }
113190 +/* ======    Decompression    ====== */
113192 +typedef struct {
113193 +    U16 tableLog;
113194 +    U16 fastMode;
113195 +} FSE_DTableHeader;   /* sizeof U32 */
113197 +typedef struct
113199 +    unsigned short newState;
113200 +    unsigned char  symbol;
113201 +    unsigned char  nbBits;
113202 +} FSE_decode_t;   /* size == U32 */
113204 +MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
113206 +    const void* ptr = dt;
113207 +    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
113208 +    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
113209 +    BIT_reloadDStream(bitD);
113210 +    DStatePtr->table = dt + 1;
113213 +MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
113215 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
113216 +    return DInfo.symbol;
113219 +MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
113221 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
113222 +    U32 const nbBits = DInfo.nbBits;
113223 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
113224 +    DStatePtr->state = DInfo.newState + lowBits;
113227 +MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
113229 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
113230 +    U32 const nbBits = DInfo.nbBits;
113231 +    BYTE const symbol = DInfo.symbol;
113232 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
113234 +    DStatePtr->state = DInfo.newState + lowBits;
113235 +    return symbol;
113238 +/*! FSE_decodeSymbolFast() :
113239 +    unsafe, only works if no symbol has a probability > 50% */
113240 +MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
113242 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
113243 +    U32 const nbBits = DInfo.nbBits;
113244 +    BYTE const symbol = DInfo.symbol;
113245 +    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
113247 +    DStatePtr->state = DInfo.newState + lowBits;
113248 +    return symbol;
113251 +MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
113253 +    return DStatePtr->state == 0;
113258 +#ifndef FSE_COMMONDEFS_ONLY
113260 +/* **************************************************************
113261 +*  Tuning parameters
113262 +****************************************************************/
113263 +/*!MEMORY_USAGE :
113264 +*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
113265 +*  Increasing memory usage improves compression ratio
113266 +*  Reduced memory usage can improve speed, due to cache effect
113267 +*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
113268 +#ifndef FSE_MAX_MEMORY_USAGE
113269 +#  define FSE_MAX_MEMORY_USAGE 14
113270 +#endif
113271 +#ifndef FSE_DEFAULT_MEMORY_USAGE
113272 +#  define FSE_DEFAULT_MEMORY_USAGE 13
113273 +#endif
113274 +#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE)
113275 +#  error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE"
113276 +#endif
113278 +/*!FSE_MAX_SYMBOL_VALUE :
113279 +*  Maximum symbol value authorized.
113280 +*  Required for proper stack allocation */
113281 +#ifndef FSE_MAX_SYMBOL_VALUE
113282 +#  define FSE_MAX_SYMBOL_VALUE 255
113283 +#endif
113285 +/* **************************************************************
113286 +*  template functions type & suffix
113287 +****************************************************************/
113288 +#define FSE_FUNCTION_TYPE BYTE
113289 +#define FSE_FUNCTION_EXTENSION
113290 +#define FSE_DECODE_TYPE FSE_decode_t
113293 +#endif   /* !FSE_COMMONDEFS_ONLY */
113296 +/* ***************************************************************
113297 +*  Constants
113298 +*****************************************************************/
113299 +#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
113300 +#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
113301 +#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
113302 +#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
113303 +#define FSE_MIN_TABLELOG 5
113305 +#define FSE_TABLELOG_ABSOLUTE_MAX 15
113306 +#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
113307 +#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
113308 +#endif
113310 +#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)
+#endif /* FSE_H_FSE_STATIC_LINKING_ONLY */
113314 diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
113315 new file mode 100644
113316 index 000000000000..2c8bbe3e4c14
113317 --- /dev/null
113318 +++ b/lib/zstd/common/fse_decompress.c
113319 @@ -0,0 +1,390 @@
113320 +/* ******************************************************************
113321 + * FSE : Finite State Entropy decoder
113322 + * Copyright (c) Yann Collet, Facebook, Inc.
113324 + *  You can contact the author at :
113325 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
113326 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
113328 + * This source code is licensed under both the BSD-style license (found in the
113329 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
113330 + * in the COPYING file in the root directory of this source tree).
113331 + * You may select, at your option, one of the above-listed licenses.
113332 +****************************************************************** */
113335 +/* **************************************************************
113336 +*  Includes
113337 +****************************************************************/
113338 +#include "debug.h"      /* assert */
113339 +#include "bitstream.h"
113340 +#include "compiler.h"
113341 +#define FSE_STATIC_LINKING_ONLY
113342 +#include "fse.h"
113343 +#include "error_private.h"
113344 +#define ZSTD_DEPS_NEED_MALLOC
113345 +#include "zstd_deps.h"
113348 +/* **************************************************************
113349 +*  Error Management
113350 +****************************************************************/
113351 +#define FSE_isError ERR_isError
113352 +#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
113355 +/* **************************************************************
113356 +*  Templates
113357 +****************************************************************/
113359 +  designed to be included
113360 +  for type-specific functions (template emulation in C)
+  The objective is to write these functions only once, for improved maintenance.
113364 +/* safety checks */
113365 +#ifndef FSE_FUNCTION_EXTENSION
113366 +#  error "FSE_FUNCTION_EXTENSION must be defined"
113367 +#endif
113368 +#ifndef FSE_FUNCTION_TYPE
113369 +#  error "FSE_FUNCTION_TYPE must be defined"
113370 +#endif
113372 +/* Function names */
113373 +#define FSE_CAT(X,Y) X##Y
113374 +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
113375 +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
113378 +/* Function templates */
113379 +FSE_DTable* FSE_createDTable (unsigned tableLog)
113381 +    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
113382 +    return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
113385 +void FSE_freeDTable (FSE_DTable* dt)
113387 +    ZSTD_free(dt);
113390 +static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bit aligned on a 32-bit boundary */
113393 +    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
113394 +    U16* symbolNext = (U16*)workSpace;
113395 +    BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);
113397 +    U32 const maxSV1 = maxSymbolValue + 1;
113398 +    U32 const tableSize = 1 << tableLog;
113399 +    U32 highThreshold = tableSize-1;
113401 +    /* Sanity Checks */
113402 +    if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
113403 +    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
113404 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
113406 +    /* Init, lay down lowprob symbols */
113407 +    {   FSE_DTableHeader DTableH;
113408 +        DTableH.tableLog = (U16)tableLog;
113409 +        DTableH.fastMode = 1;
113410 +        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
113411 +            U32 s;
113412 +            for (s=0; s<maxSV1; s++) {
113413 +                if (normalizedCounter[s]==-1) {
113414 +                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
113415 +                    symbolNext[s] = 1;
113416 +                } else {
113417 +                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
113418 +                    symbolNext[s] = normalizedCounter[s];
113419 +        }   }   }
113420 +        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
113421 +    }
113423 +    /* Spread symbols */
113424 +    if (highThreshold == tableSize - 1) {
113425 +        size_t const tableMask = tableSize-1;
113426 +        size_t const step = FSE_TABLESTEP(tableSize);
113427 +        /* First lay down the symbols in order.
113428 +         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
113429 +         * misses since small blocks generally have small table logs, so nearly
113430 +         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
113431 +         * our buffer to handle the over-write.
113432 +         */
113433 +        {
113434 +            U64 const add = 0x0101010101010101ull;
113435 +            size_t pos = 0;
113436 +            U64 sv = 0;
113437 +            U32 s;
113438 +            for (s=0; s<maxSV1; ++s, sv += add) {
113439 +                int i;
113440 +                int const n = normalizedCounter[s];
113441 +                MEM_write64(spread + pos, sv);
113442 +                for (i = 8; i < n; i += 8) {
113443 +                    MEM_write64(spread + pos + i, sv);
113444 +                }
113445 +                pos += n;
113446 +            }
113447 +        }
113448 +        /* Now we spread those positions across the table.
+         * The benefit of doing it in two stages is that we avoid the
+         * variable-size inner loop, which caused lots of branch misses.
+         * Now we can run through all the positions without any branch misses.
+         * We unroll the loop twice, since that is what empirically worked best.
113453 +         */
113454 +        {
113455 +            size_t position = 0;
113456 +            size_t s;
113457 +            size_t const unroll = 2;
113458 +            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
113459 +            for (s = 0; s < (size_t)tableSize; s += unroll) {
113460 +                size_t u;
113461 +                for (u = 0; u < unroll; ++u) {
113462 +                    size_t const uPosition = (position + (u * step)) & tableMask;
113463 +                    tableDecode[uPosition].symbol = spread[s + u];
113464 +                }
113465 +                position = (position + (unroll * step)) & tableMask;
113466 +            }
113467 +            assert(position == 0);
113468 +        }
113469 +    } else {
113470 +        U32 const tableMask = tableSize-1;
113471 +        U32 const step = FSE_TABLESTEP(tableSize);
113472 +        U32 s, position = 0;
113473 +        for (s=0; s<maxSV1; s++) {
113474 +            int i;
113475 +            for (i=0; i<normalizedCounter[s]; i++) {
113476 +                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
113477 +                position = (position + step) & tableMask;
113478 +                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
113479 +        }   }
113480 +        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
113481 +    }
113483 +    /* Build Decoding table */
113484 +    {   U32 u;
113485 +        for (u=0; u<tableSize; u++) {
113486 +            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
113487 +            U32 const nextState = symbolNext[symbol]++;
113488 +            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
113489 +            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
113490 +    }   }
113492 +    return 0;
113495 +size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
113497 +    return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
113501 +#ifndef FSE_COMMONDEFS_ONLY
113503 +/*-*******************************************************
113504 +*  Decompression (Byte symbols)
113505 +*********************************************************/
113506 +size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
113508 +    void* ptr = dt;
113509 +    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
113510 +    void* dPtr = dt + 1;
113511 +    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
113513 +    DTableH->tableLog = 0;
113514 +    DTableH->fastMode = 0;
113516 +    cell->newState = 0;
113517 +    cell->symbol = symbolValue;
113518 +    cell->nbBits = 0;
113520 +    return 0;
113524 +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
113526 +    void* ptr = dt;
113527 +    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
113528 +    void* dPtr = dt + 1;
113529 +    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
113530 +    const unsigned tableSize = 1 << nbBits;
113531 +    const unsigned tableMask = tableSize - 1;
113532 +    const unsigned maxSV1 = tableMask+1;
113533 +    unsigned s;
113535 +    /* Sanity checks */
113536 +    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
113538 +    /* Build Decoding Table */
113539 +    DTableH->tableLog = (U16)nbBits;
113540 +    DTableH->fastMode = 1;
113541 +    for (s=0; s<maxSV1; s++) {
113542 +        dinfo[s].newState = 0;
113543 +        dinfo[s].symbol = (BYTE)s;
113544 +        dinfo[s].nbBits = (BYTE)nbBits;
113545 +    }
113547 +    return 0;
113550 +FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
113551 +          void* dst, size_t maxDstSize,
113552 +    const void* cSrc, size_t cSrcSize,
113553 +    const FSE_DTable* dt, const unsigned fast)
113555 +    BYTE* const ostart = (BYTE*) dst;
113556 +    BYTE* op = ostart;
113557 +    BYTE* const omax = op + maxDstSize;
113558 +    BYTE* const olimit = omax-3;
113560 +    BIT_DStream_t bitD;
113561 +    FSE_DState_t state1;
113562 +    FSE_DState_t state2;
113564 +    /* Init */
113565 +    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
113567 +    FSE_initDState(&state1, &bitD, dt);
113568 +    FSE_initDState(&state2, &bitD, dt);
113570 +#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
113572 +    /* 4 symbols per loop */
113573 +    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
113574 +        op[0] = FSE_GETSYMBOL(&state1);
113576 +        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
113577 +            BIT_reloadDStream(&bitD);
113579 +        op[1] = FSE_GETSYMBOL(&state2);
113581 +        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
113582 +            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
113584 +        op[2] = FSE_GETSYMBOL(&state1);
113586 +        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
113587 +            BIT_reloadDStream(&bitD);
113589 +        op[3] = FSE_GETSYMBOL(&state2);
113590 +    }
113592 +    /* tail */
+    /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; ends at exactly BIT_DStream_completed */
113594 +    while (1) {
113595 +        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
113596 +        *op++ = FSE_GETSYMBOL(&state1);
113597 +        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
113598 +            *op++ = FSE_GETSYMBOL(&state2);
113599 +            break;
113600 +        }
113602 +        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
113603 +        *op++ = FSE_GETSYMBOL(&state2);
113604 +        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
113605 +            *op++ = FSE_GETSYMBOL(&state1);
113606 +            break;
113607 +    }   }
113609 +    return op-ostart;
113613 +size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
113614 +                            const void* cSrc, size_t cSrcSize,
113615 +                            const FSE_DTable* dt)
113617 +    const void* ptr = dt;
113618 +    const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
113619 +    const U32 fastMode = DTableH->fastMode;
113621 +    /* select fast mode (static) */
113622 +    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
113623 +    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
113627 +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
113629 +    return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
113632 +typedef struct {
113633 +    short ncount[FSE_MAX_SYMBOL_VALUE + 1];
113634 +    FSE_DTable dtable[1]; /* Dynamically sized */
113635 +} FSE_DecompressWksp;
113638 +FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
113639 +        void* dst, size_t dstCapacity,
113640 +        const void* cSrc, size_t cSrcSize,
113641 +        unsigned maxLog, void* workSpace, size_t wkspSize,
113642 +        int bmi2)
113644 +    const BYTE* const istart = (const BYTE*)cSrc;
113645 +    const BYTE* ip = istart;
113646 +    unsigned tableLog;
113647 +    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
113648 +    FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
113650 +    DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
113651 +    if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
113653 +    /* normal FSE decoding mode */
113654 +    {
113655 +        size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
113656 +        if (FSE_isError(NCountLength)) return NCountLength;
113657 +        if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
113658 +        assert(NCountLength <= cSrcSize);
113659 +        ip += NCountLength;
113660 +        cSrcSize -= NCountLength;
113661 +    }
113663 +    if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
113664 +    workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
113665 +    wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
113667 +    CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
113669 +    {
113670 +        const void* ptr = wksp->dtable;
113671 +        const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
113672 +        const U32 fastMode = DTableH->fastMode;
113674 +        /* select fast mode (static) */
113675 +        if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
113676 +        return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
113677 +    }
113680 +/* Avoids the FORCE_INLINE of the _body() function. */
113681 +static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
113683 +    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
113686 +#if DYNAMIC_BMI2
113687 +TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
113689 +    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
113691 +#endif
113693 +size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
113695 +#if DYNAMIC_BMI2
113696 +    if (bmi2) {
113697 +        return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
113698 +    }
113699 +#endif
113700 +    (void)bmi2;
113701 +    return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
113705 +typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
113709 +#endif   /* FSE_COMMONDEFS_ONLY */
113710 diff --git a/lib/zstd/common/huf.h b/lib/zstd/common/huf.h
113711 new file mode 100644
113712 index 000000000000..b5dbd386c5e6
113713 --- /dev/null
113714 +++ b/lib/zstd/common/huf.h
113715 @@ -0,0 +1,355 @@
113716 +/* ******************************************************************
113717 + * huff0 huffman codec,
113718 + * part of Finite State Entropy library
113719 + * Copyright (c) Yann Collet, Facebook, Inc.
113721 + * You can contact the author at :
113722 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
113724 + * This source code is licensed under both the BSD-style license (found in the
113725 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
113726 + * in the COPYING file in the root directory of this source tree).
113727 + * You may select, at your option, one of the above-listed licenses.
113728 +****************************************************************** */
113731 +#ifndef HUF_H_298734234
113732 +#define HUF_H_298734234
113734 +/* *** Dependencies *** */
113735 +#include "zstd_deps.h"    /* size_t */
113738 +/* *** library symbols visibility *** */
113739 +/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
113740 + *        HUF symbols remain "private" (internal symbols for library only).
113741 + *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
113742 +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
113743 +#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
113744 +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
113745 +#  define HUF_PUBLIC_API __declspec(dllexport)
113746 +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
113747 +#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
113748 +#else
113749 +#  define HUF_PUBLIC_API
113750 +#endif
113753 +/* ========================== */
113754 +/* ***  simple functions  *** */
113755 +/* ========================== */
113757 +/** HUF_compress() :
113758 + *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
113759 + * 'dst' buffer must be already allocated.
113760 + *  Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
113761 + * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
113762 + * @return : size of compressed data (<= `dstCapacity`).
113763 + *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
113764 + *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName())
113765 + */
113766 +HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
113767 +                             const void* src, size_t srcSize);
113769 +/** HUF_decompress() :
113770 + *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+ *  into already allocated buffer 'dst', of minimum size 'originalSize'.
113772 + * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
113773 + *  Note : in contrast with FSE, HUF_decompress can regenerate
113774 + *         RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
113775 + *         because it knows size to regenerate (originalSize).
113776 + * @return : size of regenerated data (== originalSize),
113777 + *           or an error code, which can be tested using HUF_isError()
113778 + */
113779 +HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,
113780 +                               const void* cSrc, size_t cSrcSize);
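+
+/* Round-trip example (a minimal sketch; `cBuf` is sized with HUF_compressBound(),
+ * error checks elided) :
+ *     size_t const cSize = HUF_compress(cBuf, HUF_compressBound(srcSize), src, srcSize);
+ *     if (cSize==0) { not compressible : store src raw }
+ *     else { size_t const rSize = HUF_decompress(dst, srcSize, cBuf, cSize); }
+ * the exact original size (here srcSize) must be passed back to HUF_decompress(). */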
113783 +/* ***   Tool functions *** */
113784 +#define HUF_BLOCKSIZE_MAX (128 * 1024)                  /**< maximum input size for a single block compressed with HUF_compress */
113785 +HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */
113787 +/* Error Management */
113788 +HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */
113789 +HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */
113792 +/* ***   Advanced function   *** */
113794 +/** HUF_compress2() :
113795 + *  Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
113796 + * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
113797 + * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
113798 +HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
113799 +                               const void* src, size_t srcSize,
113800 +                               unsigned maxSymbolValue, unsigned tableLog);
113802 +/** HUF_compress4X_wksp() :
113803 + *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
+ * `workSpace` must have a minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
113805 +#define HUF_WORKSPACE_SIZE ((6 << 10) + 256)
113806 +#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
113807 +HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
113808 +                                     const void* src, size_t srcSize,
113809 +                                     unsigned maxSymbolValue, unsigned tableLog,
113810 +                                     void* workSpace, size_t wkspSize);
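+
+/* Example (a minimal sketch, workspace on the stack) :
+ *     unsigned wksp[HUF_WORKSPACE_SIZE_U32];
+ *     size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
+ *                                              255, 11, wksp, sizeof(wksp));
+ * 255 and 11 are the usual maxSymbolValue / tableLog for a byte alphabet. */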
113812 +#endif   /* HUF_H_298734234 */
113814 +/* ******************************************************************
113815 + *  WARNING !!
113816 + *  The following section contains advanced and experimental definitions
113817 + *  which shall never be used in the context of a dynamic library,
113818 + *  because they are not guaranteed to remain stable in the future.
113819 + *  Only consider them in association with static linking.
113820 + * *****************************************************************/
113821 +#if !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
113822 +#define HUF_H_HUF_STATIC_LINKING_ONLY
113824 +/* *** Dependencies *** */
113825 +#include "mem.h"   /* U32 */
113826 +#define FSE_STATIC_LINKING_ONLY
113827 +#include "fse.h"
113830 +/* *** Constants *** */
113831 +#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
113832 +#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
113833 +#define HUF_SYMBOLVALUE_MAX  255
113835 +#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
113836 +#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
113837 +#  error "HUF_TABLELOG_MAX is too large !"
113838 +#endif
113841 +/* ****************************************
113842 +*  Static allocation
113843 +******************************************/
113844 +/* HUF buffer bounds */
113845 +#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible data is pre-filtered with a fast heuristic */
113847 +#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
113849 +/* static allocation of HUF's Compression Table */
+/* this is a private definition, just exposed for allocation and strict-aliasing purposes. Never EVER access its members directly */
113851 +struct HUF_CElt_s {
113852 +  U16  val;
113853 +  BYTE nbBits;
113854 +};   /* typedef'd to HUF_CElt */
113855 +typedef struct HUF_CElt_s HUF_CElt;   /* consider it an incomplete type */
113856 +#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */
113857 +#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
113858 +#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
113859 +    HUF_CElt name[HUF_CTABLE_SIZE_U32(maxSymbolValue)] /* no final ; */
113861 +/* static allocation of HUF's DTable */
113862 +typedef U32 HUF_DTable;
113863 +#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
113864 +#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
113865 +        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
113866 +#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
113867 +        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
113870 +/* ****************************************
113871 +*  Advanced decompression functions
113872 +******************************************/
113873 +size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
113874 +#ifndef HUF_FORCE_DECOMPRESS_X1
113875 +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
113876 +#endif
113878 +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
113879 +size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
113880 +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
113881 +size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
113882 +size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
113883 +#ifndef HUF_FORCE_DECOMPRESS_X1
113884 +size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
113885 +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
113886 +#endif
113889 +/* ****************************************
113890 + *  HUF detailed API
113891 + * ****************************************/
113893 +/*! HUF_compress() does the following:
113894 + *  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
113895 + *  2. (optional) refine tableLog using HUF_optimalTableLog()
113896 + *  3. build Huffman table from count using HUF_buildCTable()
113897 + *  4. save Huffman table to memory buffer using HUF_writeCTable()
113898 + *  5. encode the data stream using HUF_compress4X_usingCTable()
113900 + *  The following API allows targeting specific sub-functions for advanced tasks.
113901 + *  For example, it's possible to compress several blocks using the same 'CTable',
113902 + *  or to save and regenerate 'CTable' using external methods.
113903 + */
113904 +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap, in which case CTable will overwrite count's content */
113906 +size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
113907 +size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
113908 +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
113909 +size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
113910 +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
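+
+/* Detailed-API sketch (a minimal example; assumes `count[]` was filled by a histogram
+ * pass such as FSE_count(), with maxSymbolValue==255; error checks elided) :
+ *     HUF_CREATE_STATIC_CTABLE(cTable, 255);
+ *     unsigned const huffLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, 255);
+ *     size_t const maxBits = HUF_buildCTable(cTable, count, 255, huffLog);
+ *     size_t const hSize = HUF_writeCTable(dst, dstCapacity, cTable, 255, (unsigned)maxBits);
+ *     size_t const cSize = HUF_compress4X_usingCTable((char*)dst + hSize, dstCapacity - hSize,
+ *                                                     src, srcSize, cTable);
+ * total output is hSize + cSize bytes : the serialized table, then the 4 streams. */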
113912 +typedef enum {
113913 +   HUF_repeat_none,  /**< Cannot use the previous table */
113914 +   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
113915 +   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
113916 + } HUF_repeat;
113917 +/** HUF_compress4X_repeat() :
113918 + *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
113919 + *  If it uses hufTable it does not modify hufTable or repeat.
113920 + *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
113921 + *  If preferRepeat then the old table will always be used if valid. */
113922 +size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
113923 +                       const void* src, size_t srcSize,
113924 +                       unsigned maxSymbolValue, unsigned tableLog,
113925 +                       void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
113926 +                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
113928 +/** HUF_buildCTable_wksp() :
113929 + *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
113930 + * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
113931 + */
113932 +#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
113933 +#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
113934 +size_t HUF_buildCTable_wksp (HUF_CElt* tree,
113935 +                       const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
113936 +                             void* workSpace, size_t wkspSize);
113938 +/*! HUF_readStats() :
113939 + *  Read compact Huffman tree, saved by HUF_writeCTable().
113940 + * `huffWeight` is destination buffer.
+ * @return : size read from `src`, or an error code.
113942 + *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
113943 +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
113944 +                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
113945 +                     const void* src, size_t srcSize);
113947 +/*! HUF_readStats_wksp() :
113948 + * Same as HUF_readStats() but takes an external workspace which must be
113949 + * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
113950 + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
113951 + */
113952 +#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
113953 +#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
113954 +size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
113955 +                          U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
113956 +                          const void* src, size_t srcSize,
113957 +                          void* workspace, size_t wkspSize,
113958 +                          int bmi2);
113960 +/** HUF_readCTable() :
113961 + *  Loading a CTable saved with HUF_writeCTable() */
113962 +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
113964 +/** HUF_getNbBits() :
113965 + *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
113966 + *  Note 1 : is not inlined, as HUF_CElt definition is private
113967 + *  Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
113968 +U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
+/**
+ * HUF_decompress() does the following:
113972 + * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
+ * 2. build the Huffman table from the saved statistics, using HUF_readDTableX?()
113974 + * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
113975 + */
113977 +/** HUF_selectDecoder() :
113978 + *  Tells which decoder is likely to decode faster,
113979 + *  based on a set of pre-computed metrics.
113980 + * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
113981 + *  Assumption : 0 < dstSize <= 128 KB */
113982 +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
+/**
+ *  The minimum workspace size for the `workSpace` used in
113986 + *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
113988 + *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
+ *  HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
113990 + *  Buffer overflow errors may potentially occur if code modifications result in
113991 + *  a required workspace size greater than that specified in the following
113992 + *  macro.
113993 + */
113994 +#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
113995 +#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
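+
+/* Example (a minimal sketch, all storage static; error checks elided) :
+ *     HUF_CREATE_STATIC_DTABLEX2(dTable, HUF_TABLELOG_MAX);
+ *     unsigned wksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ *     size_t const dSize = HUF_decompress4X_hufOnly_wksp(dTable, dst, dstSize,
+ *                                                        cSrc, cSrcSize, wksp, sizeof(wksp));
+ */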
113997 +#ifndef HUF_FORCE_DECOMPRESS_X2
113998 +size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
113999 +size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
114000 +#endif
114001 +#ifndef HUF_FORCE_DECOMPRESS_X1
114002 +size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
114003 +size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
114004 +#endif
114006 +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
114007 +#ifndef HUF_FORCE_DECOMPRESS_X2
114008 +size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
114009 +#endif
114010 +#ifndef HUF_FORCE_DECOMPRESS_X1
114011 +size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
114012 +#endif
114015 +/* ====================== */
114016 +/* single stream variants */
114017 +/* ====================== */
114019 +size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
114020 +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
114021 +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
114022 +/** HUF_compress1X_repeat() :
114023 + *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
114024 + *  If it uses hufTable it does not modify hufTable or repeat.
114025 + *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
114026 + *  If preferRepeat then the old table will always be used if valid. */
114027 +size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
114028 +                       const void* src, size_t srcSize,
114029 +                       unsigned maxSymbolValue, unsigned tableLog,
114030 +                       void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
114031 +                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
114033 +size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
114034 +#ifndef HUF_FORCE_DECOMPRESS_X1
114035 +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
114036 +#endif
114038 +size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
114039 +size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
114040 +#ifndef HUF_FORCE_DECOMPRESS_X2
114041 +size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
114042 +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
114043 +#endif
114044 +#ifndef HUF_FORCE_DECOMPRESS_X1
114045 +size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
114046 +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
114047 +#endif
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
114050 +#ifndef HUF_FORCE_DECOMPRESS_X2
114051 +size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
114052 +#endif
114053 +#ifndef HUF_FORCE_DECOMPRESS_X1
114054 +size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
114055 +#endif
114057 +/* BMI2 variants.
114058 + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
114059 + */
114060 +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
114061 +#ifndef HUF_FORCE_DECOMPRESS_X2
114062 +size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
114063 +#endif
114064 +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
114065 +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
114066 +#ifndef HUF_FORCE_DECOMPRESS_X2
114067 +size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
114068 +#endif
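One plausible way to derive the bmi2 argument in kernel code on x86 — a sketch only; the zstd sources normally ship their own CPU-detection helper, and boot_cpu_has()/X86_FEATURE_BMI2 are generic kernel facilities, not something this header mandates:

#ifdef CONFIG_X86
#include <asm/cpufeature.h>   /* boot_cpu_has(), X86_FEATURE_BMI2 */
static int zstd_cpu_has_bmi2(void)
{
    return boot_cpu_has(X86_FEATURE_BMI2) ? 1 : 0;
}
#else
static int zstd_cpu_has_bmi2(void)
{
    return 0;   /* conservative: plain code paths on other architectures */
}
#endif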
114070 +#endif /* HUF_STATIC_LINKING_ONLY */
114071 diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h
114072 new file mode 100644
114073 index 000000000000..4b5db5756a6f
114074 --- /dev/null
114075 +++ b/lib/zstd/common/mem.h
114076 @@ -0,0 +1,259 @@
114077 +/* SPDX-License-Identifier: GPL-2.0-only */
114079 + * Copyright (c) Yann Collet, Facebook, Inc.
114080 + * All rights reserved.
114082 + * This source code is licensed under both the BSD-style license (found in the
114083 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
114084 + * in the COPYING file in the root directory of this source tree).
114085 + * You may select, at your option, one of the above-listed licenses.
114086 + */
114088 +#ifndef MEM_H_MODULE
114089 +#define MEM_H_MODULE
114091 +/*-****************************************
114092 +*  Dependencies
114093 +******************************************/
114094 +#include <asm/unaligned.h>  /* get_unaligned, put_unaligned* */
114095 +#include <linux/compiler.h>  /* inline */
114096 +#include <linux/swab.h>  /* swab32, swab64 */
114097 +#include <linux/types.h>  /* size_t, ptrdiff_t */
114098 +#include "debug.h"  /* DEBUG_STATIC_ASSERT */
114100 +/*-****************************************
114101 +*  Compiler specifics
114102 +******************************************/
114103 +#define MEM_STATIC static inline
114105 +/*-**************************************************************
114106 +*  Basic Types
114107 +*****************************************************************/
114108 +typedef uint8_t  BYTE;
114109 +typedef uint16_t U16;
114110 +typedef int16_t  S16;
114111 +typedef uint32_t U32;
114112 +typedef int32_t  S32;
114113 +typedef uint64_t U64;
114114 +typedef int64_t  S64;
114116 +/*-**************************************************************
114117 +*  Memory I/O API
114118 +*****************************************************************/
114119 +/*=== Static platform detection ===*/
114120 +MEM_STATIC unsigned MEM_32bits(void);
114121 +MEM_STATIC unsigned MEM_64bits(void);
114122 +MEM_STATIC unsigned MEM_isLittleEndian(void);
114124 +/*=== Native unaligned read/write ===*/
114125 +MEM_STATIC U16 MEM_read16(const void* memPtr);
114126 +MEM_STATIC U32 MEM_read32(const void* memPtr);
114127 +MEM_STATIC U64 MEM_read64(const void* memPtr);
114128 +MEM_STATIC size_t MEM_readST(const void* memPtr);
114130 +MEM_STATIC void MEM_write16(void* memPtr, U16 value);
114131 +MEM_STATIC void MEM_write32(void* memPtr, U32 value);
114132 +MEM_STATIC void MEM_write64(void* memPtr, U64 value);
114134 +/*=== Little endian unaligned read/write ===*/
114135 +MEM_STATIC U16 MEM_readLE16(const void* memPtr);
114136 +MEM_STATIC U32 MEM_readLE24(const void* memPtr);
114137 +MEM_STATIC U32 MEM_readLE32(const void* memPtr);
114138 +MEM_STATIC U64 MEM_readLE64(const void* memPtr);
114139 +MEM_STATIC size_t MEM_readLEST(const void* memPtr);
114141 +MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
114142 +MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
114143 +MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
114144 +MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
114145 +MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
114147 +/*=== Big endian unaligned read/write ===*/
114148 +MEM_STATIC U32 MEM_readBE32(const void* memPtr);
114149 +MEM_STATIC U64 MEM_readBE64(const void* memPtr);
114150 +MEM_STATIC size_t MEM_readBEST(const void* memPtr);
114152 +MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
114153 +MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
114154 +MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
114156 +/*=== Byteswap ===*/
114157 +MEM_STATIC U32 MEM_swap32(U32 in);
114158 +MEM_STATIC U64 MEM_swap64(U64 in);
114159 +MEM_STATIC size_t MEM_swapST(size_t in);
114161 +/*-**************************************************************
114162 +*  Memory I/O Implementation
114163 +*****************************************************************/
114164 +MEM_STATIC unsigned MEM_32bits(void)
114166 +    return sizeof(size_t) == 4;
114169 +MEM_STATIC unsigned MEM_64bits(void)
114171 +    return sizeof(size_t) == 8;
114174 +#if defined(__LITTLE_ENDIAN)
114175 +#define MEM_LITTLE_ENDIAN 1
114176 +#else
114177 +#define MEM_LITTLE_ENDIAN 0
114178 +#endif
114180 +MEM_STATIC unsigned MEM_isLittleEndian(void)
114182 +    return MEM_LITTLE_ENDIAN;
114185 +MEM_STATIC U16 MEM_read16(const void *memPtr)
114187 +    return get_unaligned((const U16 *)memPtr);
114190 +MEM_STATIC U32 MEM_read32(const void *memPtr)
114192 +    return get_unaligned((const U32 *)memPtr);
114195 +MEM_STATIC U64 MEM_read64(const void *memPtr)
114197 +    return get_unaligned((const U64 *)memPtr);
114200 +MEM_STATIC size_t MEM_readST(const void *memPtr)
114202 +    return get_unaligned((const size_t *)memPtr);
114205 +MEM_STATIC void MEM_write16(void *memPtr, U16 value)
114207 +    put_unaligned(value, (U16 *)memPtr);
114210 +MEM_STATIC void MEM_write32(void *memPtr, U32 value)
114212 +    put_unaligned(value, (U32 *)memPtr);
114215 +MEM_STATIC void MEM_write64(void *memPtr, U64 value)
114217 +    put_unaligned(value, (U64 *)memPtr);
114220 +/*=== Little endian r/w ===*/
114222 +MEM_STATIC U16 MEM_readLE16(const void *memPtr)
114224 +    return get_unaligned_le16(memPtr);
114227 +MEM_STATIC void MEM_writeLE16(void *memPtr, U16 val)
114229 +    put_unaligned_le16(val, memPtr);
114232 +MEM_STATIC U32 MEM_readLE24(const void *memPtr)
114234 +    return MEM_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16);
114237 +MEM_STATIC void MEM_writeLE24(void *memPtr, U32 val)
114239 +       MEM_writeLE16(memPtr, (U16)val);
114240 +       ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
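A worked example of the 3-byte little-endian round trip above (buffer and value are illustrative):

BYTE buf[3];
MEM_writeLE24(buf, 0xABCDEF);
/* buf is now { 0xEF, 0xCD, 0xAB }: the low 16 bits go through
 * MEM_writeLE16, the third byte holds bits 16..23. */
U32 const v = MEM_readLE24(buf);   /* 0xCDEF + (0xAB << 16) == 0xABCDEF */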
114243 +MEM_STATIC U32 MEM_readLE32(const void *memPtr)
114245 +    return get_unaligned_le32(memPtr);
114248 +MEM_STATIC void MEM_writeLE32(void *memPtr, U32 val32)
114250 +    put_unaligned_le32(val32, memPtr);
114253 +MEM_STATIC U64 MEM_readLE64(const void *memPtr)
114255 +    return get_unaligned_le64(memPtr);
114258 +MEM_STATIC void MEM_writeLE64(void *memPtr, U64 val64)
114260 +    put_unaligned_le64(val64, memPtr);
114263 +MEM_STATIC size_t MEM_readLEST(const void *memPtr)
114265 +       if (MEM_32bits())
114266 +               return (size_t)MEM_readLE32(memPtr);
114267 +       else
114268 +               return (size_t)MEM_readLE64(memPtr);
114271 +MEM_STATIC void MEM_writeLEST(void *memPtr, size_t val)
114273 +       if (MEM_32bits())
114274 +               MEM_writeLE32(memPtr, (U32)val);
114275 +       else
114276 +               MEM_writeLE64(memPtr, (U64)val);
114279 +/*=== Big endian r/w ===*/
114281 +MEM_STATIC U32 MEM_readBE32(const void *memPtr)
114283 +    return get_unaligned_be32(memPtr);
114286 +MEM_STATIC void MEM_writeBE32(void *memPtr, U32 val32)
114288 +    put_unaligned_be32(val32, memPtr);
114291 +MEM_STATIC U64 MEM_readBE64(const void *memPtr)
114293 +    return get_unaligned_be64(memPtr);
114296 +MEM_STATIC void MEM_writeBE64(void *memPtr, U64 val64)
114298 +    put_unaligned_be64(val64, memPtr);
114301 +MEM_STATIC size_t MEM_readBEST(const void *memPtr)
114303 +       if (MEM_32bits())
114304 +               return (size_t)MEM_readBE32(memPtr);
114305 +       else
114306 +               return (size_t)MEM_readBE64(memPtr);
114309 +MEM_STATIC void MEM_writeBEST(void *memPtr, size_t val)
114311 +       if (MEM_32bits())
114312 +               MEM_writeBE32(memPtr, (U32)val);
114313 +       else
114314 +               MEM_writeBE64(memPtr, (U64)val);
114317 +MEM_STATIC U32 MEM_swap32(U32 in)
114319 +    return swab32(in);
114322 +MEM_STATIC U64 MEM_swap64(U64 in)
114324 +    return swab64(in);
114327 +MEM_STATIC size_t MEM_swapST(size_t in)
114329 +    if (MEM_32bits())
114330 +        return (size_t)MEM_swap32((U32)in);
114331 +    else
114332 +        return (size_t)MEM_swap64((U64)in);
114335 +#endif /* MEM_H_MODULE */
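A hedged usage sketch of the API above: reading the 4-byte little-endian zstd frame magic number (0xFD2FB528); `buf` is a hypothetical input pointer, not a name from this patch:

U32 const magic = MEM_readLE32(buf);
if (magic == 0xFD2FB528U) {
    /* looks like a zstd frame; proceed with header parsing */
}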
114336 diff --git a/lib/zstd/common/zstd_common.c b/lib/zstd/common/zstd_common.c
114337 new file mode 100644
114338 index 000000000000..3d7e35b309b5
114339 --- /dev/null
114340 +++ b/lib/zstd/common/zstd_common.c
114341 @@ -0,0 +1,83 @@
114343 + * Copyright (c) Yann Collet, Facebook, Inc.
114344 + * All rights reserved.
114346 + * This source code is licensed under both the BSD-style license (found in the
114347 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
114348 + * in the COPYING file in the root directory of this source tree).
114349 + * You may select, at your option, one of the above-listed licenses.
114350 + */
114354 +/*-*************************************
114355 +*  Dependencies
114356 +***************************************/
114357 +#define ZSTD_DEPS_NEED_MALLOC
114358 +#include "zstd_deps.h"   /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
114359 +#include "error_private.h"
114360 +#include "zstd_internal.h"
114363 +/*-****************************************
114364 +*  Version
114365 +******************************************/
114366 +unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
114368 +const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
114371 +/*-****************************************
114372 +*  ZSTD Error Management
114373 +******************************************/
114374 +#undef ZSTD_isError   /* defined within zstd_internal.h */
114375 +/*! ZSTD_isError() :
114376 + *  tells if a return value is an error code
114377 + *  symbol is required for external callers */
114378 +unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
114380 +/*! ZSTD_getErrorName() :
114381 + *  provides error code string from function result (useful for debugging) */
114382 +const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
114384 +/*! ZSTD_getError() :
114385 + *  convert a `size_t` function result into a proper ZSTD_errorCode enum */
114386 +ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
114388 +/*! ZSTD_getErrorString() :
114389 + *  provides error code string from enum */
114390 +const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
114394 +/*=**************************************************************
114395 +*  Custom allocator
114396 +****************************************************************/
114397 +void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
114399 +    if (customMem.customAlloc)
114400 +        return customMem.customAlloc(customMem.opaque, size);
114401 +    return ZSTD_malloc(size);
114404 +void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
114406 +    if (customMem.customAlloc) {
114407 +        /* calloc implemented as malloc+memset;
114408 +         * not as efficient as calloc, but next best guess for custom malloc */
114409 +        void* const ptr = customMem.customAlloc(customMem.opaque, size);
114410 +        if (ptr != NULL) ZSTD_memset(ptr, 0, size);   /* guard: a custom allocator may return NULL */
114411 +        return ptr;
114412 +    }
114413 +    return ZSTD_calloc(1, size);
114416 +void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
114418 +    if (ptr!=NULL) {
114419 +        if (customMem.customFree)
114420 +            customMem.customFree(customMem.opaque, ptr);
114421 +        else
114422 +            ZSTD_free(ptr);
114423 +    }
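For illustration, a ZSTD_customMem wired to the kernel's kvmalloc()/kvfree(), giving the ZSTD_custom* helpers above something to call. This is an assumption about how a caller might use the API, not what the patch itself installs:

#include <linux/mm.h>   /* kvmalloc(), kvfree() */

static void *zstd_kv_alloc(void *opaque, size_t size)
{
    (void)opaque;
    return kvmalloc(size, GFP_KERNEL);
}

static void zstd_kv_free(void *opaque, void *address)
{
    (void)opaque;
    kvfree(address);
}

/* Field order follows ZSTD_customMem: customAlloc, customFree, opaque. */
static const ZSTD_customMem zstd_kv_mem = { zstd_kv_alloc, zstd_kv_free, NULL };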
114425 diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h
114426 new file mode 100644
114427 index 000000000000..853b72426215
114428 --- /dev/null
114429 +++ b/lib/zstd/common/zstd_deps.h
114430 @@ -0,0 +1,125 @@
114431 +/* SPDX-License-Identifier: GPL-2.0-only */
114433 + * Copyright (c) Facebook, Inc.
114434 + * All rights reserved.
114436 + * This source code is licensed under both the BSD-style license (found in the
114437 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
114438 + * in the COPYING file in the root directory of this source tree).
114439 + * You may select, at your option, one of the above-listed licenses.
114440 + */
114443 + * This file provides common libc dependencies that zstd requires.
114444 + * The purpose is to allow replacing this file with a custom implementation
114445 + * to compile zstd without libc support.
114446 + */
114448 +/* Need:
114449 + * NULL
114450 + * INT_MAX
114451 + * UINT_MAX
114452 + * ZSTD_memcpy()
114453 + * ZSTD_memset()
114454 + * ZSTD_memmove()
114455 + */
114456 +#ifndef ZSTD_DEPS_COMMON
114457 +#define ZSTD_DEPS_COMMON
114459 +#include <linux/limits.h>
114460 +#include <linux/stddef.h>
114462 +#define ZSTD_memcpy(d,s,n) __builtin_memcpy((d),(s),(n))
114463 +#define ZSTD_memmove(d,s,n) __builtin_memmove((d),(s),(n))
114464 +#define ZSTD_memset(d,s,n) __builtin_memset((d),(s),(n))
114466 +#endif /* ZSTD_DEPS_COMMON */
114469 + * Define malloc as always failing. That means the user must
114470 + * either use ZSTD_customMem or statically allocate memory.
114471 + * Need:
114472 + * ZSTD_malloc()
114473 + * ZSTD_free()
114474 + * ZSTD_calloc()
114475 + */
114476 +#ifdef ZSTD_DEPS_NEED_MALLOC
114477 +#ifndef ZSTD_DEPS_MALLOC
114478 +#define ZSTD_DEPS_MALLOC
114480 +#define ZSTD_malloc(s) ({ (void)(s); NULL; })
114481 +#define ZSTD_free(p) ((void)(p))
114482 +#define ZSTD_calloc(n,s) ({ (void)(n); (void)(s); NULL; })
114484 +#endif /* ZSTD_DEPS_MALLOC */
114485 +#endif /* ZSTD_DEPS_NEED_MALLOC */
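The effect of the stubs above, spelled out (illustrative):

void *p = ZSTD_malloc(64);      /* expands to ({ (void)(64); NULL; }) — always NULL */
void *q = ZSTD_calloc(4, 16);   /* likewise NULL: memory must come from
                                 * ZSTD_customMem or static buffers */
ZSTD_free(p);                   /* a no-op: ((void)(p)) */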
114488 + * Provides 64-bit math support.
114489 + * Need:
114490 + * U64 ZSTD_div64(U64 dividend, U32 divisor)
114491 + */
114492 +#ifdef ZSTD_DEPS_NEED_MATH64
114493 +#ifndef ZSTD_DEPS_MATH64
114494 +#define ZSTD_DEPS_MATH64
114496 +#include <linux/math64.h>
114498 +static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
114499 +  return div_u64(dividend, divisor);
114502 +#endif /* ZSTD_DEPS_MATH64 */
114503 +#endif /* ZSTD_DEPS_NEED_MATH64 */
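For example (values illustrative), div_u64() keeps a 64-by-32 division safe on 32-bit kernels, where a plain `/` on a u64 would pull in libgcc helpers:

uint64_t const total  = 9000000000ULL;   /* bytes processed */
uint32_t const blocks = 70000;
uint64_t const avg    = ZSTD_div64(total, blocks);   /* == 128571 */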
114506 + * This is only requested when DEBUGLEVEL >= 1, meaning
114507 + * it is disabled in production.
114508 + * Need:
114509 + * assert()
114510 + */
114511 +#ifdef ZSTD_DEPS_NEED_ASSERT
114512 +#ifndef ZSTD_DEPS_ASSERT
114513 +#define ZSTD_DEPS_ASSERT
114515 +#include <linux/kernel.h>
114517 +#define assert(x) WARN_ON(!(x))
114519 +#endif /* ZSTD_DEPS_ASSERT */
114520 +#endif /* ZSTD_DEPS_NEED_ASSERT */
114523 + * This is only requested when DEBUGLEVEL >= 2, meaning
114524 + * it is disabled in production.
114525 + * Need:
114526 + * ZSTD_DEBUG_PRINT()
114527 + */
114528 +#ifdef ZSTD_DEPS_NEED_IO
114529 +#ifndef ZSTD_DEPS_IO
114530 +#define ZSTD_DEPS_IO
114532 +#include <linux/printk.h>
114534 +#define ZSTD_DEBUG_PRINT(...) pr_debug(__VA_ARGS__)
114536 +#endif /* ZSTD_DEPS_IO */
114537 +#endif /* ZSTD_DEPS_NEED_IO */
114540 + * Only requested when MSAN is enabled.
114541 + * Need:
114542 + * intptr_t
114543 + */
114544 +#ifdef ZSTD_DEPS_NEED_STDINT
114545 +#ifndef ZSTD_DEPS_STDINT
114546 +#define ZSTD_DEPS_STDINT
114549 + * The Linux Kernel doesn't provide intptr_t, only uintptr_t, which
114550 + * is an unsigned long.
114551 + */
114552 +typedef long intptr_t;
114554 +#endif /* ZSTD_DEPS_STDINT */
114555 +#endif /* ZSTD_DEPS_NEED_STDINT */
114556 diff --git a/lib/zstd/common/zstd_internal.h b/lib/zstd/common/zstd_internal.h
114557 new file mode 100644
114558 index 000000000000..1f939cbe05ed
114559 --- /dev/null
114560 +++ b/lib/zstd/common/zstd_internal.h
114561 @@ -0,0 +1,450 @@
114563 + * Copyright (c) Yann Collet, Facebook, Inc.
114564 + * All rights reserved.
114566 + * This source code is licensed under both the BSD-style license (found in the
114567 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
114568 + * in the COPYING file in the root directory of this source tree).
114569 + * You may select, at your option, one of the above-listed licenses.
114570 + */
114572 +#ifndef ZSTD_CCOMMON_H_MODULE
114573 +#define ZSTD_CCOMMON_H_MODULE
114575 +/* this module contains definitions which must be identical
114576 + * across compression, decompression and dictBuilder.
114577 + * It also contains a few functions useful to at least 2 of them
114578 + * and which benefit from being inlined */
114580 +/*-*************************************
114581 +*  Dependencies
114582 +***************************************/
114583 +#include "compiler.h"
114584 +#include "mem.h"
114585 +#include "debug.h"                 /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
114586 +#include "error_private.h"
114587 +#define ZSTD_STATIC_LINKING_ONLY
114588 +#include <linux/zstd.h>
114589 +#define FSE_STATIC_LINKING_ONLY
114590 +#include "fse.h"
114591 +#define HUF_STATIC_LINKING_ONLY
114592 +#include "huf.h"
114593 +#include <linux/xxhash.h>                /* XXH_reset, update, digest */
114594 +#define ZSTD_TRACE 0
114597 +/* ---- static assert (debug) --- */
114598 +#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
114599 +#define ZSTD_isError ERR_isError   /* for inlining */
114600 +#define FSE_isError  ERR_isError
114601 +#define HUF_isError  ERR_isError
114604 +/*-*************************************
114605 +*  shared macros
114606 +***************************************/
114607 +#undef MIN
114608 +#undef MAX
114609 +#define MIN(a,b) ((a)<(b) ? (a) : (b))
114610 +#define MAX(a,b) ((a)>(b) ? (a) : (b))
114613 + * Ignore: this is an internal helper.
114615 + * This is a helper function to help force C99-correctness during compilation.
114616 + * Under strict compilation modes, variadic macro arguments can't be empty.
114617 + * However, variadic function arguments can be. Using a function therefore lets
114618 + * us statically check that at least one (string) argument was passed,
114619 + * independent of the compilation flags.
114620 + */
114621 +static INLINE_KEYWORD UNUSED_ATTR
114622 +void _force_has_format_string(const char *format, ...) {
114623 +  (void)format;
114627 + * Ignore: this is an internal helper.
114629 + * We want to force this function invocation to be syntactically correct, but
114630 + * we don't want to force runtime evaluation of its arguments.
114631 + */
114632 +#define _FORCE_HAS_FORMAT_STRING(...) \
114633 +  if (0) { \
114634 +    _force_has_format_string(__VA_ARGS__); \
114635 +  }
114638 + * Return the specified error if the condition evaluates to true.
114640 + * In debug modes, prints additional information.
114641 + * In order to do that (particularly, printing the conditional that failed),
114642 + * this can't just wrap RETURN_ERROR().
114643 + */
114644 +#define RETURN_ERROR_IF(cond, err, ...) \
114645 +  if (cond) { \
114646 +    RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
114647 +           __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
114648 +    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
114649 +    RAWLOG(3, ": " __VA_ARGS__); \
114650 +    RAWLOG(3, "\n"); \
114651 +    return ERROR(err); \
114652 +  }
114655 + * Unconditionally return the specified error.
114657 + * In debug modes, prints additional information.
114658 + */
114659 +#define RETURN_ERROR(err, ...) \
114660 +  do { \
114661 +    RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
114662 +           __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
114663 +    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
114664 +    RAWLOG(3, ": " __VA_ARGS__); \
114665 +    RAWLOG(3, "\n"); \
114666 +    return ERROR(err); \
114667 +  } while(0);
114670 + * If the provided expression evaluates to an error code, returns that error code.
114672 + * In debug modes, prints additional information.
114673 + */
114674 +#define FORWARD_IF_ERROR(err, ...) \
114675 +  do { \
114676 +    size_t const err_code = (err); \
114677 +    if (ERR_isError(err_code)) { \
114678 +      RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
114679 +             __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
114680 +      _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
114681 +      RAWLOG(3, ": " __VA_ARGS__); \
114682 +      RAWLOG(3, "\n"); \
114683 +      return err_code; \
114684 +    } \
114685 +  } while(0);
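A sketch of the intended call pattern for these macros; checkedCopy and ZSTD_somePass are hypothetical names invented for the example, while dstSize_tooSmall is a real zstd error code:

static size_t checkedCopy(void *dst, size_t dstCapacity,
                          const void *src, size_t srcSize)
{
    RETURN_ERROR_IF(dstCapacity < srcSize, dstSize_tooSmall,
                    "need %u bytes, have %u",
                    (unsigned)srcSize, (unsigned)dstCapacity);
    FORWARD_IF_ERROR(ZSTD_somePass(dst, src, srcSize), "pass failed");
    ZSTD_memcpy(dst, src, srcSize);
    return srcSize;
}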
114688 +/*-*************************************
114689 +*  Common constants
114690 +***************************************/
114691 +#define ZSTD_OPT_NUM    (1<<12)
114693 +#define ZSTD_REP_NUM      3                 /* number of repcodes */
114694 +#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
114695 +static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
114697 +#define KB *(1 <<10)
114698 +#define MB *(1 <<20)
114699 +#define GB *(1U<<30)
114701 +#define BIT7 128
114702 +#define BIT6  64
114703 +#define BIT5  32
114704 +#define BIT4  16
114705 +#define BIT1   2
114706 +#define BIT0   1
114708 +#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
114709 +static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
114710 +static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
114712 +#define ZSTD_FRAMEIDSIZE 4   /* magic number size */
114714 +#define ZSTD_BLOCKHEADERSIZE 3   /* the C standard doesn't allow a `static const` variable to be initialized from another `static const` variable */
114715 +static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
114716 +typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
114718 +#define ZSTD_FRAMECHECKSUMSIZE 4
114720 +#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
114721 +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
114723 +#define HufLog 12
114724 +typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
114726 +#define LONGNBSEQ 0x7F00
114728 +#define MINMATCH 3
114730 +#define Litbits  8
114731 +#define MaxLit ((1<<Litbits) - 1)
114732 +#define MaxML   52
114733 +#define MaxLL   35
114734 +#define DefaultMaxOff 28
114735 +#define MaxOff  31
114736 +#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
114737 +#define MLFSELog    9
114738 +#define LLFSELog    9
114739 +#define OffFSELog   8
114740 +#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
114742 +#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
114743 +/* Each table cannot take more than #symbols * FSELog bits */
114744 +#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
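Plugging in the constants above:

/* ZSTD_MAX_FSE_HEADERS_SIZE = ((52+1)*9 + (35+1)*9 + (31+1)*8 + 7) / 8
 *                           = (477 + 324 + 256 + 7) / 8
 *                           = 1064 / 8 = 133 bytes */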
114746 +static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = {
114747 +     0, 0, 0, 0, 0, 0, 0, 0,
114748 +     0, 0, 0, 0, 0, 0, 0, 0,
114749 +     1, 1, 1, 1, 2, 2, 3, 3,
114750 +     4, 6, 7, 8, 9,10,11,12,
114751 +    13,14,15,16
114753 +static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
114754 +     4, 3, 2, 2, 2, 2, 2, 2,
114755 +     2, 2, 2, 2, 2, 1, 1, 1,
114756 +     2, 2, 2, 2, 2, 2, 2, 2,
114757 +     2, 3, 2, 1, 1, 1, 1, 1,
114758 +    -1,-1,-1,-1
114760 +#define LL_DEFAULTNORMLOG 6  /* for static allocation */
114761 +static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
114763 +static UNUSED_ATTR const U32 ML_bits[MaxML+1] = {
114764 +     0, 0, 0, 0, 0, 0, 0, 0,
114765 +     0, 0, 0, 0, 0, 0, 0, 0,
114766 +     0, 0, 0, 0, 0, 0, 0, 0,
114767 +     0, 0, 0, 0, 0, 0, 0, 0,
114768 +     1, 1, 1, 1, 2, 2, 3, 3,
114769 +     4, 4, 5, 7, 8, 9,10,11,
114770 +    12,13,14,15,16
114772 +static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
114773 +     1, 4, 3, 2, 2, 2, 2, 2,
114774 +     2, 1, 1, 1, 1, 1, 1, 1,
114775 +     1, 1, 1, 1, 1, 1, 1, 1,
114776 +     1, 1, 1, 1, 1, 1, 1, 1,
114777 +     1, 1, 1, 1, 1, 1, 1, 1,
114778 +     1, 1, 1, 1, 1, 1,-1,-1,
114779 +    -1,-1,-1,-1,-1
114781 +#define ML_DEFAULTNORMLOG 6  /* for static allocation */
114782 +static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
114784 +static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
114785 +     1, 1, 1, 1, 1, 1, 2, 2,
114786 +     2, 1, 1, 1, 1, 1, 1, 1,
114787 +     1, 1, 1, 1, 1, 1, 1, 1,
114788 +    -1,-1,-1,-1,-1
114790 +#define OF_DEFAULTNORMLOG 5  /* for static allocation */
114791 +static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
114794 +/*-*******************************************
114795 +*  Shared functions to include for inlining
114796 +*********************************************/
114797 +static void ZSTD_copy8(void* dst, const void* src) {
114798 +    ZSTD_memcpy(dst, src, 8);
114801 +#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
114802 +static void ZSTD_copy16(void* dst, const void* src) {
114803 +    ZSTD_memcpy(dst, src, 16);
114805 +#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
114807 +#define WILDCOPY_OVERLENGTH 32
114808 +#define WILDCOPY_VECLEN 16
114810 +typedef enum {
114811 +    ZSTD_no_overlap,
114812 +    ZSTD_overlap_src_before_dst
114813 +    /*  ZSTD_overlap_dst_before_src, */
114814 +} ZSTD_overlap_e;
114816 +/*! ZSTD_wildcopy() :
114817 + *  Custom version of ZSTD_memcpy() that may over-read/over-write up to WILDCOPY_OVERLENGTH bytes beyond the requested length (even if length==0)
114818 + *  @param ovtype controls the overlap detection
114819 + *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
114820 + *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
114821 + *           The src buffer must be before the dst buffer.
114822 + */
114823 +MEM_STATIC FORCE_INLINE_ATTR
114824 +void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
114826 +    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
114827 +    const BYTE* ip = (const BYTE*)src;
114828 +    BYTE* op = (BYTE*)dst;
114829 +    BYTE* const oend = op + length;
114831 +    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));
114833 +    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
114834 +        /* Handle short offset copies. */
114835 +        do {
114836 +            COPY8(op, ip)
114837 +        } while (op < oend);
114838 +    } else {
114839 +        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
114840 +        /* Separate out the first COPY16() call because the copy length is
114841 +         * almost certain to be short, so the branches have different
114842 +         * probabilities. Since it is almost certain to be short, only do
114843 +         * one COPY16() in the first call. Then, do two calls per loop since
114844 +         * at that point it is more likely to have a high trip count.
114845 +         */
114846 +#ifdef __aarch64__
114847 +        do {
114848 +            COPY16(op, ip);
114849 +        }
114850 +        while (op < oend);
114851 +#else
114852 +        ZSTD_copy16(op, ip);
114853 +        if (16 >= length) return;
114854 +        op += 16;
114855 +        ip += 16;
114856 +        do {
114857 +            COPY16(op, ip);
114858 +            COPY16(op, ip);
114859 +        }
114860 +        while (op < oend);
114861 +#endif
114862 +    }
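A hedged usage sketch: callers must reserve WILDCOPY_OVERLENGTH bytes of slack past the logical end of dst, because the loop rounds the copy up to whole 16-byte chunks (buffer names illustrative):

BYTE out[100 + WILDCOPY_OVERLENGTH];   /* the slack absorbs the over-write */
ZSTD_wildcopy(out, in, 100, ZSTD_no_overlap);
/* bytes past out[99] may be scribbled on, but never past the slack */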
114865 +MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
114867 +    size_t const length = MIN(dstCapacity, srcSize);
114868 +    if (length > 0) {
114869 +        ZSTD_memcpy(dst, src, length);
114870 +    }
114871 +    return length;
114874 +/* define "workspace is too large" as this number of times larger than needed */
114875 +#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
114877 +/* when the workspace has been too large
114878 + * for at least this many consecutive invocations,
114879 + * the context's memory usage is considered wasteful,
114880 + * because it is sized for a worst-case scenario that rarely occurs.
114881 + * In that case, resize it down to free some memory */
114882 +#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
114884 +/* Controls whether the input/output buffer is buffered or stable. */
114885 +typedef enum {
114886 +    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
114887 +    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
114888 +} ZSTD_bufferMode_e;
114891 +/*-*******************************************
114892 +*  Private declarations
114893 +*********************************************/
114894 +typedef struct seqDef_s {
114895 +    U32 offset;         /* Offset code of the sequence */
114896 +    U16 litLength;
114897 +    U16 matchLength;
114898 +} seqDef;
114900 +typedef struct {
114901 +    seqDef* sequencesStart;
114902 +    seqDef* sequences;      /* ptr to end of sequences */
114903 +    BYTE* litStart;
114904 +    BYTE* lit;              /* ptr to end of literals */
114905 +    BYTE* llCode;
114906 +    BYTE* mlCode;
114907 +    BYTE* ofCode;
114908 +    size_t maxNbSeq;
114909 +    size_t maxNbLit;
114911 +    /* longLengthPos and longLengthID are used to represent a single litLength or matchLength
114912 +     * in the seqStore whose value is larger than a U16 (if it exists). To do so, we increment
114913 +     * the existing value of the litLength or matchLength by 0x10000.
114914 +     */
114915 +    U32   longLengthID;   /* 0 == no longLength; 1 == Represent the long literal; 2 == Represent the long match; */
114916 +    U32   longLengthPos;  /* Index of the sequence to apply long length modification to */
114917 +} seqStore_t;
114919 +typedef struct {
114920 +    U32 litLength;
114921 +    U32 matchLength;
114922 +} ZSTD_sequenceLength;
114925 + * Returns the ZSTD_sequenceLength for the given sequence. It handles the decoding of long sequences
114926 + * indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength.
114927 + */
114928 +MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
114930 +    ZSTD_sequenceLength seqLen;
114931 +    seqLen.litLength = seq->litLength;
114932 +    seqLen.matchLength = seq->matchLength + MINMATCH;
114933 +    if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
114934 +        if (seqStore->longLengthID == 1) {
114935 +            seqLen.litLength += 0x10000;
114936 +        }
114937 +        if (seqStore->longLengthID == 2) {
114938 +            seqLen.matchLength += 0x10000;
114939 +        }
114940 +    }
114941 +    return seqLen;
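A worked example of the long-length correction (values illustrative):

/* A literal run of 70000 bytes overflows the U16 field: the compressor
 * stores (U16)70000 == 4464 and marks this position with longLengthID == 1.
 * Decoding adds the 0x10000 bias back: 4464 + 65536 == 70000. */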
114945 + * Contains the compressed frame size and an upper-bound for the decompressed frame size.
114946 + * Note: before using `compressedSize`, check for errors using ZSTD_isError().
114947 + *       Similarly, before using `decompressedBound`, check for errors using:
114948 + *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
114949 + */
114950 +typedef struct {
114951 +    size_t compressedSize;
114952 +    unsigned long long decompressedBound;
114953 +} ZSTD_frameSizeInfo;   /* decompress & legacy */
114955 +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
114956 +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
114958 +/* custom memory allocation functions */
114959 +void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
114960 +void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
114961 +void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
114964 +MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */
114966 +    assert(val != 0);
114967 +    {
114968 +#   if (__GNUC__ >= 3)   /* GCC Intrinsic */
114969 +        return __builtin_clz (val) ^ 31;
114970 +#   else   /* Software version */
114971 +        static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
114972 +        U32 v = val;
114973 +        v |= v >> 1;
114974 +        v |= v >> 2;
114975 +        v |= v >> 4;
114976 +        v |= v >> 8;
114977 +        v |= v >> 16;
114978 +        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
114979 +#   endif
114980 +    }
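For instance:

/* ZSTD_highbit32(0x1000): __builtin_clz(0x1000) == 19 and 19 ^ 31 == 12,
 * i.e. the position of the highest set bit, floor(log2(val)). */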
114984 +/* ZSTD_invalidateRepCodes() :
114985 + * ensures next compression will not use repcodes from previous block.
114986 + * Note : only works with regular variant;
114987 + *        do not use with extDict variant ! */
114988 +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
114991 +typedef struct {
114992 +    blockType_e blockType;
114993 +    U32 lastBlock;
114994 +    U32 origSize;
114995 +} blockProperties_t;   /* declared here for decompress and fullbench */
114997 +/*! ZSTD_getcBlockSize() :
114998 + *  Provides the size of compressed block from block header `src` */
114999 +/* Used by: decompress, fullbench (does not get its definition from here) */
115000 +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
115001 +                          blockProperties_t* bpPtr);
115003 +/*! ZSTD_decodeSeqHeaders() :
115004 + *  decode sequence header from src */
115005 +/* Used by: decompress, fullbench (does not get its definition from here) */
115006 +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
115007 +                       const void* src, size_t srcSize);
115011 +#endif   /* ZSTD_CCOMMON_H_MODULE */
115012 diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c
115013 deleted file mode 100644
115014 index b080264ed3ad..000000000000
115015 --- a/lib/zstd/compress.c
115016 +++ /dev/null
115017 @@ -1,3485 +0,0 @@
115019 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
115020 - * All rights reserved.
115022 - * This source code is licensed under the BSD-style license found in the
115023 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
115024 - * An additional grant of patent rights can be found in the PATENTS file in the
115025 - * same directory.
115027 - * This program is free software; you can redistribute it and/or modify it under
115028 - * the terms of the GNU General Public License version 2 as published by the
115029 - * Free Software Foundation. This program is dual-licensed; you may select
115030 - * either version 2 of the GNU General Public License ("GPL") or BSD license
115031 - * ("BSD").
115032 - */
115034 -/*-*************************************
115035 -*  Dependencies
115036 -***************************************/
115037 -#include "fse.h"
115038 -#include "huf.h"
115039 -#include "mem.h"
115040 -#include "zstd_internal.h" /* includes zstd.h */
115041 -#include <linux/kernel.h>
115042 -#include <linux/module.h>
115043 -#include <linux/string.h> /* memset */
115045 -/*-*************************************
115046 -*  Constants
115047 -***************************************/
115048 -static const U32 g_searchStrength = 8; /* control skip over incompressible data */
115049 -#define HASH_READ_SIZE 8
115050 -typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
115052 -/*-*************************************
115053 -*  Helper functions
115054 -***************************************/
115055 -size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
115057 -/*-*************************************
115058 -*  Sequence storage
115059 -***************************************/
115060 -static void ZSTD_resetSeqStore(seqStore_t *ssPtr)
115062 -       ssPtr->lit = ssPtr->litStart;
115063 -       ssPtr->sequences = ssPtr->sequencesStart;
115064 -       ssPtr->longLengthID = 0;
115067 -/*-*************************************
115068 -*  Context memory management
115069 -***************************************/
115070 -struct ZSTD_CCtx_s {
115071 -       const BYTE *nextSrc;  /* next block here to continue on curr prefix */
115072 -       const BYTE *base;     /* All regular indexes relative to this position */
115073 -       const BYTE *dictBase; /* extDict indexes relative to this position */
115074 -       U32 dictLimit;  /* below that point, need extDict */
115075 -       U32 lowLimit;    /* below that point, no more data */
115076 -       U32 nextToUpdate;     /* index from which to continue dictionary update */
115077 -       U32 nextToUpdate3;    /* index from which to continue dictionary update */
115078 -       U32 hashLog3;    /* dispatch table : larger == faster, more memory */
115079 -       U32 loadedDictEnd;    /* index of end of dictionary */
115080 -       U32 forceWindow;      /* force back-references to respect limit of 1<<wLog, even for dictionary */
115081 -       U32 forceRawDict;     /* Force loading dictionary in "content-only" mode (no header analysis) */
115082 -       ZSTD_compressionStage_e stage;
115083 -       U32 rep[ZSTD_REP_NUM];
115084 -       U32 repToConfirm[ZSTD_REP_NUM];
115085 -       U32 dictID;
115086 -       ZSTD_parameters params;
115087 -       void *workSpace;
115088 -       size_t workSpaceSize;
115089 -       size_t blockSize;
115090 -       U64 frameContentSize;
115091 -       struct xxh64_state xxhState;
115092 -       ZSTD_customMem customMem;
115094 -       seqStore_t seqStore; /* sequences storage ptrs */
115095 -       U32 *hashTable;
115096 -       U32 *hashTable3;
115097 -       U32 *chainTable;
115098 -       HUF_CElt *hufTable;
115099 -       U32 flagStaticTables;
115100 -       HUF_repeat flagStaticHufTable;
115101 -       FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
115102 -       FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
115103 -       FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
115104 -       unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32];
115107 -size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams)
115109 -       size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
115110 -       U32 const divider = (cParams.searchLength == 3) ? 3 : 4;
115111 -       size_t const maxNbSeq = blockSize / divider;
115112 -       size_t const tokenSpace = blockSize + 11 * maxNbSeq;
115113 -       size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
115114 -       size_t const hSize = ((size_t)1) << cParams.hashLog;
115115 -       U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
115116 -       size_t const h3Size = ((size_t)1) << hashLog3;
115117 -       size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
115118 -       size_t const optSpace =
115119 -           ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
115120 -       size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
115121 -                                    (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
115123 -       return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize);
115126 -static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
115128 -       ZSTD_CCtx *cctx;
115129 -       if (!customMem.customAlloc || !customMem.customFree)
115130 -               return NULL;
115131 -       cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
115132 -       if (!cctx)
115133 -               return NULL;
115134 -       memset(cctx, 0, sizeof(ZSTD_CCtx));
115135 -       cctx->customMem = customMem;
115136 -       return cctx;
115139 -ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize)
115141 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
115142 -       ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem);
115143 -       if (cctx) {
115144 -               cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize);
115145 -       }
115146 -       return cctx;
115149 -size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx)
115151 -       if (cctx == NULL)
115152 -               return 0; /* support free on NULL */
115153 -       ZSTD_free(cctx->workSpace, cctx->customMem);
115154 -       ZSTD_free(cctx, cctx->customMem);
115155 -       return 0; /* reserved as a potential error code in the future */
115158 -const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); }
115160 -static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; }
115162 -/** ZSTD_checkParams() :
115163 -       ensure param values remain within authorized range.
115164 -       @return : 0, or an error code if one value is beyond authorized range */
115165 -size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
115167 -#define CLAMPCHECK(val, min, max)                                       \
115168 -       {                                                               \
115169 -               if ((val < min) | (val > max))                          \
115170 -                       return ERROR(compressionParameter_unsupported); \
115171 -       }
115172 -       CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
115173 -       CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
115174 -       CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
115175 -       CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
115176 -       CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
115177 -       CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
115178 -       if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2)
115179 -               return ERROR(compressionParameter_unsupported);
115180 -       return 0;
115183 -/** ZSTD_cycleLog() :
115184 - *  condition for correct operation : hashLog > 1 */
115185 -static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
115187 -       U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
115188 -       return hashLog - btScale;
115191 -/** ZSTD_adjustCParams() :
115192 -       optimize `cPar` for a given input (`srcSize` and `dictSize`).
115193 -       mostly downsizing to reduce memory consumption and initialization.
115194 -       Both `srcSize` and `dictSize` are optional (use 0 if unknown),
115195 -       but if both are 0, no optimization can be done.
115196 -       Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */
115197 -ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
115199 -       if (srcSize + dictSize == 0)
115200 -               return cPar; /* no size information available : no adjustment */
115202 -       /* resize params, to use less memory when necessary */
115203 -       {
115204 -               U32 const minSrcSize = (srcSize == 0) ? 500 : 0;
115205 -               U64 const rSize = srcSize + dictSize + minSrcSize;
115206 -               if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) {
115207 -                       U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
115208 -                       if (cPar.windowLog > srcLog)
115209 -                               cPar.windowLog = srcLog;
115210 -               }
115211 -       }
115212 -       if (cPar.hashLog > cPar.windowLog)
115213 -               cPar.hashLog = cPar.windowLog;
115214 -       {
115215 -               U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
115216 -               if (cycleLog > cPar.windowLog)
115217 -                       cPar.chainLog -= (cycleLog - cPar.windowLog);
115218 -       }
115220 -       if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
115221 -               cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
115223 -       return cPar;
115226 -static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
115228 -       return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) &
115229 -              (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3));
115232 -/*! ZSTD_continueCCtx() :
115233 -       reuse CCtx without reset (note : requires no dictionary) */
115234 -static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize)
115236 -       U32 const end = (U32)(cctx->nextSrc - cctx->base);
115237 -       cctx->params = params;
115238 -       cctx->frameContentSize = frameContentSize;
115239 -       cctx->lowLimit = end;
115240 -       cctx->dictLimit = end;
115241 -       cctx->nextToUpdate = end + 1;
115242 -       cctx->stage = ZSTDcs_init;
115243 -       cctx->dictID = 0;
115244 -       cctx->loadedDictEnd = 0;
115245 -       {
115246 -               int i;
115247 -               for (i = 0; i < ZSTD_REP_NUM; i++)
115248 -                       cctx->rep[i] = repStartValue[i];
115249 -       }
115250 -       cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
115251 -       xxh64_reset(&cctx->xxhState, 0);
115252 -       return 0;
115255 -typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
115257 -/*! ZSTD_resetCCtx_advanced() :
115258 -       note : `params` must be validated */
115259 -static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp)
115261 -       if (crp == ZSTDcrp_continue)
115262 -               if (ZSTD_equivalentParams(params, zc->params)) {
115263 -                       zc->flagStaticTables = 0;
115264 -                       zc->flagStaticHufTable = HUF_repeat_none;
115265 -                       return ZSTD_continueCCtx(zc, params, frameContentSize);
115266 -               }
115268 -       {
115269 -               size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
115270 -               U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4;
115271 -               size_t const maxNbSeq = blockSize / divider;
115272 -               size_t const tokenSpace = blockSize + 11 * maxNbSeq;
115273 -               size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
115274 -               size_t const hSize = ((size_t)1) << params.cParams.hashLog;
115275 -               U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
115276 -               size_t const h3Size = ((size_t)1) << hashLog3;
115277 -               size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
115278 -               void *ptr;
115280 -               /* Check if workSpace is large enough, alloc a new one if needed */
115281 -               {
115282 -                       size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) +
115283 -                                               (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
115284 -                       size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
115285 -                                                  (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
115286 -                       if (zc->workSpaceSize < neededSpace) {
115287 -                               ZSTD_free(zc->workSpace, zc->customMem);
115288 -                               zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
115289 -                               if (zc->workSpace == NULL)
115290 -                                       return ERROR(memory_allocation);
115291 -                               zc->workSpaceSize = neededSpace;
115292 -                       }
115293 -               }
115295 -               if (crp != ZSTDcrp_noMemset)
115296 -                       memset(zc->workSpace, 0, tableSpace); /* reset tables only */
115297 -               xxh64_reset(&zc->xxhState, 0);
115298 -               zc->hashLog3 = hashLog3;
115299 -               zc->hashTable = (U32 *)(zc->workSpace);
115300 -               zc->chainTable = zc->hashTable + hSize;
115301 -               zc->hashTable3 = zc->chainTable + chainSize;
115302 -               ptr = zc->hashTable3 + h3Size;
115303 -               zc->hufTable = (HUF_CElt *)ptr;
115304 -               zc->flagStaticTables = 0;
115305 -               zc->flagStaticHufTable = HUF_repeat_none;
115306 -               ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
115308 -               zc->nextToUpdate = 1;
115309 -               zc->nextSrc = NULL;
115310 -               zc->base = NULL;
115311 -               zc->dictBase = NULL;
115312 -               zc->dictLimit = 0;
115313 -               zc->lowLimit = 0;
115314 -               zc->params = params;
115315 -               zc->blockSize = blockSize;
115316 -               zc->frameContentSize = frameContentSize;
115317 -               {
115318 -                       int i;
115319 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
115320 -                               zc->rep[i] = repStartValue[i];
115321 -               }
115323 -               if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
115324 -                       zc->seqStore.litFreq = (U32 *)ptr;
115325 -                       zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits);
115326 -                       zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1);
115327 -                       zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1);
115328 -                       ptr = zc->seqStore.offCodeFreq + (MaxOff + 1);
115329 -                       zc->seqStore.matchTable = (ZSTD_match_t *)ptr;
115330 -                       ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1;
115331 -                       zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr;
115332 -                       ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1;
115333 -                       zc->seqStore.litLengthSum = 0;
115334 -               }
115335 -               zc->seqStore.sequencesStart = (seqDef *)ptr;
115336 -               ptr = zc->seqStore.sequencesStart + maxNbSeq;
115337 -               zc->seqStore.llCode = (BYTE *)ptr;
115338 -               zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
115339 -               zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
115340 -               zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
115342 -               zc->stage = ZSTDcs_init;
115343 -               zc->dictID = 0;
115344 -               zc->loadedDictEnd = 0;
115346 -               return 0;
115347 -       }
115350 -/* ZSTD_invalidateRepCodes() :
115351 - * ensures next compression will not use repcodes from previous block.
115352 - * Note : only works with regular variant;
115353 - *        do not use with extDict variant ! */
115354 -void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx)
115356 -       int i;
115357 -       for (i = 0; i < ZSTD_REP_NUM; i++)
115358 -               cctx->rep[i] = 0;
115361 -/*! ZSTD_copyCCtx() :
115362 -*   Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
115363 -*   Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
115364 -*   @return : 0, or an error code */
115365 -size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize)
115367 -       if (srcCCtx->stage != ZSTDcs_init)
115368 -               return ERROR(stage_wrong);
115370 -       memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
115371 -       {
115372 -               ZSTD_parameters params = srcCCtx->params;
115373 -               params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
115374 -               ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
115375 -       }
115377 -       /* copy tables */
115378 -       {
115379 -               size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
115380 -               size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
115381 -               size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
115382 -               size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
115383 -               memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
115384 -       }
115386 -       /* copy dictionary offsets */
115387 -       dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
115388 -       dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3;
115389 -       dstCCtx->nextSrc = srcCCtx->nextSrc;
115390 -       dstCCtx->base = srcCCtx->base;
115391 -       dstCCtx->dictBase = srcCCtx->dictBase;
115392 -       dstCCtx->dictLimit = srcCCtx->dictLimit;
115393 -       dstCCtx->lowLimit = srcCCtx->lowLimit;
115394 -       dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd;
115395 -       dstCCtx->dictID = srcCCtx->dictID;
115397 -       /* copy entropy tables */
115398 -       dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
115399 -       dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
115400 -       if (srcCCtx->flagStaticTables) {
115401 -               memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
115402 -               memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
115403 -               memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
115404 -       }
115405 -       if (srcCCtx->flagStaticHufTable) {
115406 -               memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4);
115407 -       }
115409 -       return 0;
115412 -/*! ZSTD_reduceTable() :
115413 -*   reduce table indexes by `reducerValue` */
115414 -static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue)
115416 -       U32 u;
115417 -       for (u = 0; u < size; u++) {
115418 -               if (table[u] < reducerValue)
115419 -                       table[u] = 0;
115420 -               else
115421 -                       table[u] -= reducerValue;
115422 -       }
115425 -/*! ZSTD_reduceIndex() :
115426 -*   rescale all indexes to avoid future overflow (indexes are U32) */
115427 -static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue)
115429 -       {
115430 -               U32 const hSize = 1 << zc->params.cParams.hashLog;
115431 -               ZSTD_reduceTable(zc->hashTable, hSize, reducerValue);
115432 -       }
115434 -       {
115435 -               U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
115436 -               ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue);
115437 -       }
115439 -       {
115440 -               U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
115441 -               ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue);
115442 -       }
115445 -/*-*******************************************************
115446 -*  Block entropic compression
115447 -*********************************************************/
115449 -/* See doc/zstd_compression_format.md for detailed format description */
115451 -size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
115453 -       if (srcSize + ZSTD_blockHeaderSize > dstCapacity)
115454 -               return ERROR(dstSize_tooSmall);
115455 -       memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize);
115456 -       ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
115457 -       return ZSTD_blockHeaderSize + srcSize;
115458 -}
115459 -
115460 -static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
115461 -{
115462 -       BYTE *const ostart = (BYTE * const)dst;
115463 -       U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
115465 -       if (srcSize + flSize > dstCapacity)
115466 -               return ERROR(dstSize_tooSmall);
115468 -       switch (flSize) {
115469 -       case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break;
115470 -       case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break;
115471 -       default: /*note : should not be necessary : flSize is within {1,2,3} */
115472 -       case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break;
115473 -       }
115475 -       memcpy(ostart + flSize, src, srcSize);
115476 -       return srcSize + flSize;
115477 -}
115478 -
115479 -static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
115480 -{
115481 -       BYTE *const ostart = (BYTE * const)dst;
115482 -       U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
115484 -       (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
115486 -       switch (flSize) {
115487 -       case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break;
115488 -       case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break;
115489 -       default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */
115490 -       case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break;
115491 -       }
115493 -       ostart[flSize] = *(const BYTE *)src;
115494 -       return flSize + 1;
115495 -}
115496 -
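The raw and RLE literal paths above share one header-size rule; restated as a standalone helper (a sketch, not part of the patch):

#include <stddef.h>
#include <stdint.h>

/* Sketch of the flSize computation above: raw/RLE literal headers take
 * 1 byte up to 31 literals, 2 bytes up to 4095, otherwise 3. */
static uint32_t fl_size_sketch(size_t srcSize)
{
	return (uint32_t)(1 + (srcSize > 31) + (srcSize > 4095));
}
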
115497 -static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
115499 -static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
115500 -{
115501 -       size_t const minGain = ZSTD_minGain(srcSize);
115502 -       size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
115503 -       BYTE *const ostart = (BYTE *)dst;
115504 -       U32 singleStream = srcSize < 256;
115505 -       symbolEncodingType_e hType = set_compressed;
115506 -       size_t cLitSize;
115508 -/* small ? don't even attempt compression (speed opt) */
115509 -#define LITERAL_NOENTROPY 63
115510 -       {
115511 -               size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
115512 -               if (srcSize <= minLitSize)
115513 -                       return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
115514 -       }
115516 -       if (dstCapacity < lhSize + 1)
115517 -               return ERROR(dstSize_tooSmall); /* not enough space for compression */
115518 -       {
115519 -               HUF_repeat repeat = zc->flagStaticHufTable;
115520 -               int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
115521 -               if (repeat == HUF_repeat_valid && lhSize == 3)
115522 -                       singleStream = 1;
115523 -               cLitSize = singleStream ? HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
115524 -                                                               sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
115525 -                                       : HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
115526 -                                                               sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
115527 -               if (repeat != HUF_repeat_none) {
115528 -                       hType = set_repeat;
115529 -               } /* reused the existing table */
115530 -               else {
115531 -                       zc->flagStaticHufTable = HUF_repeat_check;
115532 -               } /* now have a table to reuse */
115533 -       }
115535 -       if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) {
115536 -               zc->flagStaticHufTable = HUF_repeat_none;
115537 -               return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
115538 -       }
115539 -       if (cLitSize == 1) {
115540 -               zc->flagStaticHufTable = HUF_repeat_none;
115541 -               return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
115542 -       }
115544 -       /* Build header */
115545 -       switch (lhSize) {
115546 -       case 3: /* 2 - 2 - 10 - 10 */
115547 -       {
115548 -               U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14);
115549 -               ZSTD_writeLE24(ostart, lhc);
115550 -               break;
115551 -       }
115552 -       case 4: /* 2 - 2 - 14 - 14 */
115553 -       {
115554 -               U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18);
115555 -               ZSTD_writeLE32(ostart, lhc);
115556 -               break;
115557 -       }
115558 -       default: /* should not be necessary, lhSize is only {3,4,5} */
115559 -       case 5:  /* 2 - 2 - 18 - 18 */
115560 -       {
115561 -               U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22);
115562 -               ZSTD_writeLE32(ostart, lhc);
115563 -               ostart[4] = (BYTE)(cLitSize >> 10);
115564 -               break;
115565 -       }
115566 -       }
115567 -       return lhSize + cLitSize;
115568 -}
115569 -
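In the common lhSize == 3 case above, the header written little-endian packs four fields into 24 bits: 2 bits of symbol type, 2 bits of size format, 10 bits of regenerated size and 10 bits of compressed size. A standalone restatement of that packing (a sketch with illustrative names, not part of the patch):

#include <stdint.h>

/* Sketch of the lhSize==3 literals header: bits [0,1] hType,
 * bits [2,3] size format, bits [4,13] srcSize, bits [14,23] cLitSize.
 * Assumes both sizes fit in 10 bits, as the lhSize choice guarantees. */
static uint32_t pack_lh3_sketch(uint32_t hType, int singleStream,
				uint32_t srcSize, uint32_t cLitSize)
{
	return hType + ((uint32_t)(!singleStream) << 2) + (srcSize << 4) + (cLitSize << 14);
}
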
115570 -static const BYTE LL_Code[64] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18,
115571 -                                19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
115572 -                                23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24};
115574 -static const BYTE ML_Code[128] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
115575 -                                 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38,
115576 -                                 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
115577 -                                 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42,
115578 -                                 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42};
115580 -void ZSTD_seqToCodes(const seqStore_t *seqStorePtr)
115581 -{
115582 -       BYTE const LL_deltaCode = 19;
115583 -       BYTE const ML_deltaCode = 36;
115584 -       const seqDef *const sequences = seqStorePtr->sequencesStart;
115585 -       BYTE *const llCodeTable = seqStorePtr->llCode;
115586 -       BYTE *const ofCodeTable = seqStorePtr->ofCode;
115587 -       BYTE *const mlCodeTable = seqStorePtr->mlCode;
115588 -       U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
115589 -       U32 u;
115590 -       for (u = 0; u < nbSeq; u++) {
115591 -               U32 const llv = sequences[u].litLength;
115592 -               U32 const mlv = sequences[u].matchLength;
115593 -               llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
115594 -               ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
115595 -               mlCodeTable[u] = (mlv > 127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
115596 -       }
115597 -       if (seqStorePtr->longLengthID == 1)
115598 -               llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
115599 -       if (seqStorePtr->longLengthID == 2)
115600 -               mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
115601 -}
115602 -
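ZSTD_seqToCodes maps small lengths through a lookup table and long ones to highbit(value) + delta, so each code stands for a base value plus a run of extra bits. The literal-length half as a self-contained sketch (not part of the patch; __builtin_clz stands in for ZSTD_highbit32, well-defined here since llv > 63 implies llv != 0):

#include <stdint.h>

/* Sketch of the llCodeTable computation above; ll_code_table plays the
 * role of LL_Code[64]. */
static uint8_t ll_code_sketch(uint32_t llv, const uint8_t ll_code_table[64])
{
	const uint8_t LL_deltaCode = 19;
	return (llv > 63) ? (uint8_t)((31 - __builtin_clz(llv)) + LL_deltaCode)
			  : ll_code_table[llv];
}
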
115603 -ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity)
115604 -{
115605 -       const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
115606 -       const seqStore_t *seqStorePtr = &(zc->seqStore);
115607 -       FSE_CTable *CTable_LitLength = zc->litlengthCTable;
115608 -       FSE_CTable *CTable_OffsetBits = zc->offcodeCTable;
115609 -       FSE_CTable *CTable_MatchLength = zc->matchlengthCTable;
115610 -       U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
115611 -       const seqDef *const sequences = seqStorePtr->sequencesStart;
115612 -       const BYTE *const ofCodeTable = seqStorePtr->ofCode;
115613 -       const BYTE *const llCodeTable = seqStorePtr->llCode;
115614 -       const BYTE *const mlCodeTable = seqStorePtr->mlCode;
115615 -       BYTE *const ostart = (BYTE *)dst;
115616 -       BYTE *const oend = ostart + dstCapacity;
115617 -       BYTE *op = ostart;
115618 -       size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
115619 -       BYTE *seqHead;
115621 -       U32 *count;
115622 -       S16 *norm;
115623 -       U32 *workspace;
115624 -       size_t workspaceSize = sizeof(zc->tmpCounters);
115625 -       {
115626 -               size_t spaceUsed32 = 0;
115627 -               count = (U32 *)zc->tmpCounters + spaceUsed32;
115628 -               spaceUsed32 += MaxSeq + 1;
115629 -               norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32);
115630 -               spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
115632 -               workspace = (U32 *)zc->tmpCounters + spaceUsed32;
115633 -               workspaceSize -= (spaceUsed32 << 2);
115634 -       }
115636 -       /* Compress literals */
115637 -       {
115638 -               const BYTE *const literals = seqStorePtr->litStart;
115639 -               size_t const litSize = seqStorePtr->lit - literals;
115640 -               size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
115641 -               if (ZSTD_isError(cSize))
115642 -                       return cSize;
115643 -               op += cSize;
115644 -       }
115646 -       /* Sequences Header */
115647 -       if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */)
115648 -               return ERROR(dstSize_tooSmall);
115649 -       if (nbSeq < 0x7F)
115650 -               *op++ = (BYTE)nbSeq;
115651 -       else if (nbSeq < LONGNBSEQ)
115652 -               op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2;
115653 -       else
115654 -               op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3;
115655 -       if (nbSeq == 0)
115656 -               return op - ostart;
115658 -       /* seqHead : flags for FSE encoding type */
115659 -       seqHead = op++;
115661 -#define MIN_SEQ_FOR_DYNAMIC_FSE 64
115662 -#define MAX_SEQ_FOR_STATIC_FSE 1000
115664 -       /* convert length/distances into codes */
115665 -       ZSTD_seqToCodes(seqStorePtr);
115667 -       /* CTable for Literal Lengths */
115668 -       {
115669 -               U32 max = MaxLL;
115670 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
115671 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
115672 -                       *op++ = llCodeTable[0];
115673 -                       FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
115674 -                       LLtype = set_rle;
115675 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
115676 -                       LLtype = set_repeat;
115677 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) {
115678 -                       FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize);
115679 -                       LLtype = set_basic;
115680 -               } else {
115681 -                       size_t nbSeq_1 = nbSeq;
115682 -                       const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
115683 -                       if (count[llCodeTable[nbSeq - 1]] > 1) {
115684 -                               count[llCodeTable[nbSeq - 1]]--;
115685 -                               nbSeq_1--;
115686 -                       }
115687 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
115688 -                       {
115689 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
115690 -                               if (FSE_isError(NCountSize))
115691 -                                       return NCountSize;
115692 -                               op += NCountSize;
115693 -                       }
115694 -                       FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize);
115695 -                       LLtype = set_compressed;
115696 -               }
115697 -       }
115699 -       /* CTable for Offsets */
115700 -       {
115701 -               U32 max = MaxOff;
115702 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
115703 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
115704 -                       *op++ = ofCodeTable[0];
115705 -                       FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
115706 -                       Offtype = set_rle;
115707 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
115708 -                       Offtype = set_repeat;
115709 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) {
115710 -                       FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize);
115711 -                       Offtype = set_basic;
115712 -               } else {
115713 -                       size_t nbSeq_1 = nbSeq;
115714 -                       const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
115715 -                       if (count[ofCodeTable[nbSeq - 1]] > 1) {
115716 -                               count[ofCodeTable[nbSeq - 1]]--;
115717 -                               nbSeq_1--;
115718 -                       }
115719 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
115720 -                       {
115721 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
115722 -                               if (FSE_isError(NCountSize))
115723 -                                       return NCountSize;
115724 -                               op += NCountSize;
115725 -                       }
115726 -                       FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize);
115727 -                       Offtype = set_compressed;
115728 -               }
115729 -       }
115731 -       /* CTable for MatchLengths */
115732 -       {
115733 -               U32 max = MaxML;
115734 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
115735 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
115736 -                       *op++ = *mlCodeTable;
115737 -                       FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
115738 -                       MLtype = set_rle;
115739 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
115740 -                       MLtype = set_repeat;
115741 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) {
115742 -                       FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize);
115743 -                       MLtype = set_basic;
115744 -               } else {
115745 -                       size_t nbSeq_1 = nbSeq;
115746 -                       const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
115747 -                       if (count[mlCodeTable[nbSeq - 1]] > 1) {
115748 -                               count[mlCodeTable[nbSeq - 1]]--;
115749 -                               nbSeq_1--;
115750 -                       }
115751 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
115752 -                       {
115753 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
115754 -                               if (FSE_isError(NCountSize))
115755 -                                       return NCountSize;
115756 -                               op += NCountSize;
115757 -                       }
115758 -                       FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize);
115759 -                       MLtype = set_compressed;
115760 -               }
115761 -       }
115763 -       *seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2));
115764 -       zc->flagStaticTables = 0;
115766 -       /* Encoding Sequences */
115767 -       {
115768 -               BIT_CStream_t blockStream;
115769 -               FSE_CState_t stateMatchLength;
115770 -               FSE_CState_t stateOffsetBits;
115771 -               FSE_CState_t stateLitLength;
115773 -               CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */
115775 -               /* first symbols */
115776 -               FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]);
115777 -               FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]);
115778 -               FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]);
115779 -               BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]);
115780 -               if (ZSTD_32bits())
115781 -                       BIT_flushBits(&blockStream);
115782 -               BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]);
115783 -               if (ZSTD_32bits())
115784 -                       BIT_flushBits(&blockStream);
115785 -               if (longOffsets) {
115786 -                       U32 const ofBits = ofCodeTable[nbSeq - 1];
115787 -                       int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
115788 -                       if (extraBits) {
115789 -                               BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits);
115790 -                               BIT_flushBits(&blockStream);
115791 -                       }
115792 -                       BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits);
115793 -               } else {
115794 -                       BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]);
115795 -               }
115796 -               BIT_flushBits(&blockStream);
115798 -               {
115799 -                       size_t n;
115800 -                       for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */
115801 -                               BYTE const llCode = llCodeTable[n];
115802 -                               BYTE const ofCode = ofCodeTable[n];
115803 -                               BYTE const mlCode = mlCodeTable[n];
115804 -                               U32 const llBits = LL_bits[llCode];
115805 -                               U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
115806 -                               U32 const mlBits = ML_bits[mlCode];
115807 -                               /* (7)*/                                                            /* (7)*/
115808 -                               FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */  /* 15 */
115809 -                               FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
115810 -                               if (ZSTD_32bits())
115811 -                                       BIT_flushBits(&blockStream);                              /* (7)*/
115812 -                               FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
115813 -                               if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
115814 -                                       BIT_flushBits(&blockStream); /* (7)*/
115815 -                               BIT_addBits(&blockStream, sequences[n].litLength, llBits);
115816 -                               if (ZSTD_32bits() && ((llBits + mlBits) > 24))
115817 -                                       BIT_flushBits(&blockStream);
115818 -                               BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
115819 -                               if (ZSTD_32bits())
115820 -                                       BIT_flushBits(&blockStream); /* (7)*/
115821 -                               if (longOffsets) {
115822 -                                       int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
115823 -                                       if (extraBits) {
115824 -                                               BIT_addBits(&blockStream, sequences[n].offset, extraBits);
115825 -                                               BIT_flushBits(&blockStream); /* (7)*/
115826 -                                       }
115827 -                                       BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */
115828 -                               } else {
115829 -                                       BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
115830 -                               }
115831 -                               BIT_flushBits(&blockStream); /* (7)*/
115832 -                       }
115833 -               }
115835 -               FSE_flushCState(&blockStream, &stateMatchLength);
115836 -               FSE_flushCState(&blockStream, &stateOffsetBits);
115837 -               FSE_flushCState(&blockStream, &stateLitLength);
115839 -               {
115840 -                       size_t const streamSize = BIT_closeCStream(&blockStream);
115841 -                       if (streamSize == 0)
115842 -                               return ERROR(dstSize_tooSmall); /* not enough space */
115843 -                       op += streamSize;
115844 -               }
115845 -       }
115846 -       return op - ostart;
115847 -}
115848 -
115849 -ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize)
115850 -{
115851 -       size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity);
115852 -       size_t const minGain = ZSTD_minGain(srcSize);
115853 -       size_t const maxCSize = srcSize - minGain;
115854 -       /* If the srcSize <= dstCapacity, then there is enough space to write a
115855 -        * raw uncompressed block. Since we ran out of space, the block must not
115856 -        * be compressible, so fall back to a raw uncompressed block.
115857 -        */
115858 -       int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
115859 -       int i;
115861 -       if (ZSTD_isError(cSize) && !uncompressibleError)
115862 -               return cSize;
115863 -       if (cSize >= maxCSize || uncompressibleError) {
115864 -               zc->flagStaticHufTable = HUF_repeat_none;
115865 -               return 0;
115866 -       }
115867 -       /* confirm repcodes */
115868 -       for (i = 0; i < ZSTD_REP_NUM; i++)
115869 -               zc->rep[i] = zc->repToConfirm[i];
115870 -       return cSize;
115871 -}
115872 -
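The test above keeps a compressed block only when it undercuts the raw block by more than srcSize/64 + 2 bytes; for a 4096-byte input, minGain is 66, so a result of 4030 bytes or more is rejected in favour of a raw block. As a standalone predicate (a sketch, not part of the patch):

#include <stddef.h>

/* Sketch of the ZSTD_minGain acceptance rule above; assumes
 * srcSize > (srcSize >> 6) + 2, which holds for real block sizes. */
static int keep_compressed_sketch(size_t cSize, size_t srcSize)
{
	size_t const minGain = (srcSize >> 6) + 2;
	return cSize < srcSize - minGain;
}
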
115873 -/*! ZSTD_storeSeq() :
115874 -       Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
115875 -       `offsetCode` : distance to match, or 0 == repCode.
115876 -       `matchCode` : matchLength - MINMATCH
115877 -*/
115878 -ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode)
115879 -{
115880 -       /* copy Literals */
115881 -       ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
115882 -       seqStorePtr->lit += litLength;
115884 -       /* literal Length */
115885 -       if (litLength > 0xFFFF) {
115886 -               seqStorePtr->longLengthID = 1;
115887 -               seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
115888 -       }
115889 -       seqStorePtr->sequences[0].litLength = (U16)litLength;
115891 -       /* match offset */
115892 -       seqStorePtr->sequences[0].offset = offsetCode + 1;
115894 -       /* match Length */
115895 -       if (matchCode > 0xFFFF) {
115896 -               seqStorePtr->longLengthID = 2;
115897 -               seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
115898 -       }
115899 -       seqStorePtr->sequences[0].matchLength = (U16)matchCode;
115900 -
115901 -       seqStorePtr->sequences++;
115902 -}
115903 -
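Each stored sequence above is a fixed 8-byte record: offsetCode + 1, a 16-bit literal length and a 16-bit matchLength - MINMATCH, with the single over-long length a block may contain flagged out of band via longLengthID/longLengthPos. The layout as a sketch (illustrative, mirroring seqDef but not part of the patch):

#include <stdint.h>

/* Sketch of one seqStore record as filled in by ZSTD_storeSeq above. */
typedef struct {
	uint32_t offset;      /* offsetCode + 1 */
	uint16_t litLength;   /* truncated to 16 bits; overflow flagged out of band */
	uint16_t matchLength; /* matchCode == matchLength - MINMATCH */
} seq_def_sketch;
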
115904 -/*-*************************************
115905 -*  Match length counter
115906 -***************************************/
115907 -static unsigned ZSTD_NbCommonBytes(register size_t val)
115908 -{
115909 -       if (ZSTD_isLittleEndian()) {
115910 -               if (ZSTD_64bits()) {
115911 -                       return (__builtin_ctzll((U64)val) >> 3);
115912 -               } else { /* 32 bits */
115913 -                       return (__builtin_ctz((U32)val) >> 3);
115914 -               }
115915 -       } else { /* Big Endian CPU */
115916 -               if (ZSTD_64bits()) {
115917 -                       return (__builtin_clzll(val) >> 3);
115918 -               } else { /* 32 bits */
115919 -                       return (__builtin_clz((U32)val) >> 3);
115920 -               }
115921 -       }
115922 -}
115923 -
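ZSTD_NbCommonBytes reduces "how many leading bytes agree" to one bit-scan: XOR the two words, and on little-endian the trailing-zero-bit count divided by 8 is the answer. A little-endian 64-bit sketch (not part of the patch; the code above also covers big-endian and 32-bit builds):

#include <stdint.h>

/* Sketch: equal leading bytes of two 64-bit loads on little-endian.
 * Returns 8 for identical words; the caller above only scans when a
 * difference is already known to exist. */
static unsigned common_bytes_le_sketch(uint64_t a, uint64_t b)
{
	uint64_t const diff = a ^ b;
	return diff ? (unsigned)(__builtin_ctzll(diff) >> 3) : 8u;
}
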
115924 -static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit)
115925 -{
115926 -       const BYTE *const pStart = pIn;
115927 -       const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1);
115929 -       while (pIn < pInLoopLimit) {
115930 -               size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn);
115931 -               if (!diff) {
115932 -                       pIn += sizeof(size_t);
115933 -                       pMatch += sizeof(size_t);
115934 -                       continue;
115935 -               }
115936 -               pIn += ZSTD_NbCommonBytes(diff);
115937 -               return (size_t)(pIn - pStart);
115938 -       }
115939 -       if (ZSTD_64bits())
115940 -               if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) {
115941 -                       pIn += 4;
115942 -                       pMatch += 4;
115943 -               }
115944 -       if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) {
115945 -               pIn += 2;
115946 -               pMatch += 2;
115947 -       }
115948 -       if ((pIn < pInLimit) && (*pMatch == *pIn))
115949 -               pIn++;
115950 -       return (size_t)(pIn - pStart);
115951 -}
115952 -
115953 -/** ZSTD_count_2segments() :
115954 -*   can count match length with `ip` & `match` in 2 different segments.
115955 -*   convention : on reaching mEnd, match count continue starting from iStart
115956 -*/
115957 -static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart)
115958 -{
115959 -       const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd);
115960 -       size_t const matchLength = ZSTD_count(ip, match, vEnd);
115961 -       if (match + matchLength != mEnd)
115962 -               return matchLength;
115963 -       return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd);
115964 -}
115965 -
115966 -/*-*************************************
115967 -*  Hashes
115968 -***************************************/
115969 -static const U32 prime3bytes = 506832829U;
115970 -static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); }
115971 -ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */
115973 -static const U32 prime4bytes = 2654435761U;
115974 -static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); }
115975 -static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); }
115977 -static const U64 prime5bytes = 889523592379ULL;
115978 -static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); }
115979 -static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); }
115981 -static const U64 prime6bytes = 227718039650203ULL;
115982 -static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); }
115983 -static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); }
115985 -static const U64 prime7bytes = 58295818150454627ULL;
115986 -static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); }
115987 -static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); }
115989 -static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
115990 -static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); }
115991 -static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); }
115993 -static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls)
115994 -{
115995 -       switch (mls) {
115996 -       // case 3: return ZSTD_hash3Ptr(p, hBits);
115997 -       default:
115998 -       case 4: return ZSTD_hash4Ptr(p, hBits);
115999 -       case 5: return ZSTD_hash5Ptr(p, hBits);
116000 -       case 6: return ZSTD_hash6Ptr(p, hBits);
116001 -       case 7: return ZSTD_hash7Ptr(p, hBits);
116002 -       case 8: return ZSTD_hash8Ptr(p, hBits);
116003 -       }
116004 -}
116005 -
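Every hash above is multiplicative: multiply the input bytes by a large odd constant and keep the top h bits, which is branch-free and spreads similar inputs well. The 4-byte variant as a standalone sketch (not part of the patch; assumes 0 < h < 32):

#include <stdint.h>

/* Sketch of ZSTD_hash4: top h bits of u * 2654435761 (Knuth's 32-bit
 * golden-ratio constant, the prime4bytes above). */
static uint32_t hash4_sketch(uint32_t u, uint32_t h)
{
	return (u * 2654435761U) >> (32 - h);
}
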
116006 -/*-*************************************
116007 -*  Fast Scan
116008 -***************************************/
116009 -static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls)
116010 -{
116011 -       U32 *const hashTable = zc->hashTable;
116012 -       U32 const hBits = zc->params.cParams.hashLog;
116013 -       const BYTE *const base = zc->base;
116014 -       const BYTE *ip = base + zc->nextToUpdate;
116015 -       const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
116016 -       const size_t fastHashFillStep = 3;
116018 -       while (ip <= iend) {
116019 -               hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
116020 -               ip += fastHashFillStep;
116021 -       }
116022 -}
116023 -
116024 -FORCE_INLINE
116025 -void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
116026 -{
116027 -       U32 *const hashTable = cctx->hashTable;
116028 -       U32 const hBits = cctx->params.cParams.hashLog;
116029 -       seqStore_t *seqStorePtr = &(cctx->seqStore);
116030 -       const BYTE *const base = cctx->base;
116031 -       const BYTE *const istart = (const BYTE *)src;
116032 -       const BYTE *ip = istart;
116033 -       const BYTE *anchor = istart;
116034 -       const U32 lowestIndex = cctx->dictLimit;
116035 -       const BYTE *const lowest = base + lowestIndex;
116036 -       const BYTE *const iend = istart + srcSize;
116037 -       const BYTE *const ilimit = iend - HASH_READ_SIZE;
116038 -       U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
116039 -       U32 offsetSaved = 0;
116041 -       /* init */
116042 -       ip += (ip == lowest);
116043 -       {
116044 -               U32 const maxRep = (U32)(ip - lowest);
116045 -               if (offset_2 > maxRep)
116046 -                       offsetSaved = offset_2, offset_2 = 0;
116047 -               if (offset_1 > maxRep)
116048 -                       offsetSaved = offset_1, offset_1 = 0;
116049 -       }
116051 -       /* Main Search Loop */
116052 -       while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
116053 -               size_t mLength;
116054 -               size_t const h = ZSTD_hashPtr(ip, hBits, mls);
116055 -               U32 const curr = (U32)(ip - base);
116056 -               U32 const matchIndex = hashTable[h];
116057 -               const BYTE *match = base + matchIndex;
116058 -               hashTable[h] = curr; /* update hash table */
116060 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
116061 -                       mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
116062 -                       ip++;
116063 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
116064 -               } else {
116065 -                       U32 offset;
116066 -                       if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
116067 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
116068 -                               continue;
116069 -                       }
116070 -                       mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
116071 -                       offset = (U32)(ip - match);
116072 -                       while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
116073 -                               ip--;
116074 -                               match--;
116075 -                               mLength++;
116076 -                       } /* catch up */
116077 -                       offset_2 = offset_1;
116078 -                       offset_1 = offset;
116080 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
116081 -               }
116083 -               /* match found */
116084 -               ip += mLength;
116085 -               anchor = ip;
116087 -               if (ip <= ilimit) {
116088 -                       /* Fill Table */
116089 -                       hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */
116090 -                       hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
116091 -                       /* check immediate repcode */
116092 -                       while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
116093 -                               /* store sequence */
116094 -                               size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
116095 -                               {
116096 -                                       U32 const tmpOff = offset_2;
116097 -                                       offset_2 = offset_1;
116098 -                                       offset_1 = tmpOff;
116099 -                               } /* swap offset_2 <=> offset_1 */
116100 -                               hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
116101 -                               ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
116102 -                               ip += rLength;
116103 -                               anchor = ip;
116104 -                               continue; /* faster when present ... (?) */
116105 -                       }
116106 -               }
116107 -       }
116109 -       /* save reps for next block */
116110 -       cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
116111 -       cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
116113 -       /* Last Literals */
116114 -       {
116115 -               size_t const lastLLSize = iend - anchor;
116116 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
116117 -               seqStorePtr->lit += lastLLSize;
116118 -       }
116119 -}
116120 -
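The loop above always probes the repcode first: one 32-bit compare of ip + 1 against ip + 1 - offset_1 detects a repeat of the previous offset before any hash-table work. A hedged sketch of that probe (not part of the patch; assumes offset_1 has been validated against the window, as the init block above ensures):

#include <stdint.h>
#include <string.h>

/* Sketch of the repcode probe from the fast search loop. */
static int repcode_hit_sketch(const uint8_t *ip, uint32_t offset_1)
{
	uint32_t cur, rep;
	if (offset_1 == 0)
		return 0;
	memcpy(&cur, ip + 1, sizeof(cur));            /* unaligned-safe loads */
	memcpy(&rep, ip + 1 - offset_1, sizeof(rep));
	return cur == rep;
}
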
116121 -static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
116122 -{
116123 -       const U32 mls = ctx->params.cParams.searchLength;
116124 -       switch (mls) {
116125 -       default: /* includes case 3 */
116126 -       case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
116127 -       case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
116128 -       case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
116129 -       case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
116130 -       }
116131 -}
116132 -
116133 -static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
116134 -{
116135 -       U32 *hashTable = ctx->hashTable;
116136 -       const U32 hBits = ctx->params.cParams.hashLog;
116137 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
116138 -       const BYTE *const base = ctx->base;
116139 -       const BYTE *const dictBase = ctx->dictBase;
116140 -       const BYTE *const istart = (const BYTE *)src;
116141 -       const BYTE *ip = istart;
116142 -       const BYTE *anchor = istart;
116143 -       const U32 lowestIndex = ctx->lowLimit;
116144 -       const BYTE *const dictStart = dictBase + lowestIndex;
116145 -       const U32 dictLimit = ctx->dictLimit;
116146 -       const BYTE *const lowPrefixPtr = base + dictLimit;
116147 -       const BYTE *const dictEnd = dictBase + dictLimit;
116148 -       const BYTE *const iend = istart + srcSize;
116149 -       const BYTE *const ilimit = iend - 8;
116150 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
116152 -       /* Search Loop */
116153 -       while (ip < ilimit) { /* < instead of <=, because (ip+1) */
116154 -               const size_t h = ZSTD_hashPtr(ip, hBits, mls);
116155 -               const U32 matchIndex = hashTable[h];
116156 -               const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
116157 -               const BYTE *match = matchBase + matchIndex;
116158 -               const U32 curr = (U32)(ip - base);
116159 -               const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
116160 -               const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
116161 -               const BYTE *repMatch = repBase + repIndex;
116162 -               size_t mLength;
116163 -               hashTable[h] = curr; /* update hash table */
116165 -               if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
116166 -                   (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
116167 -                       const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
116168 -                       mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
116169 -                       ip++;
116170 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
116171 -               } else {
116172 -                       if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
116173 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
116174 -                               continue;
116175 -                       }
116176 -                       {
116177 -                               const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
116178 -                               const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
116179 -                               U32 offset;
116180 -                               mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
116181 -                               while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
116182 -                                       ip--;
116183 -                                       match--;
116184 -                                       mLength++;
116185 -                               } /* catch up */
116186 -                               offset = curr - matchIndex;
116187 -                               offset_2 = offset_1;
116188 -                               offset_1 = offset;
116189 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
116190 -                       }
116191 -               }
116193 -               /* found a match : store it */
116194 -               ip += mLength;
116195 -               anchor = ip;
116197 -               if (ip <= ilimit) {
116198 -                       /* Fill Table */
116199 -                       hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
116200 -                       hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
116201 -                       /* check immediate repcode */
116202 -                       while (ip <= ilimit) {
116203 -                               U32 const curr2 = (U32)(ip - base);
116204 -                               U32 const repIndex2 = curr2 - offset_2;
116205 -                               const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
116206 -                               if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
116207 -                                   && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
116208 -                                       const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
116209 -                                       size_t repLength2 =
116210 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
116211 -                                       U32 tmpOffset = offset_2;
116212 -                                       offset_2 = offset_1;
116213 -                                       offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
116214 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
116215 -                                       hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2;
116216 -                                       ip += repLength2;
116217 -                                       anchor = ip;
116218 -                                       continue;
116219 -                               }
116220 -                               break;
116221 -                       }
116222 -               }
116223 -       }
116225 -       /* save reps for next block */
116226 -       ctx->repToConfirm[0] = offset_1;
116227 -       ctx->repToConfirm[1] = offset_2;
116229 -       /* Last Literals */
116230 -       {
116231 -               size_t const lastLLSize = iend - anchor;
116232 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
116233 -               seqStorePtr->lit += lastLLSize;
116234 -       }
116235 -}
116236 -
116237 -static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
116238 -{
116239 -       U32 const mls = ctx->params.cParams.searchLength;
116240 -       switch (mls) {
116241 -       default: /* includes case 3 */
116242 -       case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
116243 -       case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
116244 -       case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
116245 -       case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
116246 -       }
116247 -}
116248 -
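The guard (U32)((dictLimit - 1) - repIndex) >= 3 in the loops above relies on deliberate unsigned underflow: it fails only for repIndex in [dictLimit - 3, dictLimit - 1], where a 4-byte read would straddle the dictionary/prefix seam, while repIndex >= dictLimit wraps to a huge value and passes. Restated as a standalone predicate (a sketch, not part of the patch):

#include <stdint.h>

/* Sketch of the extDict rep-index guard: a 4-byte read at repIndex must
 * stay within one segment, and repIndex must sit above the window floor. */
static int rep_index_ok_sketch(uint32_t repIndex, uint32_t dictLimit, uint32_t lowestIndex)
{
	return ((uint32_t)((dictLimit - 1) - repIndex) >= 3) && (repIndex > lowestIndex);
}
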
116249 -/*-*************************************
116250 -*  Double Fast
116251 -***************************************/
116252 -static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls)
116253 -{
116254 -       U32 *const hashLarge = cctx->hashTable;
116255 -       U32 const hBitsL = cctx->params.cParams.hashLog;
116256 -       U32 *const hashSmall = cctx->chainTable;
116257 -       U32 const hBitsS = cctx->params.cParams.chainLog;
116258 -       const BYTE *const base = cctx->base;
116259 -       const BYTE *ip = base + cctx->nextToUpdate;
116260 -       const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
116261 -       const size_t fastHashFillStep = 3;
116263 -       while (ip <= iend) {
116264 -               hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
116265 -               hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
116266 -               ip += fastHashFillStep;
116267 -       }
116268 -}
116269 -
116270 -FORCE_INLINE
116271 -void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
116272 -{
116273 -       U32 *const hashLong = cctx->hashTable;
116274 -       const U32 hBitsL = cctx->params.cParams.hashLog;
116275 -       U32 *const hashSmall = cctx->chainTable;
116276 -       const U32 hBitsS = cctx->params.cParams.chainLog;
116277 -       seqStore_t *seqStorePtr = &(cctx->seqStore);
116278 -       const BYTE *const base = cctx->base;
116279 -       const BYTE *const istart = (const BYTE *)src;
116280 -       const BYTE *ip = istart;
116281 -       const BYTE *anchor = istart;
116282 -       const U32 lowestIndex = cctx->dictLimit;
116283 -       const BYTE *const lowest = base + lowestIndex;
116284 -       const BYTE *const iend = istart + srcSize;
116285 -       const BYTE *const ilimit = iend - HASH_READ_SIZE;
116286 -       U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
116287 -       U32 offsetSaved = 0;
116289 -       /* init */
116290 -       ip += (ip == lowest);
116291 -       {
116292 -               U32 const maxRep = (U32)(ip - lowest);
116293 -               if (offset_2 > maxRep)
116294 -                       offsetSaved = offset_2, offset_2 = 0;
116295 -               if (offset_1 > maxRep)
116296 -                       offsetSaved = offset_1, offset_1 = 0;
116297 -       }
116299 -       /* Main Search Loop */
116300 -       while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
116301 -               size_t mLength;
116302 -               size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
116303 -               size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
116304 -               U32 const curr = (U32)(ip - base);
116305 -               U32 const matchIndexL = hashLong[h2];
116306 -               U32 const matchIndexS = hashSmall[h];
116307 -               const BYTE *matchLong = base + matchIndexL;
116308 -               const BYTE *match = base + matchIndexS;
116309 -               hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
116311 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */
116312 -                       mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
116313 -                       ip++;
116314 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
116315 -               } else {
116316 -                       U32 offset;
116317 -                       if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
116318 -                               mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8;
116319 -                               offset = (U32)(ip - matchLong);
116320 -                               while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) {
116321 -                                       ip--;
116322 -                                       matchLong--;
116323 -                                       mLength++;
116324 -                               } /* catch up */
116325 -                       } else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
116326 -                               size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
116327 -                               U32 const matchIndex3 = hashLong[h3];
116328 -                               const BYTE *match3 = base + matchIndex3;
116329 -                               hashLong[h3] = curr + 1;
116330 -                               if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
116331 -                                       mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8;
116332 -                                       ip++;
116333 -                                       offset = (U32)(ip - match3);
116334 -                                       while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) {
116335 -                                               ip--;
116336 -                                               match3--;
116337 -                                               mLength++;
116338 -                                       } /* catch up */
116339 -                               } else {
116340 -                                       mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
116341 -                                       offset = (U32)(ip - match);
116342 -                                       while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
116343 -                                               ip--;
116344 -                                               match--;
116345 -                                               mLength++;
116346 -                                       } /* catch up */
116347 -                               }
116348 -                       } else {
116349 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
116350 -                               continue;
116351 -                       }
116353 -                       offset_2 = offset_1;
116354 -                       offset_1 = offset;
116356 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
116357 -               }
116359 -               /* match found */
116360 -               ip += mLength;
116361 -               anchor = ip;
116363 -               if (ip <= ilimit) {
116364 -                       /* Fill Table */
116365 -                       hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] =
116366 -                           curr + 2; /* here because curr+2 could be > iend-8 */
116367 -                       hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
116369 -                       /* check immediate repcode */
116370 -                       while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
116371 -                               /* store sequence */
116372 -                               size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
116373 -                               {
116374 -                                       U32 const tmpOff = offset_2;
116375 -                                       offset_2 = offset_1;
116376 -                                       offset_1 = tmpOff;
116377 -                               } /* swap offset_2 <=> offset_1 */
116378 -                               hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
116379 -                               hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
116380 -                               ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
116381 -                               ip += rLength;
116382 -                               anchor = ip;
116383 -                               continue; /* faster when present ... (?) */
116384 -                       }
116385 -               }
116386 -       }
116388 -       /* save reps for next block */
116389 -       cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
116390 -       cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
116392 -       /* Last Literals */
116393 -       {
116394 -               size_t const lastLLSize = iend - anchor;
116395 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
116396 -               seqStorePtr->lit += lastLLSize;
116397 -       }
116398 -}
116399 -
116400 -static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
116401 -{
116402 -       const U32 mls = ctx->params.cParams.searchLength;
116403 -       switch (mls) {
116404 -       default: /* includes case 3 */
116405 -       case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
116406 -       case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
116407 -       case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
116408 -       case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
116409 -       }
116410 -}
116411 -
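Double-fast keeps two tables, hashed on 8 bytes for long matches and on mls bytes for short ones, and the loop above tries candidates in a fixed order: repcode, then long match, then a short match that first retries the long table at ip + 1. Just that ordering as a compilable sketch (illustrative, not part of the patch):

/* Sketch of the candidate priority in ZSTD_compressBlock_doubleFast_generic. */
enum df_candidate { DF_REPCODE, DF_LONG, DF_SHORT, DF_NONE };

static enum df_candidate df_priority_sketch(int rep_hit, int long_hit, int short_hit)
{
	if (rep_hit)
		return DF_REPCODE;
	if (long_hit)
		return DF_LONG;
	if (short_hit)		/* the real loop retries the long table at ip + 1 first */
		return DF_SHORT;
	return DF_NONE;
}
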
116412 -static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
116413 -{
116414 -       U32 *const hashLong = ctx->hashTable;
116415 -       U32 const hBitsL = ctx->params.cParams.hashLog;
116416 -       U32 *const hashSmall = ctx->chainTable;
116417 -       U32 const hBitsS = ctx->params.cParams.chainLog;
116418 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
116419 -       const BYTE *const base = ctx->base;
116420 -       const BYTE *const dictBase = ctx->dictBase;
116421 -       const BYTE *const istart = (const BYTE *)src;
116422 -       const BYTE *ip = istart;
116423 -       const BYTE *anchor = istart;
116424 -       const U32 lowestIndex = ctx->lowLimit;
116425 -       const BYTE *const dictStart = dictBase + lowestIndex;
116426 -       const U32 dictLimit = ctx->dictLimit;
116427 -       const BYTE *const lowPrefixPtr = base + dictLimit;
116428 -       const BYTE *const dictEnd = dictBase + dictLimit;
116429 -       const BYTE *const iend = istart + srcSize;
116430 -       const BYTE *const ilimit = iend - 8;
116431 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
116433 -       /* Search Loop */
116434 -       while (ip < ilimit) { /* < instead of <=, because (ip+1) */
116435 -               const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
116436 -               const U32 matchIndex = hashSmall[hSmall];
116437 -               const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
116438 -               const BYTE *match = matchBase + matchIndex;
116440 -               const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
116441 -               const U32 matchLongIndex = hashLong[hLong];
116442 -               const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
116443 -               const BYTE *matchLong = matchLongBase + matchLongIndex;
116445 -               const U32 curr = (U32)(ip - base);
116446 -               const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
116447 -               const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
116448 -               const BYTE *repMatch = repBase + repIndex;
116449 -               size_t mLength;
116450 -               hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
116452 -               if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
116453 -                   (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
116454 -                       const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
116455 -                       mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4;
116456 -                       ip++;
116457 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
116458 -               } else {
116459 -                       if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
116460 -                               const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
116461 -                               const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
116462 -                               U32 offset;
116463 -                               mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8;
116464 -                               offset = curr - matchLongIndex;
116465 -                               while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) {
116466 -                                       ip--;
116467 -                                       matchLong--;
116468 -                                       mLength++;
116469 -                               } /* catch up */
116470 -                               offset_2 = offset_1;
116471 -                               offset_1 = offset;
116472 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
116474 -                       } else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
116475 -                               size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
116476 -                               U32 const matchIndex3 = hashLong[h3];
116477 -                               const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base;
116478 -                               const BYTE *match3 = match3Base + matchIndex3;
116479 -                               U32 offset;
116480 -                               hashLong[h3] = curr + 1;
116481 -                               if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
116482 -                                       const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
116483 -                                       const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
116484 -                                       mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8;
116485 -                                       ip++;
116486 -                                       offset = curr + 1 - matchIndex3;
116487 -                                       while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) {
116488 -                                               ip--;
116489 -                                               match3--;
116490 -                                               mLength++;
116491 -                                       } /* catch up */
116492 -                               } else {
116493 -                                       const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
116494 -                                       const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
116495 -                                       mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4;
116496 -                                       offset = curr - matchIndex;
116497 -                                       while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
116498 -                                               ip--;
116499 -                                               match--;
116500 -                                               mLength++;
116501 -                                       } /* catch up */
116502 -                               }
116503 -                               offset_2 = offset_1;
116504 -                               offset_1 = offset;
116505 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
116507 -                       } else {
116508 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
116509 -                               continue;
116510 -                       }
116511 -               }
116513 -               /* found a match : store it */
116514 -               ip += mLength;
116515 -               anchor = ip;
116517 -               if (ip <= ilimit) {
116518 -                       /* Fill Table */
116519 -                       hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2;
116520 -                       hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2;
116521 -                       hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
116522 -                       hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base);
116523 -                       /* check immediate repcode */
116524 -                       while (ip <= ilimit) {
116525 -                               U32 const curr2 = (U32)(ip - base);
116526 -                               U32 const repIndex2 = curr2 - offset_2;
116527 -                               const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
116528 -                               if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
116529 -                                   && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
116530 -                                       const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
116531 -                                       size_t const repLength2 =
116532 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
116533 -                                       U32 tmpOffset = offset_2;
116534 -                                       offset_2 = offset_1;
116535 -                                       offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
116536 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
116537 -                                       hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2;
116538 -                                       hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2;
116539 -                                       ip += repLength2;
116540 -                                       anchor = ip;
116541 -                                       continue;
116542 -                               }
116543 -                               break;
116544 -                       }
116545 -               }
116546 -       }
116548 -       /* save reps for next block */
116549 -       ctx->repToConfirm[0] = offset_1;
116550 -       ctx->repToConfirm[1] = offset_2;
116552 -       /* Last Literals */
116553 -       {
116554 -               size_t const lastLLSize = iend - anchor;
116555 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
116556 -               seqStorePtr->lit += lastLLSize;
116557 -       }
116558 -}
116559 -
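The repcode guards in the loop above, such as (U32)((dictLimit - 1) - repIndex) >= 3, lean on deliberate unsigned wraparound. A standalone restatement of the test, not part of the patch and with hypothetical demo_ naming:

#include <stdint.h>

/* The subtraction wraps around deliberately when repIndex >= dictLimit, so a
 * single unsigned compare accepts indices that are either at least 4 bytes
 * below dictLimit or inside the current prefix, and rejects the three
 * positions where a 4-byte read would straddle the dictionary/prefix
 * boundary. */
static int demo_repIndex_is_safe(uint32_t repIndex, uint32_t dictLimit, uint32_t lowestIndex)
{
        return (((uint32_t)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex));
}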
116560 -static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
116561 -{
116562 -       U32 const mls = ctx->params.cParams.searchLength;
116563 -       switch (mls) {
116564 -       default: /* includes case 3 */
116565 -       case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
116566 -       case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
116567 -       case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
116568 -       case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
116569 -       }
116570 -}
116571 -
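The switch above is the "template emulation" idiom used throughout this file (a later comment in the patch names it explicitly): a force-inlined generic worker takes the match-length parameter as a compile-time constant, and a one-branch dispatcher picks the specialization. A minimal sketch of the pattern, assuming a GCC/Clang always_inline attribute; all demo_ names are hypothetical:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for the kernel's FORCE_INLINE. */
#define DEMO_FORCE_INLINE static inline __attribute__((always_inline))

/* Generic worker: `mls` is a compile-time constant at every call site,
 * so the compiler emits one specialized body per value. */
DEMO_FORCE_INLINE size_t demo_hash_generic(const uint8_t *p, size_t len, const uint32_t mls)
{
        size_t h = 0, i;
        for (i = 0; i + mls <= len; i++) {
                uint32_t j;
                for (j = 0; j < mls; j++)   /* inner loop unrolls for constant mls */
                        h = h * 31 + p[i + j];
        }
        return h;
}

/* Outer dispatcher mirrors the ZSTD_*_selectMLS wrappers: one branch up
 * front, then straight-line specialized code. */
size_t demo_hash_selectMLS(const uint8_t *p, size_t len, uint32_t mls)
{
        switch (mls) {
        default: /* includes case 3 */
        case 4: return demo_hash_generic(p, len, 4);
        case 5: return demo_hash_generic(p, len, 5);
        case 6: return demo_hash_generic(p, len, 6);
        case 7: return demo_hash_generic(p, len, 7);
        }
}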
116572 -/*-*************************************
116573 -*  Binary Tree search
116574 -***************************************/
116575 -/** ZSTD_insertBt1() : add one or multiple positions to tree.
116576 -*   ip : assumed <= iend-8 .
116577 -*   @return : nb of positions added */
116578 -static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict)
116579 -{
116580 -       U32 *const hashTable = zc->hashTable;
116581 -       U32 const hashLog = zc->params.cParams.hashLog;
116582 -       size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
116583 -       U32 *const bt = zc->chainTable;
116584 -       U32 const btLog = zc->params.cParams.chainLog - 1;
116585 -       U32 const btMask = (1 << btLog) - 1;
116586 -       U32 matchIndex = hashTable[h];
116587 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
116588 -       const BYTE *const base = zc->base;
116589 -       const BYTE *const dictBase = zc->dictBase;
116590 -       const U32 dictLimit = zc->dictLimit;
116591 -       const BYTE *const dictEnd = dictBase + dictLimit;
116592 -       const BYTE *const prefixStart = base + dictLimit;
116593 -       const BYTE *match;
116594 -       const U32 curr = (U32)(ip - base);
116595 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
116596 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
116597 -       U32 *largerPtr = smallerPtr + 1;
116598 -       U32 dummy32; /* to be nullified at the end */
116599 -       U32 const windowLow = zc->lowLimit;
116600 -       U32 matchEndIdx = curr + 8;
116601 -       size_t bestLength = 8;
116603 -       hashTable[h] = curr; /* Update Hash Table */
116605 -       while (nbCompares-- && (matchIndex > windowLow)) {
116606 -               U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
116607 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
116609 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
116610 -                       match = base + matchIndex;
116611 -                       if (match[matchLength] == ip[matchLength])
116612 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
116613 -               } else {
116614 -                       match = dictBase + matchIndex;
116615 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
116616 -                       if (matchIndex + matchLength >= dictLimit)
116617 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
116618 -               }
116620 -               if (matchLength > bestLength) {
116621 -                       bestLength = matchLength;
116622 -                       if (matchLength > matchEndIdx - matchIndex)
116623 -                               matchEndIdx = matchIndex + (U32)matchLength;
116624 -               }
116626 -               if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
116627 -                       break;                /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */
116629 -               if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
116630 -                       /* match is smaller than curr */
116631 -                       *smallerPtr = matchIndex;         /* update smaller idx */
116632 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
116633 -                       if (matchIndex <= btLow) {
116634 -                               smallerPtr = &dummy32;
116635 -                               break;
116636 -                       }                         /* beyond tree size, stop the search */
116637 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
116638 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
116639 -               } else {
116640 -                       /* match is larger than curr */
116641 -                       *largerPtr = matchIndex;
116642 -                       commonLengthLarger = matchLength;
116643 -                       if (matchIndex <= btLow) {
116644 -                               largerPtr = &dummy32;
116645 -                               break;
116646 -                       } /* beyond tree size, stop the search */
116647 -                       largerPtr = nextPtr;
116648 -                       matchIndex = nextPtr[0];
116649 -               }
116650 -       }
116652 -       *smallerPtr = *largerPtr = 0;
116653 -       if (bestLength > 384)
116654 -               return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
116655 -       if (matchEndIdx > curr + 8)
116656 -               return matchEndIdx - curr - 8;
116657 -       return 1;
116658 -}
116659 -
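ZSTD_insertBt1() restarts each comparison at MIN(commonLengthSmaller, commonLengthLarger): in the sorted tree, every candidate reached along the smaller (resp. larger) branch is already known to share at least that many leading bytes with ip, so those bytes are never re-read. A toy illustration of counting from a proven lower bound, with hypothetical demo_ names:

#include <stddef.h>

/* `known` bytes are already proven equal by earlier tree probes, so the
 * byte-by-byte comparison begins there instead of at offset 0. */
static size_t demo_count_from(const unsigned char *a, const unsigned char *b,
                              const unsigned char *aEnd, size_t known)
{
        size_t len = known;
        while (a + len < aEnd && a[len] == b[len])
                len++;
        return len;
}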
116660 -static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls,
116661 -                                           U32 extDict)
116662 -{
116663 -       U32 *const hashTable = zc->hashTable;
116664 -       U32 const hashLog = zc->params.cParams.hashLog;
116665 -       size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
116666 -       U32 *const bt = zc->chainTable;
116667 -       U32 const btLog = zc->params.cParams.chainLog - 1;
116668 -       U32 const btMask = (1 << btLog) - 1;
116669 -       U32 matchIndex = hashTable[h];
116670 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
116671 -       const BYTE *const base = zc->base;
116672 -       const BYTE *const dictBase = zc->dictBase;
116673 -       const U32 dictLimit = zc->dictLimit;
116674 -       const BYTE *const dictEnd = dictBase + dictLimit;
116675 -       const BYTE *const prefixStart = base + dictLimit;
116676 -       const U32 curr = (U32)(ip - base);
116677 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
116678 -       const U32 windowLow = zc->lowLimit;
116679 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
116680 -       U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
116681 -       U32 matchEndIdx = curr + 8;
116682 -       U32 dummy32; /* to be nullified at the end */
116683 -       size_t bestLength = 0;
116685 -       hashTable[h] = curr; /* Update Hash Table */
116687 -       while (nbCompares-- && (matchIndex > windowLow)) {
116688 -               U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
116689 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
116690 -               const BYTE *match;
116692 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
116693 -                       match = base + matchIndex;
116694 -                       if (match[matchLength] == ip[matchLength])
116695 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
116696 -               } else {
116697 -                       match = dictBase + matchIndex;
116698 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
116699 -                       if (matchIndex + matchLength >= dictLimit)
116700 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
116701 -               }
116703 -               if (matchLength > bestLength) {
116704 -                       if (matchLength > matchEndIdx - matchIndex)
116705 -                               matchEndIdx = matchIndex + (U32)matchLength;
116706 -                       if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1)))
116707 -                               bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
116708 -                       if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
116709 -                               break;                /* drop, to guarantee consistency (miss a little bit of compression) */
116710 -               }
116712 -               if (match[matchLength] < ip[matchLength]) {
116713 -                       /* match is smaller than curr */
116714 -                       *smallerPtr = matchIndex;         /* update smaller idx */
116715 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
116716 -                       if (matchIndex <= btLow) {
116717 -                               smallerPtr = &dummy32;
116718 -                               break;
116719 -                       }                         /* beyond tree size, stop the search */
116720 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
116721 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
116722 -               } else {
116723 -                       /* match is larger than curr */
116724 -                       *largerPtr = matchIndex;
116725 -                       commonLengthLarger = matchLength;
116726 -                       if (matchIndex <= btLow) {
116727 -                               largerPtr = &dummy32;
116728 -                               break;
116729 -                       } /* beyond tree size, stop the search */
116730 -                       largerPtr = nextPtr;
116731 -                       matchIndex = nextPtr[0];
116732 -               }
116733 -       }
116735 -       *smallerPtr = *largerPtr = 0;
116737 -       zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
116738 -       return bestLength;
116739 -}
116740 -
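The best-match update above does not go by length alone: a longer candidate wins only if four times its extra length exceeds the extra cost of its roughly log2-sized offset. A sketch of that comparison, assuming the GCC/Clang __builtin_clz builtin; demo_ names are hypothetical:

#include <stdint.h>

/* Stand-in for ZSTD_highbit32(): index of the highest set bit, roughly
 * log2(v), used as a proxy for the bit cost of encoding an offset. */
static unsigned demo_highbit32(uint32_t v)
{
        return 31 - (unsigned)__builtin_clz(v);
}

/* Mirror of the acceptance test above: a longer candidate wins only when
 * 4x its extra length outweighs the extra offset cost. */
static int demo_candidate_wins(uint32_t newLength, uint32_t newOffset,
                               uint32_t bestLength, uint32_t bestOffset)
{
        int const lengthGain = 4 * ((int)newLength - (int)bestLength);
        int const offsetCost = (int)demo_highbit32(newOffset + 1) - (int)demo_highbit32(bestOffset + 1);
        return lengthGain > offsetCost;
}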
116741 -static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
116742 -{
116743 -       const BYTE *const base = zc->base;
116744 -       const U32 target = (U32)(ip - base);
116745 -       U32 idx = zc->nextToUpdate;
116747 -       while (idx < target)
116748 -               idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0);
116749 -}
116750 -
116751 -/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
116752 -static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls)
116753 -{
116754 -       if (ip < zc->base + zc->nextToUpdate)
116755 -               return 0; /* skipped area */
116756 -       ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
116757 -       return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
116758 -}
116759 -
116760 -static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
116761 -                                            const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch)
116762 -{
116763 -       switch (matchLengthSearch) {
116764 -       default: /* includes case 3 */
116765 -       case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
116766 -       case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
116767 -       case 7:
116768 -       case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
116769 -       }
116770 -}
116771 -
116772 -static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
116773 -{
116774 -       const BYTE *const base = zc->base;
116775 -       const U32 target = (U32)(ip - base);
116776 -       U32 idx = zc->nextToUpdate;
116778 -       while (idx < target)
116779 -               idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1);
116780 -}
116781 -
116782 -/** Tree updater, providing best match */
116783 -static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
116784 -                                          const U32 mls)
116785 -{
116786 -       if (ip < zc->base + zc->nextToUpdate)
116787 -               return 0; /* skipped area */
116788 -       ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
116789 -       return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
116790 -}
116791 -
116792 -static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
116793 -                                                    const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
116794 -                                                    const U32 matchLengthSearch)
116795 -{
116796 -       switch (matchLengthSearch) {
116797 -       default: /* includes case 3 */
116798 -       case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
116799 -       case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
116800 -       case 7:
116801 -       case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
116802 -       }
116803 -}
116804 -
116805 -/* *********************************
116806 -*  Hash Chain
116807 -***********************************/
116808 -#define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask]
116810 -/* Update chains up to ip (excluded)
116811 -   Assumption : always within prefix (i.e. not within extDict) */
116812 -FORCE_INLINE
116813 -U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls)
116814 -{
116815 -       U32 *const hashTable = zc->hashTable;
116816 -       const U32 hashLog = zc->params.cParams.hashLog;
116817 -       U32 *const chainTable = zc->chainTable;
116818 -       const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
116819 -       const BYTE *const base = zc->base;
116820 -       const U32 target = (U32)(ip - base);
116821 -       U32 idx = zc->nextToUpdate;
116823 -       while (idx < target) { /* catch up */
116824 -               size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);
116825 -               NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
116826 -               hashTable[h] = idx;
116827 -               idx++;
116828 -       }
116830 -       zc->nextToUpdate = target;
116831 -       return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
116832 -}
116833 -
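ZSTD_insertAndFindFirstIndex() threads positions into per-hash singly linked chains: hashTable holds the most recent position for each hash value, and chainTable, indexed by position & mask, holds the previous position with the same hash. A minimal self-contained version of that insert-and-probe step; demo_ names and table sizes are arbitrary choices, not the kernel's:

#include <stdint.h>
#include <string.h>

#define DEMO_HASH_LOG   12
#define DEMO_CHAIN_LOG  12
#define DEMO_CHAIN_MASK ((1u << DEMO_CHAIN_LOG) - 1)

static uint32_t headTable[1u << DEMO_HASH_LOG];   /* hash -> most recent position */
static uint32_t chainTable[1u << DEMO_CHAIN_LOG]; /* position & mask -> previous position */

static uint32_t demo_hash4(const uint8_t *p)
{
        uint32_t v;
        memcpy(&v, p, 4);
        return (v * 2654435761u) >> (32 - DEMO_HASH_LOG);
}

/* Link position `pos` into the chain for its 4-byte hash and return the
 * previous head, i.e. the most recent candidate match position (statics are
 * zero-initialized, so position 0 doubles as "no candidate"). */
static uint32_t demo_insert_and_find(const uint8_t *base, uint32_t pos)
{
        uint32_t const h = demo_hash4(base + pos);
        uint32_t const prev = headTable[h];
        chainTable[pos & DEMO_CHAIN_MASK] = prev;
        headTable[h] = pos;
        return prev;
}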
116834 -/* inlining is important to hardwire a hot branch (template emulation) */
116835 -FORCE_INLINE
116836 -size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */
116837 -                                   const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls,
116838 -                                   const U32 extDict)
116839 -{
116840 -       U32 *const chainTable = zc->chainTable;
116841 -       const U32 chainSize = (1 << zc->params.cParams.chainLog);
116842 -       const U32 chainMask = chainSize - 1;
116843 -       const BYTE *const base = zc->base;
116844 -       const BYTE *const dictBase = zc->dictBase;
116845 -       const U32 dictLimit = zc->dictLimit;
116846 -       const BYTE *const prefixStart = base + dictLimit;
116847 -       const BYTE *const dictEnd = dictBase + dictLimit;
116848 -       const U32 lowLimit = zc->lowLimit;
116849 -       const U32 curr = (U32)(ip - base);
116850 -       const U32 minChain = curr > chainSize ? curr - chainSize : 0;
116851 -       int nbAttempts = maxNbAttempts;
116852 -       size_t ml = EQUAL_READ32 - 1;
116854 -       /* HC4 match finder */
116855 -       U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls);
116857 -       for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) {
116858 -               const BYTE *match;
116859 -               size_t currMl = 0;
116860 -               if ((!extDict) || matchIndex >= dictLimit) {
116861 -                       match = base + matchIndex;
116862 -                       if (match[ml] == ip[ml]) /* potentially better */
116863 -                               currMl = ZSTD_count(ip, match, iLimit);
116864 -               } else {
116865 -                       match = dictBase + matchIndex;
116866 -                       if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
116867 -                               currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
116868 -               }
116870 -               /* save best solution */
116871 -               if (currMl > ml) {
116872 -                       ml = currMl;
116873 -                       *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
116874 -                       if (ip + currMl == iLimit)
116875 -                               break; /* best possible, and avoid read overflow*/
116876 -               }
116878 -               if (matchIndex <= minChain)
116879 -                       break;
116880 -               matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
116881 -       }
116883 -       return ml;
116884 -}
116885 -
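The prefix branch in the chain walk above tests a single byte, match[ml], before running the full ZSTD_count(): a candidate can only improve on the current best length ml if it also agrees at offset ml. A toy version of that early reject, with hypothetical demo_ names:

/* A candidate can only beat the current best length `ml` if it matches at
 * position ml too, so test that one byte before counting from the start. */
static size_t demo_count_if_promising(const unsigned char *ip, const unsigned char *match,
                                      const unsigned char *iLimit, size_t ml)
{
        size_t len = 0;
        if (ip + ml >= iLimit || match[ml] != ip[ml])
                return 0; /* cannot improve on ml */
        while (ip + len < iLimit && ip[len] == match[len])
                len++;
        return len;
}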
116886 -FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
116887 -                                                  const U32 matchLengthSearch)
116888 -{
116889 -       switch (matchLengthSearch) {
116890 -       default: /* includes case 3 */
116891 -       case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
116892 -       case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
116893 -       case 7:
116894 -       case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
116895 -       }
116896 -}
116897 -
116898 -FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
116899 -                                                          const U32 matchLengthSearch)
116900 -{
116901 -       switch (matchLengthSearch) {
116902 -       default: /* includes case 3 */
116903 -       case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
116904 -       case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
116905 -       case 7:
116906 -       case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
116907 -       }
116908 -}
116909 -
116910 -/* *******************************
116911 -*  Common parser - lazy strategy
116912 -*********************************/
116913 -FORCE_INLINE
116914 -void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
116915 -{
116916 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
116917 -       const BYTE *const istart = (const BYTE *)src;
116918 -       const BYTE *ip = istart;
116919 -       const BYTE *anchor = istart;
116920 -       const BYTE *const iend = istart + srcSize;
116921 -       const BYTE *const ilimit = iend - 8;
116922 -       const BYTE *const base = ctx->base + ctx->dictLimit;
116924 -       U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
116925 -       U32 const mls = ctx->params.cParams.searchLength;
116927 -       typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
116928 -       searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
116929 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0;
116931 -       /* init */
116932 -       ip += (ip == base);
116933 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
116934 -       {
116935 -               U32 const maxRep = (U32)(ip - base);
116936 -               if (offset_2 > maxRep)
116937 -                       savedOffset = offset_2, offset_2 = 0;
116938 -               if (offset_1 > maxRep)
116939 -                       savedOffset = offset_1, offset_1 = 0;
116940 -       }
116942 -       /* Match Loop */
116943 -       while (ip < ilimit) {
116944 -               size_t matchLength = 0;
116945 -               size_t offset = 0;
116946 -               const BYTE *start = ip + 1;
116948 -               /* check repCode */
116949 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {
116950 -                       /* repcode : we take it */
116951 -                       matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
116952 -                       if (depth == 0)
116953 -                               goto _storeSequence;
116954 -               }
116956 -               /* first search (depth 0) */
116957 -               {
116958 -                       size_t offsetFound = 99999999;
116959 -                       size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
116960 -                       if (ml2 > matchLength)
116961 -                               matchLength = ml2, start = ip, offset = offsetFound;
116962 -               }
116964 -               if (matchLength < EQUAL_READ32) {
116965 -                       ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
116966 -                       continue;
116967 -               }
116969 -               /* let's try to find a better solution */
116970 -               if (depth >= 1)
116971 -                       while (ip < ilimit) {
116972 -                               ip++;
116973 -                               if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
116974 -                                       size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
116975 -                                       int const gain2 = (int)(mlRep * 3);
116976 -                                       int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
116977 -                                       if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
116978 -                                               matchLength = mlRep, offset = 0, start = ip;
116979 -                               }
116980 -                               {
116981 -                                       size_t offset2 = 99999999;
116982 -                                       size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
116983 -                                       int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
116984 -                                       int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
116985 -                                       if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
116986 -                                               matchLength = ml2, offset = offset2, start = ip;
116987 -                                               continue; /* search a better one */
116988 -                                       }
116989 -                               }
116991 -                               /* let's find an even better one */
116992 -                               if ((depth == 2) && (ip < ilimit)) {
116993 -                                       ip++;
116994 -                                       if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
116995 -                                               size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
116996 -                                               int const gain2 = (int)(ml2 * 4);
116997 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
116998 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
116999 -                                                       matchLength = ml2, offset = 0, start = ip;
117000 -                                       }
117001 -                                       {
117002 -                                               size_t offset2 = 99999999;
117003 -                                               size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
117004 -                                               int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
117005 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
117006 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
117007 -                                                       matchLength = ml2, offset = offset2, start = ip;
117008 -                                                       continue;
117009 -                                               }
117010 -                                       }
117011 -                               }
117012 -                               break; /* nothing found : store previous solution */
117013 -                       }
117015 -               /* NOTE:
117016 -                * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
117017 -                * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
117018 -                * overflows the pointer, which is undefined behavior.
117019 -                */
117020 -               /* catch up */
117021 -               if (offset) {
117022 -                       while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) &&
117023 -                              (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */
117024 -                       {
117025 -                               start--;
117026 -                               matchLength++;
117027 -                       }
117028 -                       offset_2 = offset_1;
117029 -                       offset_1 = (U32)(offset - ZSTD_REP_MOVE);
117030 -               }
117032 -       /* store sequence */
117033 -_storeSequence:
117034 -               {
117035 -                       size_t const litLength = start - anchor;
117036 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
117037 -                       anchor = ip = start + matchLength;
117038 -               }
117040 -               /* check immediate repcode */
117041 -               while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
117042 -                       /* store sequence */
117043 -                       matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32;
117044 -                       offset = offset_2;
117045 -                       offset_2 = offset_1;
117046 -                       offset_1 = (U32)offset; /* swap repcodes */
117047 -                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
117048 -                       ip += matchLength;
117049 -                       anchor = ip;
117050 -                       continue; /* faster when present ... (?) */
117051 -               }
117052 -       }
117054 -       /* Save reps for next block */
117055 -       ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
117056 -       ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
117058 -       /* Last Literals */
117059 -       {
117060 -               size_t const lastLLSize = iend - anchor;
117061 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
117062 -               seqStorePtr->lit += lastLLSize;
117063 -       }
117064 -}
117065 -
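The depth-1 and depth-2 arbitration in the loop above scores each match as four times its length minus an approximate offset cost, and a match found one byte later must beat the incumbent by a bias of 4 (7 at depth 2) to displace it. A sketch of that test under the same assumptions as the earlier highbit sketch (GCC/Clang __builtin_clz, hypothetical demo_ names):

#include <stdint.h>

static unsigned demo_highbit32(uint32_t v) { return 31 - (unsigned)__builtin_clz(v); }

/* A later match (ml2, offset2) displaces the one in hand (matchLength,
 * offset) only if its score clears the incumbent's score plus `bias`:
 * 4 at the first re-search, 7 at the second, matching the constants above. */
static int demo_prefer_later_match(uint32_t ml2, uint32_t offset2,
                                   uint32_t matchLength, uint32_t offset, int bias)
{
        int const gain2 = (int)(ml2 * 4) - (int)demo_highbit32(offset2 + 1);
        int const gain1 = (int)(matchLength * 4) - (int)demo_highbit32(offset + 1) + bias;
        return gain2 > gain1;
}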
117066 -static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); }
117068 -static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); }
117070 -static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); }
117072 -static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); }
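The four wrappers above differ only in their two template arguments. The mapping they encode, restated as an illustrative table (searchMethod: 0 = hash chain, 1 = binary tree; the struct and names are not part of the patch):

struct demo_lazy_config {
        const char *name;
        unsigned searchMethod;
        unsigned depth;
};

static const struct demo_lazy_config demo_lazy_levels[] = {
        { "greedy",  0, 0 },   /* commit to the first acceptable match */
        { "lazy",    0, 1 },   /* re-search once at ip+1 before committing */
        { "lazy2",   0, 2 },   /* re-search up to twice */
        { "btlazy2", 1, 2 },   /* same depth, binary-tree match finder */
};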
117074 -FORCE_INLINE
117075 -void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
117076 -{
117077 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
117078 -       const BYTE *const istart = (const BYTE *)src;
117079 -       const BYTE *ip = istart;
117080 -       const BYTE *anchor = istart;
117081 -       const BYTE *const iend = istart + srcSize;
117082 -       const BYTE *const ilimit = iend - 8;
117083 -       const BYTE *const base = ctx->base;
117084 -       const U32 dictLimit = ctx->dictLimit;
117085 -       const U32 lowestIndex = ctx->lowLimit;
117086 -       const BYTE *const prefixStart = base + dictLimit;
117087 -       const BYTE *const dictBase = ctx->dictBase;
117088 -       const BYTE *const dictEnd = dictBase + dictLimit;
117089 -       const BYTE *const dictStart = dictBase + ctx->lowLimit;
117091 -       const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
117092 -       const U32 mls = ctx->params.cParams.searchLength;
117094 -       typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
117095 -       searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
117097 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
117099 -       /* init */
117100 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
117101 -       ip += (ip == prefixStart);
117103 -       /* Match Loop */
117104 -       while (ip < ilimit) {
117105 -               size_t matchLength = 0;
117106 -               size_t offset = 0;
117107 -               const BYTE *start = ip + 1;
117108 -               U32 curr = (U32)(ip - base);
117110 -               /* check repCode */
117111 -               {
117112 -                       const U32 repIndex = (U32)(curr + 1 - offset_1);
117113 -                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
117114 -                       const BYTE *const repMatch = repBase + repIndex;
117115 -                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
117116 -                               if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) {
117117 -                                       /* repcode detected we should take it */
117118 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
117119 -                                       matchLength =
117120 -                                           ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
117121 -                                       if (depth == 0)
117122 -                                               goto _storeSequence;
117123 -                               }
117124 -               }
117126 -               /* first search (depth 0) */
117127 -               {
117128 -                       size_t offsetFound = 99999999;
117129 -                       size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
117130 -                       if (ml2 > matchLength)
117131 -                               matchLength = ml2, start = ip, offset = offsetFound;
117132 -               }
117134 -               if (matchLength < EQUAL_READ32) {
117135 -                       ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
117136 -                       continue;
117137 -               }
117139 -               /* let's try to find a better solution */
117140 -               if (depth >= 1)
117141 -                       while (ip < ilimit) {
117142 -                               ip++;
117143 -                               curr++;
117144 -                               /* check repCode */
117145 -                               if (offset) {
117146 -                                       const U32 repIndex = (U32)(curr - offset_1);
117147 -                                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
117148 -                                       const BYTE *const repMatch = repBase + repIndex;
117149 -                                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
117150 -                                               if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
117151 -                                                       /* repcode detected */
117152 -                                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
117153 -                                                       size_t const repLength =
117154 -                                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) +
117155 -                                                           EQUAL_READ32;
117156 -                                                       int const gain2 = (int)(repLength * 3);
117157 -                                                       int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
117158 -                                                       if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
117159 -                                                               matchLength = repLength, offset = 0, start = ip;
117160 -                                               }
117161 -                               }
117163 -                               /* search match, depth 1 */
117164 -                               {
117165 -                                       size_t offset2 = 99999999;
117166 -                                       size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
117167 -                                       int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
117168 -                                       int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
117169 -                                       if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
117170 -                                               matchLength = ml2, offset = offset2, start = ip;
117171 -                                               continue; /* search a better one */
117172 -                                       }
117173 -                               }
117175 -                               /* let's find an even better one */
117176 -                               if ((depth == 2) && (ip < ilimit)) {
117177 -                                       ip++;
117178 -                                       curr++;
117179 -                                       /* check repCode */
117180 -                                       if (offset) {
117181 -                                               const U32 repIndex = (U32)(curr - offset_1);
117182 -                                               const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
117183 -                                               const BYTE *const repMatch = repBase + repIndex;
117184 -                                               if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
117185 -                                                       if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
117186 -                                                               /* repcode detected */
117187 -                                                               const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
117188 -                                                               size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend,
117189 -                                                                                                       repEnd, prefixStart) +
117190 -                                                                                  EQUAL_READ32;
117191 -                                                               int gain2 = (int)(repLength * 4);
117192 -                                                               int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
117193 -                                                               if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
117194 -                                                                       matchLength = repLength, offset = 0, start = ip;
117195 -                                                       }
117196 -                                       }
117198 -                                       /* search match, depth 2 */
117199 -                                       {
117200 -                                               size_t offset2 = 99999999;
117201 -                                               size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
117202 -                                               int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
117203 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
117204 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
117205 -                                                       matchLength = ml2, offset = offset2, start = ip;
117206 -                                                       continue;
117207 -                                               }
117208 -                                       }
117209 -                               }
117210 -                               break; /* nothing found : store previous solution */
117211 -                       }
117213 -               /* catch up */
117214 -               if (offset) {
117215 -                       U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE));
117216 -                       const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
117217 -                       const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
117218 -                       while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) {
117219 -                               start--;
117220 -                               match--;
117221 -                               matchLength++;
117222 -                       } /* catch up */
117223 -                       offset_2 = offset_1;
117224 -                       offset_1 = (U32)(offset - ZSTD_REP_MOVE);
117225 -               }
117227 -       /* store sequence */
117228 -       _storeSequence : {
117229 -               size_t const litLength = start - anchor;
117230 -               ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
117231 -               anchor = ip = start + matchLength;
117232 -       }
117234 -               /* check immediate repcode */
117235 -               while (ip <= ilimit) {
117236 -                       const U32 repIndex = (U32)((ip - base) - offset_2);
117237 -                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
117238 -                       const BYTE *const repMatch = repBase + repIndex;
117239 -                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
117240 -                               if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
117241 -                                       /* repcode detected we should take it */
117242 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
117243 -                                       matchLength =
117244 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
117245 -                                       offset = offset_2;
117246 -                                       offset_2 = offset_1;
117247 -                                       offset_1 = (U32)offset; /* swap offset history */
117248 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
117249 -                                       ip += matchLength;
117250 -                                       anchor = ip;
117251 -                                       continue; /* faster when present ... (?) */
117252 -                               }
117253 -                       break;
117254 -               }
117255 -       }
117257 -       /* Save reps for next block */
117258 -       ctx->repToConfirm[0] = offset_1;
117259 -       ctx->repToConfirm[1] = offset_2;
117261 -       /* Last Literals */
117262 -       {
117263 -               size_t const lastLLSize = iend - anchor;
117264 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
117265 -               seqStorePtr->lit += lastLLSize;
117266 -       }
117267 -}
117268 -
117269 -void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); }
117271 -static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
117272 -{
117273 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
117274 -}
117275 -
117276 -static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
117277 -{
117278 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
117279 -}
117280 -
117281 -static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
117282 -{
117283 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
117284 -}
117285 -
117286 -/* The optimal parser */
117287 -#include "zstd_opt.h"
117289 -static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
117290 -{
117291 -#ifdef ZSTD_OPT_H_91842398743
117292 -       ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
117293 -#else
117294 -       (void)ctx;
117295 -       (void)src;
117296 -       (void)srcSize;
117297 -       return;
117298 -#endif
117299 -}
117300 -
117301 -static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
117302 -{
117303 -#ifdef ZSTD_OPT_H_91842398743
117304 -       ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
117305 -#else
117306 -       (void)ctx;
117307 -       (void)src;
117308 -       (void)srcSize;
117309 -       return;
117310 -#endif
117311 -}
117312 -
117313 -static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
117314 -{
117315 -#ifdef ZSTD_OPT_H_91842398743
117316 -       ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
117317 -#else
117318 -       (void)ctx;
117319 -       (void)src;
117320 -       (void)srcSize;
117321 -       return;
117322 -#endif
117323 -}
117324 -
117325 -static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
117326 -{
117327 -#ifdef ZSTD_OPT_H_91842398743
117328 -       ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
117329 -#else
117330 -       (void)ctx;
117331 -       (void)src;
117332 -       (void)srcSize;
117333 -       return;
117334 -#endif
117335 -}
117336 -
117337 -typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize);
117339 -static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
117340 -{
117341 -       static const ZSTD_blockCompressor blockCompressor[2][8] = {
117342 -           {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2,
117343 -            ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2},
117344 -           {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,
117345 -            ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}};
117347 -       return blockCompressor[extDict][(U32)strat];
117348 -}
117349 -
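Compressor selection is a plain two-dimensional function-pointer table, with the second row used while the window still overlaps an external dictionary segment (lowLimit < dictLimit in the caller above). A reduced mock with only two strategies instead of eight; all demo_ names are illustrative:

#include <stddef.h>

typedef void (*demo_blockCompressor)(const void *src, size_t srcSize);

static void demo_fast(const void *src, size_t srcSize)           { (void)src; (void)srcSize; }
static void demo_greedy(const void *src, size_t srcSize)         { (void)src; (void)srcSize; }
static void demo_fast_extDict(const void *src, size_t srcSize)   { (void)src; (void)srcSize; }
static void demo_greedy_extDict(const void *src, size_t srcSize) { (void)src; (void)srcSize; }

static demo_blockCompressor demo_select(unsigned strat, int extDict)
{
        /* Row 1 handles windows that still reference the dictionary. */
        static const demo_blockCompressor table[2][2] = {
                { demo_fast,         demo_greedy },
                { demo_fast_extDict, demo_greedy_extDict },
        };
        return table[extDict ? 1 : 0][strat ? 1 : 0];
}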
117350 -static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
117351 -{
117352 -       ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
117353 -       const BYTE *const base = zc->base;
117354 -       const BYTE *const istart = (const BYTE *)src;
117355 -       const U32 curr = (U32)(istart - base);
117356 -       if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1)
117357 -               return 0; /* don't even attempt compression below a certain srcSize */
117358 -       ZSTD_resetSeqStore(&(zc->seqStore));
117359 -       if (curr > zc->nextToUpdate + 384)
117360 -               zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */
117361 -       blockCompressor(zc, src, srcSize);
117362 -       return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
117363 -}
117364 -
117365 -/*! ZSTD_compress_generic() :
117366 -*   Compress a chunk of data into one or multiple blocks.
117367 -*   All blocks will be terminated, all input will be consumed.
117368 -*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
117369 -*   Frame is supposed already started (header already produced)
117370 -*   @return : compressed size, or an error code
117371 -*/
117372 -static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk)
117373 -{
117374 -       size_t blockSize = cctx->blockSize;
117375 -       size_t remaining = srcSize;
117376 -       const BYTE *ip = (const BYTE *)src;
117377 -       BYTE *const ostart = (BYTE *)dst;
117378 -       BYTE *op = ostart;
117379 -       U32 const maxDist = 1 << cctx->params.cParams.windowLog;
117381 -       if (cctx->params.fParams.checksumFlag && srcSize)
117382 -               xxh64_update(&cctx->xxhState, src, srcSize);
117384 -       while (remaining) {
117385 -               U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
117386 -               size_t cSize;
117388 -               if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
117389 -                       return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
117390 -               if (remaining < blockSize)
117391 -                       blockSize = remaining;
117393 -               /* preemptive overflow correction */
117394 -               if (cctx->lowLimit > (3U << 29)) {
117395 -                       U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
117396 -                       U32 const curr = (U32)(ip - cctx->base);
117397 -                       U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
117398 -                       U32 const correction = curr - newCurr;
117399 -                       ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
117400 -                       ZSTD_reduceIndex(cctx, correction);
117401 -                       cctx->base += correction;
117402 -                       cctx->dictBase += correction;
117403 -                       cctx->lowLimit -= correction;
117404 -                       cctx->dictLimit -= correction;
117405 -                       if (cctx->nextToUpdate < correction)
117406 -                               cctx->nextToUpdate = 0;
117407 -                       else
117408 -                               cctx->nextToUpdate -= correction;
117409 -               }
117411 -               if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
117412 -                       /* enforce maxDist */
117413 -                       U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist;
117414 -                       if (cctx->lowLimit < newLowLimit)
117415 -                               cctx->lowLimit = newLowLimit;
117416 -                       if (cctx->dictLimit < cctx->lowLimit)
117417 -                               cctx->dictLimit = cctx->lowLimit;
117418 -               }
117420 -               cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize);
117421 -               if (ZSTD_isError(cSize))
117422 -                       return cSize;
117424 -               if (cSize == 0) { /* block is not compressible */
117425 -                       U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3);
117426 -                       if (blockSize + ZSTD_blockHeaderSize > dstCapacity)
117427 -                               return ERROR(dstSize_tooSmall);
117428 -                       ZSTD_writeLE32(op, cBlockHeader24); /* no problem : the 4th byte will be overwritten by the copied block content */
117429 -                       memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
117430 -                       cSize = ZSTD_blockHeaderSize + blockSize;
117431 -               } else {
117432 -                       U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3);
117433 -                       ZSTD_writeLE24(op, cBlockHeader24);
117434 -                       cSize += ZSTD_blockHeaderSize;
117435 -               }
117437 -               remaining -= blockSize;
117438 -               dstCapacity -= cSize;
117439 -               ip += blockSize;
117440 -               op += cSize;
117441 -       }
117443 -       if (lastFrameChunk && (op > ostart))
117444 -               cctx->stage = ZSTDcs_ending;
117445 -       return op - ostart;
117448 -static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
117450 -       BYTE *const op = (BYTE *)dst;
117451 -       U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */
117452 -       U32 const checksumFlag = params.fParams.checksumFlag > 0;
117453 -       U32 const windowSize = 1U << params.cParams.windowLog;
117454 -       U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
117455 -       BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
117456 -       U32 const fcsCode =
117457 -           params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */
117458 -       BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
117459 -       size_t pos;
117461 -       if (dstCapacity < ZSTD_frameHeaderSize_max)
117462 -               return ERROR(dstSize_tooSmall);
117464 -       ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
117465 -       op[4] = frameHeaderDescriptionByte;
117466 -       pos = 5;
117467 -       if (!singleSegment)
117468 -               op[pos++] = windowLogByte;
117469 -       switch (dictIDSizeCode) {
117470 -       default: /* impossible */
117471 -       case 0: break;
117472 -       case 1:
117473 -               op[pos] = (BYTE)(dictID);
117474 -               pos++;
117475 -               break;
117476 -       case 2:
117477 -               ZSTD_writeLE16(op + pos, (U16)dictID);
117478 -               pos += 2;
117479 -               break;
117480 -       case 3:
117481 -               ZSTD_writeLE32(op + pos, dictID);
117482 -               pos += 4;
117483 -               break;
117484 -       }
117485 -       switch (fcsCode) {
117486 -       default: /* impossible */
117487 -       case 0:
117488 -               if (singleSegment)
117489 -                       op[pos++] = (BYTE)(pledgedSrcSize);
117490 -               break;
117491 -       case 1:
117492 -               ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256));
117493 -               pos += 2;
117494 -               break;
117495 -       case 2:
117496 -               ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize));
117497 -               pos += 4;
117498 -               break;
117499 -       case 3:
117500 -               ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize));
117501 -               pos += 8;
117502 -               break;
117503 -       }
117504 -       return pos;
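
Editor's note: a worked example of the header layout produced by ZSTD_writeFrameHeader() above; it is editorial, not part of the patch, and takes ZSTD_WINDOWLOG_ABSOLUTEMIN as 10 (its value in this port's headers) as an assumption.

/* Hedged worked example (editorial): windowLog = 20, no dictID,
 * checksum off, content size unknown (contentSizeFlag = 0):
 *   bytes 0-3 : 28 B5 2F FD   little-endian ZSTD_MAGICNUMBER (0xFD2FB528)
 *   byte  4   : 0x00          frame header descriptor: dictIDSizeCode = 0,
 *                             checksumFlag = 0, singleSegment = 0, fcsCode = 0
 *   byte  5   : 0x50          windowLogByte = (20 - 10) << 3, assuming
 *                             ZSTD_WINDOWLOG_ABSOLUTEMIN == 10
 * => return value pos = 6. */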
117507 -static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk)
117509 -       const BYTE *const ip = (const BYTE *)src;
117510 -       size_t fhSize = 0;
117512 -       if (cctx->stage == ZSTDcs_created)
117513 -               return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
117515 -       if (frame && (cctx->stage == ZSTDcs_init)) {
117516 -               fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
117517 -               if (ZSTD_isError(fhSize))
117518 -                       return fhSize;
117519 -               dstCapacity -= fhSize;
117520 -               dst = (char *)dst + fhSize;
117521 -               cctx->stage = ZSTDcs_ongoing;
117522 -       }
117524 -       /* Check if blocks follow each other */
117525 -       if (src != cctx->nextSrc) {
117526 -               /* not contiguous */
117527 -               ptrdiff_t const delta = cctx->nextSrc - ip;
117528 -               cctx->lowLimit = cctx->dictLimit;
117529 -               cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
117530 -               cctx->dictBase = cctx->base;
117531 -               cctx->base -= delta;
117532 -               cctx->nextToUpdate = cctx->dictLimit;
117533 -               if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE)
117534 -                       cctx->lowLimit = cctx->dictLimit; /* too small extDict */
117535 -       }
117537 -       /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
117538 -       if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
117539 -               ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
117540 -               U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
117541 -               cctx->lowLimit = lowLimitMax;
117542 -       }
117544 -       cctx->nextSrc = ip + srcSize;
117546 -       if (srcSize) {
117547 -               size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk)
117548 -                                          : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize);
117549 -               if (ZSTD_isError(cSize))
117550 -                       return cSize;
117551 -               return cSize + fhSize;
117552 -       } else
117553 -               return fhSize;
117556 -size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
117558 -       return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
117561 -size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); }
117563 -size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
117565 -       size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
117566 -       if (srcSize > blockSizeMax)
117567 -               return ERROR(srcSize_wrong);
117568 -       return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
117571 -/*! ZSTD_loadDictionaryContent() :
117572 - *  @return : 0, or an error code
117573 - */
117574 -static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize)
117576 -       const BYTE *const ip = (const BYTE *)src;
117577 -       const BYTE *const iend = ip + srcSize;
117579 -       /* input becomes curr prefix */
117580 -       zc->lowLimit = zc->dictLimit;
117581 -       zc->dictLimit = (U32)(zc->nextSrc - zc->base);
117582 -       zc->dictBase = zc->base;
117583 -       zc->base += ip - zc->nextSrc;
117584 -       zc->nextToUpdate = zc->dictLimit;
117585 -       zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);
117587 -       zc->nextSrc = iend;
117588 -       if (srcSize <= HASH_READ_SIZE)
117589 -               return 0;
117591 -       switch (zc->params.cParams.strategy) {
117592 -       case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break;
117594 -       case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break;
117596 -       case ZSTD_greedy:
117597 -       case ZSTD_lazy:
117598 -       case ZSTD_lazy2:
117599 -               if (srcSize >= HASH_READ_SIZE)
117600 -                       ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength);
117601 -               break;
117603 -       case ZSTD_btlazy2:
117604 -       case ZSTD_btopt:
117605 -       case ZSTD_btopt2:
117606 -               if (srcSize >= HASH_READ_SIZE)
117607 -                       ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
117608 -               break;
117610 -       default:
117611 -               return ERROR(GENERIC); /* strategy doesn't exist; impossible */
117612 -       }
117614 -       zc->nextToUpdate = (U32)(iend - zc->base);
117615 -       return 0;
117618 -/* Dictionaries that assign zero probability to symbols that show up cause problems
117619 -   during FSE encoding.  Refuse dictionaries that assign zero probability to symbols
117620 -   that we may encounter during compression.
117621 -   NOTE: This behavior is not standard and could be improved in the future. */
117622 -static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
117624 -       U32 s;
117625 -       if (dictMaxSymbolValue < maxSymbolValue)
117626 -               return ERROR(dictionary_corrupted);
117627 -       for (s = 0; s <= maxSymbolValue; ++s) {
117628 -               if (normalizedCounter[s] == 0)
117629 -                       return ERROR(dictionary_corrupted);
117630 -       }
117631 -       return 0;
117634 -/* Dictionary format :
117635 - * See :
117636 - * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
117637 - */
117638 -/*! ZSTD_loadZstdDictionary() :
117639 - * @return : 0, or an error code
117640 - *  assumptions : the magic number is assumed to be already checked
117641 - *                dictSize is assumed > 8
117642 - */
117643 -static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
117645 -       const BYTE *dictPtr = (const BYTE *)dict;
117646 -       const BYTE *const dictEnd = dictPtr + dictSize;
117647 -       short offcodeNCount[MaxOff + 1];
117648 -       unsigned offcodeMaxValue = MaxOff;
117650 -       dictPtr += 4; /* skip magic number */
117651 -       cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr);
117652 -       dictPtr += 4;
117654 -       {
117655 -               size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters));
117656 -               if (HUF_isError(hufHeaderSize))
117657 -                       return ERROR(dictionary_corrupted);
117658 -               dictPtr += hufHeaderSize;
117659 -       }
117661 -       {
117662 -               unsigned offcodeLog;
117663 -               size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
117664 -               if (FSE_isError(offcodeHeaderSize))
117665 -                       return ERROR(dictionary_corrupted);
117666 -               if (offcodeLog > OffFSELog)
117667 -                       return ERROR(dictionary_corrupted);
117668 -               /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
117669 -               CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
117670 -                       dictionary_corrupted);
117671 -               dictPtr += offcodeHeaderSize;
117672 -       }
117674 -       {
117675 -               short matchlengthNCount[MaxML + 1];
117676 -               unsigned matchlengthMaxValue = MaxML, matchlengthLog;
117677 -               size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
117678 -               if (FSE_isError(matchlengthHeaderSize))
117679 -                       return ERROR(dictionary_corrupted);
117680 -               if (matchlengthLog > MLFSELog)
117681 -                       return ERROR(dictionary_corrupted);
117682 -               /* Every match length code must have non-zero probability */
117683 -               CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
117684 -               CHECK_E(
117685 -                   FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
117686 -                   dictionary_corrupted);
117687 -               dictPtr += matchlengthHeaderSize;
117688 -       }
117690 -       {
117691 -               short litlengthNCount[MaxLL + 1];
117692 -               unsigned litlengthMaxValue = MaxLL, litlengthLog;
117693 -               size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
117694 -               if (FSE_isError(litlengthHeaderSize))
117695 -                       return ERROR(dictionary_corrupted);
117696 -               if (litlengthLog > LLFSELog)
117697 -                       return ERROR(dictionary_corrupted);
117698 -               /* Every literal length code must have non-zero probability */
117699 -               CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
117700 -               CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
117701 -                       dictionary_corrupted);
117702 -               dictPtr += litlengthHeaderSize;
117703 -       }
117705 -       if (dictPtr + 12 > dictEnd)
117706 -               return ERROR(dictionary_corrupted);
117707 -       cctx->rep[0] = ZSTD_readLE32(dictPtr + 0);
117708 -       cctx->rep[1] = ZSTD_readLE32(dictPtr + 4);
117709 -       cctx->rep[2] = ZSTD_readLE32(dictPtr + 8);
117710 -       dictPtr += 12;
117712 -       {
117713 -               size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
117714 -               U32 offcodeMax = MaxOff;
117715 -               if (dictContentSize <= ((U32)-1) - 128 KB) {
117716 -                       U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
117717 -                       offcodeMax = ZSTD_highbit32(maxOffset);              /* Calculate minimum offset code required to represent maxOffset */
117718 -               }
117719 -               /* All offset values <= dictContentSize + 128 KB must be representable */
117720 -               CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
117721 -               /* All repCodes must be <= dictContentSize and != 0*/
117722 -               {
117723 -                       U32 u;
117724 -                       for (u = 0; u < 3; u++) {
117725 -                               if (cctx->rep[u] == 0)
117726 -                                       return ERROR(dictionary_corrupted);
117727 -                               if (cctx->rep[u] > dictContentSize)
117728 -                                       return ERROR(dictionary_corrupted);
117729 -                       }
117730 -               }
117732 -               cctx->flagStaticTables = 1;
117733 -               cctx->flagStaticHufTable = HUF_repeat_valid;
117734 -               return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
117735 -       }
117738 -/** ZSTD_compress_insertDictionary() :
117739 -*   @return : 0, or an error code */
117740 -static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
117742 -       if ((dict == NULL) || (dictSize <= 8))
117743 -               return 0;
117745 -       /* dict as pure content */
117746 -       if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict))
117747 -               return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
117749 -       /* dict as zstd dictionary */
117750 -       return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
117753 -/*! ZSTD_compressBegin_internal() :
117754 -*   @return : 0, or an error code */
117755 -static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize)
117757 -       ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
117758 -       CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
117759 -       return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
117762 -/*! ZSTD_compressBegin_advanced() :
117763 -*   @return : 0, or an error code */
117764 -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
117766 -       /* compression parameters verification and optimization */
117767 -       CHECK_F(ZSTD_checkCParams(params.cParams));
117768 -       return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
117771 -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel)
117773 -       ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
117774 -       return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
117777 -size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); }
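
Editor's note: the ZSTD_compressBegin*() entry points above, together with ZSTD_compressContinue() and ZSTD_compressEnd(), form the low-level frame protocol that the buffered streaming API later in this file wraps. A minimal sketch of driving it directly follows; it is editorial (not part of the patch), walks a single contiguous buffer, and abbreviates error handling.

/* Hedged sketch, not part of the patch: low-level begin/continue/end use. */
static size_t sketch_compress_chunks(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
				     const void *src, size_t srcSize)
{
	const char *ip = (const char *)src;
	char *op = (char *)dst;
	size_t const chunk = 64 * 1024; /* arbitrary example chunk size */

	CHECK_F(ZSTD_compressBegin(cctx, 3 /* example compression level */));
	while (srcSize > chunk) {
		/* each call emits complete blocks for this chunk */
		size_t const cSize = ZSTD_compressContinue(cctx, op, dstCapacity, ip, chunk);
		if (ZSTD_isError(cSize))
			return cSize;
		op += cSize; dstCapacity -= cSize;
		ip += chunk; srcSize -= chunk;
	}
	{	/* final chunk : ZSTD_compressEnd() also writes the epilogue */
		size_t const cSize = ZSTD_compressEnd(cctx, op, dstCapacity, ip, srcSize);
		if (ZSTD_isError(cSize))
			return cSize;
		op += cSize;
	}
	return (size_t)(op - (char *)dst);
}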
117779 -/*! ZSTD_writeEpilogue() :
117780 -*   Ends a frame.
117781 -*   @return : nb of bytes written into dst (or an error code) */
117782 -static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity)
117784 -       BYTE *const ostart = (BYTE *)dst;
117785 -       BYTE *op = ostart;
117786 -       size_t fhSize = 0;
117788 -       if (cctx->stage == ZSTDcs_created)
117789 -               return ERROR(stage_wrong); /* init missing */
117791 -       /* special case : empty frame */
117792 -       if (cctx->stage == ZSTDcs_init) {
117793 -               fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
117794 -               if (ZSTD_isError(fhSize))
117795 -                       return fhSize;
117796 -               dstCapacity -= fhSize;
117797 -               op += fhSize;
117798 -               cctx->stage = ZSTDcs_ongoing;
117799 -       }
117801 -       if (cctx->stage != ZSTDcs_ending) {
117802 -               /* write one last empty block, make it the "last" block */
117803 -               U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0;
117804 -               if (dstCapacity < 4)
117805 -                       return ERROR(dstSize_tooSmall);
117806 -               ZSTD_writeLE32(op, cBlockHeader24);
117807 -               op += ZSTD_blockHeaderSize;
117808 -               dstCapacity -= ZSTD_blockHeaderSize;
117809 -       }
117811 -       if (cctx->params.fParams.checksumFlag) {
117812 -               U32 const checksum = (U32)xxh64_digest(&cctx->xxhState);
117813 -               if (dstCapacity < 4)
117814 -                       return ERROR(dstSize_tooSmall);
117815 -               ZSTD_writeLE32(op, checksum);
117816 -               op += 4;
117817 -       }
117819 -       cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
117820 -       return op - ostart;
117823 -size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
117825 -       size_t endResult;
117826 -       size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
117827 -       if (ZSTD_isError(cSize))
117828 -               return cSize;
117829 -       endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize);
117830 -       if (ZSTD_isError(endResult))
117831 -               return endResult;
117832 -       return cSize + endResult;
117835 -static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
117836 -                                    ZSTD_parameters params)
117838 -       CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
117839 -       return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
117842 -size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
117843 -                              ZSTD_parameters params)
117845 -       return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
117848 -size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params)
117850 -       return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
117853 -/* =====  Dictionary API  ===== */
117855 -struct ZSTD_CDict_s {
117856 -       void *dictBuffer;
117857 -       const void *dictContent;
117858 -       size_t dictContentSize;
117859 -       ZSTD_CCtx *refContext;
117860 -}; /* typedef'd to ZSTD_CDict within "zstd.h" */
117862 -size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); }
117864 -static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem)
117866 -       if (!customMem.customAlloc || !customMem.customFree)
117867 -               return NULL;
117869 -       {
117870 -               ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
117871 -               ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem);
117873 -               if (!cdict || !cctx) {
117874 -                       ZSTD_free(cdict, customMem);
117875 -                       ZSTD_freeCCtx(cctx);
117876 -                       return NULL;
117877 -               }
117879 -               if ((byReference) || (!dictBuffer) || (!dictSize)) {
117880 -                       cdict->dictBuffer = NULL;
117881 -                       cdict->dictContent = dictBuffer;
117882 -               } else {
117883 -                       void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
117884 -                       if (!internalBuffer) {
117885 -                               ZSTD_free(cctx, customMem);
117886 -                               ZSTD_free(cdict, customMem);
117887 -                               return NULL;
117888 -                       }
117889 -                       memcpy(internalBuffer, dictBuffer, dictSize);
117890 -                       cdict->dictBuffer = internalBuffer;
117891 -                       cdict->dictContent = internalBuffer;
117892 -               }
117894 -               {
117895 -                       size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
117896 -                       if (ZSTD_isError(errorCode)) {
117897 -                               ZSTD_free(cdict->dictBuffer, customMem);
117898 -                               ZSTD_free(cdict, customMem);
117899 -                               ZSTD_freeCCtx(cctx);
117900 -                               return NULL;
117901 -                       }
117902 -               }
117904 -               cdict->refContext = cctx;
117905 -               cdict->dictContentSize = dictSize;
117906 -               return cdict;
117907 -       }
117910 -ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize)
117912 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
117913 -       return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem);
117916 -size_t ZSTD_freeCDict(ZSTD_CDict *cdict)
117918 -       if (cdict == NULL)
117919 -               return 0; /* support free on NULL */
117920 -       {
117921 -               ZSTD_customMem const cMem = cdict->refContext->customMem;
117922 -               ZSTD_freeCCtx(cdict->refContext);
117923 -               ZSTD_free(cdict->dictBuffer, cMem);
117924 -               ZSTD_free(cdict, cMem);
117925 -               return 0;
117926 -       }
117929 -static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); }
117931 -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize)
117933 -       if (cdict->dictContentSize)
117934 -               CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
117935 -       else {
117936 -               ZSTD_parameters params = cdict->refContext->params;
117937 -               params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
117938 -               CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
117939 -       }
117940 -       return 0;
117943 -/*! ZSTD_compress_usingCDict() :
117944 -*   Compression using a digested Dictionary.
117945 -*   Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
117946 -*   Note that compression level is decided during dictionary creation */
117947 -size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict)
117949 -       CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
117951 -       if (cdict->refContext->params.fParams.contentSizeFlag == 1) {
117952 -               cctx->params.fParams.contentSizeFlag = 1;
117953 -               cctx->frameContentSize = srcSize;
117954 -       } else {
117955 -               cctx->params.fParams.contentSizeFlag = 0;
117956 -       }
117958 -       return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
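
Editor's note: a hedged usage sketch for the digested-dictionary path above. The workspace-bound/init calling convention mirrors ZSTD_initCDict()/ZSTD_initCStream() as they appear in this file; ZSTD_initCCtx()'s exact signature (workspace pointer plus size) is taken from the kernel's zstd.h and is an assumption here.

/* Hedged sketch, not part of the patch; needs <linux/vmalloc.h> and
 * <linux/errno.h>. ZSTD_initCCtx(workspace, size) is assumed from zstd.h;
 * every other call appears in the code above. */
static int sketch_compress_with_cdict(const void *dict, size_t dictSize,
				      const void *src, size_t srcSize,
				      void *dst, size_t dstCapacity)
{
	ZSTD_parameters const params = ZSTD_getParams(3, srcSize, dictSize);
	size_t const cdWkspSize = ZSTD_CDictWorkspaceBound(params.cParams);
	size_t const ccWkspSize = ZSTD_CCtxWorkspaceBound(params.cParams);
	void *cdWksp = vmalloc(cdWkspSize);
	void *ccWksp = vmalloc(ccWkspSize);
	int err = -ENOMEM;

	if (cdWksp && ccWksp) {
		/* note : ZSTD_initCDict() references dict in place (byReference) */
		ZSTD_CDict *const cdict = ZSTD_initCDict(dict, dictSize, params, cdWksp, cdWkspSize);
		ZSTD_CCtx *const cctx = ZSTD_initCCtx(ccWksp, ccWkspSize);
		if (cdict && cctx) {
			size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
			err = ZSTD_isError(cSize) ? -EINVAL : 0;
		} else {
			err = -EINVAL;
		}
	}
	vfree(cdWksp);
	vfree(ccWksp);
	return err;
}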
117961 -/* ******************************************************************
117962 -*  Streaming
117963 -********************************************************************/
117965 -typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
117967 -struct ZSTD_CStream_s {
117968 -       ZSTD_CCtx *cctx;
117969 -       ZSTD_CDict *cdictLocal;
117970 -       const ZSTD_CDict *cdict;
117971 -       char *inBuff;
117972 -       size_t inBuffSize;
117973 -       size_t inToCompress;
117974 -       size_t inBuffPos;
117975 -       size_t inBuffTarget;
117976 -       size_t blockSize;
117977 -       char *outBuff;
117978 -       size_t outBuffSize;
117979 -       size_t outBuffContentSize;
117980 -       size_t outBuffFlushedSize;
117981 -       ZSTD_cStreamStage stage;
117982 -       U32 checksum;
117983 -       U32 frameEnded;
117984 -       U64 pledgedSrcSize;
117985 -       U64 inputProcessed;
117986 -       ZSTD_parameters params;
117987 -       ZSTD_customMem customMem;
117988 -}; /* typedef'd to ZSTD_CStream within "zstd.h" */
117990 -size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams)
117992 -       size_t const inBuffSize = (size_t)1 << cParams.windowLog;
117993 -       size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize);
117994 -       size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
117996 -       return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
117999 -ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem)
118001 -       ZSTD_CStream *zcs;
118003 -       if (!customMem.customAlloc || !customMem.customFree)
118004 -               return NULL;
118006 -       zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
118007 -       if (zcs == NULL)
118008 -               return NULL;
118009 -       memset(zcs, 0, sizeof(ZSTD_CStream));
118010 -       memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
118011 -       zcs->cctx = ZSTD_createCCtx_advanced(customMem);
118012 -       if (zcs->cctx == NULL) {
118013 -               ZSTD_freeCStream(zcs);
118014 -               return NULL;
118015 -       }
118016 -       return zcs;
118019 -size_t ZSTD_freeCStream(ZSTD_CStream *zcs)
118021 -       if (zcs == NULL)
118022 -               return 0; /* support free on NULL */
118023 -       {
118024 -               ZSTD_customMem const cMem = zcs->customMem;
118025 -               ZSTD_freeCCtx(zcs->cctx);
118026 -               zcs->cctx = NULL;
118027 -               ZSTD_freeCDict(zcs->cdictLocal);
118028 -               zcs->cdictLocal = NULL;
118029 -               ZSTD_free(zcs->inBuff, cMem);
118030 -               zcs->inBuff = NULL;
118031 -               ZSTD_free(zcs->outBuff, cMem);
118032 -               zcs->outBuff = NULL;
118033 -               ZSTD_free(zcs, cMem);
118034 -               return 0;
118035 -       }
118038 -/*======   Initialization   ======*/
118040 -size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
118041 -size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; }
118043 -static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
118045 -       if (zcs->inBuffSize == 0)
118046 -               return ERROR(stage_wrong); /* zcs has not been initialized at least once => cannot reset */
118048 -       if (zcs->cdict)
118049 -               CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
118050 -       else
118051 -               CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
118053 -       zcs->inToCompress = 0;
118054 -       zcs->inBuffPos = 0;
118055 -       zcs->inBuffTarget = zcs->blockSize;
118056 -       zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
118057 -       zcs->stage = zcss_load;
118058 -       zcs->frameEnded = 0;
118059 -       zcs->pledgedSrcSize = pledgedSrcSize;
118060 -       zcs->inputProcessed = 0;
118061 -       return 0; /* ready to go */
118064 -size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
118067 -       zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
118069 -       return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
118072 -static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
118074 -       /* allocate buffers */
118075 -       {
118076 -               size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
118077 -               if (zcs->inBuffSize < neededInBuffSize) {
118078 -                       zcs->inBuffSize = neededInBuffSize;
118079 -                       ZSTD_free(zcs->inBuff, zcs->customMem);
118080 -                       zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem);
118081 -                       if (zcs->inBuff == NULL)
118082 -                               return ERROR(memory_allocation);
118083 -               }
118084 -               zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
118085 -       }
118086 -       if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) {
118087 -               zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1;
118088 -               ZSTD_free(zcs->outBuff, zcs->customMem);
118089 -               zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
118090 -               if (zcs->outBuff == NULL)
118091 -                       return ERROR(memory_allocation);
118092 -       }
118094 -       if (dict && dictSize >= 8) {
118095 -               ZSTD_freeCDict(zcs->cdictLocal);
118096 -               zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
118097 -               if (zcs->cdictLocal == NULL)
118098 -                       return ERROR(memory_allocation);
118099 -               zcs->cdict = zcs->cdictLocal;
118100 -       } else
118101 -               zcs->cdict = NULL;
118103 -       zcs->checksum = params.fParams.checksumFlag > 0;
118104 -       zcs->params = params;
118106 -       return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
118109 -ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
118111 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
118112 -       ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem);
118113 -       if (zcs) {
118114 -               size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
118115 -               if (ZSTD_isError(code)) {
118116 -                       return NULL;
118117 -               }
118118 -       }
118119 -       return zcs;
118122 -ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
118124 -       ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
118125 -       ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize);
118126 -       if (zcs) {
118127 -               zcs->cdict = cdict;
118128 -               if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) {
118129 -                       return NULL;
118130 -               }
118131 -       }
118132 -       return zcs;
118135 -/*======   Compression   ======*/
118137 -typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;
118139 -ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
118141 -       size_t const length = MIN(dstCapacity, srcSize);
118142 -       memcpy(dst, src, length);
118143 -       return length;
118146 -static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush)
118148 -       U32 someMoreWork = 1;
118149 -       const char *const istart = (const char *)src;
118150 -       const char *const iend = istart + *srcSizePtr;
118151 -       const char *ip = istart;
118152 -       char *const ostart = (char *)dst;
118153 -       char *const oend = ostart + *dstCapacityPtr;
118154 -       char *op = ostart;
118156 -       while (someMoreWork) {
118157 -               switch (zcs->stage) {
118158 -               case zcss_init:
118159 -                       return ERROR(init_missing); /* call ZSTD_initCStream() first ! */
118161 -               case zcss_load:
118162 -                       /* complete inBuffer */
118163 -                       {
118164 -                               size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
118165 -                               size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip);
118166 -                               zcs->inBuffPos += loaded;
118167 -                               ip += loaded;
118168 -                               if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) {
118169 -                                       someMoreWork = 0;
118170 -                                       break; /* not enough input to get a full block : stop there, wait for more */
118171 -                               }
118172 -                       }
118173 -                       /* compress curr block (note : this stage cannot be stopped in the middle) */
118174 -                       {
118175 -                               void *cDst;
118176 -                               size_t cSize;
118177 -                               size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
118178 -                               size_t oSize = oend - op;
118179 -                               if (oSize >= ZSTD_compressBound(iSize))
118180 -                                       cDst = op; /* compress directly into output buffer (avoid flush stage) */
118181 -                               else
118182 -                                       cDst = zcs->outBuff, oSize = zcs->outBuffSize;
118183 -                               cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize)
118184 -                                                          : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
118185 -                               if (ZSTD_isError(cSize))
118186 -                                       return cSize;
118187 -                               if (flush == zsf_end)
118188 -                                       zcs->frameEnded = 1;
118189 -                               /* prepare next block */
118190 -                               zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
118191 -                               if (zcs->inBuffTarget > zcs->inBuffSize)
118192 -                                       zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */
118193 -                               zcs->inToCompress = zcs->inBuffPos;
118194 -                               if (cDst == op) {
118195 -                                       op += cSize;
118196 -                                       break;
118197 -                               } /* no need to flush */
118198 -                               zcs->outBuffContentSize = cSize;
118199 -                               zcs->outBuffFlushedSize = 0;
118200 -                               zcs->stage = zcss_flush; /* pass-through to flush stage */
118201 -                       }
118202 -                       fallthrough;
118204 -               case zcss_flush: {
118205 -                       size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
118206 -                       size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
118207 -                       op += flushed;
118208 -                       zcs->outBuffFlushedSize += flushed;
118209 -                       if (toFlush != flushed) {
118210 -                               someMoreWork = 0;
118211 -                               break;
118212 -                       } /* dst too small to store flushed data : stop there */
118213 -                       zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
118214 -                       zcs->stage = zcss_load;
118215 -                       break;
118216 -               }
118218 -               case zcss_final:
118219 -                       someMoreWork = 0; /* do nothing */
118220 -                       break;
118222 -               default:
118223 -                       return ERROR(GENERIC); /* impossible */
118224 -               }
118225 -       }
118227 -       *srcSizePtr = ip - istart;
118228 -       *dstCapacityPtr = op - ostart;
118229 -       zcs->inputProcessed += *srcSizePtr;
118230 -       if (zcs->frameEnded)
118231 -               return 0;
118232 -       {
118233 -               size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
118234 -               if (hintInSize == 0)
118235 -                       hintInSize = zcs->blockSize;
118236 -               return hintInSize;
118237 -       }
118240 -size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
118242 -       size_t sizeRead = input->size - input->pos;
118243 -       size_t sizeWritten = output->size - output->pos;
118244 -       size_t const result =
118245 -           ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather);
118246 -       input->pos += sizeRead;
118247 -       output->pos += sizeWritten;
118248 -       return result;
118251 -/*======   Finalize   ======*/
118253 -/*! ZSTD_flushStream() :
118254 -*   @return : amount of data remaining to flush */
118255 -size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
118257 -       size_t srcSize = 0;
118258 -       size_t sizeWritten = output->size - output->pos;
118259 -       size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize,
118260 -                                                         &srcSize, /* use a valid src address instead of NULL */
118261 -                                                         zsf_flush);
118262 -       output->pos += sizeWritten;
118263 -       if (ZSTD_isError(result))
118264 -               return result;
118265 -       return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
118268 -size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
118270 -       BYTE *const ostart = (BYTE *)(output->dst) + output->pos;
118271 -       BYTE *const oend = (BYTE *)(output->dst) + output->size;
118272 -       BYTE *op = ostart;
118274 -       if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
118275 -               return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */
118277 -       if (zcs->stage != zcss_final) {
118278 -               /* flush whatever remains */
118279 -               size_t srcSize = 0;
118280 -               size_t sizeWritten = output->size - output->pos;
118281 -               size_t const notEnded =
118282 -                   ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */
118283 -               size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
118284 -               op += sizeWritten;
118285 -               if (remainingToFlush) {
118286 -                       output->pos += sizeWritten;
118287 -                       return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
118288 -               }
118289 -               /* create epilogue */
118290 -               zcs->stage = zcss_final;
118291 -               zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL,
118292 -                                                                          0); /* write epilogue, including final empty block, into outBuff */
118293 -       }
118295 -       /* flush epilogue */
118296 -       {
118297 -               size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
118298 -               size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
118299 -               op += flushed;
118300 -               zcs->outBuffFlushedSize += flushed;
118301 -               output->pos += op - ostart;
118302 -               if (toFlush == flushed)
118303 -                       zcs->stage = zcss_init; /* end reached */
118304 -               return toFlush - flushed;
118305 -       }
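
Editor's note: the streaming entry points above are meant to be driven as a loop over ZSTD_inBuffer/ZSTD_outBuffer pairs; a hedged sketch follows. The {src|dst, size, pos} field layout matches how ZSTD_compressStream() consumes the structs above, and the single ZSTD_endStream() call assumes dst is large enough that nothing remains to flush.

/* Hedged sketch, not part of the patch: one-pass streaming compression.
 * zcs is assumed to come from ZSTD_initCStream() with a workspace sized
 * via ZSTD_CStreamWorkspaceBound(params.cParams). */
static size_t sketch_stream_compress(ZSTD_CStream *zcs, void *dst, size_t dstCapacity,
				     const void *src, size_t srcSize)
{
	ZSTD_inBuffer in = { src, srcSize, 0 };
	ZSTD_outBuffer out = { dst, dstCapacity, 0 };

	while (in.pos < in.size) {
		size_t const hint = ZSTD_compressStream(zcs, &out, &in);
		if (ZSTD_isError(hint))
			return hint;
		if (out.pos == out.size)
			return ERROR(dstSize_tooSmall); /* dst full, input left */
	}
	{	/* end the frame : return value is the amount left to flush, 0 when done */
		size_t const remaining = ZSTD_endStream(zcs, &out);
		if (ZSTD_isError(remaining))
			return remaining;
		if (remaining != 0)
			return ERROR(dstSize_tooSmall);
	}
	return out.pos; /* compressed frame size */
}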
118308 -/*-=====  Pre-defined compression levels  =====-*/
118310 -#define ZSTD_DEFAULT_CLEVEL 1
118311 -#define ZSTD_MAX_CLEVEL 22
118312 -int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
118314 -static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = {
118315 -    {
118316 -       /* "default" */
118317 -       /* W,  C,  H,  S,  L, TL, strat */
118318 -       {18, 12, 12, 1, 7, 16, ZSTD_fast},    /* level  0 - never used */
118319 -       {19, 13, 14, 1, 7, 16, ZSTD_fast},    /* level  1 */
118320 -       {19, 15, 16, 1, 6, 16, ZSTD_fast},    /* level  2 */
118321 -       {20, 16, 17, 1, 5, 16, ZSTD_dfast},   /* level  3.*/
118322 -       {20, 18, 18, 1, 5, 16, ZSTD_dfast},   /* level  4.*/
118323 -       {20, 15, 18, 3, 5, 16, ZSTD_greedy},  /* level  5 */
118324 -       {21, 16, 19, 2, 5, 16, ZSTD_lazy},    /* level  6 */
118325 -       {21, 17, 20, 3, 5, 16, ZSTD_lazy},    /* level  7 */
118326 -       {21, 18, 20, 3, 5, 16, ZSTD_lazy2},   /* level  8 */
118327 -       {21, 20, 20, 3, 5, 16, ZSTD_lazy2},   /* level  9 */
118328 -       {21, 19, 21, 4, 5, 16, ZSTD_lazy2},   /* level 10 */
118329 -       {22, 20, 22, 4, 5, 16, ZSTD_lazy2},   /* level 11 */
118330 -       {22, 20, 22, 5, 5, 16, ZSTD_lazy2},   /* level 12 */
118331 -       {22, 21, 22, 5, 5, 16, ZSTD_lazy2},   /* level 13 */
118332 -       {22, 21, 22, 6, 5, 16, ZSTD_lazy2},   /* level 14 */
118333 -       {22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */
118334 -       {23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */
118335 -       {23, 21, 22, 4, 5, 24, ZSTD_btopt},   /* level 17 */
118336 -       {23, 23, 22, 6, 5, 32, ZSTD_btopt},   /* level 18 */
118337 -       {23, 23, 22, 6, 3, 48, ZSTD_btopt},   /* level 19 */
118338 -       {25, 25, 23, 7, 3, 64, ZSTD_btopt2},  /* level 20 */
118339 -       {26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */
118340 -       {27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */
118341 -    },
118342 -    {
118343 -       /* for srcSize <= 256 KB */
118344 -       /* W,  C,  H,  S,  L,  T, strat */
118345 -       {0, 0, 0, 0, 0, 0, ZSTD_fast},   /* level  0 - not used */
118346 -       {18, 13, 14, 1, 6, 8, ZSTD_fast},      /* level  1 */
118347 -       {18, 14, 13, 1, 5, 8, ZSTD_dfast},     /* level  2 */
118348 -       {18, 16, 15, 1, 5, 8, ZSTD_dfast},     /* level  3 */
118349 -       {18, 15, 17, 1, 5, 8, ZSTD_greedy},    /* level  4.*/
118350 -       {18, 16, 17, 4, 5, 8, ZSTD_greedy},    /* level  5.*/
118351 -       {18, 16, 17, 3, 5, 8, ZSTD_lazy},      /* level  6.*/
118352 -       {18, 17, 17, 4, 4, 8, ZSTD_lazy},      /* level  7 */
118353 -       {18, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
118354 -       {18, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
118355 -       {18, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
118356 -       {18, 18, 17, 6, 4, 8, ZSTD_lazy2},     /* level 11.*/
118357 -       {18, 18, 17, 7, 4, 8, ZSTD_lazy2},     /* level 12.*/
118358 -       {18, 19, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13 */
118359 -       {18, 18, 18, 4, 4, 16, ZSTD_btopt},    /* level 14.*/
118360 -       {18, 18, 18, 4, 3, 16, ZSTD_btopt},    /* level 15.*/
118361 -       {18, 19, 18, 6, 3, 32, ZSTD_btopt},    /* level 16.*/
118362 -       {18, 19, 18, 8, 3, 64, ZSTD_btopt},    /* level 17.*/
118363 -       {18, 19, 18, 9, 3, 128, ZSTD_btopt},   /* level 18.*/
118364 -       {18, 19, 18, 10, 3, 256, ZSTD_btopt},  /* level 19.*/
118365 -       {18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/
118366 -       {18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/
118367 -       {18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/
118368 -    },
118369 -    {
118370 -       /* for srcSize <= 128 KB */
118371 -       /* W,  C,  H,  S,  L,  T, strat */
118372 -       {17, 12, 12, 1, 7, 8, ZSTD_fast},      /* level  0 - not used */
118373 -       {17, 12, 13, 1, 6, 8, ZSTD_fast},      /* level  1 */
118374 -       {17, 13, 16, 1, 5, 8, ZSTD_fast},      /* level  2 */
118375 -       {17, 16, 16, 2, 5, 8, ZSTD_dfast},     /* level  3 */
118376 -       {17, 13, 15, 3, 4, 8, ZSTD_greedy},    /* level  4 */
118377 -       {17, 15, 17, 4, 4, 8, ZSTD_greedy},    /* level  5 */
118378 -       {17, 16, 17, 3, 4, 8, ZSTD_lazy},      /* level  6 */
118379 -       {17, 15, 17, 4, 4, 8, ZSTD_lazy2},     /* level  7 */
118380 -       {17, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
118381 -       {17, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
118382 -       {17, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
118383 -       {17, 17, 17, 7, 4, 8, ZSTD_lazy2},     /* level 11 */
118384 -       {17, 17, 17, 8, 4, 8, ZSTD_lazy2},     /* level 12 */
118385 -       {17, 18, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13.*/
118386 -       {17, 17, 17, 7, 3, 8, ZSTD_btopt},     /* level 14.*/
118387 -       {17, 17, 17, 7, 3, 16, ZSTD_btopt},    /* level 15.*/
118388 -       {17, 18, 17, 7, 3, 32, ZSTD_btopt},    /* level 16.*/
118389 -       {17, 18, 17, 7, 3, 64, ZSTD_btopt},    /* level 17.*/
118390 -       {17, 18, 17, 7, 3, 256, ZSTD_btopt},   /* level 18.*/
118391 -       {17, 18, 17, 8, 3, 256, ZSTD_btopt},   /* level 19.*/
118392 -       {17, 18, 17, 9, 3, 256, ZSTD_btopt2},  /* level 20.*/
118393 -       {17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/
118394 -       {17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/
118395 -    },
118396 -    {
118397 -       /* for srcSize <= 16 KB */
118398 -       /* W,  C,  H,  S,  L,  T, strat */
118399 -       {14, 12, 12, 1, 7, 6, ZSTD_fast},      /* level  0 - not used */
118400 -       {14, 14, 14, 1, 6, 6, ZSTD_fast},      /* level  1 */
118401 -       {14, 14, 14, 1, 4, 6, ZSTD_fast},      /* level  2 */
118402 -       {14, 14, 14, 1, 4, 6, ZSTD_dfast},     /* level  3.*/
118403 -       {14, 14, 14, 4, 4, 6, ZSTD_greedy},    /* level  4.*/
118404 -       {14, 14, 14, 3, 4, 6, ZSTD_lazy},      /* level  5.*/
118405 -       {14, 14, 14, 4, 4, 6, ZSTD_lazy2},     /* level  6 */
118406 -       {14, 14, 14, 5, 4, 6, ZSTD_lazy2},     /* level  7 */
118407 -       {14, 14, 14, 6, 4, 6, ZSTD_lazy2},     /* level  8.*/
118408 -       {14, 15, 14, 6, 4, 6, ZSTD_btlazy2},   /* level  9.*/
118409 -       {14, 15, 14, 3, 3, 6, ZSTD_btopt},     /* level 10.*/
118410 -       {14, 15, 14, 6, 3, 8, ZSTD_btopt},     /* level 11.*/
118411 -       {14, 15, 14, 6, 3, 16, ZSTD_btopt},    /* level 12.*/
118412 -       {14, 15, 14, 6, 3, 24, ZSTD_btopt},    /* level 13.*/
118413 -       {14, 15, 15, 6, 3, 48, ZSTD_btopt},    /* level 14.*/
118414 -       {14, 15, 15, 6, 3, 64, ZSTD_btopt},    /* level 15.*/
118415 -       {14, 15, 15, 6, 3, 96, ZSTD_btopt},    /* level 16.*/
118416 -       {14, 15, 15, 6, 3, 128, ZSTD_btopt},   /* level 17.*/
118417 -       {14, 15, 15, 6, 3, 256, ZSTD_btopt},   /* level 18.*/
118418 -       {14, 15, 15, 7, 3, 256, ZSTD_btopt},   /* level 19.*/
118419 -       {14, 15, 15, 8, 3, 256, ZSTD_btopt2},  /* level 20.*/
118420 -       {14, 15, 15, 9, 3, 256, ZSTD_btopt2},  /* level 21.*/
118421 -       {14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/
118422 -    },
118425 -/*! ZSTD_getCParams() :
118426 -*   @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
118427 -*   Size values are optional, provide 0 if not known or unused */
118428 -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
118430 -       ZSTD_compressionParameters cp;
118431 -       size_t const addedSize = srcSize ? 0 : 500;
118432 -       U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1;
118433 -       U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */
118434 -       if (compressionLevel <= 0)
118435 -               compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */
118436 -       if (compressionLevel > ZSTD_MAX_CLEVEL)
118437 -               compressionLevel = ZSTD_MAX_CLEVEL;
118438 -       cp = ZSTD_defaultCParameters[tableID][compressionLevel];
118439 -       if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */
118440 -               if (cp.windowLog > ZSTD_WINDOWLOG_MAX)
118441 -                       cp.windowLog = ZSTD_WINDOWLOG_MAX;
118442 -               if (cp.chainLog > ZSTD_CHAINLOG_MAX)
118443 -                       cp.chainLog = ZSTD_CHAINLOG_MAX;
118444 -               if (cp.hashLog > ZSTD_HASHLOG_MAX)
118445 -                       cp.hashLog = ZSTD_HASHLOG_MAX;
118446 -       }
118447 -       cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
118448 -       return cp;
118451 -/*! ZSTD_getParams() :
118452 -*   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
118453 -*   All fields of `ZSTD_frameParameters` are set to default (0) */
118454 -ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
118456 -       ZSTD_parameters params;
118457 -       ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
118458 -       memset(&params, 0, sizeof(params));
118459 -       params.cParams = cParams;
118460 -       return params;
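
Editor's note: the boolean-sum tableID selection in ZSTD_getCParams() above is easy to misread, so a short worked example (editorial, not part of the patch) follows.

/* Worked example for the tableID arithmetic above:
 *   srcSize = 100 KB, dictSize = 0  =>  rSize = 100 KB
 *     tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB)
 *             = 1 + 1 + 0 = 2   ->  "for srcSize <= 128 KB" table
 *   srcSize = 0 (unknown), dictSize = 0  =>  rSize = (U64)-1
 *     tableID = 0 + 0 + 0 = 0   ->  "default" table
 *     (this is the srcSizeHint == 0 case the inline comment refers to) */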
118463 -EXPORT_SYMBOL(ZSTD_maxCLevel);
118464 -EXPORT_SYMBOL(ZSTD_compressBound);
118466 -EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound);
118467 -EXPORT_SYMBOL(ZSTD_initCCtx);
118468 -EXPORT_SYMBOL(ZSTD_compressCCtx);
118469 -EXPORT_SYMBOL(ZSTD_compress_usingDict);
118471 -EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound);
118472 -EXPORT_SYMBOL(ZSTD_initCDict);
118473 -EXPORT_SYMBOL(ZSTD_compress_usingCDict);
118475 -EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound);
118476 -EXPORT_SYMBOL(ZSTD_initCStream);
118477 -EXPORT_SYMBOL(ZSTD_initCStream_usingCDict);
118478 -EXPORT_SYMBOL(ZSTD_resetCStream);
118479 -EXPORT_SYMBOL(ZSTD_compressStream);
118480 -EXPORT_SYMBOL(ZSTD_flushStream);
118481 -EXPORT_SYMBOL(ZSTD_endStream);
118482 -EXPORT_SYMBOL(ZSTD_CStreamInSize);
118483 -EXPORT_SYMBOL(ZSTD_CStreamOutSize);
118485 -EXPORT_SYMBOL(ZSTD_getCParams);
118486 -EXPORT_SYMBOL(ZSTD_getParams);
118487 -EXPORT_SYMBOL(ZSTD_checkCParams);
118488 -EXPORT_SYMBOL(ZSTD_adjustCParams);
118490 -EXPORT_SYMBOL(ZSTD_compressBegin);
118491 -EXPORT_SYMBOL(ZSTD_compressBegin_usingDict);
118492 -EXPORT_SYMBOL(ZSTD_compressBegin_advanced);
118493 -EXPORT_SYMBOL(ZSTD_copyCCtx);
118494 -EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict);
118495 -EXPORT_SYMBOL(ZSTD_compressContinue);
118496 -EXPORT_SYMBOL(ZSTD_compressEnd);
118498 -EXPORT_SYMBOL(ZSTD_getBlockSizeMax);
118499 -EXPORT_SYMBOL(ZSTD_compressBlock);
118501 -MODULE_LICENSE("Dual BSD/GPL");
118502 -MODULE_DESCRIPTION("Zstd Compressor");
118503 diff --git a/lib/zstd/compress/fse_compress.c b/lib/zstd/compress/fse_compress.c
118504 new file mode 100644
118505 index 000000000000..436985b620e5
118506 --- /dev/null
118507 +++ b/lib/zstd/compress/fse_compress.c
118508 @@ -0,0 +1,625 @@
118509 +/* ******************************************************************
118510 + * FSE : Finite State Entropy encoder
118511 + * Copyright (c) Yann Collet, Facebook, Inc.
118513 + *  You can contact the author at :
118514 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
118515 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
118517 + * This source code is licensed under both the BSD-style license (found in the
118518 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
118519 + * in the COPYING file in the root directory of this source tree).
118520 + * You may select, at your option, one of the above-listed licenses.
118521 +****************************************************************** */
118523 +/* **************************************************************
118524 +*  Includes
118525 +****************************************************************/
118526 +#include "../common/compiler.h"
118527 +#include "../common/mem.h"        /* U32, U16, etc. */
118528 +#include "../common/debug.h"      /* assert, DEBUGLOG */
118529 +#include "hist.h"       /* HIST_count_wksp */
118530 +#include "../common/bitstream.h"
118531 +#define FSE_STATIC_LINKING_ONLY
118532 +#include "../common/fse.h"
118533 +#include "../common/error_private.h"
118534 +#define ZSTD_DEPS_NEED_MALLOC
118535 +#define ZSTD_DEPS_NEED_MATH64
118536 +#include "../common/zstd_deps.h"  /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
118539 +/* **************************************************************
118540 +*  Error Management
118541 +****************************************************************/
118542 +#define FSE_isError ERR_isError
118545 +/* **************************************************************
118546 +*  Templates
118547 +****************************************************************/
118548 +/*
118549 +  designed to be included
118550 +  for type-specific functions (template emulation in C)
118551 +  Objective is to write these functions only once, for improved maintenance
118552 +*/
118554 +/* safety checks */
118555 +#ifndef FSE_FUNCTION_EXTENSION
118556 +#  error "FSE_FUNCTION_EXTENSION must be defined"
118557 +#endif
118558 +#ifndef FSE_FUNCTION_TYPE
118559 +#  error "FSE_FUNCTION_TYPE must be defined"
118560 +#endif
118562 +/* Function names */
118563 +#define FSE_CAT(X,Y) X##Y
118564 +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
118565 +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
118568 +/* Function templates */
118570 +/* FSE_buildCTable_wksp() :
118571 + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
118572 + * wkspSize should be sized to handle the worst-case situation, which is `(1 << max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`
118573 + * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
118574 + */
118575 +size_t FSE_buildCTable_wksp(FSE_CTable* ct,
118576 +                      const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
118577 +                            void* workSpace, size_t wkspSize)
118579 +    U32 const tableSize = 1 << tableLog;
118580 +    U32 const tableMask = tableSize - 1;
118581 +    void* const ptr = ct;
118582 +    U16* const tableU16 = ( (U16*) ptr) + 2;
118583 +    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
118584 +    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
118585 +    U32 const step = FSE_TABLESTEP(tableSize);
118587 +    U32* cumul = (U32*)workSpace;
118588 +    FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
118590 +    U32 highThreshold = tableSize-1;
118592 +    if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4-byte aligned */
118593 +    if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
118594 +    /* CTable header */
118595 +    tableU16[-2] = (U16) tableLog;
118596 +    tableU16[-1] = (U16) maxSymbolValue;
118597 +    assert(tableLog < 16);   /* required for threshold strategy to work */
118599 +    /* For explanations on how to distribute symbol values over the table :
118600 +     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
118602 +     #ifdef __clang_analyzer__
118603 +     ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */
118604 +     #endif
118606 +    /* symbol start positions */
118607 +    {   U32 u;
118608 +        cumul[0] = 0;
118609 +        for (u=1; u <= maxSymbolValue+1; u++) {
118610 +            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
118611 +                cumul[u] = cumul[u-1] + 1;
118612 +                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
118613 +            } else {
118614 +                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
118615 +        }   }
118616 +        cumul[maxSymbolValue+1] = tableSize+1;
118617 +    }
118619 +    /* Spread symbols */
118620 +    {   U32 position = 0;
118621 +        U32 symbol;
118622 +        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
118623 +            int nbOccurrences;
118624 +            int const freq = normalizedCounter[symbol];
118625 +            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
118626 +                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
118627 +                position = (position + step) & tableMask;
118628 +                while (position > highThreshold)
118629 +                    position = (position + step) & tableMask;   /* Low proba area */
118630 +        }   }
118632 +        assert(position==0);  /* Must have initialized all positions */
118633 +    }
118635 +    /* Build table */
118636 +    {   U32 u; for (u=0; u<tableSize; u++) {
118637 +        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
118638 +        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */
118639 +    }   }
118641 +    /* Build Symbol Transformation Table */
118642 +    {   unsigned total = 0;
118643 +        unsigned s;
118644 +        for (s=0; s<=maxSymbolValue; s++) {
118645 +            switch (normalizedCounter[s])
118646 +            {
118647 +            case  0:
118648 +                /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
118649 +                symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
118650 +                break;
118652 +            case -1:
118653 +            case  1:
118654 +                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
118655 +                symbolTT[s].deltaFindState = total - 1;
118656 +                total ++;
118657 +                break;
118658 +            default :
118659 +                {
118660 +                    U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
118661 +                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
118662 +                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
118663 +                    symbolTT[s].deltaFindState = total - normalizedCounter[s];
118664 +                    total +=  normalizedCounter[s];
118665 +    }   }   }   }
118667 +#if 0  /* debug : symbol costs */
118668 +    DEBUGLOG(5, "\n --- table statistics : ");
118669 +    {   U32 symbol;
118670 +        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
118671 +            DEBUGLOG(5, "%3u: w=%3i,   maxBits=%u, fracBits=%.2f",
118672 +                symbol, normalizedCounter[symbol],
118673 +                FSE_getMaxNbBits(symbolTT, symbol),
118674 +                (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
118675 +        }
118676 +    }
118677 +#endif
118679 +    return 0;
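As an illustration of the spread step above: FSE_TABLESTEP (reproduced here from the zstd sources) yields a stride that is coprime with the power-of-two table size, so the walk lands on every cell exactly once and ends back at 0. A minimal standalone sketch with an assumed toy tableLog of 4; the low-probability (-1) / highThreshold path is omitted:

#include <stdio.h>

#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)

int main(void)
{
    const unsigned tableLog  = 4;
    const unsigned tableSize = 1u << tableLog;
    const unsigned tableMask = tableSize - 1;
    const unsigned step      = FSE_TABLESTEP(tableSize);   /* 8+2+3 = 13, odd */
    const short normalizedCounter[4] = { 7, 4, 3, 2 };     /* sums to tableSize */
    unsigned char tableSymbol[16];
    unsigned position = 0;

    for (unsigned symbol = 0; symbol < 4; symbol++) {
        for (int occ = 0; occ < normalizedCounter[symbol]; occ++) {
            tableSymbol[position] = (unsigned char)symbol;
            position = (position + step) & tableMask;      /* full-cycle walk */
        }
    }

    printf("position after spread = %u (0 => every cell filled once)\n", position);
    for (unsigned u = 0; u < tableSize; u++) printf("%u ", tableSymbol[u]);
    printf("\n");
    return 0;
}

An odd stride is coprime with any power-of-two table size, which is exactly what the assert(position==0) in the kernel code relies on.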
118685 +#ifndef FSE_COMMONDEFS_ONLY
118688 +/*-**************************************************************
118689 +*  FSE NCount encoding
118690 +****************************************************************/
118691 +size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
118693 +    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
118694 +    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
118697 +static size_t
118698 +FSE_writeNCount_generic (void* header, size_t headerBufferSize,
118699 +                   const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
118700 +                         unsigned writeIsSafe)
118702 +    BYTE* const ostart = (BYTE*) header;
118703 +    BYTE* out = ostart;
118704 +    BYTE* const oend = ostart + headerBufferSize;
118705 +    int nbBits;
118706 +    const int tableSize = 1 << tableLog;
118707 +    int remaining;
118708 +    int threshold;
118709 +    U32 bitStream = 0;
118710 +    int bitCount = 0;
118711 +    unsigned symbol = 0;
118712 +    unsigned const alphabetSize = maxSymbolValue + 1;
118713 +    int previousIs0 = 0;
118715 +    /* Table Size */
118716 +    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
118717 +    bitCount  += 4;
118719 +    /* Init */
118720 +    remaining = tableSize+1;   /* +1 for extra accuracy */
118721 +    threshold = tableSize;
118722 +    nbBits = tableLog+1;
118724 +    while ((symbol < alphabetSize) && (remaining>1)) {  /* stops at 1 */
118725 +        if (previousIs0) {
118726 +            unsigned start = symbol;
118727 +            while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
118728 +            if (symbol == alphabetSize) break;   /* incorrect distribution */
118729 +            while (symbol >= start+24) {
118730 +                start+=24;
118731 +                bitStream += 0xFFFFU << bitCount;
118732 +                if ((!writeIsSafe) && (out > oend-2))
118733 +                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
118734 +                out[0] = (BYTE) bitStream;
118735 +                out[1] = (BYTE)(bitStream>>8);
118736 +                out+=2;
118737 +                bitStream>>=16;
118738 +            }
118739 +            while (symbol >= start+3) {
118740 +                start+=3;
118741 +                bitStream += 3 << bitCount;
118742 +                bitCount += 2;
118743 +            }
118744 +            bitStream += (symbol-start) << bitCount;
118745 +            bitCount += 2;
118746 +            if (bitCount>16) {
118747 +                if ((!writeIsSafe) && (out > oend - 2))
118748 +                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
118749 +                out[0] = (BYTE)bitStream;
118750 +                out[1] = (BYTE)(bitStream>>8);
118751 +                out += 2;
118752 +                bitStream >>= 16;
118753 +                bitCount -= 16;
118754 +        }   }
118755 +        {   int count = normalizedCounter[symbol++];
118756 +            int const max = (2*threshold-1) - remaining;
118757 +            remaining -= count < 0 ? -count : count;
118758 +            count++;   /* +1 for extra accuracy */
118759 +            if (count>=threshold)
118760 +                count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
118761 +            bitStream += count << bitCount;
118762 +            bitCount  += nbBits;
118763 +            bitCount  -= (count<max);
118764 +            previousIs0  = (count==1);
118765 +            if (remaining<1) return ERROR(GENERIC);
118766 +            while (remaining<threshold) { nbBits--; threshold>>=1; }
118767 +        }
118768 +        if (bitCount>16) {
118769 +            if ((!writeIsSafe) && (out > oend - 2))
118770 +                return ERROR(dstSize_tooSmall);   /* Buffer overflow */
118771 +            out[0] = (BYTE)bitStream;
118772 +            out[1] = (BYTE)(bitStream>>8);
118773 +            out += 2;
118774 +            bitStream >>= 16;
118775 +            bitCount -= 16;
118776 +    }   }
118778 +    if (remaining != 1)
118779 +        return ERROR(GENERIC);  /* incorrect normalized distribution */
118780 +    assert(symbol <= alphabetSize);
118782 +    /* flush remaining bitStream */
118783 +    if ((!writeIsSafe) && (out > oend - 2))
118784 +        return ERROR(dstSize_tooSmall);   /* Buffer overflow */
118785 +    out[0] = (BYTE)bitStream;
118786 +    out[1] = (BYTE)(bitStream>>8);
118787 +    out+= (bitCount+7) /8;
118789 +    return (out-ostart);
118793 +size_t FSE_writeNCount (void* buffer, size_t bufferSize,
118794 +                  const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
118796 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
118797 +    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
118799 +    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
118800 +        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
118802 +    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
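The NCount header spends fewer bits as it goes: every time the remaining probability mass falls below the current threshold, the threshold halves and the per-count precision nbBits drops. A standalone trace of just that bookkeeping, with an assumed toy distribution (zero-run escapes and the actual byte emission omitted):

#include <stdio.h>

int main(void)
{
    const unsigned tableLog = 4;
    const short norm[5] = { 7, 4, 3, 1, 1 };   /* sums to 1<<tableLog */
    int remaining = (1 << tableLog) + 1;       /* +1 for extra accuracy */
    int threshold = 1 << tableLog;
    int nbBits    = tableLog + 1;

    for (unsigned s = 0; s < 5 && remaining > 1; s++) {
        int count = norm[s] < 0 ? -norm[s] : norm[s];
        remaining -= count;
        printf("symbol %u: count=%d written with %d-bit precision (threshold=%d)\n",
               s, norm[s], nbBits, threshold);
        while (remaining < threshold) { nbBits--; threshold >>= 1; }
    }
    return 0;
}

The trace shows precision dropping from 5 bits for the first count down to 2 bits for the last ones, which is why large counts are cheap to describe when listed first by the shrinking mass.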
118806 +/*-**************************************************************
118807 +*  FSE Compression Code
118808 +****************************************************************/
118810 +FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
118812 +    size_t size;
118813 +    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
118814 +    size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
118815 +    return (FSE_CTable*)ZSTD_malloc(size);
118818 +void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
118820 +/* provides the minimum logSize to safely represent a distribution */
118821 +static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
118823 +    U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
118824 +    U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
118825 +    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
118826 +    assert(srcSize > 1); /* Not supported, RLE should be used instead */
118827 +    return minBits;
118830 +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
118832 +    U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
118833 +    U32 tableLog = maxTableLog;
118834 +    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
118835 +    assert(srcSize > 1); /* Not supported, RLE should be used instead */
118836 +    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
118837 +    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
118838 +    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
118839 +    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
118840 +    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
118841 +    return tableLog;
118844 +unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
118846 +    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
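A worked example of the clamping above, as a standalone sketch. BIT_highbit32 is replaced by a portable loop, FSE_DEFAULT_TABLELOG is taken to be 11 as in the zstd sources, and the final FSE_MIN/FSE_MAX_TABLELOG clamps are left out since they would not trigger for these toy values:

#include <stdio.h>
#include <stddef.h>

static unsigned highbit32(unsigned v) { unsigned n = 0; while (v >>= 1) n++; return n; }

int main(void)
{
    const size_t   srcSize = 4096;
    const unsigned maxSymbolValue = 255;
    const unsigned maxTableLog = 0;             /* 0 => use the default */
    const unsigned minus = 2;                   /* FSE passes 2, HUF passes 1 */

    unsigned maxBitsSrc = highbit32((unsigned)(srcSize - 1)) - minus;   /* 11-2 = 9 */
    unsigned minBitsSrc = highbit32((unsigned)srcSize) + 1;             /* 13 */
    unsigned minBitsSym = highbit32(maxSymbolValue) + 2;                /* 9 */
    unsigned minBits    = minBitsSrc < minBitsSym ? minBitsSrc : minBitsSym;
    unsigned tableLog   = maxTableLog ? maxTableLog : 11;  /* default, assumed = 11 */

    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* accuracy can be reduced */
    if (minBits > tableLog)    tableLog = minBits;      /* floor to cover all symbols */
    printf("tableLog = %u\n", tableLog);                /* 9 for this toy input */
    return 0;
}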
118849 +/* Secondary normalization method.
118850 +   To be used when primary method fails. */
118852 +static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
118854 +    short const NOT_YET_ASSIGNED = -2;
118855 +    U32 s;
118856 +    U32 distributed = 0;
118857 +    U32 ToDistribute;
118859 +    /* Init */
118860 +    U32 const lowThreshold = (U32)(total >> tableLog);
118861 +    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
118863 +    for (s=0; s<=maxSymbolValue; s++) {
118864 +        if (count[s] == 0) {
118865 +            norm[s]=0;
118866 +            continue;
118867 +        }
118868 +        if (count[s] <= lowThreshold) {
118869 +            norm[s] = lowProbCount;
118870 +            distributed++;
118871 +            total -= count[s];
118872 +            continue;
118873 +        }
118874 +        if (count[s] <= lowOne) {
118875 +            norm[s] = 1;
118876 +            distributed++;
118877 +            total -= count[s];
118878 +            continue;
118879 +        }
118881 +        norm[s]=NOT_YET_ASSIGNED;
118882 +    }
118883 +    ToDistribute = (1 << tableLog) - distributed;
118885 +    if (ToDistribute == 0)
118886 +        return 0;
118888 +    if ((total / ToDistribute) > lowOne) {
118889 +        /* risk of rounding to zero */
118890 +        lowOne = (U32)((total * 3) / (ToDistribute * 2));
118891 +        for (s=0; s<=maxSymbolValue; s++) {
118892 +            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
118893 +                norm[s] = 1;
118894 +                distributed++;
118895 +                total -= count[s];
118896 +                continue;
118897 +        }   }
118898 +        ToDistribute = (1 << tableLog) - distributed;
118899 +    }
118901 +    if (distributed == maxSymbolValue+1) {
118902 +        /* all values are pretty poor;
118903 +           probably incompressible data (should have already been detected);
118904 +           find max, then give all remaining points to max */
118905 +        U32 maxV = 0, maxC = 0;
118906 +        for (s=0; s<=maxSymbolValue; s++)
118907 +            if (count[s] > maxC) { maxV=s; maxC=count[s]; }
118908 +        norm[maxV] += (short)ToDistribute;
118909 +        return 0;
118910 +    }
118912 +    if (total == 0) {
118913 +        /* all of the symbols were low enough for the lowOne or lowThreshold */
118914 +        for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
118915 +            if (norm[s] > 0) { ToDistribute--; norm[s]++; }
118916 +        return 0;
118917 +    }
118919 +    {   U64 const vStepLog = 62 - tableLog;
118920 +        U64 const mid = (1ULL << (vStepLog-1)) - 1;
118921 +        U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total);   /* scale on remaining */
118922 +        U64 tmpTotal = mid;
118923 +        for (s=0; s<=maxSymbolValue; s++) {
118924 +            if (norm[s]==NOT_YET_ASSIGNED) {
118925 +                U64 const end = tmpTotal + (count[s] * rStep);
118926 +                U32 const sStart = (U32)(tmpTotal >> vStepLog);
118927 +                U32 const sEnd = (U32)(end >> vStepLog);
118928 +                U32 const weight = sEnd - sStart;
118929 +                if (weight < 1)
118930 +                    return ERROR(GENERIC);
118931 +                norm[s] = (short)weight;
118932 +                tmpTotal = end;
118933 +    }   }   }
118935 +    return 0;
118938 +size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
118939 +                           const unsigned* count, size_t total,
118940 +                           unsigned maxSymbolValue, unsigned useLowProbCount)
118942 +    /* Sanity checks */
118943 +    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
118944 +    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
118945 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
118946 +    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */
118948 +    {   static U32 const rtbTable[] = {     0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
118949 +        short const lowProbCount = useLowProbCount ? -1 : 1;
118950 +        U64 const scale = 62 - tableLog;
118951 +        U64 const step = ZSTD_div64((U64)1<<62, (U32)total);   /* <== here, one division ! */
118952 +        U64 const vStep = 1ULL<<(scale-20);
118953 +        int stillToDistribute = 1<<tableLog;
118954 +        unsigned s;
118955 +        unsigned largest=0;
118956 +        short largestP=0;
118957 +        U32 lowThreshold = (U32)(total >> tableLog);
118959 +        for (s=0; s<=maxSymbolValue; s++) {
118960 +            if (count[s] == total) return 0;   /* rle special case */
118961 +            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
118962 +            if (count[s] <= lowThreshold) {
118963 +                normalizedCounter[s] = lowProbCount;
118964 +                stillToDistribute--;
118965 +            } else {
118966 +                short proba = (short)((count[s]*step) >> scale);
118967 +                if (proba<8) {
118968 +                    U64 restToBeat = vStep * rtbTable[proba];
118969 +                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
118970 +                }
118971 +                if (proba > largestP) { largestP=proba; largest=s; }
118972 +                normalizedCounter[s] = proba;
118973 +                stillToDistribute -= proba;
118974 +        }   }
118975 +        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
118976 +            /* corner case, need another normalization method */
118977 +            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
118978 +            if (FSE_isError(errorCode)) return errorCode;
118979 +        }
118980 +        else normalizedCounter[largest] += (short)stillToDistribute;
118981 +    }
118983 +#if 0
118984 +    {   /* Print Table (debug) */
118985 +        U32 s;
118986 +        U32 nTotal = 0;
118987 +        for (s=0; s<=maxSymbolValue; s++)
118988 +            RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
118989 +        for (s=0; s<=maxSymbolValue; s++)
118990 +            nTotal += abs(normalizedCounter[s]);
118991 +        if (nTotal != (1U<<tableLog))
118992 +            RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
118993 +        getchar();
118994 +    }
118995 +#endif
118997 +    return tableLog;
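The heart of the normalization is a single division: step = 2^62 / total, after which each count maps to its slot share with one multiply and one shift. A standalone sketch on an assumed toy histogram; the rtbTable rounding, the low-probability paths, and the FSE_normalizeM2 fallback are omitted:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const unsigned tableLog = 6;
    const unsigned count[4] = { 600, 300, 80, 20 };      /* histogram, total = 1000 */
    const uint64_t total = 1000;
    const unsigned scale = 62 - tableLog;
    const uint64_t step  = ((uint64_t)1 << 62) / total;  /* the single division */
    int stillToDistribute = 1 << tableLog;
    short norm[4];

    for (int s = 0; s < 4; s++) {
        norm[s] = (short)((count[s] * step) >> scale);
        stillToDistribute -= norm[s];
        printf("count=%3u -> norm=%2d\n", count[s], norm[s]);
    }
    norm[0] += (short)stillToDistribute;   /* leftover slots go to the largest symbol */
    printf("leftover=%d, norm total=%d (== 1<<tableLog)\n",
           stillToDistribute, norm[0] + norm[1] + norm[2] + norm[3]);
    return 0;
}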
119001 +/* fake FSE_CTable, for raw (uncompressed) input */
119002 +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
119004 +    const unsigned tableSize = 1 << nbBits;
119005 +    const unsigned tableMask = tableSize - 1;
119006 +    const unsigned maxSymbolValue = tableMask;
119007 +    void* const ptr = ct;
119008 +    U16* const tableU16 = ( (U16*) ptr) + 2;
119009 +    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1);   /* assumption : tableLog >= 1 */
119010 +    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
119011 +    unsigned s;
119013 +    /* Sanity checks */
119014 +    if (nbBits < 1) return ERROR(GENERIC);             /* min size */
119016 +    /* header */
119017 +    tableU16[-2] = (U16) nbBits;
119018 +    tableU16[-1] = (U16) maxSymbolValue;
119020 +    /* Build table */
119021 +    for (s=0; s<tableSize; s++)
119022 +        tableU16[s] = (U16)(tableSize + s);
119024 +    /* Build Symbol Transformation Table */
119025 +    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
119026 +        for (s=0; s<=maxSymbolValue; s++) {
119027 +            symbolTT[s].deltaNbBits = deltaNbBits;
119028 +            symbolTT[s].deltaFindState = s-1;
119029 +    }   }
119031 +    return 0;
119034 +/* fake FSE_CTable, for rle input (always same symbol) */
119035 +size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
119037 +    void* ptr = ct;
119038 +    U16* tableU16 = ( (U16*) ptr) + 2;
119039 +    void* FSCTptr = (U32*)ptr + 2;
119040 +    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
119042 +    /* header */
119043 +    tableU16[-2] = (U16) 0;
119044 +    tableU16[-1] = (U16) symbolValue;
119046 +    /* Build table */
119047 +    tableU16[0] = 0;
119048 +    tableU16[1] = 0;   /* just in case */
119050 +    /* Build Symbol Transformation Table */
119051 +    symbolTT[symbolValue].deltaNbBits = 0;
119052 +    symbolTT[symbolValue].deltaFindState = 0;
119054 +    return 0;
119058 +static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
119059 +                           const void* src, size_t srcSize,
119060 +                           const FSE_CTable* ct, const unsigned fast)
119062 +    const BYTE* const istart = (const BYTE*) src;
119063 +    const BYTE* const iend = istart + srcSize;
119064 +    const BYTE* ip=iend;
119066 +    BIT_CStream_t bitC;
119067 +    FSE_CState_t CState1, CState2;
119069 +    /* init */
119070 +    if (srcSize <= 2) return 0;
119071 +    { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
119072 +      if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
119074 +#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
119076 +    if (srcSize & 1) {
119077 +        FSE_initCState2(&CState1, ct, *--ip);
119078 +        FSE_initCState2(&CState2, ct, *--ip);
119079 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
119080 +        FSE_FLUSHBITS(&bitC);
119081 +    } else {
119082 +        FSE_initCState2(&CState2, ct, *--ip);
119083 +        FSE_initCState2(&CState1, ct, *--ip);
119084 +    }
119086 +    /* join to mod 4 */
119087 +    srcSize -= 2;
119088 +    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */
119089 +        FSE_encodeSymbol(&bitC, &CState2, *--ip);
119090 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
119091 +        FSE_FLUSHBITS(&bitC);
119092 +    }
119094 +    /* 2 or 4 encoding per loop */
119095 +    while ( ip>istart ) {
119097 +        FSE_encodeSymbol(&bitC, &CState2, *--ip);
119099 +        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
119100 +            FSE_FLUSHBITS(&bitC);
119102 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
119104 +        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */
119105 +            FSE_encodeSymbol(&bitC, &CState2, *--ip);
119106 +            FSE_encodeSymbol(&bitC, &CState1, *--ip);
119107 +        }
119109 +        FSE_FLUSHBITS(&bitC);
119110 +    }
119112 +    FSE_flushCState(&bitC, &CState2);
119113 +    FSE_flushCState(&bitC, &CState1);
119114 +    return BIT_closeCStream(&bitC);
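The two interleaved states exist so that the two serial state-update chains can overlap on an out-of-order core. A standalone trace of the consumption order only (symbols go back to front; the wide-container four-per-loop unrolling and the actual bit writes are omitted):

#include <stdio.h>

int main(void)
{
    const char src[] = "ABCDEFG";   /* srcSize = 7, odd */
    int ip = 7;

    printf("init CState1 <- %c\n", src[--ip]);
    printf("init CState2 <- %c\n", src[--ip]);
    printf("enc  CState1 <- %c\n", src[--ip]);   /* odd length: one extra on state 1 */
    while (ip > 0) {                             /* then alternate, two per loop */
        printf("enc  CState2 <- %c\n", src[--ip]);
        printf("enc  CState1 <- %c\n", src[--ip]);
    }
    printf("flush CState2, then CState1\n");
    return 0;
}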
119117 +size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
119118 +                           const void* src, size_t srcSize,
119119 +                           const FSE_CTable* ct)
119121 +    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
119123 +    if (fast)
119124 +        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
119125 +    else
119126 +        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
119130 +size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
119133 +#endif   /* FSE_COMMONDEFS_ONLY */
119134 diff --git a/lib/zstd/compress/hist.c b/lib/zstd/compress/hist.c
119135 new file mode 100644
119136 index 000000000000..5fc30f766591
119137 --- /dev/null
119138 +++ b/lib/zstd/compress/hist.c
119139 @@ -0,0 +1,164 @@
119140 +/* ******************************************************************
119141 + * hist : Histogram functions
119142 + * part of Finite State Entropy project
119143 + * Copyright (c) Yann Collet, Facebook, Inc.
119145 + *  You can contact the author at :
119146 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
119147 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
119149 + * This source code is licensed under both the BSD-style license (found in the
119150 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
119151 + * in the COPYING file in the root directory of this source tree).
119152 + * You may select, at your option, one of the above-listed licenses.
119153 +****************************************************************** */
119155 +/* --- dependencies --- */
119156 +#include "../common/mem.h"             /* U32, BYTE, etc. */
119157 +#include "../common/debug.h"           /* assert, DEBUGLOG */
119158 +#include "../common/error_private.h"   /* ERROR */
119159 +#include "hist.h"
119162 +/* --- Error management --- */
119163 +unsigned HIST_isError(size_t code) { return ERR_isError(code); }
119165 +/*-**************************************************************
119166 + *  Histogram functions
119167 + ****************************************************************/
119168 +unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
119169 +                           const void* src, size_t srcSize)
119171 +    const BYTE* ip = (const BYTE*)src;
119172 +    const BYTE* const end = ip + srcSize;
119173 +    unsigned maxSymbolValue = *maxSymbolValuePtr;
119174 +    unsigned largestCount=0;
119176 +    ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
119177 +    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
119179 +    while (ip<end) {
119180 +        assert(*ip <= maxSymbolValue);
119181 +        count[*ip++]++;
119182 +    }
119184 +    while (!count[maxSymbolValue]) maxSymbolValue--;
119185 +    *maxSymbolValuePtr = maxSymbolValue;
119187 +    {   U32 s;
119188 +        for (s=0; s<=maxSymbolValue; s++)
119189 +            if (count[s] > largestCount) largestCount = count[s];
119190 +    }
119192 +    return largestCount;
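A standalone sketch of the same logic on an assumed toy input, showing how *maxSymbolValuePtr shrinks to the largest byte value actually present:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    const unsigned char src[] = "abracadabra";
    const size_t srcSize = sizeof(src) - 1;     /* 11, drop the NUL */
    unsigned count[256] = {0};
    unsigned maxSymbolValue = 255, largestCount = 0;

    for (size_t i = 0; i < srcSize; i++) count[src[i]]++;
    while (!count[maxSymbolValue]) maxSymbolValue--;    /* shrink to 'r' = 0x72 */
    for (unsigned s = 0; s <= maxSymbolValue; s++)
        if (count[s] > largestCount) largestCount = count[s];

    printf("maxSymbolValue=0x%02X largestCount=%u\n",   /* 0x72 and 5 ('a') */
           maxSymbolValue, largestCount);
    return 0;
}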
119195 +typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
119197 +/* HIST_count_parallel_wksp() :
119198 + * store histogram into 4 intermediate tables, recombined at the end.
119199 + * this design makes better use of OoO cpus,
119200 + * and is noticeably faster when some values are heavily repeated.
119201 + * But it needs some additional workspace for intermediate tables.
119202 + * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
119203 + * @return : largest histogram frequency,
119204 + *           or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
119205 +static size_t HIST_count_parallel_wksp(
119206 +                                unsigned* count, unsigned* maxSymbolValuePtr,
119207 +                                const void* source, size_t sourceSize,
119208 +                                HIST_checkInput_e check,
119209 +                                U32* const workSpace)
119211 +    const BYTE* ip = (const BYTE*)source;
119212 +    const BYTE* const iend = ip+sourceSize;
119213 +    size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
119214 +    unsigned max=0;
119215 +    U32* const Counting1 = workSpace;
119216 +    U32* const Counting2 = Counting1 + 256;
119217 +    U32* const Counting3 = Counting2 + 256;
119218 +    U32* const Counting4 = Counting3 + 256;
119220 +    /* safety checks */
119221 +    assert(*maxSymbolValuePtr <= 255);
119222 +    if (!sourceSize) {
119223 +        ZSTD_memset(count, 0, countSize);
119224 +        *maxSymbolValuePtr = 0;
119225 +        return 0;
119226 +    }
119227 +    ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
119229 +    /* by stripes of 16 bytes */
119230 +    {   U32 cached = MEM_read32(ip); ip += 4;
119231 +        while (ip < iend-15) {
119232 +            U32 c = cached; cached = MEM_read32(ip); ip += 4;
119233 +            Counting1[(BYTE) c     ]++;
119234 +            Counting2[(BYTE)(c>>8) ]++;
119235 +            Counting3[(BYTE)(c>>16)]++;
119236 +            Counting4[       c>>24 ]++;
119237 +            c = cached; cached = MEM_read32(ip); ip += 4;
119238 +            Counting1[(BYTE) c     ]++;
119239 +            Counting2[(BYTE)(c>>8) ]++;
119240 +            Counting3[(BYTE)(c>>16)]++;
119241 +            Counting4[       c>>24 ]++;
119242 +            c = cached; cached = MEM_read32(ip); ip += 4;
119243 +            Counting1[(BYTE) c     ]++;
119244 +            Counting2[(BYTE)(c>>8) ]++;
119245 +            Counting3[(BYTE)(c>>16)]++;
119246 +            Counting4[       c>>24 ]++;
119247 +            c = cached; cached = MEM_read32(ip); ip += 4;
119248 +            Counting1[(BYTE) c     ]++;
119249 +            Counting2[(BYTE)(c>>8) ]++;
119250 +            Counting3[(BYTE)(c>>16)]++;
119251 +            Counting4[       c>>24 ]++;
119252 +        }
119253 +        ip-=4;
119254 +    }
119256 +    /* finish last symbols */
119257 +    while (ip<iend) Counting1[*ip++]++;
119259 +    {   U32 s;
119260 +        for (s=0; s<256; s++) {
119261 +            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
119262 +            if (Counting1[s] > max) max = Counting1[s];
119263 +    }   }
119265 +    {   unsigned maxSymbolValue = 255;
119266 +        while (!Counting1[maxSymbolValue]) maxSymbolValue--;
119267 +        if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
119268 +        *maxSymbolValuePtr = maxSymbolValue;
119269 +        ZSTD_memmove(count, Counting1, countSize);   /* in case count & Counting1 are overlapping */
119270 +    }
119271 +    return (size_t)max;
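The point of the four tables: with one table, back-to-back increments of a repeated byte form a store-to-load dependency chain; splitting the four bytes of each 32-bit load across four tables removes it. A standalone sketch on assumed toy data (the cached-read pipelining and tail handling are simplified):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    const unsigned char src[8] = { 1, 1, 1, 1, 2, 2, 2, 2 };
    uint32_t c1[256] = {0}, c2[256] = {0}, c3[256] = {0}, c4[256] = {0};

    for (size_t i = 0; i + 4 <= sizeof(src); i += 4) {
        uint32_t w;
        memcpy(&w, src + i, 4);          /* one 32-bit load, alignment-safe */
        c1[(uint8_t) w       ]++;        /* four independent increments: */
        c2[(uint8_t)(w >> 8) ]++;        /* no store-to-load dependency */
        c3[(uint8_t)(w >> 16)]++;
        c4[          w >> 24 ]++;
    }
    for (int s = 0; s < 256; s++) c1[s] += c2[s] + c3[s] + c4[s];   /* recombine */

    printf("count[1]=%u count[2]=%u\n", (unsigned)c1[1], (unsigned)c1[2]);  /* 4 and 4 */
    return 0;
}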
119274 +/* HIST_countFast_wksp() :
119275 + * Same as HIST_countFast(), but using an externally provided scratch buffer.
119276 + * `workSpace` is a writable buffer which must be 4-byte aligned,
119277 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
119278 + */
119279 +size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
119280 +                          const void* source, size_t sourceSize,
119281 +                          void* workSpace, size_t workSpaceSize)
119283 +    if (sourceSize < 1500) /* heuristic threshold */
119284 +        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
119285 +    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on a 4-byte boundary */
119286 +    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
119287 +    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
119290 +/* HIST_count_wksp() :
119291 + * Same as HIST_count(), but using an externally provided scratch buffer.
119292 + * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned */
119293 +size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
119294 +                       const void* source, size_t sourceSize,
119295 +                       void* workSpace, size_t workSpaceSize)
119297 +    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on a 4-byte boundary */
119298 +    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
119299 +    if (*maxSymbolValuePtr < 255)
119300 +        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
119301 +    *maxSymbolValuePtr = 255;
119302 +    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
119304 diff --git a/lib/zstd/compress/hist.h b/lib/zstd/compress/hist.h
119305 new file mode 100644
119306 index 000000000000..228ed48a71de
119307 --- /dev/null
119308 +++ b/lib/zstd/compress/hist.h
119309 @@ -0,0 +1,75 @@
119310 +/* ******************************************************************
119311 + * hist : Histogram functions
119312 + * part of Finite State Entropy project
119313 + * Copyright (c) Yann Collet, Facebook, Inc.
119315 + *  You can contact the author at :
119316 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
119317 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
119319 + * This source code is licensed under both the BSD-style license (found in the
119320 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
119321 + * in the COPYING file in the root directory of this source tree).
119322 + * You may select, at your option, one of the above-listed licenses.
119323 +****************************************************************** */
119325 +/* --- dependencies --- */
119326 +#include "../common/zstd_deps.h"   /* size_t */
119329 +/* --- simple histogram functions --- */
119331 +/*! HIST_count():
119332 + *  Provides the precise count of each byte within a table 'count'.
119333 + * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
119334 + *  Updates *maxSymbolValuePtr with actual largest symbol value detected.
119335 + * @return : count of the most frequent symbol (which isn't identified).
119336 + *           or an error code, which can be tested using HIST_isError().
119337 + *           note : if return == srcSize, there is only one symbol.
119338 + */
119339 +size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
119340 +                  const void* src, size_t srcSize);
119342 +unsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */
119345 +/* --- advanced histogram functions --- */
119347 +#define HIST_WKSP_SIZE_U32 1024
119348 +#define HIST_WKSP_SIZE    (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
119349 +/** HIST_count_wksp() :
119350 + *  Same as HIST_count(), but using an externally provided scratch buffer.
119351 + *  Benefit is this function will use very little stack space.
119352 + * `workSpace` is a writable buffer which must be 4-byte aligned,
119353 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
119354 + */
119355 +size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
119356 +                       const void* src, size_t srcSize,
119357 +                       void* workSpace, size_t workSpaceSize);
119359 +/** HIST_countFast() :
119360 + *  same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
119361 + *  This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
119362 + */
119363 +size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
119364 +                      const void* src, size_t srcSize);
119366 +/** HIST_countFast_wksp() :
119367 + *  Same as HIST_countFast(), but using an externally provided scratch buffer.
119368 + * `workSpace` is a writable buffer which must be 4-byte aligned,
119369 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
119370 + */
119371 +size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
119372 +                           const void* src, size_t srcSize,
119373 +                           void* workSpace, size_t workSpaceSize);
119375 +/*! HIST_count_simple() :
119376 + *  Same as HIST_countFast(), this function is unsafe,
119377 + *  and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
119378 + *  It is also a bit slower for large inputs.
119379 + *  However, it does not need any additional memory (not even on stack).
119380 + * @return : count of the most frequent symbol.
119381 + *  Note this function doesn't produce any error (i.e. it must succeed).
119382 + */
119383 +unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
119384 +                           const void* src, size_t srcSize);
119385 diff --git a/lib/zstd/compress/huf_compress.c b/lib/zstd/compress/huf_compress.c
119386 new file mode 100644
119387 index 000000000000..ff0e76a2e0e3
119388 --- /dev/null
119389 +++ b/lib/zstd/compress/huf_compress.c
119390 @@ -0,0 +1,901 @@
119391 +/* ******************************************************************
119392 + * Huffman encoder, part of New Generation Entropy library
119393 + * Copyright (c) Yann Collet, Facebook, Inc.
119395 + *  You can contact the author at :
119396 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
119397 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
119399 + * This source code is licensed under both the BSD-style license (found in the
119400 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
119401 + * in the COPYING file in the root directory of this source tree).
119402 + * You may select, at your option, one of the above-listed licenses.
119403 +****************************************************************** */
119405 +/* **************************************************************
119406 +*  Compiler specifics
119407 +****************************************************************/
119410 +/* **************************************************************
119411 +*  Includes
119412 +****************************************************************/
119413 +#include "../common/zstd_deps.h"     /* ZSTD_memcpy, ZSTD_memset */
119414 +#include "../common/compiler.h"
119415 +#include "../common/bitstream.h"
119416 +#include "hist.h"
119417 +#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
119418 +#include "../common/fse.h"        /* header compression */
119419 +#define HUF_STATIC_LINKING_ONLY
119420 +#include "../common/huf.h"
119421 +#include "../common/error_private.h"
119424 +/* **************************************************************
119425 +*  Error Management
119426 +****************************************************************/
119427 +#define HUF_isError ERR_isError
119428 +#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
119431 +/* **************************************************************
119432 +*  Utils
119433 +****************************************************************/
119434 +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
119436 +    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
119440 +/* *******************************************************
119441 +*  HUF : Huffman block compression
119442 +*********************************************************/
119443 +/* HUF_compressWeights() :
119444 + * Same as FSE_compress(), but dedicated to huff0's weights compression.
119445 + * The use case needs much less stack memory.
119446 + * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
119447 + */
119448 +#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
119450 +typedef struct {
119451 +    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
119452 +    U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
119453 +    unsigned count[HUF_TABLELOG_MAX+1];
119454 +    S16 norm[HUF_TABLELOG_MAX+1];
119455 +} HUF_CompressWeightsWksp;
119457 +static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
119459 +    BYTE* const ostart = (BYTE*) dst;
119460 +    BYTE* op = ostart;
119461 +    BYTE* const oend = ostart + dstSize;
119463 +    unsigned maxSymbolValue = HUF_TABLELOG_MAX;
119464 +    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
119465 +    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)workspace;
119467 +    if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
119469 +    /* init conditions */
119470 +    if (wtSize <= 1) return 0;  /* Not compressible */
119472 +    /* Scan input and build symbol stats */
119473 +    {   unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize);   /* never fails */
119474 +        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
119475 +        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
119476 +    }
119478 +    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
119479 +    CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );
119481 +    /* Write table description header */
119482 +    {   CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
119483 +        op += hSize;
119484 +    }
119486 +    /* Compress */
119487 +    CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
119488 +    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
119489 +        if (cSize == 0) return 0;   /* not enough space for compressed data */
119490 +        op += cSize;
119491 +    }
119493 +    return (size_t)(op-ostart);
119497 +typedef struct {
119498 +    HUF_CompressWeightsWksp wksp;
119499 +    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
119500 +    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
119501 +} HUF_WriteCTableWksp;
119503 +size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
119504 +                            const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
119505 +                            void* workspace, size_t workspaceSize)
119507 +    BYTE* op = (BYTE*)dst;
119508 +    U32 n;
119509 +    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)workspace;
119511 +    /* check conditions */
119512 +    if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
119513 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
119515 +    /* convert to weight */
119516 +    wksp->bitsToWeight[0] = 0;
119517 +    for (n=1; n<huffLog+1; n++)
119518 +        wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
119519 +    for (n=0; n<maxSymbolValue; n++)
119520 +        wksp->huffWeight[n] = wksp->bitsToWeight[CTable[n].nbBits];
119522 +    /* attempt weights compression by FSE */
119523 +    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
119524 +        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
119525 +            op[0] = (BYTE)hSize;
119526 +            return hSize+1;
119527 +    }   }
119529 +    /* write raw values as 4-bits (max : 15) */
119530 +    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
119531 +    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
119532 +    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
119533 +    wksp->huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
119534 +    for (n=0; n<maxSymbolValue; n+=2)
119535 +        op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
119536 +    return ((maxSymbolValue+1)/2) + 1;
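A standalone sketch of the raw fallback above: weights never exceed HUF_TABLELOG_MAX, so two fit per byte, and a first byte >= 128 encodes both the mode and the symbol count. Toy weights assumed:

#include <stdio.h>

int main(void)
{
    const unsigned maxSymbolValue = 5;                    /* weights for symbols 0..4 */
    unsigned char huffWeight[6] = { 4, 3, 3, 2, 1, 0 };   /* [5] is the zero pad */
    unsigned char op[4];

    op[0] = (unsigned char)(128 + (maxSymbolValue - 1));  /* >=128 flags raw 4-bit mode */
    for (unsigned n = 0; n < maxSymbolValue; n += 2)
        op[n/2 + 1] = (unsigned char)((huffWeight[n] << 4) + huffWeight[n+1]);

    for (unsigned i = 0; i < (maxSymbolValue+1)/2 + 1; i++) printf("%02X ", op[i]);
    printf("  (header size = %u bytes)\n", (maxSymbolValue+1)/2 + 1);   /* 84 43 32 10, 4 bytes */
    return 0;
}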
119539 +/*! HUF_writeCTable() :
119540 +    `CTable` : Huffman tree to save, using huf representation.
119541 +    @return : size of saved CTable */
119542 +size_t HUF_writeCTable (void* dst, size_t maxDstSize,
119543 +                        const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
119545 +    HUF_WriteCTableWksp wksp;
119546 +    return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
119550 +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
119552 +    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
119553 +    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
119554 +    U32 tableLog = 0;
119555 +    U32 nbSymbols = 0;
119557 +    /* get symbol weights */
119558 +    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
119559 +    *hasZeroWeights = (rankVal[0] > 0);
119561 +    /* check result */
119562 +    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
119563 +    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
119565 +    /* Prepare base value per rank */
119566 +    {   U32 n, nextRankStart = 0;
119567 +        for (n=1; n<=tableLog; n++) {
119568 +            U32 curr = nextRankStart;
119569 +            nextRankStart += (rankVal[n] << (n-1));
119570 +            rankVal[n] = curr;
119571 +    }   }
119573 +    /* fill nbBits */
119574 +    {   U32 n; for (n=0; n<nbSymbols; n++) {
119575 +            const U32 w = huffWeight[n];
119576 +            CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
119577 +    }   }
119579 +    /* fill val */
119580 +    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};  /* support w=0=>n=tableLog+1 */
119581 +        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
119582 +        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
119583 +        /* determine starting value per rank */
119584 +        valPerRank[tableLog+1] = 0;   /* for w==0 */
119585 +        {   U16 min = 0;
119586 +            U32 n; for (n=tableLog; n>0; n--) {  /* start at n=tablelog <-> w=1 */
119587 +                valPerRank[n] = min;     /* get starting value within each rank */
119588 +                min += nbPerRank[n];
119589 +                min >>= 1;
119590 +        }   }
119591 +        /* assign value within rank, symbol order */
119592 +        { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
119593 +    }
119595 +    *maxSymbolValuePtr = nbSymbols - 1;
119596 +    return readSize;
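The 'fill val' step assigns canonical code values rank by rank, longest codes first, each rank starting where half of the deeper rank's span ends. A standalone sketch with an assumed, Kraft-complete set of code lengths:

#include <stdio.h>

int main(void)
{
    const unsigned tableLog = 3;
    const unsigned char nbBits[5] = { 2, 2, 2, 3, 3 };   /* Kraft-complete lengths */
    unsigned short nbPerRank[8]  = {0};
    unsigned short valPerRank[8] = {0};
    unsigned short val[5];

    for (int n = 0; n < 5; n++) nbPerRank[nbBits[n]]++;
    {   unsigned short min = 0;
        for (unsigned n = tableLog; n > 0; n--) {        /* longest codes first */
            valPerRank[n] = min;
            min += nbPerRank[n];
            min >>= 1;                                   /* rank n-1 starts past half */
        }
    }
    for (int n = 0; n < 5; n++) val[n] = valPerRank[nbBits[n]]++;  /* symbol order */
    for (int n = 0; n < 5; n++) printf("sym %d: %u bits, val %u\n", n, nbBits[n], val[n]);
    return 0;
}

For these lengths the 3-bit codes come out as 000 and 001, and the 2-bit codes as 01, 10, 11: prefix-free by construction.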
119599 +U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
119601 +    const HUF_CElt* table = (const HUF_CElt*)symbolTable;
119602 +    assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
119603 +    return table[symbolValue].nbBits;
119607 +typedef struct nodeElt_s {
119608 +    U32 count;
119609 +    U16 parent;
119610 +    BYTE byte;
119611 +    BYTE nbBits;
119612 +} nodeElt;
119615 + * HUF_setMaxHeight():
119616 + * Enforces maxNbBits on the Huffman tree described in huffNode.
119618 + * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
119619 + * the tree so that it is a valid canonical Huffman tree.
119621 + * @pre               The sum of the ranks of each symbol == 2^largestBits,
119622 + *                    where largestBits == huffNode[lastNonNull].nbBits.
119623 + * @post              The sum of the ranks of each symbol == 2^largestBits,
119624 + *                    where largestBits is the return value <= maxNbBits.
119626 + * @param huffNode    The Huffman tree modified in place to enforce maxNbBits.
119627 + * @param lastNonNull The symbol with the lowest count in the Huffman tree.
119628 + * @param maxNbBits   The maximum allowed number of bits, which the Huffman tree
119629 + *                    may not respect. After this function the Huffman tree will
119630 + *                    respect maxNbBits.
119631 + * @return            The maximum number of bits of the Huffman tree after adjustment,
119632 + *                    necessarily no more than maxNbBits.
119633 + */
119634 +static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
119636 +    const U32 largestBits = huffNode[lastNonNull].nbBits;
119637 +    /* early exit : no elt > maxNbBits, so the tree is already valid. */
119638 +    if (largestBits <= maxNbBits) return largestBits;
119640 +    /* there are several too large elements (at least >= 2) */
119641 +    {   int totalCost = 0;
119642 +        const U32 baseCost = 1 << (largestBits - maxNbBits);
119643 +        int n = (int)lastNonNull;
119645 +        /* Adjust any ranks > maxNbBits to maxNbBits.
119646 +         * Compute totalCost, which is how far the sum of the ranks is
119647 +         * over 2^largestBits after adjusting the offending ranks.
119648 +         */
119649 +        while (huffNode[n].nbBits > maxNbBits) {
119650 +            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
119651 +            huffNode[n].nbBits = (BYTE)maxNbBits;
119652 +            n--;
119653 +        }
119654 +        /* n stops at huffNode[n].nbBits <= maxNbBits */
119655 +        assert(huffNode[n].nbBits <= maxNbBits);
119656 +        /* n ends at the index of the smallest symbol using < maxNbBits */
119657 +        while (huffNode[n].nbBits == maxNbBits) --n;
119659 +        /* renorm totalCost from 2^largestBits to 2^maxNbBits
119660 +         * note : totalCost is necessarily a multiple of baseCost */
119661 +        assert((totalCost & (baseCost - 1)) == 0);
119662 +        totalCost >>= (largestBits - maxNbBits);
119663 +        assert(totalCost > 0);
119665 +        /* repay normalized cost */
119666 +        {   U32 const noSymbol = 0xF0F0F0F0;
119667 +            U32 rankLast[HUF_TABLELOG_MAX+2];
119669 +            /* Get pos of last (smallest = lowest cum. count) symbol per rank */
119670 +            ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
119671 +            {   U32 currentNbBits = maxNbBits;
119672 +                int pos;
119673 +                for (pos=n ; pos >= 0; pos--) {
119674 +                    if (huffNode[pos].nbBits >= currentNbBits) continue;
119675 +                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
119676 +                    rankLast[maxNbBits-currentNbBits] = (U32)pos;
119677 +            }   }
119679 +            while (totalCost > 0) {
119680 +                /* Try to reduce the next power of 2 above totalCost because we
119681 +                 * gain back half the rank.
119682 +                 */
119683 +                U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
119684 +                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
119685 +                    U32 const highPos = rankLast[nBitsToDecrease];
119686 +                    U32 const lowPos = rankLast[nBitsToDecrease-1];
119687 +                    if (highPos == noSymbol) continue;
119688 +                    /* Decrease highPos if no symbols of lowPos or if it is
119689 +                     * not cheaper to remove 2 lowPos than highPos.
119690 +                     */
119691 +                    if (lowPos == noSymbol) break;
119692 +                    {   U32 const highTotal = huffNode[highPos].count;
119693 +                        U32 const lowTotal = 2 * huffNode[lowPos].count;
119694 +                        if (highTotal <= lowTotal) break;
119695 +                }   }
119696 +                /* only triggered when no more rank 1 symbols are left => find the closest one (note : there is necessarily at least one !) */
119697 +                assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
119698 +                /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
119699 +                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
119700 +                    nBitsToDecrease++;
119701 +                assert(rankLast[nBitsToDecrease] != noSymbol);
119702 +                /* Increase the number of bits to gain back half the rank cost. */
119703 +                totalCost -= 1 << (nBitsToDecrease-1);
119704 +                huffNode[rankLast[nBitsToDecrease]].nbBits++;
119706 +                /* Fix up the new rank.
119707 +                 * If the new rank was empty, this symbol is now its smallest.
119708 +                 * Otherwise, this symbol will be the largest in the new rank so no adjustment.
119709 +                 */
119710 +                if (rankLast[nBitsToDecrease-1] == noSymbol)
119711 +                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
119712 +                /* Fix up the old rank.
119713 +                 * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
119714 +                 * it must be the only symbol in its rank, so the old rank now has no symbols.
119715 +                 * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
119716 +                 * the smallest node in the rank. If the previous position belongs to a different rank,
119717 +                 * then the rank is now empty.
119718 +                 */
119719 +                if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */
119720 +                    rankLast[nBitsToDecrease] = noSymbol;
119721 +                else {
119722 +                    rankLast[nBitsToDecrease]--;
119723 +                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
119724 +                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
119725 +                }
119726 +            }   /* while (totalCost > 0) */
119728 +            /* If we've removed too much weight, then we have to add it back.
119729 +             * To avoid overshooting again, we only adjust the smallest rank.
119730 +             * We take the largest nodes from the lowest rank 0 and move them
119731 +             * to rank 1. There's guaranteed to be enough rank 0 symbols because
119732 +             * TODO.
119733 +             */
119734 +            while (totalCost < 0) {  /* Sometimes, cost correction overshoots */
119735 +                /* special case : no rank 1 symbol (using maxNbBits-1);
119736 +                 * let's create one from largest rank 0 (using maxNbBits).
119737 +                 */
119738 +                if (rankLast[1] == noSymbol) {
119739 +                    while (huffNode[n].nbBits == maxNbBits) n--;
119740 +                    huffNode[n+1].nbBits--;
119741 +                    assert(n >= 0);
119742 +                    rankLast[1] = (U32)(n+1);
119743 +                    totalCost++;
119744 +                    continue;
119745 +                }
119746 +                huffNode[ rankLast[1] + 1 ].nbBits--;
119747 +                rankLast[1]++;
119748 +                totalCost ++;
119749 +            }
119750 +        }   /* repay normalized cost */
119751 +    }   /* there are several too large elements (at least >= 2) */
119753 +    return maxNbBits;
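Tracing the cost accounting above: measured in units of 2^-largestBits, promoting a depth-d leaf to maxNbBits adds baseCost - 2^(largestBits-d) of Kraft mass, and the accumulated overshoot is renormalized to 2^-maxNbBits units before being repaid. A standalone sketch with assumed toy depths:

#include <stdio.h>

int main(void)
{
    const unsigned largestBits = 6, maxNbBits = 4;
    const int baseCost = 1 << (largestBits - maxNbBits);   /* 4 */
    const unsigned depth[3] = { 6, 6, 5 };                 /* leaves deeper than maxNbBits */
    int totalCost = 0;

    for (int i = 0; i < 3; i++)
        totalCost += baseCost - (1 << (largestBits - depth[i]));   /* 3 + 3 + 2 */

    /* totalCost (8) is a multiple of baseCost, as the assert above requires */
    totalCost >>= (largestBits - maxNbBits);   /* renorm to 2^-maxNbBits units */
    printf("totalCost = %d\n", totalCost);     /* 2: repaid by deepening cheap leaves */
    return 0;
}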
119756 +typedef struct {
119757 +    U32 base;
119758 +    U32 curr;
119759 +} rankPos;
119761 +typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
119763 +#define RANK_POSITION_TABLE_SIZE 32
119765 +typedef struct {
119766 +  huffNodeTable huffNodeTbl;
119767 +  rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
119768 +} HUF_buildCTable_wksp_tables;
119771 + * HUF_sort():
119772 + * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
119774 + * @param[out] huffNode       Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
119775 + *                            Must have (maxSymbolValue + 1) entries.
119776 + * @param[in]  count          Histogram of the symbols.
119777 + * @param[in]  maxSymbolValue Maximum symbol value.
119778 + * @param      rankPosition   This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
119779 + */
119780 +static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
119782 +    int n;
119783 +    int const maxSymbolValue1 = (int)maxSymbolValue + 1;
119785 +    /* Compute base and set curr to base.
119786 +     * For symbol s let lowerRank = BIT_highbit32(count[s]+1) and rank = lowerRank + 1.
119787 +     * Then 2^lowerRank <= count[s]+1 <= 2^rank.
119788 +     * We attribute each symbol to lowerRank's base value, because we want to know where
119789 +     * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
119790 +     */
119791 +    ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
119792 +    for (n = 0; n < maxSymbolValue1; ++n) {
119793 +        U32 lowerRank = BIT_highbit32(count[n] + 1);
119794 +        rankPosition[lowerRank].base++;
119795 +    }
119796 +    assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
119797 +    for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
119798 +        rankPosition[n-1].base += rankPosition[n].base;
119799 +        rankPosition[n-1].curr = rankPosition[n-1].base;
119800 +    }
119801 +    /* Sort */
119802 +    for (n = 0; n < maxSymbolValue1; ++n) {
119803 +        U32 const c = count[n];
119804 +        U32 const r = BIT_highbit32(c+1) + 1;
119805 +        U32 pos = rankPosition[r].curr++;
119806 +        /* Insert into the correct position within the rank.
119807 +         * With at most 256 symbols, this insertion sort remains cheap.
119808 +         */
119809 +        while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
119810 +            huffNode[pos] = huffNode[pos-1];
119811 +            pos--;
119812 +        }
119813 +        huffNode[pos].count = c;
119814 +        huffNode[pos].byte  = (BYTE)n;
119815 +    }
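
/*
 * Self-contained sketch of the bucketing above (illustrative only, plain C;
 * not part of the patch).  `highbit32` is a portable stand-in for
 * BIT_highbit32, i.e. floor(log2(v)) for v != 0.
 */
#include <stdio.h>

static unsigned highbit32(unsigned v)
{
    unsigned r = 0;
    while (v >>= 1) r++;
    return r;
}

int main(void)
{
    unsigned const count[5] = { 7, 1, 0, 30, 3 };   /* toy histogram */
    unsigned base[32] = { 0 };
    int n;
    /* bucket sizes, indexed by lowerRank = highbit32(count+1) */
    for (n = 0; n < 5; ++n)
        base[highbit32(count[n] + 1)]++;
    /* suffix sums: base[r] = number of symbols with lowerRank >= r */
    for (n = 31; n > 0; --n)
        base[n-1] += base[n];
    /* a symbol with lowerRank L is written starting at offset base[L+1],
     * i.e. after every symbol belonging to a strictly higher rank */
    for (n = 0; n < 5; ++n)
        printf("count=%2u -> lowerRank=%u, slice starts at %u\n",
               count[n], highbit32(count[n] + 1),
               base[highbit32(count[n] + 1) + 1]);
    return 0;
}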
119819 +/** HUF_buildCTable_wksp() :
119820 + *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
119821 + *  `workSpace` must be aligned on a 4-byte boundary, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
119822 + */
119823 +#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
119825 +/* HUF_buildTree():
119826 + * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
119828 + * @param huffNode        The array sorted by HUF_sort(). Builds the Huffman tree in this array.
119829 + * @param maxSymbolValue  The maximum symbol value.
119830 + * @return                The index of the smallest-count leaf (the last non-null rank).
119831 + */
119832 +static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
119834 +    nodeElt* const huffNode0 = huffNode - 1;
119835 +    int nonNullRank;
119836 +    int lowS, lowN;
119837 +    int nodeNb = STARTNODE;
119838 +    int n, nodeRoot;
119839 +    /* init for parents */
119840 +    nonNullRank = (int)maxSymbolValue;
119841 +    while(huffNode[nonNullRank].count == 0) nonNullRank--;
119842 +    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
119843 +    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
119844 +    huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
119845 +    nodeNb++; lowS-=2;
119846 +    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
119847 +    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */
119849 +    /* create parents */
119850 +    while (nodeNb <= nodeRoot) {
119851 +        int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
119852 +        int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
119853 +        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
119854 +        huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
119855 +        nodeNb++;
119856 +    }
119858 +    /* distribute weights (unlimited tree height) */
119859 +    huffNode[nodeRoot].nbBits = 0;
119860 +    for (n=nodeRoot-1; n>=STARTNODE; n--)
119861 +        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
119862 +    for (n=0; n<=nonNullRank; n++)
119863 +        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
119865 +    return nonNullRank;
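
/*
 * Worked example (illustrative): sorted counts {5, 3, 2} at huffNode[0..2].
 * The init step merges the two smallest leaves (3 and 2) into an internal
 * node of count 5; the create-parents loop then merges that node with the
 * remaining leaf of count 5 into the root (count 10).  Distributing depths
 * from the root gives nbBits = 1 for the count-5 leaf and nbBits = 2 for
 * the other two, i.e. code lengths {1, 2, 2} before length-limiting.
 */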
119869 + * HUF_buildCTableFromTree():
119870 + * Build the CTable given the Huffman tree in huffNode.
119872 + * @param[out] CTable         The output Huffman CTable.
119873 + * @param      huffNode       The Huffman tree.
119874 + * @param      nonNullRank    The last and smallest node in the Huffman tree.
119875 + * @param      maxSymbolValue The maximum symbol value.
119876 + * @param      maxNbBits      The exact maximum number of bits used in the Huffman tree.
119877 + */
119878 +static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
119880 +    /* fill result into ctable (val, nbBits) */
119881 +    int n;
119882 +    U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
119883 +    U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
119884 +    int const alphabetSize = (int)(maxSymbolValue + 1);
119885 +    for (n=0; n<=nonNullRank; n++)
119886 +        nbPerRank[huffNode[n].nbBits]++;
119887 +    /* determine starting value per rank */
119888 +    {   U16 min = 0;
119889 +        for (n=(int)maxNbBits; n>0; n--) {
119890 +            valPerRank[n] = min;      /* get starting value within each rank */
119891 +            min += nbPerRank[n];
119892 +            min >>= 1;
119893 +    }   }
119894 +    for (n=0; n<alphabetSize; n++)
119895 +        CTable[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */
119896 +    for (n=0; n<alphabetSize; n++)
119897 +        CTable[n].val = valPerRank[CTable[n].nbBits]++;   /* assign value within rank, symbol order */
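
/*
 * Worked example (illustrative): with code lengths nbBits = {1, 2, 2} and
 * maxNbBits = 2, nbPerRank[2] = 2 and nbPerRank[1] = 1.  Scanning ranks from
 * n = 2 down: valPerRank[2] = 0, then min = (0 + 2) >> 1 = 1, so
 * valPerRank[1] = 1.  The two 2-bit symbols therefore get vals 0 and 1 and
 * the 1-bit symbol gets val 1: codewords 00, 01 and 1, a canonical prefix code.
 */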
119900 +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
119902 +    HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
119903 +    nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
119904 +    nodeElt* const huffNode = huffNode0+1;
119905 +    int nonNullRank;
119907 +    /* safety checks */
119908 +    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on a 4-byte boundary */
119909 +    if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
119910 +      return ERROR(workSpace_tooSmall);
119911 +    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
119912 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
119913 +      return ERROR(maxSymbolValue_tooLarge);
119914 +    ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
119916 +    /* sort, decreasing order */
119917 +    HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
119919 +    /* build tree */
119920 +    nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
119922 +    /* enforce maxTableLog */
119923 +    maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
119924 +    if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */
119926 +    HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
119928 +    return maxNbBits;
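
/*
 * Minimal usage sketch (illustrative; `example_buildCTable` is a hypothetical
 * helper, assuming the types declared above are in scope): build a CTable for
 * a byte alphabet using an on-stack workspace.
 */
static size_t example_buildCTable(HUF_CElt cTable[256], const unsigned count[256])
{
    HUF_buildCTable_wksp_tables wksp;   /* scratch: huffNodeTbl + rankPosition */
    /* returns the largest code length actually used, or an error code
     * (test with HUF_isError()) */
    return HUF_buildCTable_wksp(cTable, count, 255 /* maxSymbolValue */,
                                HUF_TABLELOG_DEFAULT /* maxNbBits; 0 selects the default too */,
                                &wksp, sizeof(wksp));
}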
119931 +size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
119933 +    size_t nbBits = 0;
119934 +    int s;
119935 +    for (s = 0; s <= (int)maxSymbolValue; ++s) {
119936 +        nbBits += CTable[s].nbBits * count[s];
119937 +    }
119938 +    return nbBits >> 3;
119941 +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
119942 +  int bad = 0;
119943 +  int s;
119944 +  for (s = 0; s <= (int)maxSymbolValue; ++s) {
119945 +    bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
119946 +  }
119947 +  return !bad;
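
/*
 * Note: the validation above is branchless -- a CTable is invalid exactly when
 * some symbol occurs (count[s] != 0) yet was assigned no code (nbBits == 0),
 * which the bitwise accumulation into `bad` detects in a single pass.
 */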
119950 +size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
119952 +FORCE_INLINE_TEMPLATE void
119953 +HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
119955 +    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
119958 +#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
119960 +#define HUF_FLUSHBITS_1(stream) \
119961 +    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
119963 +#define HUF_FLUSHBITS_2(stream) \
119964 +    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
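
/*
 * Note (illustrative arithmetic): with HUF_TABLELOG_MAX = 12 (its value in
 * zstd) and a 64-bit bitContainer, 64 >= 12*2+7 = 31 and 64 >= 12*4+7 = 55,
 * so HUF_FLUSHBITS_1 and HUF_FLUSHBITS_2 both compile away and the stream is
 * flushed only once per four encoded symbols.  With a 32-bit container,
 * 32 < 55 turns HUF_FLUSHBITS_2 into a real flush, keeping the container
 * from overflowing.
 */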
119966 +FORCE_INLINE_TEMPLATE size_t
119967 +HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
119968 +                                   const void* src, size_t srcSize,
119969 +                                   const HUF_CElt* CTable)
119971 +    const BYTE* ip = (const BYTE*) src;
119972 +    BYTE* const ostart = (BYTE*)dst;
119973 +    BYTE* const oend = ostart + dstSize;
119974 +    BYTE* op = ostart;
119975 +    size_t n;
119976 +    BIT_CStream_t bitC;
119978 +    /* init */
119979 +    if (dstSize < 8) return 0;   /* not enough space to compress */
119980 +    { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op));
119981 +      if (HUF_isError(initErr)) return 0; }
119983 +    n = srcSize & ~3;  /* round srcSize down to a multiple of 4 */
119984 +    switch (srcSize & 3)
119985 +    {
119986 +        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
119987 +                 HUF_FLUSHBITS_2(&bitC);
119988 +                /* fall-through */
119989 +        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
119990 +                 HUF_FLUSHBITS_1(&bitC);
119991 +                /* fall-through */
119992 +        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
119993 +                 HUF_FLUSHBITS(&bitC);
119994 +                /* fall-through */
119995 +        case 0 : /* fall-through */
119996 +        default: break;
119997 +    }
119999 +    for (; n>0; n-=4) {  /* note : n&3==0 at this stage */
120000 +        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
120001 +        HUF_FLUSHBITS_1(&bitC);
120002 +        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
120003 +        HUF_FLUSHBITS_2(&bitC);
120004 +        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
120005 +        HUF_FLUSHBITS_1(&bitC);
120006 +        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
120007 +        HUF_FLUSHBITS(&bitC);
120008 +    }
120010 +    return BIT_closeCStream(&bitC);
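
/*
 * Note: symbols are encoded starting from the end of the input (ip[n-1] down
 * to ip[0]) because the matching decoder reads the bitstream backwards and
 * regenerates the output in forward order; the srcSize % 4 remainder is
 * handled up front so the main loop always consumes exactly four symbols.
 */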
120013 +#if DYNAMIC_BMI2
120015 +static TARGET_ATTRIBUTE("bmi2") size_t
120016 +HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
120017 +                                   const void* src, size_t srcSize,
120018 +                                   const HUF_CElt* CTable)
120020 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
120023 +static size_t
120024 +HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
120025 +                                      const void* src, size_t srcSize,
120026 +                                      const HUF_CElt* CTable)
120028 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
120031 +static size_t
120032 +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
120033 +                              const void* src, size_t srcSize,
120034 +                              const HUF_CElt* CTable, const int bmi2)
120036 +    if (bmi2) {
120037 +        return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
120038 +    }
120039 +    return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
120042 +#else
120044 +static size_t
120045 +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
120046 +                              const void* src, size_t srcSize,
120047 +                              const HUF_CElt* CTable, const int bmi2)
120049 +    (void)bmi2;
120050 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
120053 +#endif
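
/*
 * The block above is zstd's runtime-dispatch idiom: one force-inlined body,
 * instantiated twice -- once under TARGET_ATTRIBUTE("bmi2") and once plain --
 * selected by a flag probed via cpuid when the context is created.  A hedged,
 * generic sketch of the same idiom (hypothetical helper names; GCC/Clang on
 * x86-64 assumed):
 */
static int work_generic(int a, int b) { return a + b; }   /* portable body */
#if defined(__GNUC__) && defined(__x86_64__)
static __attribute__((target("bmi2"))) int work_bmi2(int a, int b)
{ return a + b; }                                         /* BMI2-enabled copy */
#endif

static int work_dispatch(int a, int b, int hasBmi2)
{
#if defined(__GNUC__) && defined(__x86_64__)
    if (hasBmi2) return work_bmi2(a, b);   /* taken when cpuid reports BMI2 */
#endif
    (void)hasBmi2;
    return work_generic(a, b);             /* portable fallback */
}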
120055 +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
120057 +    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
120061 +static size_t
120062 +HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
120063 +                              const void* src, size_t srcSize,
120064 +                              const HUF_CElt* CTable, int bmi2)
120066 +    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
120067 +    const BYTE* ip = (const BYTE*) src;
120068 +    const BYTE* const iend = ip + srcSize;
120069 +    BYTE* const ostart = (BYTE*) dst;
120070 +    BYTE* const oend = ostart + dstSize;
120071 +    BYTE* op = ostart;
120073 +    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */
120074 +    if (srcSize < 12) return 0;   /* no saving possible : too small input */
120075 +    op += 6;   /* jumpTable */
120077 +    assert(op <= oend);
120078 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
120079 +        if (cSize==0) return 0;
120080 +        assert(cSize <= 65535);
120081 +        MEM_writeLE16(ostart, (U16)cSize);
120082 +        op += cSize;
120083 +    }
120085 +    ip += segmentSize;
120086 +    assert(op <= oend);
120087 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
120088 +        if (cSize==0) return 0;
120089 +        assert(cSize <= 65535);
120090 +        MEM_writeLE16(ostart+2, (U16)cSize);
120091 +        op += cSize;
120092 +    }
120094 +    ip += segmentSize;
120095 +    assert(op <= oend);
120096 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
120097 +        if (cSize==0) return 0;
120098 +        assert(cSize <= 65535);
120099 +        MEM_writeLE16(ostart+4, (U16)cSize);
120100 +        op += cSize;
120101 +    }
120103 +    ip += segmentSize;
120104 +    assert(op <= oend);
120105 +    assert(ip <= iend);
120106 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
120107 +        if (cSize==0) return 0;
120108 +        op += cSize;
120109 +    }
120111 +    return (size_t)(op-ostart);
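
/*
 * Layout note: the 4-stream format starts with a 6-byte jump table holding
 * the compressed sizes of the first three segments as little-endian U16
 * values; the fourth segment's size is implied by the remaining length.
 * A hedged reader sketch for that header (hypothetical helper, not part of
 * the patch):
 */
static void example_readJumpTable(const BYTE* cSrc, size_t sizes[3])
{
    sizes[0] = MEM_readLE16(cSrc);       /* segment 1 */
    sizes[1] = MEM_readLE16(cSrc + 2);   /* segment 2 */
    sizes[2] = MEM_readLE16(cSrc + 4);   /* segment 3 */
}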
120114 +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
120116 +    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
120119 +typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
120121 +static size_t HUF_compressCTable_internal(
120122 +                BYTE* const ostart, BYTE* op, BYTE* const oend,
120123 +                const void* src, size_t srcSize,
120124 +                HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
120126 +    size_t const cSize = (nbStreams==HUF_singleStream) ?
120127 +                         HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
120128 +                         HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
120129 +    if (HUF_isError(cSize)) { return cSize; }
120130 +    if (cSize==0) { return 0; }   /* uncompressible */
120131 +    op += cSize;
120132 +    /* check compressibility */
120133 +    assert(op >= ostart);
120134 +    if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
120135 +    return (size_t)(op-ostart);
120138 +typedef struct {
120139 +    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
120140 +    HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
120141 +    union {
120142 +        HUF_buildCTable_wksp_tables buildCTable_wksp;
120143 +        HUF_WriteCTableWksp writeCTable_wksp;
120144 +    } wksps;
120145 +} HUF_compress_tables_t;
120147 +/* HUF_compress_internal() :
120148 + * `workSpace_align4` must be aligned on a 4-byte boundary,
120149 + * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned values */
120150 +static size_t
120151 +HUF_compress_internal (void* dst, size_t dstSize,
120152 +                 const void* src, size_t srcSize,
120153 +                       unsigned maxSymbolValue, unsigned huffLog,
120154 +                       HUF_nbStreams_e nbStreams,
120155 +                       void* workSpace_align4, size_t wkspSize,
120156 +                       HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
120157 +                 const int bmi2)
120159 +    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4;
120160 +    BYTE* const ostart = (BYTE*)dst;
120161 +    BYTE* const oend = ostart + dstSize;
120162 +    BYTE* op = ostart;
120164 +    HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
120165 +    assert(((size_t)workSpace_align4 & 3) == 0);   /* must be aligned on a 4-byte boundary */
120167 +    /* checks & inits */
120168 +    if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
120169 +    if (!srcSize) return 0;  /* Uncompressed */
120170 +    if (!dstSize) return 0;  /* cannot fit anything within dst budget */
120171 +    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
120172 +    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
120173 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
120174 +    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
120175 +    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
120177 +    /* Heuristic : If old table is valid, use it for small inputs */
120178 +    if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
120179 +        return HUF_compressCTable_internal(ostart, op, oend,
120180 +                                           src, srcSize,
120181 +                                           nbStreams, oldHufTable, bmi2);
120182 +    }
120184 +    /* Scan input and build symbol stats */
120185 +    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
120186 +        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
120187 +        if (largest <= (srcSize >> 7)+4) return 0;   /* heuristic : probably not compressible enough */
120188 +    }
120190 +    /* Check validity of previous table */
120191 +    if ( repeat
120192 +      && *repeat == HUF_repeat_check
120193 +      && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
120194 +        *repeat = HUF_repeat_none;
120195 +    }
120196 +    /* Heuristic : use existing table for small inputs */
120197 +    if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
120198 +        return HUF_compressCTable_internal(ostart, op, oend,
120199 +                                           src, srcSize,
120200 +                                           nbStreams, oldHufTable, bmi2);
120201 +    }
120203 +    /* Build Huffman Tree */
120204 +    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
120205 +    {   size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
120206 +                                            maxSymbolValue, huffLog,
120207 +                                            &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
120208 +        CHECK_F(maxBits);
120209 +        huffLog = (U32)maxBits;
120210 +        /* Zero unused symbols in CTable, so we can check it for validity */
120211 +        ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
120212 +               sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
120213 +    }
120215 +    /* Write table description header */
120216 +    {   CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
120217 +                                              &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
120218 +        /* Check if using previous huffman table is beneficial */
120219 +        if (repeat && *repeat != HUF_repeat_none) {
120220 +            size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
120221 +            size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
120222 +            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
120223 +                return HUF_compressCTable_internal(ostart, op, oend,
120224 +                                                   src, srcSize,
120225 +                                                   nbStreams, oldHufTable, bmi2);
120226 +        }   }
120228 +        /* Use the new huffman table */
120229 +        if (hSize + 12ul >= srcSize) { return 0; }
120230 +        op += hSize;
120231 +        if (repeat) { *repeat = HUF_repeat_none; }
120232 +        if (oldHufTable)
120233 +            ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable));  /* Save new table */
120234 +    }
120235 +    return HUF_compressCTable_internal(ostart, op, oend,
120236 +                                       src, srcSize,
120237 +                                       nbStreams, table->CTable, bmi2);
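
/*
 * Decision sketch with illustrative numbers: suppose writing a new table
 * header costs hSize = 60 bytes, the new table would code the block in
 * newSize = 900 bytes, and the previous table would code it in
 * oldSize = 940 bytes.  Since oldSize (940) <= hSize + newSize (960), the
 * repeat path above wins and no header is emitted.
 */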
120241 +size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
120242 +                      const void* src, size_t srcSize,
120243 +                      unsigned maxSymbolValue, unsigned huffLog,
120244 +                      void* workSpace, size_t wkspSize)
120246 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
120247 +                                 maxSymbolValue, huffLog, HUF_singleStream,
120248 +                                 workSpace, wkspSize,
120249 +                                 NULL, NULL, 0, 0 /*bmi2*/);
120252 +size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
120253 +                      const void* src, size_t srcSize,
120254 +                      unsigned maxSymbolValue, unsigned huffLog,
120255 +                      void* workSpace, size_t wkspSize,
120256 +                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
120258 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
120259 +                                 maxSymbolValue, huffLog, HUF_singleStream,
120260 +                                 workSpace, wkspSize, hufTable,
120261 +                                 repeat, preferRepeat, bmi2);
120264 +/* HUF_compress4X_wksp():
120265 + * compress input using 4 streams.
120266 + * provide workspace to generate compression tables */
120267 +size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
120268 +                      const void* src, size_t srcSize,
120269 +                      unsigned maxSymbolValue, unsigned huffLog,
120270 +                      void* workSpace, size_t wkspSize)
120272 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
120273 +                                 maxSymbolValue, huffLog, HUF_fourStreams,
120274 +                                 workSpace, wkspSize,
120275 +                                 NULL, NULL, 0, 0 /*bmi2*/);
120278 +/* HUF_compress4X_repeat():
120279 + * compress input using 4 streams.
120280 + * re-use an existing huffman compression table */
120281 +size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
120282 +                      const void* src, size_t srcSize,
120283 +                      unsigned maxSymbolValue, unsigned huffLog,
120284 +                      void* workSpace, size_t wkspSize,
120285 +                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
120287 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
120288 +                                 maxSymbolValue, huffLog, HUF_fourStreams,
120289 +                                 workSpace, wkspSize,
120290 +                                 hufTable, repeat, preferRepeat, bmi2);
120292 diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
120293 new file mode 100644
120294 index 000000000000..78aa14c50dd2
120295 --- /dev/null
120296 +++ b/lib/zstd/compress/zstd_compress.c
120297 @@ -0,0 +1,5105 @@
120299 + * Copyright (c) Yann Collet, Facebook, Inc.
120300 + * All rights reserved.
120302 + * This source code is licensed under both the BSD-style license (found in the
120303 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
120304 + * in the COPYING file in the root directory of this source tree).
120305 + * You may select, at your option, one of the above-listed licenses.
120306 + */
120308 +/*-*************************************
120309 +*  Dependencies
120310 +***************************************/
120311 +#include "../common/zstd_deps.h"  /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
120312 +#include "../common/cpu.h"
120313 +#include "../common/mem.h"
120314 +#include "hist.h"           /* HIST_countFast_wksp */
120315 +#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
120316 +#include "../common/fse.h"
120317 +#define HUF_STATIC_LINKING_ONLY
120318 +#include "../common/huf.h"
120319 +#include "zstd_compress_internal.h"
120320 +#include "zstd_compress_sequences.h"
120321 +#include "zstd_compress_literals.h"
120322 +#include "zstd_fast.h"
120323 +#include "zstd_double_fast.h"
120324 +#include "zstd_lazy.h"
120325 +#include "zstd_opt.h"
120326 +#include "zstd_ldm.h"
120327 +#include "zstd_compress_superblock.h"
120329 +/* ***************************************************************
120330 +*  Tuning parameters
120331 +*****************************************************************/
120333 + * COMPRESS_HEAPMODE :
120334 + * Select how the default compression function ZSTD_compress() allocates its context,
120335 + * on the stack (0, default) or on the heap (1).
120336 + * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
120337 + */
120340 +/*-*************************************
120341 +*  Helper functions
120342 +***************************************/
120343 +/* ZSTD_compressBound()
120344 + * Note that the result from this function is only compatible with the "normal"
120345 + * full-block strategy.
120346 + * When there are a lot of small blocks due to frequent flushes in streaming mode,
120347 + * the overhead of headers can make the compressed data larger than the
120348 + * return value of ZSTD_compressBound().
120349 + */
120350 +size_t ZSTD_compressBound(size_t srcSize) {
120351 +    return ZSTD_COMPRESSBOUND(srcSize);
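
/*
 * For reference, ZSTD_COMPRESSBOUND() is roughly srcSize + srcSize/256 plus a
 * small extra margin for inputs under 128 KB -- the worst case when the data
 * cannot be compressed at all.
 */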
120355 +/*-*************************************
120356 +*  Context memory management
120357 +***************************************/
120358 +struct ZSTD_CDict_s {
120359 +    const void* dictContent;
120360 +    size_t dictContentSize;
120361 +    ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
120362 +    U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
120363 +    ZSTD_cwksp workspace;
120364 +    ZSTD_matchState_t matchState;
120365 +    ZSTD_compressedBlockState_t cBlockState;
120366 +    ZSTD_customMem customMem;
120367 +    U32 dictID;
120368 +    int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
120369 +};  /* typedef'd to ZSTD_CDict within "zstd.h" */
120371 +ZSTD_CCtx* ZSTD_createCCtx(void)
120373 +    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
120376 +static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
120378 +    assert(cctx != NULL);
120379 +    ZSTD_memset(cctx, 0, sizeof(*cctx));
120380 +    cctx->customMem = memManager;
120381 +    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
120382 +    {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
120383 +        assert(!ZSTD_isError(err));
120384 +        (void)err;
120385 +    }
120388 +ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
120390 +    ZSTD_STATIC_ASSERT(zcss_init==0);
120391 +    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
120392 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
120393 +    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
120394 +        if (!cctx) return NULL;
120395 +        ZSTD_initCCtx(cctx, customMem);
120396 +        return cctx;
120397 +    }
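
/*
 * Note: the test ((!customAlloc) ^ (!customFree)) rejects half-specified
 * allocators -- callers must supply both callbacks or neither (NULL/NULL
 * selects the default allocator).
 */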
120400 +ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
120402 +    ZSTD_cwksp ws;
120403 +    ZSTD_CCtx* cctx;
120404 +    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
120405 +    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
120406 +    ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
120408 +    cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
120409 +    if (cctx == NULL) return NULL;
120411 +    ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
120412 +    ZSTD_cwksp_move(&cctx->workspace, &ws);
120413 +    cctx->staticSize = workspaceSize;
120415 +    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
120416 +    if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
120417 +    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
120418 +    cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
120419 +    cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
120420 +    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
120421 +    return cctx;
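
/*
 * Userspace usage sketch (illustrative; ZSTD_estimateCCtxSize() is the public
 * zstd API for sizing such a workspace -- an assumption here, it is not shown
 * in this patch):
 */
#include <stdlib.h>

static ZSTD_CCtx* example_staticCCtx(int level, void** keepBuffer)
{
    size_t const need = ZSTD_estimateCCtxSize(level);
    void* const buf = malloc(need);         /* malloc returns suitably aligned memory */
    if (buf == NULL) return NULL;
    *keepBuffer = buf;                      /* caller frees the buffer itself;    */
    return ZSTD_initStaticCCtx(buf, need);  /* ZSTD_freeCCtx() must not be called */
}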
120425 + * Clears and frees all of the dictionaries in the CCtx.
120426 + */
120427 +static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
120429 +    ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
120430 +    ZSTD_freeCDict(cctx->localDict.cdict);
120431 +    ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
120432 +    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
120433 +    cctx->cdict = NULL;
120436 +static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
120438 +    size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
120439 +    size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
120440 +    return bufferSize + cdictSize;
120443 +static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
120445 +    assert(cctx != NULL);
120446 +    assert(cctx->staticSize == 0);
120447 +    ZSTD_clearAllDicts(cctx);
120448 +    ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
120451 +size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
120453 +    if (cctx==NULL) return 0;   /* support free on NULL */
120454 +    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
120455 +                    "not compatible with static CCtx");
120456 +    {
120457 +        int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
120458 +        ZSTD_freeCCtxContent(cctx);
120459 +        if (!cctxInWorkspace) {
120460 +            ZSTD_customFree(cctx, cctx->customMem);
120461 +        }
120462 +    }
120463 +    return 0;
120467 +static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
120469 +    (void)cctx;
120470 +    return 0;
120474 +size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
120476 +    if (cctx==NULL) return 0;   /* support sizeof on NULL */
120477 +    /* cctx may be in the workspace */
120478 +    return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
120479 +           + ZSTD_cwksp_sizeof(&cctx->workspace)
120480 +           + ZSTD_sizeof_localDict(cctx->localDict)
120481 +           + ZSTD_sizeof_mtctx(cctx);
120484 +size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
120486 +    return ZSTD_sizeof_CCtx(zcs);  /* same object */
120489 +/* private API call, for dictBuilder only */
120490 +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
120492 +/* Returns 1 if compression parameters are such that we should
120493 + * enable long distance matching (wlog >= 27, strategy >= btopt).
120494 + * Returns 0 otherwise.
120495 + */
120496 +static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) {
120497 +    return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27;
120500 +static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
120501 +        ZSTD_compressionParameters cParams)
120503 +    ZSTD_CCtx_params cctxParams;
120504 +    /* should not matter, as all cParams are presumed properly defined */
120505 +    ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
120506 +    cctxParams.cParams = cParams;
120508 +    if (ZSTD_CParams_shouldEnableLdm(&cParams)) {
120509 +        DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params");
120510 +        cctxParams.ldmParams.enableLdm = 1;
120511 +        /* LDM is enabled by default for optimal parser and window size >= 128MB */
120512 +        ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
120513 +        assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
120514 +        assert(cctxParams.ldmParams.hashRateLog < 32);
120515 +    }
120517 +    assert(!ZSTD_checkCParams(cParams));
120518 +    return cctxParams;
120521 +static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
120522 +        ZSTD_customMem customMem)
120524 +    ZSTD_CCtx_params* params;
120525 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
120526 +    params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
120527 +            sizeof(ZSTD_CCtx_params), customMem);
120528 +    if (!params) { return NULL; }
120529 +    ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
120530 +    params->customMem = customMem;
120531 +    return params;
120534 +ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
120536 +    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
120539 +size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
120541 +    if (params == NULL) { return 0; }
120542 +    ZSTD_customFree(params, params->customMem);
120543 +    return 0;
120546 +size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
120548 +    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
120551 +size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
120552 +    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
120553 +    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
120554 +    cctxParams->compressionLevel = compressionLevel;
120555 +    cctxParams->fParams.contentSizeFlag = 1;
120556 +    return 0;
120559 +#define ZSTD_NO_CLEVEL 0
120562 + * Initializes the cctxParams from params and compressionLevel.
120563 + * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
120564 + */
120565 +static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
120567 +    assert(!ZSTD_checkCParams(params->cParams));
120568 +    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
120569 +    cctxParams->cParams = params->cParams;
120570 +    cctxParams->fParams = params->fParams;
120571 +    /* Should not matter, as all cParams are presumed properly defined.
120572 +     * But, set it for tracing anyway.
120573 +     */
120574 +    cctxParams->compressionLevel = compressionLevel;
120577 +size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
120579 +    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
120580 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
120581 +    ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
120582 +    return 0;
120586 + * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
120587 + * @param params Validated zstd parameters.
120588 + */
120589 +static void ZSTD_CCtxParams_setZstdParams(
120590 +        ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
120592 +    assert(!ZSTD_checkCParams(params->cParams));
120593 +    cctxParams->cParams = params->cParams;
120594 +    cctxParams->fParams = params->fParams;
120595 +    /* Should not matter, as all cParams are presumed properly defined.
120596 +     * But, set it for tracing anyway.
120597 +     */
120598 +    cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
120601 +ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
120603 +    ZSTD_bounds bounds = { 0, 0, 0 };
120605 +    switch(param)
120606 +    {
120607 +    case ZSTD_c_compressionLevel:
120608 +        bounds.lowerBound = ZSTD_minCLevel();
120609 +        bounds.upperBound = ZSTD_maxCLevel();
120610 +        return bounds;
120612 +    case ZSTD_c_windowLog:
120613 +        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
120614 +        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
120615 +        return bounds;
120617 +    case ZSTD_c_hashLog:
120618 +        bounds.lowerBound = ZSTD_HASHLOG_MIN;
120619 +        bounds.upperBound = ZSTD_HASHLOG_MAX;
120620 +        return bounds;
120622 +    case ZSTD_c_chainLog:
120623 +        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
120624 +        bounds.upperBound = ZSTD_CHAINLOG_MAX;
120625 +        return bounds;
120627 +    case ZSTD_c_searchLog:
120628 +        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
120629 +        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
120630 +        return bounds;
120632 +    case ZSTD_c_minMatch:
120633 +        bounds.lowerBound = ZSTD_MINMATCH_MIN;
120634 +        bounds.upperBound = ZSTD_MINMATCH_MAX;
120635 +        return bounds;
120637 +    case ZSTD_c_targetLength:
120638 +        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
120639 +        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
120640 +        return bounds;
120642 +    case ZSTD_c_strategy:
120643 +        bounds.lowerBound = ZSTD_STRATEGY_MIN;
120644 +        bounds.upperBound = ZSTD_STRATEGY_MAX;
120645 +        return bounds;
120647 +    case ZSTD_c_contentSizeFlag:
120648 +        bounds.lowerBound = 0;
120649 +        bounds.upperBound = 1;
120650 +        return bounds;
120652 +    case ZSTD_c_checksumFlag:
120653 +        bounds.lowerBound = 0;
120654 +        bounds.upperBound = 1;
120655 +        return bounds;
120657 +    case ZSTD_c_dictIDFlag:
120658 +        bounds.lowerBound = 0;
120659 +        bounds.upperBound = 1;
120660 +        return bounds;
120662 +    case ZSTD_c_nbWorkers:
120663 +        bounds.lowerBound = 0;
120664 +        bounds.upperBound = 0;
120665 +        return bounds;
120667 +    case ZSTD_c_jobSize:
120668 +        bounds.lowerBound = 0;
120669 +        bounds.upperBound = 0;
120670 +        return bounds;
120672 +    case ZSTD_c_overlapLog:
120673 +        bounds.lowerBound = 0;
120674 +        bounds.upperBound = 0;
120675 +        return bounds;
120677 +    case ZSTD_c_enableDedicatedDictSearch:
120678 +        bounds.lowerBound = 0;
120679 +        bounds.upperBound = 1;
120680 +        return bounds;
120682 +    case ZSTD_c_enableLongDistanceMatching:
120683 +        bounds.lowerBound = 0;
120684 +        bounds.upperBound = 1;
120685 +        return bounds;
120687 +    case ZSTD_c_ldmHashLog:
120688 +        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
120689 +        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
120690 +        return bounds;
120692 +    case ZSTD_c_ldmMinMatch:
120693 +        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
120694 +        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
120695 +        return bounds;
120697 +    case ZSTD_c_ldmBucketSizeLog:
120698 +        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
120699 +        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
120700 +        return bounds;
120702 +    case ZSTD_c_ldmHashRateLog:
120703 +        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
120704 +        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
120705 +        return bounds;
120707 +    /* experimental parameters */
120708 +    case ZSTD_c_rsyncable:
120709 +        bounds.lowerBound = 0;
120710 +        bounds.upperBound = 1;
120711 +        return bounds;
120713 +    case ZSTD_c_forceMaxWindow :
120714 +        bounds.lowerBound = 0;
120715 +        bounds.upperBound = 1;
120716 +        return bounds;
120718 +    case ZSTD_c_format:
120719 +        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
120720 +        bounds.lowerBound = ZSTD_f_zstd1;
120721 +        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
120722 +        return bounds;
120724 +    case ZSTD_c_forceAttachDict:
120725 +        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
120726 +        bounds.lowerBound = ZSTD_dictDefaultAttach;
120727 +        bounds.upperBound = ZSTD_dictForceLoad;       /* note : how to ensure at compile time that this is the highest value enum ? */
120728 +        return bounds;
120730 +    case ZSTD_c_literalCompressionMode:
120731 +        ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
120732 +        bounds.lowerBound = ZSTD_lcm_auto;
120733 +        bounds.upperBound = ZSTD_lcm_uncompressed;
120734 +        return bounds;
120736 +    case ZSTD_c_targetCBlockSize:
120737 +        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
120738 +        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
120739 +        return bounds;
120741 +    case ZSTD_c_srcSizeHint:
120742 +        bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
120743 +        bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
120744 +        return bounds;
120746 +    case ZSTD_c_stableInBuffer:
120747 +    case ZSTD_c_stableOutBuffer:
120748 +        bounds.lowerBound = (int)ZSTD_bm_buffered;
120749 +        bounds.upperBound = (int)ZSTD_bm_stable;
120750 +        return bounds;
120752 +    case ZSTD_c_blockDelimiters:
120753 +        bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
120754 +        bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
120755 +        return bounds;
120757 +    case ZSTD_c_validateSequences:
120758 +        bounds.lowerBound = 0;
120759 +        bounds.upperBound = 1;
120760 +        return bounds;
120762 +    default:
120763 +        bounds.error = ERROR(parameter_unsupported);
120764 +        return bounds;
120765 +    }
120768 +/* ZSTD_cParam_clampBounds:
120769 + * Clamps the value into the bounded range.
120770 + */
120771 +static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
120773 +    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
120774 +    if (ZSTD_isError(bounds.error)) return bounds.error;
120775 +    if (*value < bounds.lowerBound) *value = bounds.lowerBound;
120776 +    if (*value > bounds.upperBound) *value = bounds.upperBound;
120777 +    return 0;
120780 +#define BOUNDCHECK(cParam, val) { \
120781 +    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
120782 +                    parameter_outOfBound, "Param out of bounds"); \
120786 +static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
120788 +    switch(param)
120789 +    {
120790 +    case ZSTD_c_compressionLevel:
120791 +    case ZSTD_c_hashLog:
120792 +    case ZSTD_c_chainLog:
120793 +    case ZSTD_c_searchLog:
120794 +    case ZSTD_c_minMatch:
120795 +    case ZSTD_c_targetLength:
120796 +    case ZSTD_c_strategy:
120797 +        return 1;
120799 +    case ZSTD_c_format:
120800 +    case ZSTD_c_windowLog:
120801 +    case ZSTD_c_contentSizeFlag:
120802 +    case ZSTD_c_checksumFlag:
120803 +    case ZSTD_c_dictIDFlag:
120804 +    case ZSTD_c_forceMaxWindow :
120805 +    case ZSTD_c_nbWorkers:
120806 +    case ZSTD_c_jobSize:
120807 +    case ZSTD_c_overlapLog:
120808 +    case ZSTD_c_rsyncable:
120809 +    case ZSTD_c_enableDedicatedDictSearch:
120810 +    case ZSTD_c_enableLongDistanceMatching:
120811 +    case ZSTD_c_ldmHashLog:
120812 +    case ZSTD_c_ldmMinMatch:
120813 +    case ZSTD_c_ldmBucketSizeLog:
120814 +    case ZSTD_c_ldmHashRateLog:
120815 +    case ZSTD_c_forceAttachDict:
120816 +    case ZSTD_c_literalCompressionMode:
120817 +    case ZSTD_c_targetCBlockSize:
120818 +    case ZSTD_c_srcSizeHint:
120819 +    case ZSTD_c_stableInBuffer:
120820 +    case ZSTD_c_stableOutBuffer:
120821 +    case ZSTD_c_blockDelimiters:
120822 +    case ZSTD_c_validateSequences:
120823 +    default:
120824 +        return 0;
120825 +    }
120828 +size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
120830 +    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
120831 +    if (cctx->streamStage != zcss_init) {
120832 +        if (ZSTD_isUpdateAuthorized(param)) {
120833 +            cctx->cParamsChanged = 1;
120834 +        } else {
120835 +            RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
120836 +    }   }
120838 +    switch(param)
120839 +    {
120840 +    case ZSTD_c_nbWorkers:
120841 +        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
120842 +                        "MT not compatible with static alloc");
120843 +        break;
120845 +    case ZSTD_c_compressionLevel:
120846 +    case ZSTD_c_windowLog:
120847 +    case ZSTD_c_hashLog:
120848 +    case ZSTD_c_chainLog:
120849 +    case ZSTD_c_searchLog:
120850 +    case ZSTD_c_minMatch:
120851 +    case ZSTD_c_targetLength:
120852 +    case ZSTD_c_strategy:
120853 +    case ZSTD_c_ldmHashRateLog:
120854 +    case ZSTD_c_format:
120855 +    case ZSTD_c_contentSizeFlag:
120856 +    case ZSTD_c_checksumFlag:
120857 +    case ZSTD_c_dictIDFlag:
120858 +    case ZSTD_c_forceMaxWindow:
120859 +    case ZSTD_c_forceAttachDict:
120860 +    case ZSTD_c_literalCompressionMode:
120861 +    case ZSTD_c_jobSize:
120862 +    case ZSTD_c_overlapLog:
120863 +    case ZSTD_c_rsyncable:
120864 +    case ZSTD_c_enableDedicatedDictSearch:
120865 +    case ZSTD_c_enableLongDistanceMatching:
120866 +    case ZSTD_c_ldmHashLog:
120867 +    case ZSTD_c_ldmMinMatch:
120868 +    case ZSTD_c_ldmBucketSizeLog:
120869 +    case ZSTD_c_targetCBlockSize:
120870 +    case ZSTD_c_srcSizeHint:
120871 +    case ZSTD_c_stableInBuffer:
120872 +    case ZSTD_c_stableOutBuffer:
120873 +    case ZSTD_c_blockDelimiters:
120874 +    case ZSTD_c_validateSequences:
120875 +        break;
120877 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
120878 +    }
120879 +    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
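
/*
 * Usage sketch (illustrative, public API; `example_configure` is a
 * hypothetical helper): most parameters are checked against
 * ZSTD_cParam_getBounds() and rejected when out of range, while the
 * compression level is clamped into range instead (see
 * ZSTD_cParam_clampBounds() above).
 */
static size_t example_configure(ZSTD_CCtx* cctx)
{
    size_t err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    if (ZSTD_isError(err)) return err;
    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
    if (ZSTD_isError(err)) return err;
    return ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 27);  /* 128 MB window */
}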
120882 +size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
120883 +                                    ZSTD_cParameter param, int value)
120885 +    DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
120886 +    switch(param)
120887 +    {
120888 +    case ZSTD_c_format :
120889 +        BOUNDCHECK(ZSTD_c_format, value);
120890 +        CCtxParams->format = (ZSTD_format_e)value;
120891 +        return (size_t)CCtxParams->format;
120893 +    case ZSTD_c_compressionLevel : {
120894 +        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
120895 +        if (value == 0)
120896 +            CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
120897 +        else
120898 +            CCtxParams->compressionLevel = value;
120899 +        if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
120900 +        return 0;  /* return type (size_t) cannot represent negative values */
120901 +    }
120903 +    case ZSTD_c_windowLog :
120904 +        if (value!=0)   /* 0 => use default */
120905 +            BOUNDCHECK(ZSTD_c_windowLog, value);
120906 +        CCtxParams->cParams.windowLog = (U32)value;
120907 +        return CCtxParams->cParams.windowLog;
120909 +    case ZSTD_c_hashLog :
120910 +        if (value!=0)   /* 0 => use default */
120911 +            BOUNDCHECK(ZSTD_c_hashLog, value);
120912 +        CCtxParams->cParams.hashLog = (U32)value;
120913 +        return CCtxParams->cParams.hashLog;
120915 +    case ZSTD_c_chainLog :
120916 +        if (value!=0)   /* 0 => use default */
120917 +            BOUNDCHECK(ZSTD_c_chainLog, value);
120918 +        CCtxParams->cParams.chainLog = (U32)value;
120919 +        return CCtxParams->cParams.chainLog;
120921 +    case ZSTD_c_searchLog :
120922 +        if (value!=0)   /* 0 => use default */
120923 +            BOUNDCHECK(ZSTD_c_searchLog, value);
120924 +        CCtxParams->cParams.searchLog = (U32)value;
120925 +        return (size_t)value;
120927 +    case ZSTD_c_minMatch :
120928 +        if (value!=0)   /* 0 => use default */
120929 +            BOUNDCHECK(ZSTD_c_minMatch, value);
120930 +        CCtxParams->cParams.minMatch = value;
120931 +        return CCtxParams->cParams.minMatch;
120933 +    case ZSTD_c_targetLength :
120934 +        BOUNDCHECK(ZSTD_c_targetLength, value);
120935 +        CCtxParams->cParams.targetLength = value;
120936 +        return CCtxParams->cParams.targetLength;
120938 +    case ZSTD_c_strategy :
120939 +        if (value!=0)   /* 0 => use default */
120940 +            BOUNDCHECK(ZSTD_c_strategy, value);
120941 +        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
120942 +        return (size_t)CCtxParams->cParams.strategy;
120944 +    case ZSTD_c_contentSizeFlag :
120945 +        /* Content size written in frame header _when known_ (default:1) */
120946 +        DEBUGLOG(4, "set content size flag = %u", (value!=0));
120947 +        CCtxParams->fParams.contentSizeFlag = value != 0;
120948 +        return CCtxParams->fParams.contentSizeFlag;
120950 +    case ZSTD_c_checksumFlag :
120951 +        /* A 32-bit content checksum will be calculated and written at the end of the frame (default:0) */
120952 +        CCtxParams->fParams.checksumFlag = value != 0;
120953 +        return CCtxParams->fParams.checksumFlag;
120955 +    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
120956 +        DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
120957 +        CCtxParams->fParams.noDictIDFlag = !value;
120958 +        return !CCtxParams->fParams.noDictIDFlag;
120960 +    case ZSTD_c_forceMaxWindow :
120961 +        CCtxParams->forceWindow = (value != 0);
120962 +        return CCtxParams->forceWindow;
120964 +    case ZSTD_c_forceAttachDict : {
120965 +        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
120966 +        BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
120967 +        CCtxParams->attachDictPref = pref;
120968 +        return CCtxParams->attachDictPref;
120969 +    }
120971 +    case ZSTD_c_literalCompressionMode : {
120972 +        const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
120973 +        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
120974 +        CCtxParams->literalCompressionMode = lcm;
120975 +        return CCtxParams->literalCompressionMode;
120976 +    }
120978 +    case ZSTD_c_nbWorkers :
120979 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
120980 +        return 0;
120982 +    case ZSTD_c_jobSize :
120983 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
120984 +        return 0;
120986 +    case ZSTD_c_overlapLog :
120987 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
120988 +        return 0;
120990 +    case ZSTD_c_rsyncable :
120991 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
120992 +        return 0;
120994 +    case ZSTD_c_enableDedicatedDictSearch :
120995 +        CCtxParams->enableDedicatedDictSearch = (value!=0);
120996 +        return CCtxParams->enableDedicatedDictSearch;
120998 +    case ZSTD_c_enableLongDistanceMatching :
120999 +        CCtxParams->ldmParams.enableLdm = (value!=0);
121000 +        return CCtxParams->ldmParams.enableLdm;
121002 +    case ZSTD_c_ldmHashLog :
121003 +        if (value!=0)   /* 0 ==> auto */
121004 +            BOUNDCHECK(ZSTD_c_ldmHashLog, value);
121005 +        CCtxParams->ldmParams.hashLog = value;
121006 +        return CCtxParams->ldmParams.hashLog;
121008 +    case ZSTD_c_ldmMinMatch :
121009 +        if (value!=0)   /* 0 ==> default */
121010 +            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
121011 +        CCtxParams->ldmParams.minMatchLength = value;
121012 +        return CCtxParams->ldmParams.minMatchLength;
121014 +    case ZSTD_c_ldmBucketSizeLog :
121015 +        if (value!=0)   /* 0 ==> default */
121016 +            BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
121017 +        CCtxParams->ldmParams.bucketSizeLog = value;
121018 +        return CCtxParams->ldmParams.bucketSizeLog;
121020 +    case ZSTD_c_ldmHashRateLog :
121021 +        RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
121022 +                        parameter_outOfBound, "Param out of bounds!");
121023 +        CCtxParams->ldmParams.hashRateLog = value;
121024 +        return CCtxParams->ldmParams.hashRateLog;
121026 +    case ZSTD_c_targetCBlockSize :
121027 +        if (value!=0)   /* 0 ==> default */
121028 +            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
121029 +        CCtxParams->targetCBlockSize = value;
121030 +        return CCtxParams->targetCBlockSize;
121032 +    case ZSTD_c_srcSizeHint :
121033 +        if (value!=0)    /* 0 ==> default */
121034 +            BOUNDCHECK(ZSTD_c_srcSizeHint, value);
121035 +        CCtxParams->srcSizeHint = value;
121036 +        return CCtxParams->srcSizeHint;
121038 +    case ZSTD_c_stableInBuffer:
121039 +        BOUNDCHECK(ZSTD_c_stableInBuffer, value);
121040 +        CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
121041 +        return CCtxParams->inBufferMode;
121043 +    case ZSTD_c_stableOutBuffer:
121044 +        BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
121045 +        CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
121046 +        return CCtxParams->outBufferMode;
121048 +    case ZSTD_c_blockDelimiters:
121049 +        BOUNDCHECK(ZSTD_c_blockDelimiters, value);
121050 +        CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
121051 +        return CCtxParams->blockDelimiters;
121053 +    case ZSTD_c_validateSequences:
121054 +        BOUNDCHECK(ZSTD_c_validateSequences, value);
121055 +        CCtxParams->validateSequences = value;
121056 +        return CCtxParams->validateSequences;
121058 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
121059 +    }
121062 +size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
121064 +    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
121067 +size_t ZSTD_CCtxParams_getParameter(
121068 +        ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
121070 +    switch(param)
121071 +    {
121072 +    case ZSTD_c_format :
121073 +        *value = CCtxParams->format;
121074 +        break;
121075 +    case ZSTD_c_compressionLevel :
121076 +        *value = CCtxParams->compressionLevel;
121077 +        break;
121078 +    case ZSTD_c_windowLog :
121079 +        *value = (int)CCtxParams->cParams.windowLog;
121080 +        break;
121081 +    case ZSTD_c_hashLog :
121082 +        *value = (int)CCtxParams->cParams.hashLog;
121083 +        break;
121084 +    case ZSTD_c_chainLog :
121085 +        *value = (int)CCtxParams->cParams.chainLog;
121086 +        break;
121087 +    case ZSTD_c_searchLog :
121088 +        *value = CCtxParams->cParams.searchLog;
121089 +        break;
121090 +    case ZSTD_c_minMatch :
121091 +        *value = CCtxParams->cParams.minMatch;
121092 +        break;
121093 +    case ZSTD_c_targetLength :
121094 +        *value = CCtxParams->cParams.targetLength;
121095 +        break;
121096 +    case ZSTD_c_strategy :
121097 +        *value = (unsigned)CCtxParams->cParams.strategy;
121098 +        break;
121099 +    case ZSTD_c_contentSizeFlag :
121100 +        *value = CCtxParams->fParams.contentSizeFlag;
121101 +        break;
121102 +    case ZSTD_c_checksumFlag :
121103 +        *value = CCtxParams->fParams.checksumFlag;
121104 +        break;
121105 +    case ZSTD_c_dictIDFlag :
121106 +        *value = !CCtxParams->fParams.noDictIDFlag;
121107 +        break;
121108 +    case ZSTD_c_forceMaxWindow :
121109 +        *value = CCtxParams->forceWindow;
121110 +        break;
121111 +    case ZSTD_c_forceAttachDict :
121112 +        *value = CCtxParams->attachDictPref;
121113 +        break;
121114 +    case ZSTD_c_literalCompressionMode :
121115 +        *value = CCtxParams->literalCompressionMode;
121116 +        break;
121117 +    case ZSTD_c_nbWorkers :
121118 +        assert(CCtxParams->nbWorkers == 0);
121119 +        *value = CCtxParams->nbWorkers;
121120 +        break;
121121 +    case ZSTD_c_jobSize :
121122 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
121123 +    case ZSTD_c_overlapLog :
121124 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
121125 +    case ZSTD_c_rsyncable :
121126 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
121127 +    case ZSTD_c_enableDedicatedDictSearch :
121128 +        *value = CCtxParams->enableDedicatedDictSearch;
121129 +        break;
121130 +    case ZSTD_c_enableLongDistanceMatching :
121131 +        *value = CCtxParams->ldmParams.enableLdm;
121132 +        break;
121133 +    case ZSTD_c_ldmHashLog :
121134 +        *value = CCtxParams->ldmParams.hashLog;
121135 +        break;
121136 +    case ZSTD_c_ldmMinMatch :
121137 +        *value = CCtxParams->ldmParams.minMatchLength;
121138 +        break;
121139 +    case ZSTD_c_ldmBucketSizeLog :
121140 +        *value = CCtxParams->ldmParams.bucketSizeLog;
121141 +        break;
121142 +    case ZSTD_c_ldmHashRateLog :
121143 +        *value = CCtxParams->ldmParams.hashRateLog;
121144 +        break;
121145 +    case ZSTD_c_targetCBlockSize :
121146 +        *value = (int)CCtxParams->targetCBlockSize;
121147 +        break;
121148 +    case ZSTD_c_srcSizeHint :
121149 +        *value = (int)CCtxParams->srcSizeHint;
121150 +        break;
121151 +    case ZSTD_c_stableInBuffer :
121152 +        *value = (int)CCtxParams->inBufferMode;
121153 +        break;
121154 +    case ZSTD_c_stableOutBuffer :
121155 +        *value = (int)CCtxParams->outBufferMode;
121156 +        break;
121157 +    case ZSTD_c_blockDelimiters :
121158 +        *value = (int)CCtxParams->blockDelimiters;
121159 +        break;
121160 +    case ZSTD_c_validateSequences :
121161 +        *value = (int)CCtxParams->validateSequences;
121162 +        break;
121163 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
121164 +    }
121165 +    return 0;
121168 +/** ZSTD_CCtx_setParametersUsingCCtxParams() :
121169 + *  just copies `params` into `cctx`.
121170 + *  No action is performed; the parameters are merely stored.
121171 + *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
121172 + *    This is possible even while a compression is ongoing,
121173 + *    in which case the new parameters are applied on the fly, starting with the next compression job.
121174 + */
121175 +size_t ZSTD_CCtx_setParametersUsingCCtxParams(
121176 +        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
121178 +    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
121179 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
121180 +                    "The context is in the wrong stage!");
121181 +    RETURN_ERROR_IF(cctx->cdict, stage_wrong,
121182 +                    "Can't override parameters with cdict attached (some must "
121183 +                    "be inherited from the cdict).");
121185 +    cctx->requestedParams = *params;
121186 +    return 0;
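+/* Editorial sketch, not part of the original patch : a caller would normally
+ * fill a ZSTD_CCtx_params object first, then apply it in one shot. Assuming
+ * the upstream zstd.h advanced API (ZSTD_createCCtxParams() and friends),
+ * with `cctx` a hypothetical, already-created compression context :
+ *
+ *     ZSTD_CCtx_params* const p = ZSTD_createCCtxParams();
+ *     ZSTD_CCtxParams_setParameter(p, ZSTD_c_compressionLevel, 19);
+ *     ZSTD_CCtxParams_setParameter(p, ZSTD_c_checksumFlag, 1);
+ *     size_t const r = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, p);
+ *     // r is an error once cctx has left the init stage
+ *     ZSTD_freeCCtxParams(p);
+ */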
121189 +ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
121191 +    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
121192 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
121193 +                    "Can't set pledgedSrcSize when not in init stage.");
121194 +    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
121195 +    return 0;
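+/* Editorial sketch, not part of the original patch : the pledge is made once
+ * per frame, before any input is consumed; `totalSize` below is hypothetical :
+ *
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ *     ZSTD_CCtx_setPledgedSrcSize(cctx, (unsigned long long)totalSize);
+ *     // ...then feed exactly totalSize bytes through ZSTD_compressStream2()
+ *
+ * The pledge lets the frame header record the content size up front;
+ * feeding a different amount fails the compression with srcSize_wrong.
+ */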
121198 +static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
121199 +        int const compressionLevel,
121200 +        size_t const dictSize);
121201 +static int ZSTD_dedicatedDictSearch_isSupported(
121202 +        const ZSTD_compressionParameters* cParams);
121203 +static void ZSTD_dedicatedDictSearch_revertCParams(
121204 +        ZSTD_compressionParameters* cParams);
121207 + * Initializes the local dict using the requested parameters.
121208 + * NOTE: This does not use the pledged src size, because it may be used for more
121209 + * than one compression.
121210 + */
121211 +static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
121213 +    ZSTD_localDict* const dl = &cctx->localDict;
121214 +    if (dl->dict == NULL) {
121215 +        /* No local dictionary. */
121216 +        assert(dl->dictBuffer == NULL);
121217 +        assert(dl->cdict == NULL);
121218 +        assert(dl->dictSize == 0);
121219 +        return 0;
121220 +    }
121221 +    if (dl->cdict != NULL) {
121222 +        assert(cctx->cdict == dl->cdict);
121223 +        /* Local dictionary already initialized. */
121224 +        return 0;
121225 +    }
121226 +    assert(dl->dictSize > 0);
121227 +    assert(cctx->cdict == NULL);
121228 +    assert(cctx->prefixDict.dict == NULL);
121230 +    dl->cdict = ZSTD_createCDict_advanced2(
121231 +            dl->dict,
121232 +            dl->dictSize,
121233 +            ZSTD_dlm_byRef,
121234 +            dl->dictContentType,
121235 +            &cctx->requestedParams,
121236 +            cctx->customMem);
121237 +    RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
121238 +    cctx->cdict = dl->cdict;
121239 +    return 0;
121242 +size_t ZSTD_CCtx_loadDictionary_advanced(
121243 +        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
121244 +        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
121246 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
121247 +                    "Can't load a dictionary when ctx is not in init stage.");
121248 +    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
121249 +    ZSTD_clearAllDicts(cctx);  /* in case one already exists */
121250 +    if (dict == NULL || dictSize == 0)  /* no dictionary mode */
121251 +        return 0;
121252 +    if (dictLoadMethod == ZSTD_dlm_byRef) {
121253 +        cctx->localDict.dict = dict;
121254 +    } else {
121255 +        void* dictBuffer;
121256 +        RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
121257 +                        "no malloc for static CCtx");
121258 +        dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
121259 +        RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
121260 +        ZSTD_memcpy(dictBuffer, dict, dictSize);
121261 +        cctx->localDict.dictBuffer = dictBuffer;
121262 +        cctx->localDict.dict = dictBuffer;
121263 +    }
121264 +    cctx->localDict.dictSize = dictSize;
121265 +    cctx->localDict.dictContentType = dictContentType;
121266 +    return 0;
121269 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
121270 +      ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
121272 +    return ZSTD_CCtx_loadDictionary_advanced(
121273 +            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
121276 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
121278 +    return ZSTD_CCtx_loadDictionary_advanced(
121279 +            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
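+/* Editorial sketch, not part of the original patch : the two wrappers above
+ * differ only in dictionary lifetime. `dictBuf`/`dictSize` are hypothetical :
+ *
+ *     ZSTD_CCtx_loadDictionary(cctx, dictBuf, dictSize);   // cctx keeps a copy
+ *     free(dictBuf);                                       // safe immediately
+ *
+ *     ZSTD_CCtx_loadDictionary_byReference(cctx, dictBuf, dictSize);
+ *     // dictBuf must now outlive every compression using this dictionary
+ */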
121283 +size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
121285 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
121286 +                    "Can't ref a dict when ctx not in init stage.");
121287 +    /* Free the existing local cdict (if any) to save memory. */
121288 +    ZSTD_clearAllDicts(cctx);
121289 +    cctx->cdict = cdict;
121290 +    return 0;
121293 +size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
121295 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
121296 +                    "Can't ref a pool when ctx not in init stage.");
121297 +    cctx->pool = pool;
121298 +    return 0;
121301 +size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
121303 +    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
121306 +size_t ZSTD_CCtx_refPrefix_advanced(
121307 +        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
121309 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
121310 +                    "Can't ref a prefix when ctx not in init stage.");
121311 +    ZSTD_clearAllDicts(cctx);
121312 +    if (prefix != NULL && prefixSize > 0) {
121313 +        cctx->prefixDict.dict = prefix;
121314 +        cctx->prefixDict.dictSize = prefixSize;
121315 +        cctx->prefixDict.dictContentType = dictContentType;
121316 +    }
121317 +    return 0;
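+/* Editorial sketch, not part of the original patch : unlike a loaded
+ * dictionary, a prefix is consumed by a single frame and must be referenced
+ * again before each new compression (chunk names below are hypothetical) :
+ *
+ *     ZSTD_CCtx_refPrefix(cctx, prevChunk, prevChunkSize);
+ *     ZSTD_compress2(cctx, dst, dstCapacity, curChunk, curChunkSize);
+ *     // next frame : call ZSTD_CCtx_refPrefix() again, or it runs prefix-less
+ */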
121320 +/*! ZSTD_CCtx_reset() :
121321 + *  Also dumps dictionary */
121322 +size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
121324 +    if ( (reset == ZSTD_reset_session_only)
121325 +      || (reset == ZSTD_reset_session_and_parameters) ) {
121326 +        cctx->streamStage = zcss_init;
121327 +        cctx->pledgedSrcSizePlusOne = 0;
121328 +    }
121329 +    if ( (reset == ZSTD_reset_parameters)
121330 +      || (reset == ZSTD_reset_session_and_parameters) ) {
121331 +        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
121332 +                        "Can't reset parameters only when not in init stage.");
121333 +        ZSTD_clearAllDicts(cctx);
121334 +        return ZSTD_CCtxParams_reset(&cctx->requestedParams);
121335 +    }
121336 +    return 0;
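+/* Editorial sketch, not part of the original patch : how the three public
+ * reset directives map onto the two blocks above :
+ *
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);           // abort frame, keep params + dict
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);             // drop params + dict (init stage only)
+ *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters); // back to a freshly created cctx
+ */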
121340 +/** ZSTD_checkCParams() :
121341 +    checks that CParam values remain within the authorized range.
121342 +    @return : 0, or an error code if one value is beyond the authorized range */
121343 +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
121345 +    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
121346 +    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
121347 +    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
121348 +    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
121349 +    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
121350 +    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
121351 +    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
121352 +    return 0;
121355 +/** ZSTD_clampCParams() :
121356 + *  clamp CParam values into the valid range.
121357 + *  @return : valid CParams */
121358 +static ZSTD_compressionParameters
121359 +ZSTD_clampCParams(ZSTD_compressionParameters cParams)
121361 +#   define CLAMP_TYPE(cParam, val, type) {                                \
121362 +        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
121363 +        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
121364 +        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
121365 +    }
121366 +#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
121367 +    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
121368 +    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
121369 +    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
121370 +    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
121371 +    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
121372 +    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
121373 +    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
121374 +    return cParams;
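+/* Editorial sketch, not part of the original patch : clamping consults the
+ * same limits that BOUNDCHECK() enforces, via the public bounds query :
+ *
+ *     ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_windowLog);
+ *     // a cParams.windowLog below b.lowerBound (or above b.upperBound) is
+ *     // silently moved to the nearest bound here, whereas ZSTD_checkCParams()
+ *     // would return an error for the same value.
+ */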
121377 +/** ZSTD_cycleLog() :
121378 + *  condition for correct operation : hashLog > 1 */
121379 +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
121381 +    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
121382 +    return hashLog - btScale;
121385 +/** ZSTD_dictAndWindowLog() :
121386 + * Returns an adjusted window log that is large enough to fit the source and the dictionary.
121387 + * The zstd format says that the entire dictionary is valid if one byte of the dictionary
121388 + * is within the window. The hashLog and chainLog therefore need to be large enough to reference
121389 + * both the dictionary and the window, so this adjusted dictAndWindowLog must be used when
121390 + * downsizing the hashLog and windowLog.
121391 + * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
121392 + */
121393 +static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
121395 +    const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
121396 +    /* No dictionary ==> No change */
121397 +    if (dictSize == 0) {
121398 +        return windowLog;
121399 +    }
121400 +    assert(windowLog <= ZSTD_WINDOWLOG_MAX);
121401 +    assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
121402 +    {
121403 +        U64 const windowSize = 1ULL << windowLog;
121404 +        U64 const dictAndWindowSize = dictSize + windowSize;
121405 +        /* If the window size is already large enough to fit both the source and the dictionary
121406 +         * then just use the window size. Otherwise adjust so that it fits the dictionary and
121407 +         * the window.
121408 +         */
121409 +        if (windowSize >= dictSize + srcSize) {
121410 +            return windowLog; /* Window size large enough already */
121411 +        } else if (dictAndWindowSize >= maxWindowSize) {
121412 +            return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
121413 +        } else  {
121414 +            return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
121415 +        }
121416 +    }
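+/* Editorial worked example, not part of the original patch : with
+ * windowLog = 20 (1 MiB window), dictSize = 512 KiB and srcSize = 2 MiB,
+ * windowSize (1 MiB) < dictSize + srcSize (2.5 MiB), and
+ * dictAndWindowSize = 1.5 MiB stays below the max window, so the function
+ * returns ZSTD_highbit32(1.5 MiB - 1) + 1 = 21 : a 2 MiB window, the smallest
+ * power of 2 that keeps the dictionary addressable alongside the window.
+ */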
121419 +/** ZSTD_adjustCParams_internal() :
121420 + *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).
121421 + *  mostly downsize to reduce memory consumption and initialization latency.
121422 + * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
121423 + * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
121424 + *  note : `srcSize==0` means 0!
121425 + *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
121426 +static ZSTD_compressionParameters
121427 +ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
121428 +                            unsigned long long srcSize,
121429 +                            size_t dictSize,
121430 +                            ZSTD_cParamMode_e mode)
121432 +    const U64 minSrcSize = 513; /* (1<<9) + 1 */
121433 +    const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
121434 +    assert(ZSTD_checkCParams(cPar)==0);
121436 +    switch (mode) {
121437 +    case ZSTD_cpm_unknown:
121438 +    case ZSTD_cpm_noAttachDict:
121439 +        /* If we don't know the source size, don't make any
121440 +         * assumptions about it. We will already have selected
121441 +         * smaller parameters if a dictionary is in use.
121442 +         */
121443 +        break;
121444 +    case ZSTD_cpm_createCDict:
121445 +        /* Assume a small source size when creating a dictionary
121446 +         * with an unknown source size.
121447 +         */
121448 +        if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
121449 +            srcSize = minSrcSize;
121450 +        break;
121451 +    case ZSTD_cpm_attachDict:
121452 +        /* Dictionary has its own dedicated parameters which have
121453 +         * already been selected. We are selecting parameters
121454 +         * for only the source.
121455 +         */
121456 +        dictSize = 0;
121457 +        break;
121458 +    default:
121459 +        assert(0);
121460 +        break;
121461 +    }
121463 +    /* resize windowLog if input is small enough, to use less memory */
121464 +    if ( (srcSize < maxWindowResize)
121465 +      && (dictSize < maxWindowResize) )  {
121466 +        U32 const tSize = (U32)(srcSize + dictSize);
121467 +        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
121468 +        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
121469 +                            ZSTD_highbit32(tSize-1) + 1;
121470 +        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
121471 +    }
121472 +    if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
121473 +        U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
121474 +        U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
121475 +        if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
121476 +        if (cycleLog > dictAndWindowLog)
121477 +            cPar.chainLog -= (cycleLog - dictAndWindowLog);
121478 +    }
121480 +    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
121481 +        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */
121483 +    return cPar;
121486 +ZSTD_compressionParameters
121487 +ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
121488 +                   unsigned long long srcSize,
121489 +                   size_t dictSize)
121491 +    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
121492 +    if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
121493 +    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
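+/* Editorial sketch, not part of the original patch : how a caller shrinks
+ * parameters for a known-small input through this public entry point :
+ *
+ *     ZSTD_compressionParameters cp = ZSTD_getCParams(19, 0, 0);
+ *     cp = ZSTD_adjustCParams(cp, 4096, 0);   // 4 KiB input, no dictionary
+ *     // cp.windowLog now covers only what 4 KiB needs, cutting table memory
+ *     // and initialization latency without hurting ratio on that input.
+ */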
121496 +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
121497 +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
121499 +static void ZSTD_overrideCParams(
121500 +              ZSTD_compressionParameters* cParams,
121501 +        const ZSTD_compressionParameters* overrides)
121503 +    if (overrides->windowLog)    cParams->windowLog    = overrides->windowLog;
121504 +    if (overrides->hashLog)      cParams->hashLog      = overrides->hashLog;
121505 +    if (overrides->chainLog)     cParams->chainLog     = overrides->chainLog;
121506 +    if (overrides->searchLog)    cParams->searchLog    = overrides->searchLog;
121507 +    if (overrides->minMatch)     cParams->minMatch     = overrides->minMatch;
121508 +    if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
121509 +    if (overrides->strategy)     cParams->strategy     = overrides->strategy;
121512 +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
121513 +        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
121515 +    ZSTD_compressionParameters cParams;
121516 +    if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
121517 +      srcSizeHint = CCtxParams->srcSizeHint;
121518 +    }
121519 +    cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
121520 +    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
121521 +    ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
121522 +    assert(!ZSTD_checkCParams(cParams));
121523 +    /* srcSizeHint == 0 means 0 */
121524 +    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
121527 +static size_t
121528 +ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
121529 +                       const U32 forCCtx)
121531 +    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
121532 +    size_t const hSize = ((size_t)1) << cParams->hashLog;
121533 +    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
121534 +    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
121535 +    /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
121536 +     * surrounded by redzones in ASAN. */
121537 +    size_t const tableSpace = chainSize * sizeof(U32)
121538 +                            + hSize * sizeof(U32)
121539 +                            + h3Size * sizeof(U32);
121540 +    size_t const optPotentialSpace =
121541 +        ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
121542 +      + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
121543 +      + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
121544 +      + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
121545 +      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
121546 +      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
121547 +    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
121548 +                                ? optPotentialSpace
121549 +                                : 0;
121550 +    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
121551 +                (U32)chainSize, (U32)hSize, (U32)h3Size);
121552 +    return tableSpace + optSpace;
121555 +static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
121556 +        const ZSTD_compressionParameters* cParams,
121557 +        const ldmParams_t* ldmParams,
121558 +        const int isStatic,
121559 +        const size_t buffInSize,
121560 +        const size_t buffOutSize,
121561 +        const U64 pledgedSrcSize)
121563 +    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize));
121564 +    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
121565 +    U32    const divider = (cParams->minMatch==3) ? 3 : 4;
121566 +    size_t const maxNbSeq = blockSize / divider;
121567 +    size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
121568 +                            + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
121569 +                            + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
121570 +    size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
121571 +    size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
121572 +    size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, /* forCCtx */ 1);
121574 +    size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
121575 +    size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
121576 +    size_t const ldmSeqSpace = ldmParams->enableLdm ?
121577 +        ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
121580 +    size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
121581 +                             + ZSTD_cwksp_alloc_size(buffOutSize);
121583 +    size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
121585 +    size_t const neededSpace =
121586 +        cctxSpace +
121587 +        entropySpace +
121588 +        blockStateSpace +
121589 +        ldmSpace +
121590 +        ldmSeqSpace +
121591 +        matchStateSize +
121592 +        tokenSpace +
121593 +        bufferSpace;
121595 +    DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
121596 +    return neededSpace;
121599 +size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
121601 +    ZSTD_compressionParameters const cParams =
121602 +                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
121604 +    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
121605 +    /* estimateCCtxSize is for one-shot compression. So no buffers should
121606 +     * be needed. However, we still allocate two 0-sized buffers, which can
121607 +     * take space under ASAN. */
121608 +    return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
121609 +        &cParams, &params->ldmParams, 1, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
121612 +size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
121614 +    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
121615 +    return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
121618 +static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
121620 +    int tier = 0;
121621 +    size_t largestSize = 0;
121622 +    static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
121623 +    for (; tier < 4; ++tier) {
121624 +        /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
121625 +        ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
121626 +        largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
121627 +    }
121628 +    return largestSize;
121631 +size_t ZSTD_estimateCCtxSize(int compressionLevel)
121633 +    int level;
121634 +    size_t memBudget = 0;
121635 +    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
121636 +        /* Ensure monotonically increasing memory usage as compression level increases */
121637 +        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
121638 +        if (newMB > memBudget) memBudget = newMB;
121639 +    }
121640 +    return memBudget;
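+/* Editorial sketch, not part of the original patch : these estimators pair
+ * with the static-allocation API; `level` and `mem` are hypothetical :
+ *
+ *     size_t const need = ZSTD_estimateCCtxSize(level);
+ *     void* const mem = malloc(need);          // or any suitably aligned buffer
+ *     ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(mem, need);
+ *     // cctx can now run one-shot compression at `level` with no further allocation
+ */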
121643 +size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
121645 +    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
121646 +    {   ZSTD_compressionParameters const cParams =
121647 +                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
121648 +        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
121649 +        size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
121650 +                ? ((size_t)1 << cParams.windowLog) + blockSize
121651 +                : 0;
121652 +        size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
121653 +                ? ZSTD_compressBound(blockSize) + 1
121654 +                : 0;
121656 +        return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
121657 +            &cParams, &params->ldmParams, 1, inBuffSize, outBuffSize,
121658 +            ZSTD_CONTENTSIZE_UNKNOWN);
121659 +    }
121662 +size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
121664 +    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
121665 +    return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
121668 +static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
121670 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
121671 +    return ZSTD_estimateCStreamSize_usingCParams(cParams);
121674 +size_t ZSTD_estimateCStreamSize(int compressionLevel)
121676 +    int level;
121677 +    size_t memBudget = 0;
121678 +    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
121679 +        size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
121680 +        if (newMB > memBudget) memBudget = newMB;
121681 +    }
121682 +    return memBudget;
121685 +/* ZSTD_getFrameProgression():
121686 + * tells how much data has been consumed (input) and produced (output) for the current frame.
121687 + * Able to report progression even while worker threads are running (non-blocking mode).
121688 + */
121689 +ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
121691 +    {   ZSTD_frameProgression fp;
121692 +        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
121693 +                                cctx->inBuffPos - cctx->inToCompress;
121694 +        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
121695 +        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
121696 +        fp.ingested = cctx->consumedSrcSize + buffered;
121697 +        fp.consumed = cctx->consumedSrcSize;
121698 +        fp.produced = cctx->producedCSize;
121699 +        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
121700 +        fp.currentJobID = 0;
121701 +        fp.nbActiveWorkers = 0;
121702 +        return fp;
121703 +}   }
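+/* Editorial sketch, not part of the original patch : a caller can poll this
+ * between ZSTD_compressStream2() calls to drive a progress indicator :
+ *
+ *     ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
+ *     // fp.ingested : bytes read from the source so far (including buffered)
+ *     // fp.consumed : bytes actually compressed
+ *     // fp.produced : compressed bytes generated
+ *     // in this single-threaded build, currentJobID / nbActiveWorkers stay 0
+ */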
121705 +/*! ZSTD_toFlushNow()
121706 + *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
121707 + */
121708 +size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
121710 +    (void)cctx;
121711 +    return 0;   /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
121714 +static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
121715 +                                    ZSTD_compressionParameters cParams2)
121717 +    (void)cParams1;
121718 +    (void)cParams2;
121719 +    assert(cParams1.windowLog    == cParams2.windowLog);
121720 +    assert(cParams1.chainLog     == cParams2.chainLog);
121721 +    assert(cParams1.hashLog      == cParams2.hashLog);
121722 +    assert(cParams1.searchLog    == cParams2.searchLog);
121723 +    assert(cParams1.minMatch     == cParams2.minMatch);
121724 +    assert(cParams1.targetLength == cParams2.targetLength);
121725 +    assert(cParams1.strategy     == cParams2.strategy);
121728 +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
121730 +    int i;
121731 +    for (i = 0; i < ZSTD_REP_NUM; ++i)
121732 +        bs->rep[i] = repStartValue[i];
121733 +    bs->entropy.huf.repeatMode = HUF_repeat_none;
121734 +    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
121735 +    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
121736 +    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
121739 +/*! ZSTD_invalidateMatchState()
121740 + *  Invalidate all the matches in the match finder tables.
121741 + *  Requires nextSrc and base to be set (can be NULL).
121742 + */
121743 +static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
121745 +    ZSTD_window_clear(&ms->window);
121747 +    ms->nextToUpdate = ms->window.dictLimit;
121748 +    ms->loadedDictEnd = 0;
121749 +    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
121750 +    ms->dictMatchState = NULL;
121754 + * Controls, for this matchState reset, whether the tables need to be cleared /
121755 + * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
121756 + * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
121757 + * subsequent operation will overwrite the table space anyway (e.g., copying
121758 + * the matchState contents in from a CDict).
121759 + */
121760 +typedef enum {
121761 +    ZSTDcrp_makeClean,
121762 +    ZSTDcrp_leaveDirty
121763 +} ZSTD_compResetPolicy_e;
121766 + * Controls, for this matchState reset, whether indexing can continue where it
121767 + * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
121768 + * (ZSTDirp_reset).
121769 + */
121770 +typedef enum {
121771 +    ZSTDirp_continue,
121772 +    ZSTDirp_reset
121773 +} ZSTD_indexResetPolicy_e;
121775 +typedef enum {
121776 +    ZSTD_resetTarget_CDict,
121777 +    ZSTD_resetTarget_CCtx
121778 +} ZSTD_resetTarget_e;
121780 +static size_t
121781 +ZSTD_reset_matchState(ZSTD_matchState_t* ms,
121782 +                      ZSTD_cwksp* ws,
121783 +                const ZSTD_compressionParameters* cParams,
121784 +                const ZSTD_compResetPolicy_e crp,
121785 +                const ZSTD_indexResetPolicy_e forceResetIndex,
121786 +                const ZSTD_resetTarget_e forWho)
121788 +    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
121789 +    size_t const hSize = ((size_t)1) << cParams->hashLog;
121790 +    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
121791 +    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
121793 +    DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
121794 +    if (forceResetIndex == ZSTDirp_reset) {
121795 +        ZSTD_window_init(&ms->window);
121796 +        ZSTD_cwksp_mark_tables_dirty(ws);
121797 +    }
121799 +    ms->hashLog3 = hashLog3;
121801 +    ZSTD_invalidateMatchState(ms);
121803 +    assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
121805 +    ZSTD_cwksp_clear_tables(ws);
121807 +    DEBUGLOG(5, "reserving table space");
121808 +    /* table Space */
121809 +    ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
121810 +    ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
121811 +    ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
121812 +    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
121813 +                    "failed a workspace allocation in ZSTD_reset_matchState");
121815 +    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
121816 +    if (crp!=ZSTDcrp_leaveDirty) {
121817 +        /* reset tables only */
121818 +        ZSTD_cwksp_clean_tables(ws);
121819 +    }
121821 +    /* opt parser space */
121822 +    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
121823 +        DEBUGLOG(4, "reserving optimal parser space");
121824 +        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
121825 +        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
121826 +        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
121827 +        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
121828 +        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
121829 +        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
121830 +    }
121832 +    ms->cParams = *cParams;
121834 +    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
121835 +                    "failed a workspace allocation in ZSTD_reset_matchState");
121837 +    return 0;
121840 +/* ZSTD_indexTooCloseToMax() :
121841 + * minor optimization : prefer memset() over reduceIndex(),
121842 + * which is measurably slow in some circumstances (reported for Visual Studio).
121843 + * Works when re-using a context for a lot of smallish inputs :
121844 + * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
121845 + * memset() will be triggered before reduceIndex().
121846 + */
121847 +#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
121848 +static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
121850 +    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
121853 +/*! ZSTD_resetCCtx_internal() :
121854 +    note : `params` are assumed fully validated at this stage */
121855 +static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
121856 +                                      ZSTD_CCtx_params params,
121857 +                                      U64 const pledgedSrcSize,
121858 +                                      ZSTD_compResetPolicy_e const crp,
121859 +                                      ZSTD_buffered_policy_e const zbuff)
121861 +    ZSTD_cwksp* const ws = &zc->workspace;
121862 +    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
121863 +                (U32)pledgedSrcSize, params.cParams.windowLog);
121864 +    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
121866 +    zc->isFirstBlock = 1;
121868 +    if (params.ldmParams.enableLdm) {
121869 +        /* Adjust long distance matching parameters */
121870 +        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
121871 +        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
121872 +        assert(params.ldmParams.hashRateLog < 32);
121873 +    }
121875 +    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
121876 +        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
121877 +        U32    const divider = (params.cParams.minMatch==3) ? 3 : 4;
121878 +        size_t const maxNbSeq = blockSize / divider;
121879 +        size_t const buffOutSize = (zbuff == ZSTDb_buffered && params.outBufferMode == ZSTD_bm_buffered)
121880 +                ? ZSTD_compressBound(blockSize) + 1
121881 +                : 0;
121882 +        size_t const buffInSize = (zbuff == ZSTDb_buffered && params.inBufferMode == ZSTD_bm_buffered)
121883 +                ? windowSize + blockSize
121884 +                : 0;
121885 +        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
121887 +        int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
121888 +        ZSTD_indexResetPolicy_e needsIndexReset =
121889 +            (!indexTooClose && zc->initialized) ? ZSTDirp_continue : ZSTDirp_reset;
121891 +        size_t const neededSpace =
121892 +            ZSTD_estimateCCtxSize_usingCCtxParams_internal(
121893 +                &params.cParams, &params.ldmParams, zc->staticSize != 0,
121894 +                buffInSize, buffOutSize, pledgedSrcSize);
121895 +        FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
121897 +        if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
121899 +        /* Check if workspace is large enough, alloc a new one if needed */
121900 +        {
121901 +            int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
121902 +            int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
121904 +            DEBUGLOG(4, "Need %zu B workspace", neededSpace);
121905 +            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
121907 +            if (workspaceTooSmall || workspaceWasteful) {
121908 +                DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
121909 +                            ZSTD_cwksp_sizeof(ws) >> 10,
121910 +                            neededSpace >> 10);
121912 +                RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
121914 +                needsIndexReset = ZSTDirp_reset;
121916 +                ZSTD_cwksp_free(ws, zc->customMem);
121917 +                FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
121919 +                DEBUGLOG(5, "reserving object space");
121920 +                /* Statically sized space.
121921 +                 * entropyWorkspace never moves,
121922 +                 * though prev/next block swap places */
121923 +                assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
121924 +                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
121925 +                RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
121926 +                zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
121927 +                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
121928 +                zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
121929 +                RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
121930 +        }   }
121932 +        ZSTD_cwksp_clear(ws);
121934 +        /* init params */
121935 +        zc->appliedParams = params;
121936 +        zc->blockState.matchState.cParams = params.cParams;
121937 +        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
121938 +        zc->consumedSrcSize = 0;
121939 +        zc->producedCSize = 0;
121940 +        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
121941 +            zc->appliedParams.fParams.contentSizeFlag = 0;
121942 +        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
121943 +            (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
121944 +        zc->blockSize = blockSize;
121946 +        xxh64_reset(&zc->xxhState, 0);
121947 +        zc->stage = ZSTDcs_init;
121948 +        zc->dictID = 0;
121949 +        zc->dictContentSize = 0;
121951 +        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
121953 +        /* ZSTD_wildcopy() is used to copy into the literals buffer,
121954 +         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
121955 +         */
121956 +        zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
121957 +        zc->seqStore.maxNbLit = blockSize;
121959 +        /* buffers */
121960 +        zc->bufferedPolicy = zbuff;
121961 +        zc->inBuffSize = buffInSize;
121962 +        zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
121963 +        zc->outBuffSize = buffOutSize;
121964 +        zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
121966 +        /* ldm bucketOffsets table */
121967 +        if (params.ldmParams.enableLdm) {
121968 +            /* TODO: avoid memset? */
121969 +            size_t const numBuckets =
121970 +                  ((size_t)1) << (params.ldmParams.hashLog -
121971 +                                  params.ldmParams.bucketSizeLog);
121972 +            zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
121973 +            ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
121974 +        }
121976 +        /* sequences storage */
121977 +        ZSTD_referenceExternalSequences(zc, NULL, 0);
121978 +        zc->seqStore.maxNbSeq = maxNbSeq;
121979 +        zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
121980 +        zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
121981 +        zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
121982 +        zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
121984 +        FORWARD_IF_ERROR(ZSTD_reset_matchState(
121985 +            &zc->blockState.matchState,
121986 +            ws,
121987 +            &params.cParams,
121988 +            crp,
121989 +            needsIndexReset,
121990 +            ZSTD_resetTarget_CCtx), "");
121992 +        /* ldm hash table */
121993 +        if (params.ldmParams.enableLdm) {
121994 +            /* TODO: avoid memset? */
121995 +            size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
121996 +            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
121997 +            ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
121998 +            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
121999 +            zc->maxNbLdmSequences = maxNbLdmSeq;
122001 +            ZSTD_window_init(&zc->ldmState.window);
122002 +            ZSTD_window_clear(&zc->ldmState.window);
122003 +            zc->ldmState.loadedDictEnd = 0;
122004 +        }
122006 +        /* When reusing a workspace, alignment can actually consume
122007 +         * up to 3 extra bytes. See the comments in zstd_cwksp.h
122008 +         */
122009 +        assert(ZSTD_cwksp_used(ws) >= neededSpace &&
122010 +               ZSTD_cwksp_used(ws) <= neededSpace + 3);
122012 +        DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
122013 +        zc->initialized = 1;
122015 +        return 0;
122016 +    }
122019 +/* ZSTD_invalidateRepCodes() :
122020 + * ensures next compression will not use repcodes from previous block.
122021 + * Note : only works with regular variant;
122022 + *        do not use with extDict variant ! */
122023 +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
122024 +    int i;
122025 +    for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
122026 +    assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
122029 +/* These are the approximate sizes for each strategy past which copying the
122030 + * dictionary tables into the working context is faster than using them
122031 + * in-place.
122032 + */
122033 +static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
122034 +    8 KB,  /* unused */
122035 +    8 KB,  /* ZSTD_fast */
122036 +    16 KB, /* ZSTD_dfast */
122037 +    32 KB, /* ZSTD_greedy */
122038 +    32 KB, /* ZSTD_lazy */
122039 +    32 KB, /* ZSTD_lazy2 */
122040 +    32 KB, /* ZSTD_btlazy2 */
122041 +    32 KB, /* ZSTD_btopt */
122042 +    8 KB,  /* ZSTD_btultra */
122043 +    8 KB   /* ZSTD_btultra2 */
122046 +static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
122047 +                                 const ZSTD_CCtx_params* params,
122048 +                                 U64 pledgedSrcSize)
122050 +    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
122051 +    int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
122052 +    return dedicatedDictSearch
122053 +        || ( ( pledgedSrcSize <= cutoff
122054 +            || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
122055 +            || params->attachDictPref == ZSTD_dictForceAttach )
122056 +          && params->attachDictPref != ZSTD_dictForceCopy
122057 +          && !params->forceWindow ); /* dictMatchState isn't correctly
122058 +                                      * handled in _enforceMaxDist */
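+/* Editorial sketch, not part of the original patch : the heuristic above can
+ * be overridden per context through the public parameter :
+ *
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach);
+ *     // always reference the cdict tables in place (cheap init, slower search)
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceCopy);
+ *     // always copy them into the working context (costly init, faster search)
+ */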
122061 +static size_t
122062 +ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
122063 +                        const ZSTD_CDict* cdict,
122064 +                        ZSTD_CCtx_params params,
122065 +                        U64 pledgedSrcSize,
122066 +                        ZSTD_buffered_policy_e zbuff)
122068 +    {
122069 +        ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
122070 +        unsigned const windowLog = params.cParams.windowLog;
122071 +        assert(windowLog != 0);
122072 +        /* Resize working context table params for input only, since the dict
122073 +         * has its own tables. */
122074 +        /* pledgedSrcSize == 0 means 0! */
122076 +        if (cdict->matchState.dedicatedDictSearch) {
122077 +            ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
122078 +        }
122080 +        params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
122081 +                                                     cdict->dictContentSize, ZSTD_cpm_attachDict);
122082 +        params.cParams.windowLog = windowLog;
122083 +        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
122084 +                                                 ZSTDcrp_makeClean, zbuff), "");
122085 +        assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
122086 +    }
122088 +    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
122089 +                                  - cdict->matchState.window.base);
122090 +        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
122091 +        if (cdictLen == 0) {
122092 +            /* don't even attach dictionaries with no contents */
122093 +            DEBUGLOG(4, "skipping attaching empty dictionary");
122094 +        } else {
122095 +            DEBUGLOG(4, "attaching dictionary into context");
122096 +            cctx->blockState.matchState.dictMatchState = &cdict->matchState;
122098 +            /* prep working match state so dict matches never have negative indices
122099 +             * when they are translated to the working context's index space. */
122100 +            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
122101 +                cctx->blockState.matchState.window.nextSrc =
122102 +                    cctx->blockState.matchState.window.base + cdictEnd;
122103 +                ZSTD_window_clear(&cctx->blockState.matchState.window);
122104 +            }
122105 +            /* loadedDictEnd is expressed within the referential of the active context */
122106 +            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
122107 +    }   }
122109 +    cctx->dictID = cdict->dictID;
122110 +    cctx->dictContentSize = cdict->dictContentSize;
122112 +    /* copy block state */
122113 +    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
122115 +    return 0;
122118 +static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
122119 +                            const ZSTD_CDict* cdict,
122120 +                            ZSTD_CCtx_params params,
122121 +                            U64 pledgedSrcSize,
122122 +                            ZSTD_buffered_policy_e zbuff)
122124 +    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
122126 +    assert(!cdict->matchState.dedicatedDictSearch);
122128 +    DEBUGLOG(4, "copying dictionary into context");
122130 +    {   unsigned const windowLog = params.cParams.windowLog;
122131 +        assert(windowLog != 0);
122132 +        /* Copy only compression parameters related to tables. */
122133 +        params.cParams = *cdict_cParams;
122134 +        params.cParams.windowLog = windowLog;
122135 +        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
122136 +                                                 ZSTDcrp_leaveDirty, zbuff), "");
122137 +        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
122138 +        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
122139 +        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
122140 +    }
122142 +    ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
122144 +    /* copy tables */
122145 +    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
122146 +        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;
122148 +        ZSTD_memcpy(cctx->blockState.matchState.hashTable,
122149 +               cdict->matchState.hashTable,
122150 +               hSize * sizeof(U32));
122151 +        ZSTD_memcpy(cctx->blockState.matchState.chainTable,
122152 +               cdict->matchState.chainTable,
122153 +               chainSize * sizeof(U32));
122154 +    }
122156 +    /* Zero the hashTable3, since the cdict never fills it */
122157 +    {   int const h3log = cctx->blockState.matchState.hashLog3;
122158 +        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
122159 +        assert(cdict->matchState.hashLog3 == 0);
122160 +        ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
122161 +    }
122163 +    ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
122165 +    /* copy dictionary offsets */
122166 +    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
122167 +        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
122168 +        dstMatchState->window       = srcMatchState->window;
122169 +        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
122170 +        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
122171 +    }
122173 +    cctx->dictID = cdict->dictID;
122174 +    cctx->dictContentSize = cdict->dictContentSize;
122176 +    /* copy block state */
122177 +    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
122179 +    return 0;
122182 +/* We have a choice between copying the dictionary context into the working
122183 + * context and referencing the dictionary context from the working context
122184 + * in-place. We decide here which strategy to use. */
122185 +static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
122186 +                            const ZSTD_CDict* cdict,
122187 +                            const ZSTD_CCtx_params* params,
122188 +                            U64 pledgedSrcSize,
122189 +                            ZSTD_buffered_policy_e zbuff)
122192 +    DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
122193 +                (unsigned)pledgedSrcSize);
122195 +    if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
122196 +        return ZSTD_resetCCtx_byAttachingCDict(
122197 +            cctx, cdict, *params, pledgedSrcSize, zbuff);
122198 +    } else {
122199 +        return ZSTD_resetCCtx_byCopyingCDict(
122200 +            cctx, cdict, *params, pledgedSrcSize, zbuff);
122201 +    }
122204 +/*! ZSTD_copyCCtx_internal() :
122205 + *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
122206 + *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
122207 + *  The "context", in this case, refers to the hash and chain tables,
122208 + *  entropy tables, and dictionary references.
122209 + * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
122210 + * @return : 0, or an error code */
122211 +static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
122212 +                            const ZSTD_CCtx* srcCCtx,
122213 +                            ZSTD_frameParameters fParams,
122214 +                            U64 pledgedSrcSize,
122215 +                            ZSTD_buffered_policy_e zbuff)
122217 +    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
122218 +    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
122219 +                    "Can't copy a ctx that's not in init stage.");
122221 +    ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
122222 +    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
122223 +        /* Copy only compression parameters related to tables. */
122224 +        params.cParams = srcCCtx->appliedParams.cParams;
122225 +        params.fParams = fParams;
122226 +        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
122227 +                                ZSTDcrp_leaveDirty, zbuff);
122228 +        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
122229 +        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
122230 +        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
122231 +        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
122232 +        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
122233 +    }
122235 +    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
122237 +    /* copy tables */
122238 +    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
122239 +        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
122240 +        int const h3log = srcCCtx->blockState.matchState.hashLog3;
122241 +        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
122243 +        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
122244 +               srcCCtx->blockState.matchState.hashTable,
122245 +               hSize * sizeof(U32));
122246 +        ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
122247 +               srcCCtx->blockState.matchState.chainTable,
122248 +               chainSize * sizeof(U32));
122249 +        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
122250 +               srcCCtx->blockState.matchState.hashTable3,
122251 +               h3Size * sizeof(U32));
122252 +    }
122254 +    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
122256 +    /* copy dictionary offsets */
122257 +    {
122258 +        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
122259 +        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
122260 +        dstMatchState->window       = srcMatchState->window;
122261 +        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
122262 +        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
122263 +    }
122264 +    dstCCtx->dictID = srcCCtx->dictID;
122265 +    dstCCtx->dictContentSize = srcCCtx->dictContentSize;
122267 +    /* copy block state */
122268 +    ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
122270 +    return 0;
122273 +/*! ZSTD_copyCCtx() :
122274 + *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
122275 + *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
122276 + *  pledgedSrcSize==0 means "unknown".
122277 + *  @return : 0, or an error code */
122278 +size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
122280 +    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
122281 +    ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
122282 +    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
122283 +    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
122284 +    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
122286 +    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
122287 +                                fParams, pledgedSrcSize,
122288 +                                zbuff);
122292 +#define ZSTD_ROWSIZE 16
122293 +/*! ZSTD_reduceTable() :
122294 + *  reduce table indexes by `reducerValue`, or squash to zero.
122295 + *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
122296 + *  It must be set to a constant 0/1 value, to remove the branch during inlining.
122297 + *  Presumes the table size is a multiple of ZSTD_ROWSIZE
122298 + *  to help auto-vectorization */
122299 +FORCE_INLINE_TEMPLATE void
122300 +ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
122302 +    int const nbRows = (int)size / ZSTD_ROWSIZE;
122303 +    int cellNb = 0;
122304 +    int rowNb;
122305 +    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
122306 +    assert(size < (1U<<31));   /* can be cast to int */
122309 +    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
122310 +        int column;
122311 +        for (column=0; column<ZSTD_ROWSIZE; column++) {
122312 +            if (preserveMark) {
122313 +                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
122314 +                table[cellNb] += adder;
122315 +            }
122316 +            if (table[cellNb] < reducerValue) table[cellNb] = 0;
122317 +            else table[cellNb] -= reducerValue;
122318 +            cellNb++;
122319 +    }   }
122322 +static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
122324 +    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
122327 +static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
122329 +    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
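
The per-cell update in ZSTD_reduceTable_internal() is a saturating subtraction: any index smaller than reducerValue is squashed to zero, everything else is rebased downward by reducerValue. A standalone sketch of the same arithmetic, with hypothetical values:

    static U32 reduce_cell(U32 cell, U32 reducerValue)
    {
        /* squash to zero on underflow, otherwise rebase */
        return (cell < reducerValue) ? 0 : cell - reducerValue;
    }
    /* reducerValue = 0x1000 : cell 0x0800 -> 0 ; cell 0x1800 -> 0x0800 */
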
122332 +/*! ZSTD_reduceIndex() :
122333 +*   rescale all indexes to avoid future overflow (indexes are U32) */
122334 +static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
122336 +    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
122337 +        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
122338 +    }
122340 +    if (params->cParams.strategy != ZSTD_fast) {
122341 +        U32 const chainSize = (U32)1 << params->cParams.chainLog;
122342 +        if (params->cParams.strategy == ZSTD_btlazy2)
122343 +            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
122344 +        else
122345 +            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
122346 +    }
122348 +    if (ms->hashLog3) {
122349 +        U32 const h3Size = (U32)1 << ms->hashLog3;
122350 +        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
122351 +    }
122355 +/*-*******************************************************
122356 +*  Block entropic compression
122357 +*********************************************************/
122359 +/* See doc/zstd_compression_format.md for detailed format description */
122361 +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
122363 +    const seqDef* const sequences = seqStorePtr->sequencesStart;
122364 +    BYTE* const llCodeTable = seqStorePtr->llCode;
122365 +    BYTE* const ofCodeTable = seqStorePtr->ofCode;
122366 +    BYTE* const mlCodeTable = seqStorePtr->mlCode;
122367 +    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
122368 +    U32 u;
122369 +    assert(nbSeq <= seqStorePtr->maxNbSeq);
122370 +    for (u=0; u<nbSeq; u++) {
122371 +        U32 const llv = sequences[u].litLength;
122372 +        U32 const mlv = sequences[u].matchLength;
122373 +        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
122374 +        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
122375 +        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
122376 +    }
122377 +    if (seqStorePtr->longLengthID==1)
122378 +        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
122379 +    if (seqStorePtr->longLengthID==2)
122380 +        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
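
As the loop above shows, an offset code is simply the position of the highest set bit of the stored offset value, while literal and match lengths are bucketed by ZSTD_LLcode()/ZSTD_MLcode(). A portable sketch of that highest-bit mapping; real builds use a BSR/clz intrinsic, so this fallback is illustrative only:

    static unsigned highbit32_sketch(U32 v)   /* requires v != 0 */
    {
        unsigned r = 0;
        while (v >>= 1) r++;
        return r;                  /* floor(log2(v)) */
    }
    /* offset value 1 -> code 0, 2..3 -> 1, 4..7 -> 2, ... */
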
122383 +/* ZSTD_useTargetCBlockSize():
122384 + * Returns whether the target compressed block size parameter is in use.
122385 + * If so, compression makes a best effort to produce compressed block sizes around targetCBlockSize.
122386 + * Returns 1 if true, 0 otherwise. */
122387 +static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
122389 +    DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
122390 +    return (cctxParams->targetCBlockSize != 0);
122393 +/* ZSTD_entropyCompressSequences_internal():
122394 + * actually compresses both literals and sequences */
122395 +MEM_STATIC size_t
122396 +ZSTD_entropyCompressSequences_internal(seqStore_t* seqStorePtr,
122397 +                          const ZSTD_entropyCTables_t* prevEntropy,
122398 +                                ZSTD_entropyCTables_t* nextEntropy,
122399 +                          const ZSTD_CCtx_params* cctxParams,
122400 +                                void* dst, size_t dstCapacity,
122401 +                                void* entropyWorkspace, size_t entropyWkspSize,
122402 +                          const int bmi2)
122404 +    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
122405 +    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
122406 +    unsigned* count = (unsigned*)entropyWorkspace;
122407 +    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
122408 +    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
122409 +    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
122410 +    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
122411 +    const seqDef* const sequences = seqStorePtr->sequencesStart;
122412 +    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
122413 +    const BYTE* const llCodeTable = seqStorePtr->llCode;
122414 +    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
122415 +    BYTE* const ostart = (BYTE*)dst;
122416 +    BYTE* const oend = ostart + dstCapacity;
122417 +    BYTE* op = ostart;
122418 +    size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
122419 +    BYTE* seqHead;
122420 +    BYTE* lastNCount = NULL;
122422 +    entropyWorkspace = count + (MaxSeq + 1);
122423 +    entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
122425 +    DEBUGLOG(4, "ZSTD_entropyCompressSequences_internal (nbSeq=%zu)", nbSeq);
122426 +    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
122427 +    assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
122429 +    /* Compress literals */
122430 +    {   const BYTE* const literals = seqStorePtr->litStart;
122431 +        size_t const litSize = (size_t)(seqStorePtr->lit - literals);
122432 +        size_t const cSize = ZSTD_compressLiterals(
122433 +                                    &prevEntropy->huf, &nextEntropy->huf,
122434 +                                    cctxParams->cParams.strategy,
122435 +                                    ZSTD_disableLiteralsCompression(cctxParams),
122436 +                                    op, dstCapacity,
122437 +                                    literals, litSize,
122438 +                                    entropyWorkspace, entropyWkspSize,
122439 +                                    bmi2);
122440 +        FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
122441 +        assert(cSize <= dstCapacity);
122442 +        op += cSize;
122443 +    }
122445 +    /* Sequences Header */
122446 +    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
122447 +                    dstSize_tooSmall, "Can't fit seq hdr in output buf!");
122448 +    if (nbSeq < 128) {
122449 +        *op++ = (BYTE)nbSeq;
122450 +    } else if (nbSeq < LONGNBSEQ) {
122451 +        op[0] = (BYTE)((nbSeq>>8) + 0x80);
122452 +        op[1] = (BYTE)nbSeq;
122453 +        op+=2;
122454 +    } else {
122455 +        op[0]=0xFF;
122456 +        MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
122457 +        op+=3;
122458 +    }
122459 +    assert(op <= oend);
122460 +    if (nbSeq==0) {
122461 +        /* Copy the old tables over as if we repeated them */
122462 +        ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
122463 +        return (size_t)(op - ostart);
122464 +    }
122466 +    /* seqHead : flags for FSE encoding type */
122467 +    seqHead = op++;
122468 +    assert(op <= oend);
122470 +    /* convert length/distances into codes */
122471 +    ZSTD_seqToCodes(seqStorePtr);
122472 +    /* build CTable for Literal Lengths */
122473 +    {   unsigned max = MaxLL;
122474 +        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
122475 +        DEBUGLOG(5, "Building LL table");
122476 +        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
122477 +        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
122478 +                                        count, max, mostFrequent, nbSeq,
122479 +                                        LLFSELog, prevEntropy->fse.litlengthCTable,
122480 +                                        LL_defaultNorm, LL_defaultNormLog,
122481 +                                        ZSTD_defaultAllowed, strategy);
122482 +        assert(set_basic < set_compressed && set_rle < set_compressed);
122483 +        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
122484 +        {   size_t const countSize = ZSTD_buildCTable(
122485 +                op, (size_t)(oend - op),
122486 +                CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
122487 +                count, max, llCodeTable, nbSeq,
122488 +                LL_defaultNorm, LL_defaultNormLog, MaxLL,
122489 +                prevEntropy->fse.litlengthCTable,
122490 +                sizeof(prevEntropy->fse.litlengthCTable),
122491 +                entropyWorkspace, entropyWkspSize);
122492 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
122493 +            if (LLtype == set_compressed)
122494 +                lastNCount = op;
122495 +            op += countSize;
122496 +            assert(op <= oend);
122497 +    }   }
122498 +    /* build CTable for Offsets */
122499 +    {   unsigned max = MaxOff;
122500 +        size_t const mostFrequent = HIST_countFast_wksp(
122501 +            count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
122502 +        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
122503 +        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
122504 +        DEBUGLOG(5, "Building OF table");
122505 +        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
122506 +        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
122507 +                                        count, max, mostFrequent, nbSeq,
122508 +                                        OffFSELog, prevEntropy->fse.offcodeCTable,
122509 +                                        OF_defaultNorm, OF_defaultNormLog,
122510 +                                        defaultPolicy, strategy);
122511 +        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
122512 +        {   size_t const countSize = ZSTD_buildCTable(
122513 +                op, (size_t)(oend - op),
122514 +                CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
122515 +                count, max, ofCodeTable, nbSeq,
122516 +                OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
122517 +                prevEntropy->fse.offcodeCTable,
122518 +                sizeof(prevEntropy->fse.offcodeCTable),
122519 +                entropyWorkspace, entropyWkspSize);
122520 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
122521 +            if (Offtype == set_compressed)
122522 +                lastNCount = op;
122523 +            op += countSize;
122524 +            assert(op <= oend);
122525 +    }   }
122526 +    /* build CTable for MatchLengths */
122527 +    {   unsigned max = MaxML;
122528 +        size_t const mostFrequent = HIST_countFast_wksp(
122529 +            count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
122530 +        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
122531 +        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
122532 +        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
122533 +                                        count, max, mostFrequent, nbSeq,
122534 +                                        MLFSELog, prevEntropy->fse.matchlengthCTable,
122535 +                                        ML_defaultNorm, ML_defaultNormLog,
122536 +                                        ZSTD_defaultAllowed, strategy);
122537 +        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
122538 +        {   size_t const countSize = ZSTD_buildCTable(
122539 +                op, (size_t)(oend - op),
122540 +                CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
122541 +                count, max, mlCodeTable, nbSeq,
122542 +                ML_defaultNorm, ML_defaultNormLog, MaxML,
122543 +                prevEntropy->fse.matchlengthCTable,
122544 +                sizeof(prevEntropy->fse.matchlengthCTable),
122545 +                entropyWorkspace, entropyWkspSize);
122546 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
122547 +            if (MLtype == set_compressed)
122548 +                lastNCount = op;
122549 +            op += countSize;
122550 +            assert(op <= oend);
122551 +    }   }
122553 +    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
122555 +    {   size_t const bitstreamSize = ZSTD_encodeSequences(
122556 +                                        op, (size_t)(oend - op),
122557 +                                        CTable_MatchLength, mlCodeTable,
122558 +                                        CTable_OffsetBits, ofCodeTable,
122559 +                                        CTable_LitLength, llCodeTable,
122560 +                                        sequences, nbSeq,
122561 +                                        longOffsets, bmi2);
122562 +        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
122563 +        op += bitstreamSize;
122564 +        assert(op <= oend);
122565 +        /* zstd versions <= 1.3.4 mistakenly report corruption when
122566 +         * FSE_readNCount() receives a buffer < 4 bytes.
122567 +         * Fixed by https://github.com/facebook/zstd/pull/1146.
122568 +         * This can happen when the last set_compressed table present is 2
122569 +         * bytes and the bitstream is only one byte.
122570 +         * In this exceedingly rare case, we will simply emit an uncompressed
122571 +         * block, since it isn't worth optimizing.
122572 +         */
122573 +        if (lastNCount && (op - lastNCount) < 4) {
122574 +            /* NCountSize >= 2 && bitstreamSize > 0 ==> op - lastNCount == 3 */
122575 +            assert(op - lastNCount == 3);
122576 +            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
122577 +                        "emitting an uncompressed block.");
122578 +            return 0;
122579 +        }
122580 +    }
122582 +    DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
122583 +    return (size_t)(op - ostart);
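
The sequences section written above starts with nbSeq packed into 1 to 3 bytes, followed by one flags byte carrying the three encoding types. A decoder-side sketch of that layout, assuming MEM_readLE16() from mem.h and taking LONGNBSEQ to be the format's two-byte-form limit (0x7F00):

    /* Sketch: parse the nbSeq field emitted by the encoder above. */
    size_t nbSeq, hdrSize;
    if (ip[0] < 128) {                /* 1-byte form */
        nbSeq = ip[0];
        hdrSize = 1;
    } else if (ip[0] < 255) {         /* 2-byte form */
        nbSeq = ((size_t)(ip[0] - 0x80) << 8) + ip[1];
        hdrSize = 2;
    } else {                          /* 3-byte form: 0xFF then LE16 */
        nbSeq = MEM_readLE16(ip + 1) + LONGNBSEQ;
        hdrSize = 3;
    }
    /* the next byte is (LLtype<<6) | (Offtype<<4) | (MLtype<<2) */
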
122586 +MEM_STATIC size_t
122587 +ZSTD_entropyCompressSequences(seqStore_t* seqStorePtr,
122588 +                       const ZSTD_entropyCTables_t* prevEntropy,
122589 +                             ZSTD_entropyCTables_t* nextEntropy,
122590 +                       const ZSTD_CCtx_params* cctxParams,
122591 +                             void* dst, size_t dstCapacity,
122592 +                             size_t srcSize,
122593 +                             void* entropyWorkspace, size_t entropyWkspSize,
122594 +                             int bmi2)
122596 +    size_t const cSize = ZSTD_entropyCompressSequences_internal(
122597 +                            seqStorePtr, prevEntropy, nextEntropy, cctxParams,
122598 +                            dst, dstCapacity,
122599 +                            entropyWorkspace, entropyWkspSize, bmi2);
122600 +    if (cSize == 0) return 0;
122601 +    /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
122602 +     * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
122603 +     */
122604 +    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
122605 +        return 0;  /* block not compressed */
122606 +    FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSequences_internal failed");
122608 +    /* Check compressibility */
122609 +    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
122610 +        if (cSize >= maxCSize) return 0;  /* block not compressed */
122611 +    }
122612 +    DEBUGLOG(4, "ZSTD_entropyCompressSequences() cSize: %zu\n", cSize);
122613 +    return cSize;
122616 +/* ZSTD_selectBlockCompressor() :
122617 + * Not static, but internal use only (used by long distance matcher)
122618 + * assumption : strat is a valid strategy */
122619 +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
122621 +    static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
122622 +        { ZSTD_compressBlock_fast  /* default for 0 */,
122623 +          ZSTD_compressBlock_fast,
122624 +          ZSTD_compressBlock_doubleFast,
122625 +          ZSTD_compressBlock_greedy,
122626 +          ZSTD_compressBlock_lazy,
122627 +          ZSTD_compressBlock_lazy2,
122628 +          ZSTD_compressBlock_btlazy2,
122629 +          ZSTD_compressBlock_btopt,
122630 +          ZSTD_compressBlock_btultra,
122631 +          ZSTD_compressBlock_btultra2 },
122632 +        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
122633 +          ZSTD_compressBlock_fast_extDict,
122634 +          ZSTD_compressBlock_doubleFast_extDict,
122635 +          ZSTD_compressBlock_greedy_extDict,
122636 +          ZSTD_compressBlock_lazy_extDict,
122637 +          ZSTD_compressBlock_lazy2_extDict,
122638 +          ZSTD_compressBlock_btlazy2_extDict,
122639 +          ZSTD_compressBlock_btopt_extDict,
122640 +          ZSTD_compressBlock_btultra_extDict,
122641 +          ZSTD_compressBlock_btultra_extDict },
122642 +        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
122643 +          ZSTD_compressBlock_fast_dictMatchState,
122644 +          ZSTD_compressBlock_doubleFast_dictMatchState,
122645 +          ZSTD_compressBlock_greedy_dictMatchState,
122646 +          ZSTD_compressBlock_lazy_dictMatchState,
122647 +          ZSTD_compressBlock_lazy2_dictMatchState,
122648 +          ZSTD_compressBlock_btlazy2_dictMatchState,
122649 +          ZSTD_compressBlock_btopt_dictMatchState,
122650 +          ZSTD_compressBlock_btultra_dictMatchState,
122651 +          ZSTD_compressBlock_btultra_dictMatchState },
122652 +        { NULL  /* default for 0 */,
122653 +          NULL,
122654 +          NULL,
122655 +          ZSTD_compressBlock_greedy_dedicatedDictSearch,
122656 +          ZSTD_compressBlock_lazy_dedicatedDictSearch,
122657 +          ZSTD_compressBlock_lazy2_dedicatedDictSearch,
122658 +          NULL,
122659 +          NULL,
122660 +          NULL,
122661 +          NULL }
122662 +    };
122663 +    ZSTD_blockCompressor selectedCompressor;
122664 +    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
122666 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
122667 +    selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
122668 +    assert(selectedCompressor != NULL);
122669 +    return selectedCompressor;
122672 +static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
122673 +                                   const BYTE* anchor, size_t lastLLSize)
122675 +    ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
122676 +    seqStorePtr->lit += lastLLSize;
122679 +void ZSTD_resetSeqStore(seqStore_t* ssPtr)
122681 +    ssPtr->lit = ssPtr->litStart;
122682 +    ssPtr->sequences = ssPtr->sequencesStart;
122683 +    ssPtr->longLengthID = 0;
122686 +typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
122688 +static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
122690 +    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
122691 +    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
122692 +    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
122693 +    /* Assert that we have correctly flushed the ctx params into the ms's copy */
122694 +    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
122695 +    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
122696 +        if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
122697 +            ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
122698 +        } else {
122699 +            ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
122700 +        }
122701 +        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
122702 +    }
122703 +    ZSTD_resetSeqStore(&(zc->seqStore));
122704 +    /* required for optimal parser to read stats from dictionary */
122705 +    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
122706 +    /* tell the optimal parser how we expect to compress literals */
122707 +    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
122708 +    /* a gap between an attached dict and the current window is not safe,
122709 +     * they must remain adjacent,
122710 +     * and when that stops being the case, the dict must be unset */
122711 +    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
122713 +    /* limited update after a very long match */
122714 +    {   const BYTE* const base = ms->window.base;
122715 +        const BYTE* const istart = (const BYTE*)src;
122716 +        const U32 curr = (U32)(istart-base);
122717 +        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
122718 +        if (curr > ms->nextToUpdate + 384)
122719 +            ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
122720 +    }
122722 +    /* select and store sequences */
122723 +    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
122724 +        size_t lastLLSize;
122725 +        {   int i;
122726 +            for (i = 0; i < ZSTD_REP_NUM; ++i)
122727 +                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
122728 +        }
122729 +        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
122730 +            assert(!zc->appliedParams.ldmParams.enableLdm);
122731 +            /* Updates ldmSeqStore.pos */
122732 +            lastLLSize =
122733 +                ZSTD_ldm_blockCompress(&zc->externSeqStore,
122734 +                                       ms, &zc->seqStore,
122735 +                                       zc->blockState.nextCBlock->rep,
122736 +                                       src, srcSize);
122737 +            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
122738 +        } else if (zc->appliedParams.ldmParams.enableLdm) {
122739 +            rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
122741 +            ldmSeqStore.seq = zc->ldmSequences;
122742 +            ldmSeqStore.capacity = zc->maxNbLdmSequences;
122743 +            /* Updates ldmSeqStore.size */
122744 +            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
122745 +                                               &zc->appliedParams.ldmParams,
122746 +                                               src, srcSize), "");
122747 +            /* Updates ldmSeqStore.pos */
122748 +            lastLLSize =
122749 +                ZSTD_ldm_blockCompress(&ldmSeqStore,
122750 +                                       ms, &zc->seqStore,
122751 +                                       zc->blockState.nextCBlock->rep,
122752 +                                       src, srcSize);
122753 +            assert(ldmSeqStore.pos == ldmSeqStore.size);
122754 +        } else {   /* not long range mode */
122755 +            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
122756 +            ms->ldmSeqStore = NULL;
122757 +            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
122758 +        }
122759 +        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
122760 +            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
122761 +    }   }
122762 +    return ZSTDbss_compress;
122765 +static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
122767 +    const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
122768 +    const seqDef* seqStoreSeqs = seqStore->sequencesStart;
122769 +    size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
122770 +    size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
122771 +    size_t literalsRead = 0;
122772 +    size_t lastLLSize;
122774 +    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
122775 +    size_t i;
122776 +    repcodes_t updatedRepcodes;
122778 +    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
122779 +    /* Ensure we have enough space for last literals "sequence" */
122780 +    assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
122781 +    ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
122782 +    for (i = 0; i < seqStoreSeqSize; ++i) {
122783 +        U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;
122784 +        outSeqs[i].litLength = seqStoreSeqs[i].litLength;
122785 +        outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;
122786 +        outSeqs[i].rep = 0;
122788 +        if (i == seqStore->longLengthPos) {
122789 +            if (seqStore->longLengthID == 1) {
122790 +                outSeqs[i].litLength += 0x10000;
122791 +            } else if (seqStore->longLengthID == 2) {
122792 +                outSeqs[i].matchLength += 0x10000;
122793 +            }
122794 +        }
122796 +        if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {
122797 +            /* Derive the correct offset corresponding to a repcode */
122798 +            outSeqs[i].rep = seqStoreSeqs[i].offset;
122799 +            if (outSeqs[i].litLength != 0) {
122800 +                rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
122801 +            } else {
122802 +                if (outSeqs[i].rep == 3) {
122803 +                    rawOffset = updatedRepcodes.rep[0] - 1;
122804 +                } else {
122805 +                    rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
122806 +                }
122807 +            }
122808 +        }
122809 +        outSeqs[i].offset = rawOffset;
122810 +        /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
122811 +           so we provide seqStoreSeqs[i].offset - 1 */
122812 +        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,
122813 +                                         seqStoreSeqs[i].offset - 1,
122814 +                                         seqStoreSeqs[i].litLength == 0);
122815 +        literalsRead += outSeqs[i].litLength;
122816 +    }
122817 +    /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
122818 +     * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
122819 +     * for the block boundary, according to the API.
122820 +     */
122821 +    assert(seqStoreLiteralsSize >= literalsRead);
122822 +    lastLLSize = seqStoreLiteralsSize - literalsRead;
122823 +    outSeqs[i].litLength = (U32)lastLLSize;
122824 +    outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
122825 +    seqStoreSeqSize++;
122826 +    zc->seqCollector.seqIndex += seqStoreSeqSize;
122829 +size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
122830 +                              size_t outSeqsSize, const void* src, size_t srcSize)
122832 +    const size_t dstCapacity = ZSTD_compressBound(srcSize);
122833 +    void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
122834 +    SeqCollector seqCollector;
122836 +    RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
122838 +    seqCollector.collectSequences = 1;
122839 +    seqCollector.seqStart = outSeqs;
122840 +    seqCollector.seqIndex = 0;
122841 +    seqCollector.maxSequences = outSeqsSize;
122842 +    zc->seqCollector = seqCollector;
122844 +    ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
122845 +    ZSTD_customFree(dst, ZSTD_defaultCMem);
122846 +    return zc->seqCollector.seqIndex;
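
ZSTD_generateSequences() runs an ordinary compression pass purely to harvest (litLength, matchLength, offset) triples via the seqCollector hook. A minimal usage sketch; the maxSeqs sizing below is an assumption (one extra slot per block is needed for the delimiter entry), and error handling is elided:

    /* Sketch (assumes zstd.h and <stdlib.h>) */
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const maxSeqs  = srcSize / 3 + 32;   /* rough bound, assumption */
    ZSTD_Sequence* const seqs = malloc(maxSeqs * sizeof(ZSTD_Sequence));
    size_t const nbSeqs = ZSTD_generateSequences(cctx, seqs, maxSeqs, src, srcSize);
    /* seqs[0..nbSeqs) now holds one (offset:0, matchLength:0) delimiter per block */
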
122849 +size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
122850 +    size_t in = 0;
122851 +    size_t out = 0;
122852 +    for (; in < seqsSize; ++in) {
122853 +        if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
122854 +            if (in != seqsSize - 1) {
122855 +                sequences[in+1].litLength += sequences[in].litLength;
122856 +            }
122857 +        } else {
122858 +            sequences[out] = sequences[in];
122859 +            ++out;
122860 +        }
122861 +    }
122862 +    return out;
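
A worked example of the compaction above, with hypothetical values: each (offset:0, matchLength:0) delimiter is removed and its litLength is folded into the following sequence.

    /* in : {of 5, ml 4, ll 2} {of 0, ml 0, ll 3} {of 8, ml 6, ll 1} {of 0, ml 0, ll 0}
     * out: {of 5, ml 4, ll 2} {of 8, ml 6, ll 4}                      -> returns 2
     * (the delimiter's ll 3 was added to the next sequence's ll 1;
     *  the final block delimiter is simply dropped) */
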
122865 +/* Unrolled loop to read four size_t words of input at a time. Returns 1 if the input is RLE, 0 if not. */
122866 +static int ZSTD_isRLE(const BYTE* src, size_t length) {
122867 +    const BYTE* ip = src;
122868 +    const BYTE value = ip[0];
122869 +    const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
122870 +    const size_t unrollSize = sizeof(size_t) * 4;
122871 +    const size_t unrollMask = unrollSize - 1;
122872 +    const size_t prefixLength = length & unrollMask;
122873 +    size_t i;
122874 +    size_t u;
122875 +    if (length == 1) return 1;
122876 +    /* Check if prefix is RLE first before using unrolled loop */
122877 +    if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
122878 +        return 0;
122879 +    }
122880 +    for (i = prefixLength; i != length; i += unrollSize) {
122881 +        for (u = 0; u < unrollSize; u += sizeof(size_t)) {
122882 +            if (MEM_readST(ip + i + u) != valueST) {
122883 +                return 0;
122884 +            }
122885 +        }
122886 +    }
122887 +    return 1;
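
The valueST constant above broadcasts one byte into every lane of a size_t: multiplying by 0x0101010101010101 replicates the byte, so a single word comparison checks sizeof(size_t) input bytes at once. The trick in isolation:

    BYTE   const value   = 0xAB;
    size_t const valueST = (size_t)((U64)value * 0x0101010101010101ULL);
    /* on 64-bit targets: valueST == 0xABABABABABABABAB, and
     * MEM_readST(p) == valueST  iff all 8 bytes at p equal 0xAB */
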
122890 +/* Returns true if the given block may be RLE.
122891 + * This is just a heuristic based on the compressibility.
122892 + * It may return both false positives and false negatives.
122893 + */
122894 +static int ZSTD_maybeRLE(seqStore_t const* seqStore)
122896 +    size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
122897 +    size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
122899 +    return nbSeqs < 4 && nbLits < 10;
122902 +static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
122904 +    ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
122905 +    zc->blockState.prevCBlock = zc->blockState.nextCBlock;
122906 +    zc->blockState.nextCBlock = tmp;
122909 +static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
122910 +                                        void* dst, size_t dstCapacity,
122911 +                                        const void* src, size_t srcSize, U32 frame)
122913 +    /* This is a heuristic upper bound for the length of an RLE block;
122914 +     * it isn't the true maximum. Finding the real threshold
122915 +     * needs further investigation.
122916 +     */
122917 +    const U32 rleMaxLength = 25;
122918 +    size_t cSize;
122919 +    const BYTE* ip = (const BYTE*)src;
122920 +    BYTE* op = (BYTE*)dst;
122921 +    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
122922 +                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
122923 +                (unsigned)zc->blockState.matchState.nextToUpdate);
122925 +    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
122926 +        FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
122927 +        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
122928 +    }
122930 +    if (zc->seqCollector.collectSequences) {
122931 +        ZSTD_copyBlockSequences(zc);
122932 +        ZSTD_confirmRepcodesAndEntropyTables(zc);
122933 +        return 0;
122934 +    }
122936 +    /* encode sequences and literals */
122937 +    cSize = ZSTD_entropyCompressSequences(&zc->seqStore,
122938 +            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
122939 +            &zc->appliedParams,
122940 +            dst, dstCapacity,
122941 +            srcSize,
122942 +            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
122943 +            zc->bmi2);
122945 +    if (zc->seqCollector.collectSequences) {
122946 +        ZSTD_copyBlockSequences(zc);
122947 +        return 0;
122948 +    }
122951 +    if (frame &&
122952 +        /* We don't want to emit our first block as a RLE even if it qualifies because
122953 +         * doing so will cause the decoder (cli only) to throw a "should consume all input error."
122954 +         * This is only an issue for zstd <= v1.4.3
122955 +         */
122956 +        !zc->isFirstBlock &&
122957 +        cSize < rleMaxLength &&
122958 +        ZSTD_isRLE(ip, srcSize))
122959 +    {
122960 +        cSize = 1;
122961 +        op[0] = ip[0];
122962 +    }
122964 +out:
122965 +    if (!ZSTD_isError(cSize) && cSize > 1) {
122966 +        ZSTD_confirmRepcodesAndEntropyTables(zc);
122967 +    }
122968 +    /* We check that dictionaries have offset codes available for the first
122969 +     * block. After the first block, the offcode table might not have large
122970 +     * enough codes to represent the offsets in the data.
122971 +     */
122972 +    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
122973 +        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
122975 +    return cSize;
122978 +static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
122979 +                               void* dst, size_t dstCapacity,
122980 +                               const void* src, size_t srcSize,
122981 +                               const size_t bss, U32 lastBlock)
122983 +    DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
122984 +    if (bss == ZSTDbss_compress) {
122985 +        if (/* We don't want to emit our first block as a RLE even if it qualifies because
122986 +            * doing so will cause the decoder (cli only) to throw a "should consume all input error."
122987 +            * This is only an issue for zstd <= v1.4.3
122988 +            */
122989 +            !zc->isFirstBlock &&
122990 +            ZSTD_maybeRLE(&zc->seqStore) &&
122991 +            ZSTD_isRLE((BYTE const*)src, srcSize))
122992 +        {
122993 +            return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
122994 +        }
122995 +        /* Attempt superblock compression.
122996 +         *
122997 +         * Note that the compressed size of ZSTD_compressSuperBlock() is not bounded by the
122998 +         * standard ZSTD_compressBound(). This is a problem, because even if we have
122999 +         * space now, taking an extra byte now could cause us to run out of space later
123000 +         * and violate ZSTD_compressBound().
123001 +         *
123002 +         * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
123003 +         *
123004 +         * In order to respect ZSTD_compressBound() we must attempt to emit a raw
123005 +         * uncompressed block in these cases:
123006 +         *   * cSize == 0: Return code for an uncompressed block.
123007 +         *   * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
123008 +         *     ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
123009 +         *     output space.
123010 +         *   * cSize >= blockBound(srcSize): We have expanded the block too much so
123011 +         *     emit an uncompressed block.
123012 +         */
123013 +        {
123014 +            size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
123015 +            if (cSize != ERROR(dstSize_tooSmall)) {
123016 +                size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
123017 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
123018 +                if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
123019 +                    ZSTD_confirmRepcodesAndEntropyTables(zc);
123020 +                    return cSize;
123021 +                }
123022 +            }
123023 +        }
123024 +    }
123026 +    DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
123027 +    /* Superblock compression failed, attempt to emit a single no compress block.
123028 +     * The decoder will be able to stream this block since it is uncompressed.
123029 +     */
123030 +    return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
123033 +static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
123034 +                               void* dst, size_t dstCapacity,
123035 +                               const void* src, size_t srcSize,
123036 +                               U32 lastBlock)
123038 +    size_t cSize = 0;
123039 +    const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
123040 +    DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
123041 +                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
123042 +    FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
123044 +    cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
123045 +    FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
123047 +    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
123048 +        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
123050 +    return cSize;
123053 +static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
123054 +                                         ZSTD_cwksp* ws,
123055 +                                         ZSTD_CCtx_params const* params,
123056 +                                         void const* ip,
123057 +                                         void const* iend)
123059 +    if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
123060 +        U32 const maxDist = (U32)1 << params->cParams.windowLog;
123061 +        U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
123062 +        U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
123063 +        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
123064 +        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
123065 +        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
123066 +        ZSTD_cwksp_mark_tables_dirty(ws);
123067 +        ZSTD_reduceIndex(ms, params, correction);
123068 +        ZSTD_cwksp_mark_tables_clean(ws);
123069 +        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
123070 +        else ms->nextToUpdate -= correction;
123071 +        /* invalidate dictionaries on overflow correction */
123072 +        ms->loadedDictEnd = 0;
123073 +        ms->dictMatchState = NULL;
123074 +    }
123077 +/*! ZSTD_compress_frameChunk() :
123078 +*   Compress a chunk of data into one or multiple blocks.
123079 +*   All blocks will be terminated, all input will be consumed.
123080 +*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
123081 +*   Frame is assumed to be already started (header already produced)
123082 +*   @return : compressed size, or an error code
123084 +static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
123085 +                                     void* dst, size_t dstCapacity,
123086 +                               const void* src, size_t srcSize,
123087 +                                     U32 lastFrameChunk)
123089 +    size_t blockSize = cctx->blockSize;
123090 +    size_t remaining = srcSize;
123091 +    const BYTE* ip = (const BYTE*)src;
123092 +    BYTE* const ostart = (BYTE*)dst;
123093 +    BYTE* op = ostart;
123094 +    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
123096 +    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
123098 +    DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
123099 +    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
123100 +        xxh64_update(&cctx->xxhState, src, srcSize);
123102 +    while (remaining) {
123103 +        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
123104 +        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
123106 +        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
123107 +                        dstSize_tooSmall,
123108 +                        "not enough space to store compressed block");
123109 +        if (remaining < blockSize) blockSize = remaining;
123111 +        ZSTD_overflowCorrectIfNeeded(
123112 +            ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
123113 +        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
123115 +        /* Ensure hash/chain table insertion resumes no sooner than lowLimit */
123116 +        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
123118 +        {   size_t cSize;
123119 +            if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
123120 +                cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
123121 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
123122 +                assert(cSize > 0);
123123 +                assert(cSize <= blockSize + ZSTD_blockHeaderSize);
123124 +            } else {
123125 +                cSize = ZSTD_compressBlock_internal(cctx,
123126 +                                        op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
123127 +                                        ip, blockSize, 1 /* frame */);
123128 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");
123130 +                if (cSize == 0) {  /* block is not compressible */
123131 +                    cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
123132 +                    FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
123133 +                } else {
123134 +                    U32 const cBlockHeader = cSize == 1 ?
123135 +                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
123136 +                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
123137 +                    MEM_writeLE24(op, cBlockHeader);
123138 +                    cSize += ZSTD_blockHeaderSize;
123139 +                }
123140 +            }
123143 +            ip += blockSize;
123144 +            assert(remaining >= blockSize);
123145 +            remaining -= blockSize;
123146 +            op += cSize;
123147 +            assert(dstCapacity >= cSize);
123148 +            dstCapacity -= cSize;
123149 +            cctx->isFirstBlock = 0;
123150 +            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
123151 +                        (unsigned)cSize);
123152 +    }   }
123154 +    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
123155 +    return (size_t)(op-ostart);
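
The 3-byte block header assembled above packs lastBlock into bit 0, the block type into bits 1-2, and a size into the remaining 21 bits. A worked example, assuming the format's bt_raw=0 / bt_rle=1 / bt_compressed=2 encoding:

    /* compressed block, cSize = 100, not the last block:
     *   header = 0 + (2<<1) + (100<<3) = 804 = 0x000324, written LE24
     * RLE block, regenerated size blockSize = 1000, last block:
     *   header = 1 + (1<<1) + (1000<<3) = 8003
     * (for RLE the size field holds the regenerated size, as above) */
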
123159 +static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
123160 +                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
123161 +{   BYTE* const op = (BYTE*)dst;
123162 +    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
123163 +    U32   const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
123164 +    U32   const checksumFlag = params->fParams.checksumFlag>0;
123165 +    U32   const windowSize = (U32)1 << params->cParams.windowLog;
123166 +    U32   const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
123167 +    BYTE  const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
123168 +    U32   const fcsCode = params->fParams.contentSizeFlag ?
123169 +                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
123170 +    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
123171 +    size_t pos=0;
123173 +    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
123174 +    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
123175 +                    "dst buf is too small to fit worst-case frame header size.");
123176 +    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
123177 +                !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
123178 +    if (params->format == ZSTD_f_zstd1) {
123179 +        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
123180 +        pos = 4;
123181 +    }
123182 +    op[pos++] = frameHeaderDescriptionByte;
123183 +    if (!singleSegment) op[pos++] = windowLogByte;
123184 +    switch(dictIDSizeCode)
123185 +    {
123186 +        default:  assert(0); /* impossible */
123187 +        case 0 : break;
123188 +        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
123189 +        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
123190 +        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
123191 +    }
123192 +    switch(fcsCode)
123193 +    {
123194 +        default:  assert(0); /* impossible */
123195 +        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
123196 +        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
123197 +        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
123198 +        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
123199 +    }
123200 +    return pos;
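
A worked example of the descriptor byte computed above, for hypothetical parameters pledgedSrcSize = 100000, checksum enabled, dictID = 0, windowLog = 20:

    /* windowSize = 1<<20 >= 100000            -> singleSegment = 1
     * fcsCode = (100000>=256) + (100000>=65792) + (100000>=0xFFFFFFFF) = 2
     * FHD byte = 0 (dictID) + (1<<2) + (1<<5) + (2<<6) = 0xA4
     * header = magic(4) + FHD(1) + LE32 contentSize(4) = 9 bytes;
     * the windowLog byte is omitted because singleSegment is set */
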
123203 +/* ZSTD_writeSkippableFrame() :
123204 + * Writes out a skippable frame with the specified magic number variant (16 are supported),
123205 + * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
123207 + * Returns the total number of bytes written, or a ZSTD error code.
123208 + */
123209 +size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
123210 +                                const void* src, size_t srcSize, unsigned magicVariant) {
123211 +    BYTE* op = (BYTE*)dst;
123212 +    RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
123213 +                    dstSize_tooSmall, "Not enough room for skippable frame");
123214 +    RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
123215 +    RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
123217 +    MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
123218 +    MEM_writeLE32(op+4, (U32)srcSize);
123219 +    ZSTD_memcpy(op+8, src, srcSize);
123220 +    return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
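
Skippable frames let an application embed metadata that any zstd decoder will silently skip over. A minimal usage sketch of the function above; buffer names are illustrative and error handling is elided:

    const char meta[] = "application metadata";
    BYTE out[64];
    size_t const written = ZSTD_writeSkippableFrame(out, sizeof(out),
                                                    meta, sizeof(meta),
                                                    0 /* variant 0..15 */);
    /* on success: written == sizeof(meta) + 8 (ZSTD_SKIPPABLEHEADERSIZE) */
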
123223 +/* ZSTD_writeLastEmptyBlock() :
123224 + * output an empty Block with end-of-frame mark to complete a frame
123225 + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
123226 + *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
123227 + */
123228 +size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
123230 +    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
123231 +                    "dst buf is too small to write frame trailer empty block.");
123232 +    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
123233 +        MEM_writeLE24(dst, cBlockHeader24);
123234 +        return ZSTD_blockHeaderSize;
123235 +    }
123238 +size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
123240 +    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
123241 +                    "wrong cctx stage");
123242 +    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
123243 +                    parameter_unsupported,
123244 +                    "incompatible with ldm");
123245 +    cctx->externSeqStore.seq = seq;
123246 +    cctx->externSeqStore.size = nbSeq;
123247 +    cctx->externSeqStore.capacity = nbSeq;
123248 +    cctx->externSeqStore.pos = 0;
123249 +    cctx->externSeqStore.posInSequence = 0;
123250 +    return 0;
123254 +static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
123255 +                              void* dst, size_t dstCapacity,
123256 +                        const void* src, size_t srcSize,
123257 +                               U32 frame, U32 lastFrameChunk)
123259 +    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
123260 +    size_t fhSize = 0;
123262 +    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
123263 +                cctx->stage, (unsigned)srcSize);
123264 +    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
123265 +                    "missing init (ZSTD_compressBegin)");
123267 +    if (frame && (cctx->stage==ZSTDcs_init)) {
123268 +        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
123269 +                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
123270 +        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
123271 +        assert(fhSize <= dstCapacity);
123272 +        dstCapacity -= fhSize;
123273 +        dst = (char*)dst + fhSize;
123274 +        cctx->stage = ZSTDcs_ongoing;
123275 +    }
123277 +    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */
123279 +    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
123280 +        ms->nextToUpdate = ms->window.dictLimit;
123281 +    }
123282 +    if (cctx->appliedParams.ldmParams.enableLdm) {
123283 +        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
123284 +    }
123286 +    if (!frame) {
123287 +        /* overflow check and correction for block mode */
123288 +        ZSTD_overflowCorrectIfNeeded(
123289 +            ms, &cctx->workspace, &cctx->appliedParams,
123290 +            src, (BYTE const*)src + srcSize);
123291 +    }
123293 +    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
123294 +    {   size_t const cSize = frame ?
123295 +                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
123296 +                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
123297 +        FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
123298 +        cctx->consumedSrcSize += srcSize;
123299 +        cctx->producedCSize += (cSize + fhSize);
123300 +        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
123301 +        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
123302 +            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
123303 +            RETURN_ERROR_IF(
123304 +                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
123305 +                srcSize_wrong,
123306 +                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
123307 +                (unsigned)cctx->pledgedSrcSizePlusOne-1,
123308 +                (unsigned)cctx->consumedSrcSize);
123309 +        }
123310 +        return cSize + fhSize;
123311 +    }
123314 +size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
123315 +                              void* dst, size_t dstCapacity,
123316 +                        const void* src, size_t srcSize)
123318 +    DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
123319 +    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
123323 +size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
123325 +    ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
123326 +    assert(!ZSTD_checkCParams(cParams));
123327 +    return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
123330 +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
123332 +    DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
123333 +    { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
123334 +      RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
123336 +    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
123339 +/*! ZSTD_loadDictionaryContent() :
123340 + *  @return : 0, or an error code
123341 + */
123342 +static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
123343 +                                         ldmState_t* ls,
123344 +                                         ZSTD_cwksp* ws,
123345 +                                         ZSTD_CCtx_params const* params,
123346 +                                         const void* src, size_t srcSize,
123347 +                                         ZSTD_dictTableLoadMethod_e dtlm)
123349 +    const BYTE* ip = (const BYTE*) src;
123350 +    const BYTE* const iend = ip + srcSize;
123352 +    ZSTD_window_update(&ms->window, src, srcSize);
123353 +    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
123355 +    if (params->ldmParams.enableLdm && ls != NULL) {
123356 +        ZSTD_window_update(&ls->window, src, srcSize);
123357 +        ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
123358 +    }
123360 +    /* Assert that the ms params match the params we're being given */
123361 +    ZSTD_assertEqualCParams(params->cParams, ms->cParams);
123363 +    if (srcSize <= HASH_READ_SIZE) return 0;
123365 +    while (iend - ip > HASH_READ_SIZE) {
123366 +        size_t const remaining = (size_t)(iend - ip);
123367 +        size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
123368 +        const BYTE* const ichunk = ip + chunk;
123370 +        ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
123372 +        if (params->ldmParams.enableLdm && ls != NULL)
123373 +            ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, &params->ldmParams);
123375 +        switch(params->cParams.strategy)
123376 +        {
123377 +        case ZSTD_fast:
123378 +            ZSTD_fillHashTable(ms, ichunk, dtlm);
123379 +            break;
123380 +        case ZSTD_dfast:
123381 +            ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
123382 +            break;
123384 +        case ZSTD_greedy:
123385 +        case ZSTD_lazy:
123386 +        case ZSTD_lazy2:
123387 +            if (chunk >= HASH_READ_SIZE && ms->dedicatedDictSearch) {
123388 +                assert(chunk == remaining); /* must load everything in one go */
123389 +                ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, ichunk-HASH_READ_SIZE);
123390 +            } else if (chunk >= HASH_READ_SIZE) {
123391 +                ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
123392 +            }
123393 +            break;
123395 +        case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
123396 +        case ZSTD_btopt:
123397 +        case ZSTD_btultra:
123398 +        case ZSTD_btultra2:
123399 +            if (chunk >= HASH_READ_SIZE)
123400 +                ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
123401 +            break;
123403 +        default:
123404 +            assert(0);  /* not possible : not a valid strategy id */
123405 +        }
123407 +        ip = ichunk;
123408 +    }
123410 +    ms->nextToUpdate = (U32)(iend - ms->window.base);
123411 +    return 0;
123415 +/* Dictionaries that assign zero probability to symbols that do show up cause problems
123416 + * during FSE encoding. Mark dictionaries containing zero-probability symbols as
123417 + * FSE_repeat_check; only dictionaries whose symbols are all valid can be assumed valid.
123418 + */
123419 +static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
123421 +    U32 s;
123422 +    if (dictMaxSymbolValue < maxSymbolValue) {
123423 +        return FSE_repeat_check;
123424 +    }
123425 +    for (s = 0; s <= maxSymbolValue; ++s) {
123426 +        if (normalizedCounter[s] == 0) {
123427 +            return FSE_repeat_check;
123428 +        }
123429 +    }
123430 +    return FSE_repeat_valid;
123433 +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
123434 +                         const void* const dict, size_t dictSize)
123436 +    short offcodeNCount[MaxOff+1];
123437 +    unsigned offcodeMaxValue = MaxOff;
123438 +    const BYTE* dictPtr = (const BYTE*)dict;    /* skip magic num and dict ID */
123439 +    const BYTE* const dictEnd = dictPtr + dictSize;
123440 +    dictPtr += 8;
123441 +    bs->entropy.huf.repeatMode = HUF_repeat_check;
123443 +    {   unsigned maxSymbolValue = 255;
123444 +        unsigned hasZeroWeights = 1;
123445 +        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
123446 +            dictEnd-dictPtr, &hasZeroWeights);
123448 +        /* We only set the loaded table as valid if all of its weights are
123449 +         * non-zero. Otherwise, we leave it in check mode */
123450 +        if (!hasZeroWeights)
123451 +            bs->entropy.huf.repeatMode = HUF_repeat_valid;
123453 +        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
123454 +        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
123455 +        dictPtr += hufHeaderSize;
123456 +    }
123458 +    {   unsigned offcodeLog;
123459 +        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
123460 +        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
123461 +        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
123462 +        /* fill all offset symbols to avoid garbage at end of table */
123463 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
123464 +                bs->entropy.fse.offcodeCTable,
123465 +                offcodeNCount, MaxOff, offcodeLog,
123466 +                workspace, HUF_WORKSPACE_SIZE)),
123467 +            dictionary_corrupted, "");
123468 +        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
123469 +        dictPtr += offcodeHeaderSize;
123470 +    }
123472 +    {   short matchlengthNCount[MaxML+1];
123473 +        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
123474 +        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
123475 +        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
123476 +        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
123477 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
123478 +                bs->entropy.fse.matchlengthCTable,
123479 +                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
123480 +                workspace, HUF_WORKSPACE_SIZE)),
123481 +            dictionary_corrupted, "");
123482 +        bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
123483 +        dictPtr += matchlengthHeaderSize;
123484 +    }
123486 +    {   short litlengthNCount[MaxLL+1];
123487 +        unsigned litlengthMaxValue = MaxLL, litlengthLog;
123488 +        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
123489 +        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
123490 +        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
123491 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
123492 +                bs->entropy.fse.litlengthCTable,
123493 +                litlengthNCount, litlengthMaxValue, litlengthLog,
123494 +                workspace, HUF_WORKSPACE_SIZE)),
123495 +            dictionary_corrupted, "");
123496 +        bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
123497 +        dictPtr += litlengthHeaderSize;
123498 +    }
123500 +    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
123501 +    bs->rep[0] = MEM_readLE32(dictPtr+0);
123502 +    bs->rep[1] = MEM_readLE32(dictPtr+4);
123503 +    bs->rep[2] = MEM_readLE32(dictPtr+8);
123504 +    dictPtr += 12;
123506 +    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
123507 +        U32 offcodeMax = MaxOff;
123508 +        if (dictContentSize <= ((U32)-1) - 128 KB) {
123509 +            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
123510 +            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
123511 +        }
123512 +        /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
123513 +        bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));
123515 +        /* All repCodes must be <= dictContentSize and != 0 */
123516 +        {   U32 u;
123517 +            for (u=0; u<3; u++) {
123518 +                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
123519 +                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
123520 +    }   }   }
123522 +    return dictPtr - (const BYTE*)dict;
123525 +/* Dictionary format :
123526 + * See :
123527 + * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
123528 + */
123529 +/*! ZSTD_loadZstdDictionary() :
123530 + * @return : dictID, or an error code
123531 + *  assumptions : the magic number is assumed to have been checked already,
123532 + *                and dictSize is assumed >= 8
123533 + */
123534 +static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
123535 +                                      ZSTD_matchState_t* ms,
123536 +                                      ZSTD_cwksp* ws,
123537 +                                      ZSTD_CCtx_params const* params,
123538 +                                      const void* dict, size_t dictSize,
123539 +                                      ZSTD_dictTableLoadMethod_e dtlm,
123540 +                                      void* workspace)
123542 +    const BYTE* dictPtr = (const BYTE*)dict;
123543 +    const BYTE* const dictEnd = dictPtr + dictSize;
123544 +    size_t dictID;
123545 +    size_t eSize;
123547 +    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
123548 +    assert(dictSize >= 8);
123549 +    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
123551 +    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr + 4 /* skip magic number */ );
123552 +    eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
123553 +    FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
123554 +    dictPtr += eSize;
123556 +    {
123557 +        size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
123558 +        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
123559 +            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
123560 +    }
123561 +    return dictID;
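To make the loader above easier to follow, this is the on-disk layout it consumes, restated from the code itself and from the format document linked above (added for this write-up; not part of the patch):

    magic          : 4 bytes, little-endian ZSTD_MAGIC_DICTIONARY
    dictID         : 4 bytes, little-endian (ignored when noDictIDFlag is set)
    entropy tables : Huffman table for literals, then FSE tables for
                     offsets, match lengths and literal lengths
    rep codes      : 3 x 4 bytes, little-endian, each non-zero and
                     <= the size of the remaining content
    content        : raw dictionary bytes, loaded by
                     ZSTD_loadDictionaryContent()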
123564 +/** ZSTD_compress_insertDictionary() :
123565 +*   @return : dictID, or an error code */
123566 +static size_t
123567 +ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
123568 +                               ZSTD_matchState_t* ms,
123569 +                               ldmState_t* ls,
123570 +                               ZSTD_cwksp* ws,
123571 +                         const ZSTD_CCtx_params* params,
123572 +                         const void* dict, size_t dictSize,
123573 +                               ZSTD_dictContentType_e dictContentType,
123574 +                               ZSTD_dictTableLoadMethod_e dtlm,
123575 +                               void* workspace)
123577 +    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
123578 +    if ((dict==NULL) || (dictSize<8)) {
123579 +        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
123580 +        return 0;
123581 +    }
123583 +    ZSTD_reset_compressedBlockState(bs);
123585 +    /* dict restricted modes */
123586 +    if (dictContentType == ZSTD_dct_rawContent)
123587 +        return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
123589 +    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
123590 +        if (dictContentType == ZSTD_dct_auto) {
123591 +            DEBUGLOG(4, "raw content dictionary detected");
123592 +            return ZSTD_loadDictionaryContent(
123593 +                ms, ls, ws, params, dict, dictSize, dtlm);
123594 +        }
123595 +        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
123596 +        assert(0);   /* impossible */
123597 +    }
123599 +    /* dict as full zstd dictionary */
123600 +    return ZSTD_loadZstdDictionary(
123601 +        bs, ms, ws, params, dict, dictSize, dtlm, workspace);
123604 +#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
123605 +#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
123607 +/*! ZSTD_compressBegin_internal() :
123608 + * @return : 0, or an error code */
123609 +static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
123610 +                                    const void* dict, size_t dictSize,
123611 +                                    ZSTD_dictContentType_e dictContentType,
123612 +                                    ZSTD_dictTableLoadMethod_e dtlm,
123613 +                                    const ZSTD_CDict* cdict,
123614 +                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
123615 +                                    ZSTD_buffered_policy_e zbuff)
123617 +    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
123618 +    /* params are supposed to be fully validated at this point */
123619 +    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
123620 +    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
123621 +    if ( (cdict)
123622 +      && (cdict->dictContentSize > 0)
123623 +      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
123624 +        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
123625 +        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
123626 +        || cdict->compressionLevel == 0)
123627 +      && (params->attachDictPref != ZSTD_dictForceLoad) ) {
123628 +        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
123629 +    }
123631 +    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
123632 +                                     ZSTDcrp_makeClean, zbuff) , "");
123633 +    {   size_t const dictID = cdict ?
123634 +                ZSTD_compress_insertDictionary(
123635 +                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
123636 +                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
123637 +                        cdict->dictContentSize, cdict->dictContentType, dtlm,
123638 +                        cctx->entropyWorkspace)
123639 +              : ZSTD_compress_insertDictionary(
123640 +                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
123641 +                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
123642 +                        dictContentType, dtlm, cctx->entropyWorkspace);
123643 +        FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
123644 +        assert(dictID <= UINT_MAX);
123645 +        cctx->dictID = (U32)dictID;
123646 +        cctx->dictContentSize = cdict ? cdict->dictContentSize : dictSize;
123647 +    }
123648 +    return 0;
123651 +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
123652 +                                    const void* dict, size_t dictSize,
123653 +                                    ZSTD_dictContentType_e dictContentType,
123654 +                                    ZSTD_dictTableLoadMethod_e dtlm,
123655 +                                    const ZSTD_CDict* cdict,
123656 +                                    const ZSTD_CCtx_params* params,
123657 +                                    unsigned long long pledgedSrcSize)
123659 +    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
123660 +    /* compression parameters verification and optimization */
123661 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
123662 +    return ZSTD_compressBegin_internal(cctx,
123663 +                                       dict, dictSize, dictContentType, dtlm,
123664 +                                       cdict,
123665 +                                       params, pledgedSrcSize,
123666 +                                       ZSTDb_not_buffered);
123669 +/*! ZSTD_compressBegin_advanced() :
123670 +*   @return : 0, or an error code */
123671 +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
123672 +                             const void* dict, size_t dictSize,
123673 +                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
123675 +    ZSTD_CCtx_params cctxParams;
123676 +    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
123677 +    return ZSTD_compressBegin_advanced_internal(cctx,
123678 +                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
123679 +                                            NULL /*cdict*/,
123680 +                                            &cctxParams, pledgedSrcSize);
123683 +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
123685 +    ZSTD_CCtx_params cctxParams;
123686 +    {
123687 +        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
123688 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
123689 +    }
123690 +    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
123691 +    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
123692 +                                       &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
123695 +size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
123697 +    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
123701 +/*! ZSTD_writeEpilogue() :
123702 +*   Ends a frame.
123703 +*   @return : nb of bytes written into dst (or an error code) */
123704 +static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
123706 +    BYTE* const ostart = (BYTE*)dst;
123707 +    BYTE* op = ostart;
123708 +    size_t fhSize = 0;
123710 +    DEBUGLOG(4, "ZSTD_writeEpilogue");
123711 +    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
123713 +    /* special case : empty frame */
123714 +    if (cctx->stage == ZSTDcs_init) {
123715 +        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
123716 +        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
123717 +        dstCapacity -= fhSize;
123718 +        op += fhSize;
123719 +        cctx->stage = ZSTDcs_ongoing;
123720 +    }
123722 +    if (cctx->stage != ZSTDcs_ending) {
123723 +        /* write one last empty block, make it the "last" block */
123724 +        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
123725 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
123726 +        MEM_writeLE32(op, cBlockHeader24);
123727 +        op += ZSTD_blockHeaderSize;
123728 +        dstCapacity -= ZSTD_blockHeaderSize;
123729 +    }
123731 +    if (cctx->appliedParams.fParams.checksumFlag) {
123732 +        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
123733 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
123734 +        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
123735 +        MEM_writeLE32(op, checksum);
123736 +        op += 4;
123737 +    }
123739 +    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
123740 +    return op-ostart;
123743 +void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
123745 +    (void)cctx;
123746 +    (void)extraCSize;
123749 +size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
123750 +                         void* dst, size_t dstCapacity,
123751 +                   const void* src, size_t srcSize)
123753 +    size_t endResult;
123754 +    size_t const cSize = ZSTD_compressContinue_internal(cctx,
123755 +                                dst, dstCapacity, src, srcSize,
123756 +                                1 /* frame mode */, 1 /* last chunk */);
123757 +    FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
123758 +    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
123759 +    FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
123760 +    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
123761 +    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
123762 +        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
123763 +        DEBUGLOG(4, "end of frame : controlling src size");
123764 +        RETURN_ERROR_IF(
123765 +            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
123766 +            srcSize_wrong,
123767 +             "error : pledgedSrcSize = %u, while realSrcSize = %u",
123768 +            (unsigned)cctx->pledgedSrcSizePlusOne-1,
123769 +            (unsigned)cctx->consumedSrcSize);
123770 +    }
123771 +    ZSTD_CCtx_trace(cctx, endResult);
123772 +    return cSize + endResult;
123775 +size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
123776 +                               void* dst, size_t dstCapacity,
123777 +                         const void* src, size_t srcSize,
123778 +                         const void* dict,size_t dictSize,
123779 +                               ZSTD_parameters params)
123781 +    ZSTD_CCtx_params cctxParams;
123782 +    DEBUGLOG(4, "ZSTD_compress_advanced");
123783 +    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
123784 +    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
123785 +    return ZSTD_compress_advanced_internal(cctx,
123786 +                                           dst, dstCapacity,
123787 +                                           src, srcSize,
123788 +                                           dict, dictSize,
123789 +                                           &cctxParams);
123792 +/* Internal */
123793 +size_t ZSTD_compress_advanced_internal(
123794 +        ZSTD_CCtx* cctx,
123795 +        void* dst, size_t dstCapacity,
123796 +        const void* src, size_t srcSize,
123797 +        const void* dict,size_t dictSize,
123798 +        const ZSTD_CCtx_params* params)
123800 +    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
123801 +    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
123802 +                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
123803 +                         params, srcSize, ZSTDb_not_buffered) , "");
123804 +    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
123807 +size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
123808 +                               void* dst, size_t dstCapacity,
123809 +                         const void* src, size_t srcSize,
123810 +                         const void* dict, size_t dictSize,
123811 +                               int compressionLevel)
123813 +    ZSTD_CCtx_params cctxParams;
123814 +    {
123815 +        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
123816 +        assert(params.fParams.contentSizeFlag == 1);
123817 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
123818 +    }
123819 +    DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
123820 +    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
123823 +size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
123824 +                         void* dst, size_t dstCapacity,
123825 +                   const void* src, size_t srcSize,
123826 +                         int compressionLevel)
123828 +    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
123829 +    assert(cctx != NULL);
123830 +    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
123833 +size_t ZSTD_compress(void* dst, size_t dstCapacity,
123834 +               const void* src, size_t srcSize,
123835 +                     int compressionLevel)
123837 +    size_t result;
123838 +    ZSTD_CCtx* cctx = ZSTD_createCCtx();
123839 +    RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
123840 +    result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
123841 +    ZSTD_freeCCtx(cctx);
123842 +    return result;
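As a usage reference for the simple entry point just defined, a minimal sketch of a caller. The buffer sizing via ZSTD_compressBound() and the level-3 choice are assumptions of this sketch (it presumes a build where these prototypes are visible, as they are in this file), not something the patch mandates:

    /* Illustration only: compress src into a freshly sized buffer. */
    size_t const bound = ZSTD_compressBound(srcSize); /* worst-case dst size */
    void  *const dst   = malloc(bound);
    size_t const cSize = dst ? ZSTD_compress(dst, bound, src, srcSize, 3)
                             : 0;
    if (dst == NULL || ZSTD_isError(cSize)) {
        /* handle allocation or compression failure */
    }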
123846 +/* =====  Dictionary API  ===== */
123848 +/*! ZSTD_estimateCDictSize_advanced() :
123849 + *  Estimate the amount of memory needed to create a dictionary with the following arguments */
123850 +size_t ZSTD_estimateCDictSize_advanced(
123851 +        size_t dictSize, ZSTD_compressionParameters cParams,
123852 +        ZSTD_dictLoadMethod_e dictLoadMethod)
123854 +    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
123855 +    return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
123856 +         + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
123857 +         + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
123858 +         + (dictLoadMethod == ZSTD_dlm_byRef ? 0
123859 +            : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
123862 +size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
123864 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
123865 +    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
123868 +size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
123870 +    if (cdict==NULL) return 0;   /* support sizeof on NULL */
123871 +    DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
123872 +    /* cdict may be in the workspace */
123873 +    return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
123874 +        + ZSTD_cwksp_sizeof(&cdict->workspace);
123877 +static size_t ZSTD_initCDict_internal(
123878 +                    ZSTD_CDict* cdict,
123879 +              const void* dictBuffer, size_t dictSize,
123880 +                    ZSTD_dictLoadMethod_e dictLoadMethod,
123881 +                    ZSTD_dictContentType_e dictContentType,
123882 +                    ZSTD_CCtx_params params)
123884 +    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
123885 +    assert(!ZSTD_checkCParams(params.cParams));
123886 +    cdict->matchState.cParams = params.cParams;
123887 +    cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
123888 +    if (cdict->matchState.dedicatedDictSearch && dictSize > ZSTD_CHUNKSIZE_MAX) {
123889 +        cdict->matchState.dedicatedDictSearch = 0;
123890 +    }
123891 +    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
123892 +        cdict->dictContent = dictBuffer;
123893 +    } else {
123894 +         void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
123895 +        RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
123896 +        cdict->dictContent = internalBuffer;
123897 +        ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
123898 +    }
123899 +    cdict->dictContentSize = dictSize;
123900 +    cdict->dictContentType = dictContentType;
123902 +    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
123905 +    /* Reset the state to no dictionary */
123906 +    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
123907 +    FORWARD_IF_ERROR(ZSTD_reset_matchState(
123908 +        &cdict->matchState,
123909 +        &cdict->workspace,
123910 +        &params.cParams,
123911 +        ZSTDcrp_makeClean,
123912 +        ZSTDirp_reset,
123913 +        ZSTD_resetTarget_CDict), "");
123914 +    /* (Maybe) load the dictionary
123915 +     * Skips loading the dictionary if it is < 8 bytes.
123916 +     */
123917 +    {   params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
123918 +        params.fParams.contentSizeFlag = 1;
123919 +        {   size_t const dictID = ZSTD_compress_insertDictionary(
123920 +                    &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
123921 +                    &params, cdict->dictContent, cdict->dictContentSize,
123922 +                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
123923 +            FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
123924 +            assert(dictID <= (size_t)(U32)-1);
123925 +            cdict->dictID = (U32)dictID;
123926 +        }
123927 +    }
123929 +    return 0;
123932 +static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
123933 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
123934 +                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
123936 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
123938 +    {   size_t const workspaceSize =
123939 +            ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
123940 +            ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
123941 +            ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
123942 +            (dictLoadMethod == ZSTD_dlm_byRef ? 0
123943 +             : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
123944 +        void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
123945 +        ZSTD_cwksp ws;
123946 +        ZSTD_CDict* cdict;
123948 +        if (!workspace) {
123949 +            ZSTD_customFree(workspace, customMem);
123950 +            return NULL;
123951 +        }
123953 +        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
123955 +        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
123956 +        assert(cdict != NULL);
123957 +        ZSTD_cwksp_move(&cdict->workspace, &ws);
123958 +        cdict->customMem = customMem;
123959 +        cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
123961 +        return cdict;
123962 +    }
123965 +ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
123966 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
123967 +                                      ZSTD_dictContentType_e dictContentType,
123968 +                                      ZSTD_compressionParameters cParams,
123969 +                                      ZSTD_customMem customMem)
123971 +    ZSTD_CCtx_params cctxParams;
123972 +    ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
123973 +    ZSTD_CCtxParams_init(&cctxParams, 0);
123974 +    cctxParams.cParams = cParams;
123975 +    cctxParams.customMem = customMem;
123976 +    return ZSTD_createCDict_advanced2(
123977 +        dictBuffer, dictSize,
123978 +        dictLoadMethod, dictContentType,
123979 +        &cctxParams, customMem);
123982 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
123983 +        const void* dict, size_t dictSize,
123984 +        ZSTD_dictLoadMethod_e dictLoadMethod,
123985 +        ZSTD_dictContentType_e dictContentType,
123986 +        const ZSTD_CCtx_params* originalCctxParams,
123987 +        ZSTD_customMem customMem)
123989 +    ZSTD_CCtx_params cctxParams = *originalCctxParams;
123990 +    ZSTD_compressionParameters cParams;
123991 +    ZSTD_CDict* cdict;
123993 +    DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
123994 +    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
123996 +    if (cctxParams.enableDedicatedDictSearch) {
123997 +        cParams = ZSTD_dedicatedDictSearch_getCParams(
123998 +            cctxParams.compressionLevel, dictSize);
123999 +        ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
124000 +    } else {
124001 +        cParams = ZSTD_getCParamsFromCCtxParams(
124002 +            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
124003 +    }
124005 +    if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
124006 +        /* Fall back to non-DDSS params */
124007 +        cctxParams.enableDedicatedDictSearch = 0;
124008 +        cParams = ZSTD_getCParamsFromCCtxParams(
124009 +            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
124010 +    }
124012 +    cctxParams.cParams = cParams;
124014 +    cdict = ZSTD_createCDict_advanced_internal(dictSize,
124015 +                        dictLoadMethod, cctxParams.cParams,
124016 +                        customMem);
124018 +    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
124019 +                                    dict, dictSize,
124020 +                                    dictLoadMethod, dictContentType,
124021 +                                    cctxParams) )) {
124022 +        ZSTD_freeCDict(cdict);
124023 +        return NULL;
124024 +    }
124026 +    return cdict;
124029 +ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
124031 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
124032 +    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
124033 +                                                  ZSTD_dlm_byCopy, ZSTD_dct_auto,
124034 +                                                  cParams, ZSTD_defaultCMem);
124035 +    if (cdict)
124036 +        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
124037 +    return cdict;
124040 +ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
124042 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
124043 +    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
124044 +                                     ZSTD_dlm_byRef, ZSTD_dct_auto,
124045 +                                     cParams, ZSTD_defaultCMem);
124046 +    if (cdict)
124047 +        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
124048 +    return cdict;
124051 +size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
124053 +    if (cdict==NULL) return 0;   /* support free on NULL */
124054 +    {   ZSTD_customMem const cMem = cdict->customMem;
124055 +        int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
124056 +        ZSTD_cwksp_free(&cdict->workspace, cMem);
124057 +        if (!cdictInWorkspace) {
124058 +            ZSTD_customFree(cdict, cMem);
124059 +        }
124060 +        return 0;
124061 +    }
124064 +/*! ZSTD_initStaticCDict_advanced() :
124065 + *  Generate a digested dictionary in provided memory area.
124066 + *  workspace: The memory area to emplace the dictionary into.
124067 + *             The provided pointer must be 8-byte aligned.
124068 + *             It must outlive dictionary usage.
124069 + *  workspaceSize: Use ZSTD_estimateCDictSize()
124070 + *                 to determine how large workspace must be.
124071 + *  cParams : use ZSTD_getCParams() to transform a compression level
124072 + *            into its relevant cParams.
124073 + * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
124074 + *  Note : there is no corresponding "free" function.
124075 + *         Since workspace was allocated externally, it must be freed externally.
124076 + */
124077 +const ZSTD_CDict* ZSTD_initStaticCDict(
124078 +                                 void* workspace, size_t workspaceSize,
124079 +                           const void* dict, size_t dictSize,
124080 +                                 ZSTD_dictLoadMethod_e dictLoadMethod,
124081 +                                 ZSTD_dictContentType_e dictContentType,
124082 +                                 ZSTD_compressionParameters cParams)
124084 +    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
124085 +    size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
124086 +                            + (dictLoadMethod == ZSTD_dlm_byRef ? 0
124087 +                               : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
124088 +                            + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
124089 +                            + matchStateSize;
124090 +    ZSTD_CDict* cdict;
124091 +    ZSTD_CCtx_params params;
124093 +    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
124095 +    {
124096 +        ZSTD_cwksp ws;
124097 +        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
124098 +        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
124099 +        if (cdict == NULL) return NULL;
124100 +        ZSTD_cwksp_move(&cdict->workspace, &ws);
124101 +    }
124103 +    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
124104 +        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
124105 +    if (workspaceSize < neededSize) return NULL;
124107 +    ZSTD_CCtxParams_init(&params, 0);
124108 +    params.cParams = cParams;
124110 +    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
124111 +                                              dict, dictSize,
124112 +                                              dictLoadMethod, dictContentType,
124113 +                                              params) ))
124114 +        return NULL;
124116 +    return cdict;
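A small sketch of the intended call pattern for ZSTD_initStaticCDict(), following its doc comment above. The buffer handling and the level-3 parameters are illustrative assumptions:

    /* Illustration only: build a digested dictionary in caller-owned memory. */
    size_t const wkspSize = ZSTD_estimateCDictSize(dictSize, 3 /* level */);
    void  *const wksp     = malloc(wkspSize); /* malloc() is suitably aligned */
    const ZSTD_CDict *cdict = NULL;

    if (wksp != NULL)
        cdict = ZSTD_initStaticCDict(wksp, wkspSize, dictBuf, dictSize,
                                     ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                     ZSTD_getCParams(3, 0 /* srcSize unknown */,
                                                     dictSize));
    /* No ZSTD_freeCDict() here: per the note above, the dictionary lives and
     * dies with wksp, which the caller frees itself. */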
124119 +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
124121 +    assert(cdict != NULL);
124122 +    return cdict->matchState.cParams;
124125 +/*! ZSTD_getDictID_fromCDict() :
124126 + *  Provides the dictID of the dictionary loaded into `cdict`.
124127 + *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or empty.
124128 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
124129 +unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
124131 +    if (cdict==NULL) return 0;
124132 +    return cdict->dictID;
124136 +/* ZSTD_compressBegin_usingCDict_advanced() :
124137 + * cdict must be != NULL */
124138 +size_t ZSTD_compressBegin_usingCDict_advanced(
124139 +    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
124140 +    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
124142 +    ZSTD_CCtx_params cctxParams;
124143 +    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
124144 +    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
124145 +    /* Initialize the cctxParams from the cdict */
124146 +    {
124147 +        ZSTD_parameters params;
124148 +        params.fParams = fParams;
124149 +        params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
124150 +                        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
124151 +                        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
124152 +                        || cdict->compressionLevel == 0 ) ?
124153 +                ZSTD_getCParamsFromCDict(cdict)
124154 +              : ZSTD_getCParams(cdict->compressionLevel,
124155 +                                pledgedSrcSize,
124156 +                                cdict->dictContentSize);
124157 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
124158 +    }
124159 +    /* Increase window log to fit the entire dictionary and source if the
124160 +     * source size is known. Limit the increase to 19, which is the
124161 +     * window log for compression level 1 with the largest source size.
124162 +     */
124163 +    if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
124164 +        U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
124165 +        U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
124166 +        cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
124167 +    }
124168 +    return ZSTD_compressBegin_internal(cctx,
124169 +                                        NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
124170 +                                        cdict,
124171 +                                        &cctxParams, pledgedSrcSize,
124172 +                                        ZSTDb_not_buffered);
124175 +/* ZSTD_compressBegin_usingCDict() :
124176 + * pledgedSrcSize=0 means "unknown";
124177 + * if pledgedSrcSize>0, it will enable contentSizeFlag */
124178 +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
124180 +    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
124181 +    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
124182 +    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
124185 +size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
124186 +                                void* dst, size_t dstCapacity,
124187 +                                const void* src, size_t srcSize,
124188 +                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
124190 +    FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), "");   /* will check if cdict != NULL */
124191 +    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
124194 +/*! ZSTD_compress_usingCDict() :
124195 + *  Compression using a digested Dictionary.
124196 + *  Faster startup than ZSTD_compress_usingDict(); recommended when the same dictionary is used multiple times.
124197 + *  Note that compression parameters are decided at CDict creation time
124198 + *  while frame parameters are hardcoded */
124199 +size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
124200 +                                void* dst, size_t dstCapacity,
124201 +                                const void* src, size_t srcSize,
124202 +                                const ZSTD_CDict* cdict)
124204 +    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
124205 +    return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
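Tying the dictionary API together, a minimal reuse pattern over the functions above. Names like dictBuf are placeholders of this sketch:

    /* Illustration only: digest a dictionary once, reuse it across frames. */
    ZSTD_CCtx  *const cctx  = ZSTD_createCCtx();
    ZSTD_CDict *const cdict = ZSTD_createCDict(dictBuf, dictSize, 3 /* level */);

    if (cctx && cdict) {
        size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                                      src, srcSize, cdict);
        if (ZSTD_isError(cSize)) { /* handle error */ }
    }
    ZSTD_freeCDict(cdict);   /* both free functions accept NULL */
    ZSTD_freeCCtx(cctx);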
124210 +/* ******************************************************************
124211 +*  Streaming
124212 +********************************************************************/
124214 +ZSTD_CStream* ZSTD_createCStream(void)
124216 +    DEBUGLOG(3, "ZSTD_createCStream");
124217 +    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
124220 +ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
124222 +    return ZSTD_initStaticCCtx(workspace, workspaceSize);
124225 +ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
124226 +{   /* CStream and CCtx are now the same object */
124227 +    return ZSTD_createCCtx_advanced(customMem);
124230 +size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
124232 +    return ZSTD_freeCCtx(zcs);   /* same object */
124237 +/*======   Initialization   ======*/
124239 +size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }
124241 +size_t ZSTD_CStreamOutSize(void)
124243 +    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
124246 +static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
124248 +    if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
124249 +        return ZSTD_cpm_attachDict;
124250 +    else
124251 +        return ZSTD_cpm_noAttachDict;
124254 +/* ZSTD_resetCStream():
124255 + * pledgedSrcSize == 0 means "unknown" */
124256 +size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
124258 +    /* temporary : 0 interpreted as "unknown" during transition period.
124259 +     * Users who want to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
124260 +     * 0 will be interpreted as "empty" in the future.
124261 +     */
124262 +    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
124263 +    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
124264 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
124265 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
124266 +    return 0;
124269 +/*! ZSTD_initCStream_internal() :
124270 + *  Note : for lib/compress only. Used by zstdmt_compress.c.
124271 + *  Assumption 1 : params are valid
124272 + *  Assumption 2 : either dict, or cdict, is defined, not both */
124273 +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
124274 +                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
124275 +                    const ZSTD_CCtx_params* params,
124276 +                    unsigned long long pledgedSrcSize)
124278 +    DEBUGLOG(4, "ZSTD_initCStream_internal");
124279 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
124280 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
124281 +    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
124282 +    zcs->requestedParams = *params;
124283 +    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
124284 +    if (dict) {
124285 +        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
124286 +    } else {
124287 +        /* Dictionary is cleared if !cdict */
124288 +        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
124289 +    }
124290 +    return 0;
124293 +/* ZSTD_initCStream_usingCDict_advanced() :
124294 + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
124295 +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
124296 +                                            const ZSTD_CDict* cdict,
124297 +                                            ZSTD_frameParameters fParams,
124298 +                                            unsigned long long pledgedSrcSize)
124300 +    DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
124301 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
124302 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
124303 +    zcs->requestedParams.fParams = fParams;
124304 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
124305 +    return 0;
124308 +/* note : cdict must outlive compression session */
124309 +size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
124311 +    DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
124312 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
124313 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
124314 +    return 0;
124318 +/* ZSTD_initCStream_advanced() :
124319 + * pledgedSrcSize must be exact.
124320 + * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
124321 + * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
124322 +size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
124323 +                                 const void* dict, size_t dictSize,
124324 +                                 ZSTD_parameters params, unsigned long long pss)
124326 +    /* for compatibility with older programs relying on this behavior.
124327 +     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
124328 +     * This line will be removed in the future.
124329 +     */
124330 +    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
124331 +    DEBUGLOG(4, "ZSTD_initCStream_advanced");
124332 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
124333 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
124334 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
124335 +    ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
124336 +    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
124337 +    return 0;
124340 +size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
124342 +    DEBUGLOG(4, "ZSTD_initCStream_usingDict");
124343 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
124344 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
124345 +    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
124346 +    return 0;
124349 +size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
124351 +    /* temporary : 0 interpreted as "unknown" during transition period.
124352 +     * Users who want to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
124353 +     * 0 will be interpreted as "empty" in the future.
124354 +     */
124355 +    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
124356 +    DEBUGLOG(4, "ZSTD_initCStream_srcSize");
124357 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
124358 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
124359 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
124360 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
124361 +    return 0;
124364 +size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
124366 +    DEBUGLOG(4, "ZSTD_initCStream");
124367 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
124368 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
124369 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
124370 +    return 0;
124373 +/*======   Compression   ======*/
124375 +static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
124377 +    size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
124378 +    if (hintInSize==0) hintInSize = cctx->blockSize;
124379 +    return hintInSize;
124382 +/** ZSTD_compressStream_generic():
124383 + *  internal function for all *compressStream*() variants
124384 + *  non-static, because can be called from zstdmt_compress.c
124385 + * @return : hint size for next input */
124386 +static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
124387 +                                          ZSTD_outBuffer* output,
124388 +                                          ZSTD_inBuffer* input,
124389 +                                          ZSTD_EndDirective const flushMode)
124391 +    const char* const istart = (const char*)input->src;
124392 +    const char* const iend = input->size != 0 ? istart + input->size : istart;
124393 +    const char* ip = input->pos != 0 ? istart + input->pos : istart;
124394 +    char* const ostart = (char*)output->dst;
124395 +    char* const oend = output->size != 0 ? ostart + output->size : ostart;
124396 +    char* op = output->pos != 0 ? ostart + output->pos : ostart;
124397 +    U32 someMoreWork = 1;
124399 +    /* check expectations */
124400 +    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
124401 +    if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
124402 +        assert(zcs->inBuff != NULL);
124403 +        assert(zcs->inBuffSize > 0);
124404 +    }
124405 +    if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
124406 +        assert(zcs->outBuff !=  NULL);
124407 +        assert(zcs->outBuffSize > 0);
124408 +    }
124409 +    assert(output->pos <= output->size);
124410 +    assert(input->pos <= input->size);
124411 +    assert((U32)flushMode <= (U32)ZSTD_e_end);
124413 +    while (someMoreWork) {
124414 +        switch(zcs->streamStage)
124415 +        {
124416 +        case zcss_init:
124417 +            RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
124419 +        case zcss_load:
124420 +            if ( (flushMode == ZSTD_e_end)
124421 +              && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip)     /* Enough output space */
124422 +                || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)  /* OR we are allowed to return dstSizeTooSmall */
124423 +              && (zcs->inBuffPos == 0) ) {
124424 +                /* shortcut to compression pass directly into output buffer */
124425 +                size_t const cSize = ZSTD_compressEnd(zcs,
124426 +                                                op, oend-op, ip, iend-ip);
124427 +                DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
124428 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
124429 +                ip = iend;
124430 +                op += cSize;
124431 +                zcs->frameEnded = 1;
124432 +                ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
124433 +                someMoreWork = 0; break;
124434 +            }
124435 +            /* complete loading into inBuffer in buffered mode */
124436 +            if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
124437 +                size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
124438 +                size_t const loaded = ZSTD_limitCopy(
124439 +                                        zcs->inBuff + zcs->inBuffPos, toLoad,
124440 +                                        ip, iend-ip);
124441 +                zcs->inBuffPos += loaded;
124442 +                if (loaded != 0)
124443 +                    ip += loaded;
124444 +                if ( (flushMode == ZSTD_e_continue)
124445 +                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
124446 +                    /* not enough input to fill a full block : stop here */
124447 +                    someMoreWork = 0; break;
124448 +                }
124449 +                if ( (flushMode == ZSTD_e_flush)
124450 +                  && (zcs->inBuffPos == zcs->inToCompress) ) {
124451 +                    /* empty */
124452 +                    someMoreWork = 0; break;
124453 +                }
124454 +            }
124455 +            /* compress current block (note : this stage cannot be stopped in the middle) */
124456 +            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
124457 +            {   int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
124458 +                void* cDst;
124459 +                size_t cSize;
124460 +                size_t oSize = oend-op;
124461 +                size_t const iSize = inputBuffered
124462 +                    ? zcs->inBuffPos - zcs->inToCompress
124463 +                    : MIN((size_t)(iend - ip), zcs->blockSize);
124464 +                if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
124465 +                    cDst = op;   /* compress into output buffer, to skip flush stage */
124466 +                else
124467 +                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
124468 +                if (inputBuffered) {
124469 +                    unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
124470 +                    cSize = lastBlock ?
124471 +                            ZSTD_compressEnd(zcs, cDst, oSize,
124472 +                                        zcs->inBuff + zcs->inToCompress, iSize) :
124473 +                            ZSTD_compressContinue(zcs, cDst, oSize,
124474 +                                        zcs->inBuff + zcs->inToCompress, iSize);
124475 +                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
124476 +                    zcs->frameEnded = lastBlock;
124477 +                    /* prepare next block */
124478 +                    zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
124479 +                    if (zcs->inBuffTarget > zcs->inBuffSize)
124480 +                        zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
124481 +                    DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
124482 +                            (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
124483 +                    if (!lastBlock)
124484 +                        assert(zcs->inBuffTarget <= zcs->inBuffSize);
124485 +                    zcs->inToCompress = zcs->inBuffPos;
124486 +                } else {
124487 +                    unsigned const lastBlock = (ip + iSize == iend);
124488 +                    assert(flushMode == ZSTD_e_end /* Already validated */);
124489 +                    cSize = lastBlock ?
124490 +                            ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
124491 +                            ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
124492 +                    /* Consume the input prior to error checking to mirror buffered mode. */
124493 +                    if (iSize > 0)
124494 +                        ip += iSize;
124495 +                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
124496 +                    zcs->frameEnded = lastBlock;
124497 +                    if (lastBlock)
124498 +                        assert(ip == iend);
124499 +                }
124500 +                if (cDst == op) {  /* no need to flush */
124501 +                    op += cSize;
124502 +                    if (zcs->frameEnded) {
124503 +                        DEBUGLOG(5, "Frame completed directly in outBuffer");
124504 +                        someMoreWork = 0;
124505 +                        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
124506 +                    }
124507 +                    break;
124508 +                }
124509 +                zcs->outBuffContentSize = cSize;
124510 +                zcs->outBuffFlushedSize = 0;
124511 +                zcs->streamStage = zcss_flush; /* pass-through to flush stage */
124512 +            }
124513 +            /* fall-through */
124514 +        case zcss_flush:
124515 +            DEBUGLOG(5, "flush stage");
124516 +            assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
124517 +            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
124518 +                size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
124519 +                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
124520 +                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
124521 +                            (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
124522 +                if (flushed)
124523 +                    op += flushed;
124524 +                zcs->outBuffFlushedSize += flushed;
124525 +                if (toFlush!=flushed) {
124526 +                    /* flush not fully completed, presumably because dst is too small */
124527 +                    assert(op==oend);
124528 +                    someMoreWork = 0;
124529 +                    break;
124530 +                }
124531 +                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
124532 +                if (zcs->frameEnded) {
124533 +                    DEBUGLOG(5, "Frame completed on flush");
124534 +                    someMoreWork = 0;
124535 +                    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
124536 +                    break;
124537 +                }
124538 +                zcs->streamStage = zcss_load;
124539 +                break;
124540 +            }
124542 +        default: /* impossible */
124543 +            assert(0);
124544 +        }
124545 +    }
124547 +    input->pos = ip - istart;
124548 +    output->pos = op - ostart;
124549 +    if (zcs->frameEnded) return 0;
124550 +    return ZSTD_nextInputSizeHint(zcs);
124551 +}
124553 +static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
124554 +{
124555 +    return ZSTD_nextInputSizeHint(cctx);
124556 +}
124559 +size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
124560 +{
124561 +    FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
124562 +    return ZSTD_nextInputSizeHint_MTorST(zcs);
124563 +}
124565 +/* After a compression call, set the expected input/output buffers.
124566 + * This is validated at the start of the next compression call.
124567 + */
124568 +static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
124569 +{
124570 +    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
124571 +        cctx->expectedInBuffer = *input;
124572 +    }
124573 +    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
124574 +        cctx->expectedOutBufferSize = output->size - output->pos;
124575 +    }
124576 +}
124578 +/* Validate that the input/output buffers match the expectations set by
124579 + * ZSTD_setBufferExpectations.
124580 + */
124581 +static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
124582 +                                        ZSTD_outBuffer const* output,
124583 +                                        ZSTD_inBuffer const* input,
124584 +                                        ZSTD_EndDirective endOp)
124585 +{
124586 +    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
124587 +        ZSTD_inBuffer const expect = cctx->expectedInBuffer;
124588 +        if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
124589 +            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
124590 +        if (endOp != ZSTD_e_end)
124591 +            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
124592 +    }
124593 +    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
124594 +        size_t const outBufferSize = output->size - output->pos;
124595 +        if (cctx->expectedOutBufferSize != outBufferSize)
124596 +            RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
124597 +    }
124598 +    return 0;
124599 +}
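To make the contract concrete: a minimal user-space sketch of a caller that satisfies these checks. The helper name compress_stable is illustrative; ZSTD_c_stableInBuffer / ZSTD_c_stableOutBuffer sit behind ZSTD_STATIC_LINKING_ONLY in zstd.h, and dstCapacity is assumed >= ZSTD_compressBound(srcSize).

#define ZSTD_STATIC_LINKING_ONLY
#include <assert.h>
#include <zstd.h>

/* Compress src in one call while promising buffer stability: the exact same
 * input/output buffer descriptions must be presented on every call, or the
 * checks above fail with srcBuffer_wrong / dstBuffer_wrong. */
static size_t compress_stable(ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize)
{
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableOutBuffer, 1);
    /* A stable input buffer also requires ZSTD_e_end (enforced above). */
    {   size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;
        assert(remaining == 0);  /* holds when dstCapacity >= ZSTD_compressBound(srcSize) */
        return out.pos;
    }
}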
124601 +static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
124602 +                                             ZSTD_EndDirective endOp,
124603 +                                             size_t inSize) {
124604 +    ZSTD_CCtx_params params = cctx->requestedParams;
124605 +    ZSTD_prefixDict const prefixDict = cctx->prefixDict;
124606 +    FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
124607 +    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
124608 +    assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
124609 +    if (cctx->cdict)
124610 +        params.compressionLevel = cctx->cdict->compressionLevel; /* let cdict take priority in terms of compression level */
124611 +    DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
124612 +    if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1;  /* auto-fix pledgedSrcSize */
124613 +    {
124614 +        size_t const dictSize = prefixDict.dict
124615 +                ? prefixDict.dictSize
124616 +                : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
124617 +        ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
124618 +        params.cParams = ZSTD_getCParamsFromCCtxParams(
124619 +                &params, cctx->pledgedSrcSizePlusOne-1,
124620 +                dictSize, mode);
124621 +    }
124623 +    if (ZSTD_CParams_shouldEnableLdm(&params.cParams)) {
124624 +        /* Enable LDM by default for optimal parser and window size >= 128MB */
124625 +        DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)");
124626 +        params.ldmParams.enableLdm = 1;
124627 +    }
124629 +    {   U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
124630 +        assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
124631 +        FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
124632 +                prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
124633 +                cctx->cdict,
124634 +                &params, pledgedSrcSize,
124635 +                ZSTDb_buffered) , "");
124636 +        assert(cctx->appliedParams.nbWorkers == 0);
124637 +        cctx->inToCompress = 0;
124638 +        cctx->inBuffPos = 0;
124639 +        if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
124640 +            /* for small inputs: avoid an automatic flush on reaching the end of the block, since
124641 +             * it would require adding a 3-byte null block to end the frame
124642 +             */
124643 +            cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
124644 +        } else {
124645 +            cctx->inBuffTarget = 0;
124646 +        }
124647 +        cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
124648 +        cctx->streamStage = zcss_load;
124649 +        cctx->frameEnded = 0;
124650 +    }
124651 +    return 0;
124652 +}
124654 +size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
124655 +                             ZSTD_outBuffer* output,
124656 +                             ZSTD_inBuffer* input,
124657 +                             ZSTD_EndDirective endOp)
124658 +{
124659 +    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
124660 +    /* check conditions */
124661 +    RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
124662 +    RETURN_ERROR_IF(input->pos  > input->size, srcSize_wrong, "invalid input buffer");
124663 +    RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
124664 +    assert(cctx != NULL);
124666 +    /* transparent initialization stage */
124667 +    if (cctx->streamStage == zcss_init) {
124668 +        FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
124669 +        ZSTD_setBufferExpectations(cctx, output, input);    /* Set initial buffer expectations now that we've initialized */
124670 +    }
124671 +    /* end of transparent initialization stage */
124673 +    FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
124674 +    /* compression stage */
124675 +    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
124676 +    DEBUGLOG(5, "completed ZSTD_compressStream2");
124677 +    ZSTD_setBufferExpectations(cctx, output, input);
124678 +    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
124679 +}
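For orientation, the canonical loop that drives this function from user space, assuming the public zstd.h API and stdio; compress_file is an illustrative helper, patterned after zstd's streaming_compression example.

#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

/* Compress fin into fout with ZSTD_compressStream2(); returns 0 on success. */
static int compress_file(FILE* fin, FILE* fout, int level)
{
    size_t const inSize   = ZSTD_CStreamInSize();   /* recommended chunk sizes */
    size_t const outSize  = ZSTD_CStreamOutSize();
    void* const inBuf     = malloc(inSize);
    void* const outBuf    = malloc(outSize);
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    int ret = -1;
    if (!inBuf || !outBuf || !cctx) goto cleanup;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
    for (;;) {
        size_t const readSz = fread(inBuf, 1, inSize, fin);
        int const lastChunk = (readSz < inSize);
        ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
        ZSTD_inBuffer in = { inBuf, readSz, 0 };
        int finished;
        do {   /* mirrors the zcss_load/zcss_flush loop above: call until done */
            ZSTD_outBuffer out = { outBuf, outSize, 0 };
            size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, mode);
            if (ZSTD_isError(remaining)) goto cleanup;
            if (fwrite(outBuf, 1, out.pos, fout) != out.pos) goto cleanup;
            finished = lastChunk ? (remaining == 0)    /* frame epilogue flushed */
                                 : (in.pos == in.size);/* chunk fully consumed */
        } while (!finished);
        if (lastChunk) { ret = 0; break; }
    }
cleanup:
    ZSTD_freeCCtx(cctx);
    free(inBuf);
    free(outBuf);
    return ret;
}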
124681 +size_t ZSTD_compressStream2_simpleArgs (
124682 +                            ZSTD_CCtx* cctx,
124683 +                            void* dst, size_t dstCapacity, size_t* dstPos,
124684 +                      const void* src, size_t srcSize, size_t* srcPos,
124685 +                            ZSTD_EndDirective endOp)
124686 +{
124687 +    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
124688 +    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
124689 +    /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
124690 +    size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
124691 +    *dstPos = output.pos;
124692 +    *srcPos = input.pos;
124693 +    return cErr;
124694 +}
124696 +size_t ZSTD_compress2(ZSTD_CCtx* cctx,
124697 +                      void* dst, size_t dstCapacity,
124698 +                      const void* src, size_t srcSize)
124699 +{
124700 +    ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
124701 +    ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
124702 +    DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
124703 +    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
124704 +    /* Enable stable input/output buffers. */
124705 +    cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
124706 +    cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
124707 +    {   size_t oPos = 0;
124708 +        size_t iPos = 0;
124709 +        size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
124710 +                                        dst, dstCapacity, &oPos,
124711 +                                        src, srcSize, &iPos,
124712 +                                        ZSTD_e_end);
124713 +        /* Reset to the original values. */
124714 +        cctx->requestedParams.inBufferMode = originalInBufferMode;
124715 +        cctx->requestedParams.outBufferMode = originalOutBufferMode;
124716 +        FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
124717 +        if (result != 0) {  /* compression not completed, due to lack of output space */
124718 +            assert(oPos == dstCapacity);
124719 +            RETURN_ERROR(dstSize_tooSmall, "");
124720 +        }
124721 +        assert(iPos == srcSize);   /* all input is expected to be consumed */
124722 +        return oPos;
124723 +    }
124724 +}
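The function above is essentially the stable-buffer pattern packaged for one call. A user-space sketch of typical usage, pairing it with ZSTD_compressBound(); compress_once is an illustrative helper.

#include <stdlib.h>
#include <zstd.h>

/* One-shot compression of src; on success returns a malloc'd buffer and
 * stores the compressed size in *cSize, otherwise returns NULL. */
static void* compress_once(const void* src, size_t srcSize, size_t* cSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const bound = ZSTD_compressBound(srcSize);  /* worst-case frame size */
    void* const dst = malloc(bound);
    if (!cctx || !dst) goto fail;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
    *cSize = ZSTD_compress2(cctx, dst, bound, src, srcSize);
    if (ZSTD_isError(*cSize)) goto fail;
    ZSTD_freeCCtx(cctx);
    return dst;
fail:
    ZSTD_freeCCtx(cctx);   /* freeing a NULL context is a no-op */
    free(dst);
    return NULL;
}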
124726 +typedef struct {
124727 +    U32 idx;             /* Index in array of ZSTD_Sequence */
124728 +    U32 posInSequence;   /* Position within sequence at idx */
124729 +    size_t posInSrc;        /* Number of bytes given by sequences provided so far */
124730 +} ZSTD_sequencePosition;
124732 +/* Returns a ZSTD error code if the sequence is not valid */
124733 +static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
124734 +                                    size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
124735 +    size_t offsetBound;
124736 +    U32 const windowSize = 1u << windowLog;  /* unsigned shift: windowLog may be as large as 31 */
124737 +    /* posInSrc represents the amount of data the decoder would decode up to this point.
124738 +     * As long as the amount of data decoded is less than or equal to window size, offsets may be
124739 +     * larger than the total length of output decoded in order to reference the dict, even larger than
124740 +     * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
124741 +     */
124742 +    offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
124743 +    RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
124744 +    RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
124745 +    return 0;
124746 +}
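A worked illustration of the bound with arbitrary numbers; this assumes it sits in the same translation unit, since ZSTD_validateSequence() is static.

static void validate_sequence_examples(void)
{
    /* windowLog 10 => windowSize 1024; 100 bytes decoded so far; 500-byte
     * dict: offsets up to 100 + 500 = 600 are referenceable. */
    assert(!ZSTD_isError(ZSTD_validateSequence(600 + ZSTD_REP_MOVE, 4, 100, 10, 500, 4)));
    assert( ZSTD_isError(ZSTD_validateSequence(601 + ZSTD_REP_MOVE, 4, 100, 10, 500, 4)));
    /* a matchLength below minMatch is rejected regardless of offset */
    assert( ZSTD_isError(ZSTD_validateSequence(1 + ZSTD_REP_MOVE, 3, 100, 10, 500, 4)));
}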
124748 +/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
124749 +static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
124750 +    U32 offCode = rawOffset + ZSTD_REP_MOVE;
124751 +    U32 repCode = 0;
124753 +    if (!ll0 && rawOffset == rep[0]) {
124754 +        repCode = 1;
124755 +    } else if (rawOffset == rep[1]) {
124756 +        repCode = 2 - ll0;
124757 +    } else if (rawOffset == rep[2]) {
124758 +        repCode = 3 - ll0;
124759 +    } else if (ll0 && rawOffset == rep[0] - 1) {
124760 +        repCode = 3;
124761 +    }
124762 +    if (repCode) {
124763 +        /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
124764 +        offCode = repCode - 1;
124765 +    }
124766 +    return offCode;
124767 +}
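A worked illustration of the mapping, using an arbitrary repcode history (same same-translation-unit assumption as above, since the function is static; ZSTD_REP_MOVE == ZSTD_REP_NUM - 1 == 2 per zstd_internal.h).

static void offcode_examples(void)
{
    static const U32 rep[ZSTD_REP_NUM] = { 8, 4, 12 };
    assert(ZSTD_finalizeOffCode(  8, rep, 0) == 0);   /* rep[0], ll > 0   -> repcode 1 */
    assert(ZSTD_finalizeOffCode(  4, rep, 0) == 1);   /* rep[1], ll > 0   -> repcode 2 */
    assert(ZSTD_finalizeOffCode(  4, rep, 1) == 0);   /* rep[1], ll == 0  -> repcode 1 */
    assert(ZSTD_finalizeOffCode(  7, rep, 1) == 2);   /* rep[0]-1, ll == 0 -> repcode 3 */
    assert(ZSTD_finalizeOffCode(100, rep, 0) == 100 + ZSTD_REP_MOVE);  /* no repcode match */
}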
124769 +/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
124770 + * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
124771 + */
124772 +static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
124773 +                                                             const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
124774 +                                                             const void* src, size_t blockSize) {
124775 +    U32 idx = seqPos->idx;
124776 +    BYTE const* ip = (BYTE const*)(src);
124777 +    const BYTE* const iend = ip + blockSize;
124778 +    repcodes_t updatedRepcodes;
124779 +    U32 dictSize;
124780 +    U32 litLength;
124781 +    U32 matchLength;
124782 +    U32 ll0;
124783 +    U32 offCode;
124785 +    if (cctx->cdict) {
124786 +        dictSize = (U32)cctx->cdict->dictContentSize;
124787 +    } else if (cctx->prefixDict.dict) {
124788 +        dictSize = (U32)cctx->prefixDict.dictSize;
124789 +    } else {
124790 +        dictSize = 0;
124791 +    }
124792 +    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
124793 +    for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {
124794 +        litLength = inSeqs[idx].litLength;
124795 +        matchLength = inSeqs[idx].matchLength;
124796 +        ll0 = litLength == 0;
124797 +        offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
124798 +        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
124800 +        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
124801 +        if (cctx->appliedParams.validateSequences) {
124802 +            seqPos->posInSrc += litLength + matchLength;
124803 +            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
124804 +                                                cctx->appliedParams.cParams.windowLog, dictSize,
124805 +                                                cctx->appliedParams.cParams.minMatch),
124806 +                                                "Sequence validation failed");
124807 +        }
124808 +        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
124809 +                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
124810 +        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
124811 +        ip += matchLength + litLength;
124812 +    }
124813 +    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
124815 +    if (inSeqs[idx].litLength) {
124816 +        DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
124817 +        ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
124818 +        ip += inSeqs[idx].litLength;
124819 +        seqPos->posInSrc += inSeqs[idx].litLength;
124820 +    }
124821 +    RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
124822 +    seqPos->idx = idx+1;
124823 +    return 0;
124824 +}
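Concretely, one block under ZSTD_sf_explicitBlockDelimiters is a run of real sequences closed by a delimiter entry whose offset and matchLength are 0 and whose litLength carries the trailing literals. An illustrative 100-byte block, using the experimental public ZSTD_Sequence type from zstd.h:

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* "20 literals, then a 70-byte match at offset 50, then 10 trailing
 * literals" -- 20 + 70 + 10 == 100 bytes, which is the block size the
 * copier above checks against iend. */
static const ZSTD_Sequence exampleBlock[] = {
    /* offset, litLength, matchLength, rep (ignored on input) */
    { 50, 20, 70, 0 },
    {  0, 10,  0, 0 },   /* block delimiter */
};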
124826 +/* Returns the number of bytes to move the current read position back by. The result is
124827 + * only non-zero if we ended up splitting a sequence; a ZSTD error is returned instead if
124828 + * something went wrong.
124830 + * This function will attempt to scan through blockSize bytes represented by the sequences
124831 + * in inSeqs, storing any (partial) sequences.
124833 + * Occasionally, we may want to reduce the number of bytes consumed from inSeqs to avoid
124834 + * splitting a match, or to avoid splitting it in a way that would produce a match
124835 + * smaller than MINMATCH. In that case, we return the number of bytes we did not read from this block.
124836 + */
124837 +static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
124838 +                                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
124839 +                                                       const void* src, size_t blockSize) {
124840 +    U32 idx = seqPos->idx;
124841 +    U32 startPosInSequence = seqPos->posInSequence;
124842 +    U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
124843 +    size_t dictSize;
124844 +    BYTE const* ip = (BYTE const*)(src);
124845 +    BYTE const* iend = ip + blockSize;  /* May be adjusted if we decide to process fewer than blockSize bytes */
124846 +    repcodes_t updatedRepcodes;
124847 +    U32 bytesAdjustment = 0;
124848 +    U32 finalMatchSplit = 0;
124849 +    U32 litLength;
124850 +    U32 matchLength;
124851 +    U32 rawOffset;
124852 +    U32 offCode;
124854 +    if (cctx->cdict) {
124855 +        dictSize = cctx->cdict->dictContentSize;
124856 +    } else if (cctx->prefixDict.dict) {
124857 +        dictSize = cctx->prefixDict.dictSize;
124858 +    } else {
124859 +        dictSize = 0;
124860 +    }
124861 +    DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
124862 +    DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
124863 +    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
124864 +    while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
124865 +        const ZSTD_Sequence currSeq = inSeqs[idx];
124866 +        litLength = currSeq.litLength;
124867 +        matchLength = currSeq.matchLength;
124868 +        rawOffset = currSeq.offset;
124870 +        /* Modify the sequence depending on where endPosInSequence lies */
124871 +        if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
124872 +            if (startPosInSequence >= litLength) {
124873 +                startPosInSequence -= litLength;
124874 +                litLength = 0;
124875 +                matchLength -= startPosInSequence;
124876 +            } else {
124877 +                litLength -= startPosInSequence;
124878 +            }
124879 +            /* Move to the next sequence */
124880 +            endPosInSequence -= currSeq.litLength + currSeq.matchLength;
124881 +            startPosInSequence = 0;
124882 +            idx++;
124883 +        } else {
124884 +            /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
124885 +               does not reach the end of the match. So, we have to split the sequence */
124886 +            DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
124887 +                     currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
124888 +            if (endPosInSequence > litLength) {
124889 +                U32 firstHalfMatchLength;
124890 +                litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
124891 +                firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
124892 +                if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
124893 +                    /* Only ever split the match if it is larger than the block size */
124894 +                    U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
124895 +                    if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
124896 +                        /* Move the endPosInSequence backward so that it creates a match of minMatch length */
124897 +                        endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
124898 +                        bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
124899 +                        firstHalfMatchLength -= bytesAdjustment;
124900 +                    }
124901 +                    matchLength = firstHalfMatchLength;
124902 +                    /* Flag that we split the last match - after storing the sequence, exit the loop,
124903 +                       but keep the value of endPosInSequence */
124904 +                    finalMatchSplit = 1;
124905 +                } else {
124906 +                    /* Move the position in the sequence backwards so that we don't split the match, and break to store
124907 +                     * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
124908 +                     * should go. We prefer to do this whenever splitting the match is not necessary, or when splitting
124909 +                     * would cause the first half of the match to be too small.
124910 +                     */
124911 +                    bytesAdjustment = endPosInSequence - currSeq.litLength;
124912 +                    endPosInSequence = currSeq.litLength;
124913 +                    break;
124914 +                }
124915 +            } else {
124916 +                /* This sequence ends inside the literals, break to store the last literals */
124917 +                break;
124918 +            }
124919 +        }
124920 +        /* Check if this offset can be represented with a repcode */
124921 +        {   U32 ll0 = (litLength == 0);
124922 +            offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
124923 +            updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
124924 +        }
124926 +        if (cctx->appliedParams.validateSequences) {
124927 +            seqPos->posInSrc += litLength + matchLength;
124928 +            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
124929 +                                                   cctx->appliedParams.cParams.windowLog, dictSize,
124930 +                                                   cctx->appliedParams.cParams.minMatch),
124931 +                                                   "Sequence validation failed");
124932 +        }
124933 +        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
124934 +        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
124935 +                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
124936 +        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
124937 +        ip += matchLength + litLength;
124938 +    }
124939 +    DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
124940 +    assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
124941 +    seqPos->idx = idx;
124942 +    seqPos->posInSequence = endPosInSequence;
124943 +    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
124945 +    iend -= bytesAdjustment;
124946 +    if (ip != iend) {
124947 +        /* Store any last literals */
124948 +        U32 lastLLSize = (U32)(iend - ip);
124949 +        assert(ip <= iend);
124950 +        DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
124951 +        ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
124952 +        seqPos->posInSrc += lastLLSize;
124953 +    }
124955 +    return bytesAdjustment;
124956 +}
124958 +typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
124959 +                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
124960 +                                       const void* src, size_t blockSize);
124961 +static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
124962 +    ZSTD_sequenceCopier sequenceCopier = NULL;
124963 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
124964 +    if (mode == ZSTD_sf_explicitBlockDelimiters) {
124965 +        return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
124966 +    } else if (mode == ZSTD_sf_noBlockDelimiters) {
124967 +        return ZSTD_copySequencesToSeqStoreNoBlockDelim;
124968 +    }
124969 +    assert(sequenceCopier != NULL);
124970 +    return sequenceCopier;
124971 +}
124973 +/* Compress, block-by-block, all of the sequences given.
124975 + * Returns the cumulative size of all compressed blocks (including their headers), or a ZSTD error code.
124976 + */
124977 +static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
124978 +                                              void* dst, size_t dstCapacity,
124979 +                                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
124980 +                                              const void* src, size_t srcSize) {
124981 +    size_t cSize = 0;
124982 +    U32 lastBlock;
124983 +    size_t blockSize;
124984 +    size_t compressedSeqsSize;
124985 +    size_t remaining = srcSize;
124986 +    ZSTD_sequencePosition seqPos = {0, 0, 0};
124988 +    BYTE const* ip = (BYTE const*)src;
124989 +    BYTE* op = (BYTE*)dst;
124990 +    ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
124992 +    DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
124993 +    /* Special case: empty frame */
124994 +    if (remaining == 0) {
124995 +        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
124996 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
124997 +        MEM_writeLE32(op, cBlockHeader24);
124998 +        op += ZSTD_blockHeaderSize;
124999 +        dstCapacity -= ZSTD_blockHeaderSize;
125000 +        cSize += ZSTD_blockHeaderSize;
125001 +    }
125003 +    while (remaining) {
125004 +        size_t cBlockSize;
125005 +        size_t additionalByteAdjustment;
125006 +        lastBlock = remaining <= cctx->blockSize;
125007 +        blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
125008 +        ZSTD_resetSeqStore(&cctx->seqStore);
125009 +        DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
125011 +        additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
125012 +        FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
125013 +        blockSize -= additionalByteAdjustment;
125015 +        /* If blocks are too small, emit as a nocompress block */
125016 +        if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
125017 +            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
125018 +            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
125019 +            DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
125020 +            cSize += cBlockSize;
125021 +            ip += blockSize;
125022 +            op += cBlockSize;
125023 +            remaining -= blockSize;
125024 +            dstCapacity -= cBlockSize;
125025 +            continue;
125026 +        }
125028 +        compressedSeqsSize = ZSTD_entropyCompressSequences(&cctx->seqStore,
125029 +                                &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
125030 +                                &cctx->appliedParams,
125031 +                                op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
125032 +                                blockSize,
125033 +                                cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
125034 +                                cctx->bmi2);
125035 +        FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
125036 +        DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
125038 +        if (!cctx->isFirstBlock &&
125039 +            ZSTD_maybeRLE(&cctx->seqStore) &&
125040 +            ZSTD_isRLE((BYTE const*)src, srcSize)) {
125041 +            /* We don't want to emit our first block as an RLE block even if it qualifies,
125042 +             * because doing so would cause the decoder (CLI only) to throw a
125043 +             * "should consume all input" error. This is only an issue for zstd <= v1.4.3.
125044 +             */
125045 +            compressedSeqsSize = 1;
125046 +        }
125048 +        if (compressedSeqsSize == 0) {
125049 +            /* ZSTD_noCompressBlock writes the block header as well */
125050 +            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
125051 +            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
125052 +            DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
125053 +        } else if (compressedSeqsSize == 1) {
125054 +            cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
125055 +            FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
125056 +            DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
125057 +        } else {
125058 +            U32 cBlockHeader;
125059 +            /* Error checking and repcodes update */
125060 +            ZSTD_confirmRepcodesAndEntropyTables(cctx);
125061 +            if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
125062 +                cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
125064 +            /* Write the block header into the beginning of the block */
125065 +            cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
125066 +            MEM_writeLE24(op, cBlockHeader);
125067 +            cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
125068 +            DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
125069 +        }
125071 +        cSize += cBlockSize;
125072 +        DEBUGLOG(4, "cSize running total: %zu", cSize);
125074 +        if (lastBlock) {
125075 +            break;
125076 +        } else {
125077 +            ip += blockSize;
125078 +            op += cBlockSize;
125079 +            remaining -= blockSize;
125080 +            dstCapacity -= cBlockSize;
125081 +            cctx->isFirstBlock = 0;
125082 +        }
125083 +    }
125085 +    return cSize;
125086 +}
125088 +size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
125089 +                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
125090 +                              const void* src, size_t srcSize) {
125091 +    BYTE* op = (BYTE*)dst;
125092 +    size_t cSize = 0;
125093 +    size_t compressedBlocksSize = 0;
125094 +    size_t frameHeaderSize = 0;
125096 +    /* Transparent initialization stage, same as compressStream2() */
125097 +    DEBUGLOG(3, "ZSTD_compressSequences()");
125098 +    assert(cctx != NULL);
125099 +    FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
125100 +    /* Begin writing output, starting with frame header */
125101 +    frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
125102 +    op += frameHeaderSize;
125103 +    dstCapacity -= frameHeaderSize;
125104 +    cSize += frameHeaderSize;
125105 +    if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
125106 +        xxh64_update(&cctx->xxhState, src, srcSize);
125107 +    }
125108 +    /* cSize includes block header size and compressed sequences size */
125109 +    compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
125110 +                                                           op, dstCapacity,
125111 +                                                           inSeqs, inSeqsSize,
125112 +                                                           src, srcSize);
125113 +    FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
125114 +    cSize += compressedBlocksSize;
125115 +    dstCapacity -= compressedBlocksSize;
125117 +    if (cctx->appliedParams.fParams.checksumFlag) {
125118 +        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
125119 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
125120 +        DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
125121 +        MEM_writeLE32((char*)dst + cSize, checksum);
125122 +        cSize += 4;
125123 +    }
125125 +    DEBUGLOG(3, "Final compressed size: %zu", cSize);
125126 +    return cSize;
125127 +}
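A sketch of how a caller reaches this entry point from user space, assuming the experimental parameters ZSTD_c_blockDelimiters and ZSTD_c_validateSequences from zstd.h and a sequence array shaped like the earlier illustration; compress_with_sequences is an illustrative helper.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t compress_with_sequences(ZSTD_CCtx* cctx,
                                      void* dst, size_t dstCapacity,
                                      const ZSTD_Sequence* seqs, size_t nbSeqs,
                                      const void* src, size_t srcSize)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters,
                           ZSTD_sf_explicitBlockDelimiters);
    /* Opt into ZSTD_validateSequence() above: costs time, catches bad input. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);
    return ZSTD_compressSequences(cctx, dst, dstCapacity,
                                  seqs, nbSeqs, src, srcSize);
}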
125129 +/*======   Finalize   ======*/
125131 +/*! ZSTD_flushStream() :
125132 + * @return : amount of data remaining to flush */
125133 +size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
125134 +{
125135 +    ZSTD_inBuffer input = { NULL, 0, 0 };
125136 +    return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
125137 +}
125140 +size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
125141 +{
125142 +    ZSTD_inBuffer input = { NULL, 0, 0 };
125143 +    size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
125144 +    FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
125145 +    if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */
125146 +    /* single thread mode : attempt to calculate remaining to flush more precisely */
125147 +    {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
125148 +        size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
125149 +        size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
125150 +        DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
125151 +        return toFlush;
125152 +    }
125153 +}
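The intended calling pattern, as a user-space sketch: keep invoking ZSTD_endStream() until it returns 0, draining the output buffer between calls; finish_frame is an illustrative helper.

#include <stdio.h>
#include <zstd.h>

static int finish_frame(ZSTD_CStream* zcs, FILE* fout, void* buf, size_t bufSize)
{
    for (;;) {
        ZSTD_outBuffer out = { buf, bufSize, 0 };
        size_t const remaining = ZSTD_endStream(zcs, &out);
        if (ZSTD_isError(remaining)) return -1;
        if (fwrite(buf, 1, out.pos, fout) != out.pos) return -1;
        if (remaining == 0) return 0;   /* frame epilogue fully written */
    }
}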
125156 +/*-=====  Pre-defined compression levels  =====-*/
125158 +#define ZSTD_MAX_CLEVEL     22
125159 +int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
125160 +int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
125162 +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
125163 +{   /* "default" - for any srcSize > 256 KB */
125164 +    /* W,  C,  H,  S,  L, TL, strat */
125165 +    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
125166 +    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
125167 +    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
125168 +    { 21, 16, 17,  1,  5,  0, ZSTD_dfast   },  /* level  3 */
125169 +    { 21, 18, 18,  1,  5,  0, ZSTD_dfast   },  /* level  4 */
125170 +    { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
125171 +    { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */
125172 +    { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */
125173 +    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
125174 +    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
125175 +    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
125176 +    { 22, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
125177 +    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
125178 +    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 13 */
125179 +    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
125180 +    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
125181 +    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
125182 +    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
125183 +    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
125184 +    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
125185 +    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
125186 +    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
125187 +    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
125188 +},
125189 +{   /* for srcSize <= 256 KB */
125190 +    /* W,  C,  H,  S,  L,  T, strat */
125191 +    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
125192 +    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
125193 +    { 18, 14, 14,  1,  5,  0, ZSTD_dfast   },  /* level  2 */
125194 +    { 18, 16, 16,  1,  4,  0, ZSTD_dfast   },  /* level  3 */
125195 +    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
125196 +    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
125197 +    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
125198 +    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
125199 +    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
125200 +    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
125201 +    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
125202 +    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
125203 +    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
125204 +    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
125205 +    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
125206 +    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
125207 +    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
125208 +    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
125209 +    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
125210 +    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
125211 +    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
125212 +    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
125213 +    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
125214 +},
125215 +{   /* for srcSize <= 128 KB */
125216 +    /* W,  C,  H,  S,  L,  T, strat */
125217 +    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
125218 +    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
125219 +    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
125220 +    { 17, 15, 16,  2,  5,  0, ZSTD_dfast   },  /* level  3 */
125221 +    { 17, 17, 17,  2,  4,  0, ZSTD_dfast   },  /* level  4 */
125222 +    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
125223 +    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
125224 +    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
125225 +    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
125226 +    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
125227 +    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
125228 +    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
125229 +    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
125230 +    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
125231 +    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
125232 +    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
125233 +    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
125234 +    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
125235 +    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
125236 +    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
125237 +    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
125238 +    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
125239 +    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
125240 +},
125241 +{   /* for srcSize <= 16 KB */
125242 +    /* W,  C,  H,  S,  L,  T, strat */
125243 +    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
125244 +    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
125245 +    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
125246 +    { 14, 14, 15,  2,  4,  0, ZSTD_dfast   },  /* level  3 */
125247 +    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
125248 +    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
125249 +    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
125250 +    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
125251 +    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
125252 +    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
125253 +    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
125254 +    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
125255 +    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
125256 +    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
125257 +    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
125258 +    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
125259 +    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
125260 +    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
125261 +    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
125262 +    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
125263 +    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
125264 +    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
125265 +    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
125266 +},
125267 +};
125269 +static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
125270 +{
125271 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
125272 +    switch (cParams.strategy) {
125273 +        case ZSTD_fast:
125274 +        case ZSTD_dfast:
125275 +            break;
125276 +        case ZSTD_greedy:
125277 +        case ZSTD_lazy:
125278 +        case ZSTD_lazy2:
125279 +            cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
125280 +            break;
125281 +        case ZSTD_btlazy2:
125282 +        case ZSTD_btopt:
125283 +        case ZSTD_btultra:
125284 +        case ZSTD_btultra2:
125285 +            break;
125286 +    }
125287 +    return cParams;
125288 +}
125290 +static int ZSTD_dedicatedDictSearch_isSupported(
125291 +        ZSTD_compressionParameters const* cParams)
125292 +{
125293 +    return (cParams->strategy >= ZSTD_greedy)
125294 +        && (cParams->strategy <= ZSTD_lazy2)
125295 +        && (cParams->hashLog >= cParams->chainLog)
125296 +        && (cParams->chainLog <= 24);
125297 +}
125299 +/**
125300 + * Reverses the adjustment applied to cparams when enabling dedicated dict
125301 + * search. This is used to recover the params set to be used in the working
125302 + * context. (Otherwise, those tables would also grow.)
125303 + */
125304 +static void ZSTD_dedicatedDictSearch_revertCParams(
125305 +        ZSTD_compressionParameters* cParams) {
125306 +    switch (cParams->strategy) {
125307 +        case ZSTD_fast:
125308 +        case ZSTD_dfast:
125309 +            break;
125310 +        case ZSTD_greedy:
125311 +        case ZSTD_lazy:
125312 +        case ZSTD_lazy2:
125313 +            cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
125314 +            break;
125315 +        case ZSTD_btlazy2:
125316 +        case ZSTD_btopt:
125317 +        case ZSTD_btultra:
125318 +        case ZSTD_btultra2:
125319 +            break;
125320 +    }
125321 +}
125323 +static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
125324 +{
125325 +    switch (mode) {
125326 +    case ZSTD_cpm_unknown:
125327 +    case ZSTD_cpm_noAttachDict:
125328 +    case ZSTD_cpm_createCDict:
125329 +        break;
125330 +    case ZSTD_cpm_attachDict:
125331 +        dictSize = 0;
125332 +        break;
125333 +    default:
125334 +        assert(0);
125335 +        break;
125336 +    }
125337 +    {   int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
125338 +        size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
125339 +        return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
125340 +    }
125341 +}
125343 +/*! ZSTD_getCParams_internal() :
125344 + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
125345 + *  Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
125346 + *        Use dictSize == 0 for unknown or unused.
125347 + *  Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
125348 +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
125349 +{
125350 +    U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
125351 +    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
125352 +    int row;
125353 +    DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
125355 +    /* row */
125356 +    if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
125357 +    else if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
125358 +    else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
125359 +    else row = compressionLevel;
125361 +    {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
125362 +        /* acceleration factor */
125363 +        if (compressionLevel < 0) {
125364 +            int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
125365 +            cp.targetLength = (unsigned)(-clampedCompressionLevel);
125366 +        }
125367 +        /* refine parameters based on srcSize & dictSize */
125368 +        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
125369 +    }
125370 +}
125372 +/*! ZSTD_getCParams() :
125373 + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
125374 + *  Size values are optional, provide 0 if not known or unused */
125375 +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
125376 +{
125377 +    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
125378 +    return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
125379 +}
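To see the tables in action, a user-space sketch querying the "<= 16 KB" group; show_cparams is illustrative, ZSTD_getCParams() sits behind ZSTD_STATIC_LINKING_ONLY, and the final values also pass through ZSTD_adjustCParams_internal().

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static void show_cparams(void)
{
    /* 10 KB input at level 3: rSize <= 16 KB selects the last table, row 3,
     * i.e. { 14, 14, 15, 2, 4, 0, ZSTD_dfast } before adjustment. */
    ZSTD_compressionParameters const cp = ZSTD_getCParams(3, 10 * 1024, 0);
    (void)cp;   /* here cp.strategy == ZSTD_dfast and cp.windowLog == 14 */
}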
125381 +/*! ZSTD_getParams_internal() :
125382 + *  same idea as ZSTD_getCParams()
125383 + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
125384 + *  Fields of `ZSTD_frameParameters` are set to default values */
125385 +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
125386 +    ZSTD_parameters params;
125387 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
125388 +    DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
125389 +    ZSTD_memset(&params, 0, sizeof(params));
125390 +    params.cParams = cParams;
125391 +    params.fParams.contentSizeFlag = 1;
125392 +    return params;
125393 +}
125395 +/*! ZSTD_getParams() :
125396 + *  same idea as ZSTD_getCParams()
125397 + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
125398 + *  Fields of `ZSTD_frameParameters` are set to default values */
125399 +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
125400 +    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
125401 +    return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
125402 +}
125403 diff --git a/lib/zstd/compress/zstd_compress_internal.h b/lib/zstd/compress/zstd_compress_internal.h
125404 new file mode 100644
125405 index 000000000000..b56c482322ba
125406 --- /dev/null
125407 +++ b/lib/zstd/compress/zstd_compress_internal.h
125408 @@ -0,0 +1,1188 @@
125409 +/*
125410 + * Copyright (c) Yann Collet, Facebook, Inc.
125411 + * All rights reserved.
125413 + * This source code is licensed under both the BSD-style license (found in the
125414 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
125415 + * in the COPYING file in the root directory of this source tree).
125416 + * You may select, at your option, one of the above-listed licenses.
125417 + */
125419 +/* This header contains definitions
125420 + * that shall **only** be used by modules within lib/compress.
125421 + */
125423 +#ifndef ZSTD_COMPRESS_H
125424 +#define ZSTD_COMPRESS_H
125426 +/*-*************************************
125427 +*  Dependencies
125428 +***************************************/
125429 +#include "../common/zstd_internal.h"
125430 +#include "zstd_cwksp.h"
125433 +/*-*************************************
125434 +*  Constants
125435 +***************************************/
125436 +#define kSearchStrength      8
125437 +#define HASH_READ_SIZE       8
125438 +#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
125439 +                                       It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
125440 +                                       It's not a big deal though: the candidate will just be sorted again.
125441 +                                       Additionally, candidate position 1 will be lost.
125442 +                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
125443 +                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
125444 +                                       This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
125447 +/*-*************************************
125448 +*  Context memory management
125449 +***************************************/
125450 +typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
125451 +typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
125453 +typedef struct ZSTD_prefixDict_s {
125454 +    const void* dict;
125455 +    size_t dictSize;
125456 +    ZSTD_dictContentType_e dictContentType;
125457 +} ZSTD_prefixDict;
125459 +typedef struct {
125460 +    void* dictBuffer;
125461 +    void const* dict;
125462 +    size_t dictSize;
125463 +    ZSTD_dictContentType_e dictContentType;
125464 +    ZSTD_CDict* cdict;
125465 +} ZSTD_localDict;
125467 +typedef struct {
125468 +    HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)];
125469 +    HUF_repeat repeatMode;
125470 +} ZSTD_hufCTables_t;
125472 +typedef struct {
125473 +    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
125474 +    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
125475 +    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
125476 +    FSE_repeat offcode_repeatMode;
125477 +    FSE_repeat matchlength_repeatMode;
125478 +    FSE_repeat litlength_repeatMode;
125479 +} ZSTD_fseCTables_t;
125481 +typedef struct {
125482 +    ZSTD_hufCTables_t huf;
125483 +    ZSTD_fseCTables_t fse;
125484 +} ZSTD_entropyCTables_t;
125486 +typedef struct {
125487 +    U32 off;            /* Offset code (offset + ZSTD_REP_MOVE) for the match */
125488 +    U32 len;            /* Raw length of match */
125489 +} ZSTD_match_t;
125491 +typedef struct {
125492 +    U32 offset;         /* Offset of sequence */
125493 +    U32 litLength;      /* Length of literals prior to match */
125494 +    U32 matchLength;    /* Raw length of match */
125495 +} rawSeq;
125497 +typedef struct {
125498 +  rawSeq* seq;          /* The start of the sequences */
125499 +  size_t pos;           /* The index in seq where reading stopped. pos <= size. */
125500 +  size_t posInSequence; /* The position within the sequence at seq[pos] where reading
125501 +                           stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
125502 +  size_t size;          /* The number of sequences. <= capacity. */
125503 +  size_t capacity;      /* The capacity starting from `seq` pointer */
125504 +} rawSeqStore_t;
125506 +UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
125508 +typedef struct {
125509 +    int price;
125510 +    U32 off;
125511 +    U32 mlen;
125512 +    U32 litlen;
125513 +    U32 rep[ZSTD_REP_NUM];
125514 +} ZSTD_optimal_t;
125516 +typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
125518 +typedef struct {
125519 +    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
125520 +    unsigned* litFreq;           /* table of literals statistics, of size 256 */
125521 +    unsigned* litLengthFreq;     /* table of litLength statistics, of size (MaxLL+1) */
125522 +    unsigned* matchLengthFreq;   /* table of matchLength statistics, of size (MaxML+1) */
125523 +    unsigned* offCodeFreq;       /* table of offCode statistics, of size (MaxOff+1) */
125524 +    ZSTD_match_t* matchTable;    /* list of found matches, of size ZSTD_OPT_NUM+1 */
125525 +    ZSTD_optimal_t* priceTable;  /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
125527 +    U32  litSum;                 /* nb of literals */
125528 +    U32  litLengthSum;           /* nb of litLength codes */
125529 +    U32  matchLengthSum;         /* nb of matchLength codes */
125530 +    U32  offCodeSum;             /* nb of offset codes */
125531 +    U32  litSumBasePrice;        /* to compare to log2(litfreq) */
125532 +    U32  litLengthSumBasePrice;  /* to compare to log2(llfreq)  */
125533 +    U32  matchLengthSumBasePrice;/* to compare to log2(mlfreq)  */
125534 +    U32  offCodeSumBasePrice;    /* to compare to log2(offreq)  */
125535 +    ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
125536 +    const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
125537 +    ZSTD_literalCompressionMode_e literalCompressionMode;
125538 +} optState_t;
125540 +typedef struct {
125541 +  ZSTD_entropyCTables_t entropy;
125542 +  U32 rep[ZSTD_REP_NUM];
125543 +} ZSTD_compressedBlockState_t;
125545 +typedef struct {
125546 +    BYTE const* nextSrc;    /* next block here to continue on current prefix */
125547 +    BYTE const* base;       /* All regular indexes relative to this position */
125548 +    BYTE const* dictBase;   /* extDict indexes relative to this position */
125549 +    U32 dictLimit;          /* below that point, need extDict */
125550 +    U32 lowLimit;           /* below that point, no more valid data */
125551 +} ZSTD_window_t;
125553 +typedef struct ZSTD_matchState_t ZSTD_matchState_t;
125554 +struct ZSTD_matchState_t {
125555 +    ZSTD_window_t window;   /* State for window round buffer management */
125556 +    U32 loadedDictEnd;      /* index of end of dictionary, within context's referential.
125557 +                             * When loadedDictEnd != 0, a dictionary is in use, and still valid.
125558 +                             * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
125559 +                             * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
125560 +                             * When dict referential is copied into active context (i.e. not attached),
125561 +                             * loadedDictEnd == dictSize, since referential starts from zero.
125562 +                             */
125563 +    U32 nextToUpdate;       /* index from which to continue table update */
125564 +    U32 hashLog3;           /* dispatch table for matches of len==3: larger == faster, more memory */
125565 +    U32* hashTable;
125566 +    U32* hashTable3;
125567 +    U32* chainTable;
125568 +    int dedicatedDictSearch;  /* Indicates whether this matchState is using the
125569 +                               * dedicated dictionary search structure.
125570 +                               */
125571 +    optState_t opt;         /* optimal parser state */
125572 +    const ZSTD_matchState_t* dictMatchState;
125573 +    ZSTD_compressionParameters cParams;
125574 +    const rawSeqStore_t* ldmSeqStore;
125575 +};
125577 +typedef struct {
125578 +    ZSTD_compressedBlockState_t* prevCBlock;
125579 +    ZSTD_compressedBlockState_t* nextCBlock;
125580 +    ZSTD_matchState_t matchState;
125581 +} ZSTD_blockState_t;
125583 +typedef struct {
125584 +    U32 offset;
125585 +    U32 checksum;
125586 +} ldmEntry_t;
125588 +typedef struct {
125589 +    BYTE const* split;
125590 +    U32 hash;
125591 +    U32 checksum;
125592 +    ldmEntry_t* bucket;
125593 +} ldmMatchCandidate_t;
125595 +#define LDM_BATCH_SIZE 64
125597 +typedef struct {
125598 +    ZSTD_window_t window;   /* State for the window round buffer management */
125599 +    ldmEntry_t* hashTable;
125600 +    U32 loadedDictEnd;
125601 +    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
125602 +    size_t splitIndices[LDM_BATCH_SIZE];
125603 +    ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
125604 +} ldmState_t;
125606 +typedef struct {
125607 +    U32 enableLdm;          /* nonzero if long distance matching is enabled */
125608 +    U32 hashLog;            /* Log size of hashTable */
125609 +    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
125610 +    U32 minMatchLength;     /* Minimum match length */
125611 +    U32 hashRateLog;        /* Log number of entries to skip */
125612 +    U32 windowLog;          /* Window log for the LDM */
125613 +} ldmParams_t;
125615 +typedef struct {
125616 +    int collectSequences;
125617 +    ZSTD_Sequence* seqStart;
125618 +    size_t seqIndex;
125619 +    size_t maxSequences;
125620 +} SeqCollector;
125622 +struct ZSTD_CCtx_params_s {
125623 +    ZSTD_format_e format;
125624 +    ZSTD_compressionParameters cParams;
125625 +    ZSTD_frameParameters fParams;
125627 +    int compressionLevel;
125628 +    int forceWindow;           /* force back-references to respect limit of
125629 +                                * 1<<wLog, even for dictionary */
125630 +    size_t targetCBlockSize;   /* Tries to fit compressed block size to be around targetCBlockSize.
125631 +                                * No target when targetCBlockSize == 0.
125632 +                                * There is no guarantee on compressed block size */
125633 +    int srcSizeHint;           /* User's best guess of source size.
125634 +                                * Hint is not valid when srcSizeHint == 0.
125635 +                                * There is no guarantee that hint is close to actual source size */
125637 +    ZSTD_dictAttachPref_e attachDictPref;
125638 +    ZSTD_literalCompressionMode_e literalCompressionMode;
125640 +    /* Multithreading: used to pass parameters to mtctx */
125641 +    int nbWorkers;
125642 +    size_t jobSize;
125643 +    int overlapLog;
125644 +    int rsyncable;
125646 +    /* Long distance matching parameters */
125647 +    ldmParams_t ldmParams;
125649 +    /* Dedicated dict search algorithm trigger */
125650 +    int enableDedicatedDictSearch;
125652 +    /* Input/output buffer modes */
125653 +    ZSTD_bufferMode_e inBufferMode;
125654 +    ZSTD_bufferMode_e outBufferMode;
125656 +    /* Sequence compression API */
125657 +    ZSTD_sequenceFormat_e blockDelimiters;
125658 +    int validateSequences;
125660 +    /* Internal use, for createCCtxParams() and freeCCtxParams() only */
125661 +    ZSTD_customMem customMem;
125662 +};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
125664 +#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
125665 +#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
125667 +/**
125668 + * Indicates whether this compression proceeds directly from user-provided
125669 + * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
125670 + * whether the context needs to buffer the input/output (ZSTDb_buffered).
125671 + */
125672 +typedef enum {
125673 +    ZSTDb_not_buffered,
125674 +    ZSTDb_buffered
125675 +} ZSTD_buffered_policy_e;
125677 +struct ZSTD_CCtx_s {
125678 +    ZSTD_compressionStage_e stage;
125679 +    int cParamsChanged;                  /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
125680 +    int bmi2;                            /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
125681 +    ZSTD_CCtx_params requestedParams;
125682 +    ZSTD_CCtx_params appliedParams;
125683 +    U32   dictID;
125684 +    size_t dictContentSize;
125686 +    ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
125687 +    size_t blockSize;
125688 +    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
125689 +    unsigned long long consumedSrcSize;
125690 +    unsigned long long producedCSize;
125691 +    struct xxh64_state xxhState;
125692 +    ZSTD_customMem customMem;
125693 +    ZSTD_threadPool* pool;
125694 +    size_t staticSize;
125695 +    SeqCollector seqCollector;
125696 +    int isFirstBlock;
125697 +    int initialized;
125699 +    seqStore_t seqStore;      /* sequences storage ptrs */
125700 +    ldmState_t ldmState;      /* long distance matching state */
125701 +    rawSeq* ldmSequences;     /* Storage for the ldm output sequences */
125702 +    size_t maxNbLdmSequences;
125703 +    rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
125704 +    ZSTD_blockState_t blockState;
125705 +    U32* entropyWorkspace;  /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
125707 +    /* Whether we are streaming or not */
125708 +    ZSTD_buffered_policy_e bufferedPolicy;
125710 +    /* streaming */
125711 +    char*  inBuff;
125712 +    size_t inBuffSize;
125713 +    size_t inToCompress;
125714 +    size_t inBuffPos;
125715 +    size_t inBuffTarget;
125716 +    char*  outBuff;
125717 +    size_t outBuffSize;
125718 +    size_t outBuffContentSize;
125719 +    size_t outBuffFlushedSize;
125720 +    ZSTD_cStreamStage streamStage;
125721 +    U32    frameEnded;
125723 +    /* Stable in/out buffer verification */
125724 +    ZSTD_inBuffer expectedInBuffer;
125725 +    size_t expectedOutBufferSize;
125727 +    /* Dictionary */
125728 +    ZSTD_localDict localDict;
125729 +    const ZSTD_CDict* cdict;
125730 +    ZSTD_prefixDict prefixDict;   /* single-usage dictionary */
125732 +    /* Multi-threading */
125734 +    /* Tracing */
125735 +};
125737 +typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
125739 +typedef enum {
125740 +    ZSTD_noDict = 0,
125741 +    ZSTD_extDict = 1,
125742 +    ZSTD_dictMatchState = 2,
125743 +    ZSTD_dedicatedDictSearch = 3
125744 +} ZSTD_dictMode_e;
125746 +typedef enum {
125747 +    ZSTD_cpm_noAttachDict = 0,  /* Compression with ZSTD_noDict or ZSTD_extDict.
125748 +                                 * In this mode we use both the srcSize and the dictSize
125749 +                                 * when selecting and adjusting parameters.
125750 +                                 */
125751 +    ZSTD_cpm_attachDict = 1,    /* Compression with ZSTD_dictMatchState or ZSTD_dedicatedDictSearch.
125752 +                                 * In this mode we only take the srcSize into account when selecting
125753 +                                 * and adjusting parameters.
125754 +                                 */
125755 +    ZSTD_cpm_createCDict = 2,   /* Creating a CDict.
125756 +                                 * In this mode we take both the source size and the dictionary size
125757 +                                 * into account when selecting and adjusting the parameters.
125758 +                                 */
125759 +    ZSTD_cpm_unknown = 3,       /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
125760 +                                 * We don't know what these parameters are for. We default to the legacy
125761 +                                 * behavior of taking both the source size and the dict size into account
125762 +                                 * when selecting and adjusting parameters.
125763 +                                 */
125764 +} ZSTD_cParamMode_e;
125766 +typedef size_t (*ZSTD_blockCompressor) (
125767 +        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
125768 +        void const* src, size_t srcSize);
125769 +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
125772 +MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
125774 +    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
125775 +                                       8,  9, 10, 11, 12, 13, 14, 15,
125776 +                                      16, 16, 17, 17, 18, 18, 19, 19,
125777 +                                      20, 20, 20, 20, 21, 21, 21, 21,
125778 +                                      22, 22, 22, 22, 22, 22, 22, 22,
125779 +                                      23, 23, 23, 23, 23, 23, 23, 23,
125780 +                                      24, 24, 24, 24, 24, 24, 24, 24,
125781 +                                      24, 24, 24, 24, 24, 24, 24, 24 };
125782 +    static const U32 LL_deltaCode = 19;
125783 +    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
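A minimal standalone sketch of the two paths above (editor's illustration, not part of the patch; assumes GCC/Clang-style builtins): litLength 18 goes through the table (LL_Code[18] == 17), while litLength 100 > 63 takes the formula path, highbit32(100) + LL_deltaCode = 6 + 19 = 25.

#include <assert.h>
static unsigned highbit32(unsigned v) { return 31 - (unsigned)__builtin_clz(v); }
int main(void)
{
    /* formula path : code = highbit32(litLength) + LL_deltaCode for litLength > 63 */
    assert(highbit32(100) + 19 == 25);
    return 0;
}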
125786 +/* ZSTD_MLcode() :
125787 + * note : mlBase = matchLength - MINMATCH;
125788 + *        because that's the format in which it's stored in seqStore->sequences */
125789 +MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
125791 +    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
125792 +                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
125793 +                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
125794 +                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
125795 +                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
125796 +                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
125797 +                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
125798 +                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
125799 +    static const U32 ML_deltaCode = 36;
125800 +    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
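The match-length mapping works the same way, shifted by MINMATCH. A hedged spot-check (editor's, assuming MINMATCH == 3 and GCC/Clang builtins): a 200-byte match gives mlBase = 197 > 127, hence code = highbit32(197) + ML_deltaCode = 7 + 36 = 43.

#include <assert.h>
int main(void)
{
    unsigned const mlBase = 200 - 3;   /* matchLength - MINMATCH */
    assert((31 - (unsigned)__builtin_clz(mlBase)) + 36 == 43);
    return 0;
}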
125803 +typedef struct repcodes_s {
125804 +    U32 rep[3];
125805 +} repcodes_t;
125807 +MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
125809 +    repcodes_t newReps;
125810 +    if (offset >= ZSTD_REP_NUM) {  /* full offset */
125811 +        newReps.rep[2] = rep[1];
125812 +        newReps.rep[1] = rep[0];
125813 +        newReps.rep[0] = offset - ZSTD_REP_MOVE;
125814 +    } else {   /* repcode */
125815 +        U32 const repCode = offset + ll0;
125816 +        if (repCode > 0) {  /* note : if repCode==0, no change */
125817 +            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
125818 +            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
125819 +            newReps.rep[1] = rep[0];
125820 +            newReps.rep[0] = currentOffset;
125821 +        } else {   /* repCode == 0 */
125822 +            ZSTD_memcpy(&newReps, rep, sizeof(newReps));
125823 +        }
125824 +    }
125825 +    return newReps;
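The repcode history behaves like a small most-recently-used list. Below is an editor's standalone re-implementation of the rule above (assuming ZSTD_REP_NUM == 3 and ZSTD_REP_MOVE == 2, their values in zstd_internal.h), checking both branches:

#include <assert.h>
typedef unsigned U32;
enum { REP_NUM = 3, REP_MOVE = REP_NUM - 1 };
static void updateRep(U32 rep[3], U32 offset, U32 ll0)
{
    if (offset >= REP_NUM) {               /* full offset : shift history down */
        rep[2] = rep[1]; rep[1] = rep[0]; rep[0] = offset - REP_MOVE;
    } else {
        U32 const repCode = offset + ll0;
        if (repCode > 0) {                 /* repcode : rotate it to the front */
            U32 const cur = (repCode == REP_NUM) ? rep[0] - 1 : rep[repCode];
            rep[2] = (repCode >= 2) ? rep[1] : rep[2];
            rep[1] = rep[0];
            rep[0] = cur;
        }                                  /* repCode == 0 : no change */
    }
}
int main(void)
{
    U32 rep[3] = {8, 16, 24};
    updateRep(rep, 1, 0);                  /* re-use rep[1] */
    assert(rep[0] == 16 && rep[1] == 8 && rep[2] == 24);
    updateRep(rep, 100 + REP_MOVE, 0);     /* new full offset 100 */
    assert(rep[0] == 100 && rep[1] == 16 && rep[2] == 8);
    return 0;
}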
125828 +/* ZSTD_cParam_withinBounds:
125829 + * @return 1 if value is within cParam bounds,
125830 + * 0 otherwise */
125831 +MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
125833 +    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
125834 +    if (ZSTD_isError(bounds.error)) return 0;
125835 +    if (value < bounds.lowerBound) return 0;
125836 +    if (value > bounds.upperBound) return 0;
125837 +    return 1;
125840 +/* ZSTD_noCompressBlock() :
125841 + * Writes uncompressed block to dst buffer from given src.
125842 + * Returns the size of the block */
125843 +MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
125845 +    U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
125846 +    RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
125847 +                    dstSize_tooSmall, "dst buf too small for uncompressed block");
125848 +    MEM_writeLE24(dst, cBlockHeader24);
125849 +    ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
125850 +    return ZSTD_blockHeaderSize + srcSize;
125853 +MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
125855 +    BYTE* const op = (BYTE*)dst;
125856 +    U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
125857 +    RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "");
125858 +    MEM_writeLE24(op, cBlockHeader);
125859 +    op[3] = src;
125860 +    return 4;
125864 +/* ZSTD_minGain() :
125865 + * minimum compression required
125866 + * to generate a compress block or a compressed literals section.
125867 + * note : use same formula for both situations */
125868 +MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
125870 +    U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
125871 +    ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
125872 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
125873 +    return (srcSize >> minlog) + 2;
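Editor's spot-check of the formula (assumes ZSTD_btultra == 8, per the static assert above, and ZSTD_btultra2 == 9): for a 4 KB input, fast strategies must save (4096 >> 6) + 2 = 66 bytes, while btultra2 only needs (4096 >> 8) + 2 = 18.

#include <assert.h>
#include <stddef.h>
static size_t minGain(size_t srcSize, int strat)   /* mirrors ZSTD_minGain */
{
    unsigned const minlog = (strat >= 8) ? (unsigned)strat - 1 : 6;
    return (srcSize >> minlog) + 2;
}
int main(void)
{
    assert(minGain(4096, 1) == 66);
    assert(minGain(4096, 9) == 18);
    return 0;
}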
125876 +MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
125878 +    switch (cctxParams->literalCompressionMode) {
125879 +    case ZSTD_lcm_huffman:
125880 +        return 0;
125881 +    case ZSTD_lcm_uncompressed:
125882 +        return 1;
125883 +    default:
125884 +        assert(0 /* impossible: pre-validated */);
125885 +        /* fall-through */
125886 +    case ZSTD_lcm_auto:
125887 +        return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
125888 +    }
125891 +/*! ZSTD_safecopyLiterals() :
125892 + *  memcpy() function that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
125893 + *  Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
125894 + *  large copies.
125895 + */
125896 +static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
125897 +    assert(iend > ilimit_w);
125898 +    if (ip <= ilimit_w) {
125899 +        ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
125900 +        op += ilimit_w - ip;
125901 +        ip = ilimit_w;
125902 +    }
125903 +    while (ip < iend) *op++ = *ip++;
125906 +/*! ZSTD_storeSeq() :
125907 + *  Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
125908 + *  `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
125909 + *  `mlBase` : matchLength - MINMATCH
125910 + *  Allowed to overread literals up to litLimit.
125911 + */
125912 +HINT_INLINE UNUSED_ATTR
125913 +void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
125915 +    BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
125916 +    BYTE const* const litEnd = literals + litLength;
125917 +#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
125918 +    static const BYTE* g_start = NULL;
125919 +    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
125920 +    {   U32 const pos = (U32)((const BYTE*)literals - g_start);
125921 +        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
125922 +               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
125923 +    }
125924 +#endif
125925 +    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
125926 +    /* copy Literals */
125927 +    assert(seqStorePtr->maxNbLit <= 128 KB);
125928 +    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
125929 +    assert(literals + litLength <= litLimit);
125930 +    if (litEnd <= litLimit_w) {
125931 +        /* Common case we can use wildcopy.
125932 +        * First copy 16 bytes, because literals are likely short.
125933 +        */
125934 +        assert(WILDCOPY_OVERLENGTH >= 16);
125935 +        ZSTD_copy16(seqStorePtr->lit, literals);
125936 +        if (litLength > 16) {
125937 +            ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
125938 +        }
125939 +    } else {
125940 +        ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
125941 +    }
125942 +    seqStorePtr->lit += litLength;
125944 +    /* literal Length */
125945 +    if (litLength>0xFFFF) {
125946 +        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
125947 +        seqStorePtr->longLengthID = 1;
125948 +        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
125949 +    }
125950 +    seqStorePtr->sequences[0].litLength = (U16)litLength;
125952 +    /* match offset */
125953 +    seqStorePtr->sequences[0].offset = offCode + 1;
125955 +    /* match Length */
125956 +    if (mlBase>0xFFFF) {
125957 +        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
125958 +        seqStorePtr->longLengthID = 2;
125959 +        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
125960 +    }
125961 +    seqStorePtr->sequences[0].matchLength = (U16)mlBase;
125963 +    seqStorePtr->sequences++;
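Note the U16 truncation on store: at most one oversized length per block is allowed, flagged via longLengthID/longLengthPos and reconstructed downstream, so the truncation is lossless. Editor's illustration of the arithmetic (not from the patch):

#include <assert.h>
int main(void)
{
    unsigned const litLength = 70000;                 /* > 0xFFFF */
    unsigned short const stored = (unsigned short)litLength;
    assert(stored == 70000 - 65536);                  /* 4464, recovered via the flag */
    return 0;
}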
125967 +/*-*************************************
125968 +*  Match length counter
125969 +***************************************/
125970 +static unsigned ZSTD_NbCommonBytes (size_t val)
125972 +    if (MEM_isLittleEndian()) {
125973 +        if (MEM_64bits()) {
125974 +#       if (__GNUC__ >= 4)
125975 +            return (__builtin_ctzll((U64)val) >> 3);
125976 +#       else
125977 +            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
125978 +                                                     0, 3, 1, 3, 1, 4, 2, 7,
125979 +                                                     0, 2, 3, 6, 1, 5, 3, 5,
125980 +                                                     1, 3, 4, 4, 2, 5, 6, 7,
125981 +                                                     7, 0, 1, 2, 3, 3, 4, 6,
125982 +                                                     2, 6, 5, 5, 3, 4, 5, 6,
125983 +                                                     7, 1, 2, 4, 6, 4, 4, 5,
125984 +                                                     7, 2, 6, 5, 7, 6, 7, 7 };
125985 +            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
125986 +#       endif
125987 +        } else { /* 32 bits */
125988 +#       if (__GNUC__ >= 3)
125989 +            return (__builtin_ctz((U32)val) >> 3);
125990 +#       else
125991 +            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
125992 +                                                     3, 2, 2, 1, 3, 2, 0, 1,
125993 +                                                     3, 3, 1, 2, 2, 2, 2, 0,
125994 +                                                     3, 1, 2, 0, 1, 0, 1, 1 };
125995 +            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
125996 +#       endif
125997 +        }
125998 +    } else {  /* Big Endian CPU */
125999 +        if (MEM_64bits()) {
126000 +#       if (__GNUC__ >= 4)
126001 +            return (__builtin_clzll(val) >> 3);
126002 +#       else
126003 +            unsigned r;
126004 +            const unsigned n32 = sizeof(size_t)*4;   /* computed this way to avoid a compiler warning in 32-bit mode */
126005 +            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
126006 +            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
126007 +            r += (!val);
126008 +            return r;
126009 +#       endif
126010 +        } else { /* 32 bits */
126011 +#       if (__GNUC__ >= 3)
126012 +            return (__builtin_clz((U32)val) >> 3);
126013 +#       else
126014 +            unsigned r;
126015 +            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
126016 +            r += (!val);
126017 +            return r;
126018 +#       endif
126019 +    }   }
126023 +MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
126025 +    const BYTE* const pStart = pIn;
126026 +    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
126028 +    if (pIn < pInLoopLimit) {
126029 +        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
126030 +          if (diff) return ZSTD_NbCommonBytes(diff); }
126031 +        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
126032 +        while (pIn < pInLoopLimit) {
126033 +            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
126034 +            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
126035 +            pIn += ZSTD_NbCommonBytes(diff);
126036 +            return (size_t)(pIn - pStart);
126037 +    }   }
126038 +    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
126039 +    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
126040 +    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
126041 +    return (size_t)(pIn - pStart);
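The core trick above: XOR two machine words and count trailing zero bytes to locate the first mismatching byte. Editor's standalone check (assumes a little-endian host and GCC/Clang builtins):

#include <assert.h>
#include <stdint.h>
#include <string.h>
int main(void)
{
    uint64_t a, b;
    memcpy(&a, "abcdXxyz", 8);
    memcpy(&b, "abcdYxyz", 8);
    /* first difference at byte index 4 == trailing zero bytes of the XOR */
    assert((__builtin_ctzll(a ^ b) >> 3) == 4);
    return 0;
}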
126044 +/** ZSTD_count_2segments() :
126045 + *  can count match length with `ip` & `match` in 2 different segments.
126046 + *  convention : on reaching mEnd, match count continues, starting from iStart
126047 + */
126048 +MEM_STATIC size_t
126049 +ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
126050 +                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
126052 +    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
126053 +    size_t const matchLength = ZSTD_count(ip, match, vEnd);
126054 +    if (match + matchLength != mEnd) return matchLength;
126055 +    DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
126056 +    DEBUGLOG(7, "distance from match beginning to end of dictionary = %zi", mEnd - match);
126057 +    DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
126058 +    DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
126059 +    DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
126060 +    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
126064 +/*-*************************************
126065 + *  Hashes
126066 + ***************************************/
126067 +static const U32 prime3bytes = 506832829U;
126068 +static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
126069 +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
126071 +static const U32 prime4bytes = 2654435761U;
126072 +static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
126073 +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
126075 +static const U64 prime5bytes = 889523592379ULL;
126076 +static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
126077 +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
126079 +static const U64 prime6bytes = 227718039650203ULL;
126080 +static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
126081 +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
126083 +static const U64 prime7bytes = 58295818150454627ULL;
126084 +static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
126085 +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
126087 +static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
126088 +static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
126089 +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
126091 +MEM_STATIC FORCE_INLINE_ATTR
126092 +size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
126094 +    switch(mls)
126095 +    {
126096 +    default:
126097 +    case 4: return ZSTD_hash4Ptr(p, hBits);
126098 +    case 5: return ZSTD_hash5Ptr(p, hBits);
126099 +    case 6: return ZSTD_hash6Ptr(p, hBits);
126100 +    case 7: return ZSTD_hash7Ptr(p, hBits);
126101 +    case 8: return ZSTD_hash8Ptr(p, hBits);
126102 +    }
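These are Fibonacci-style multiplicative hashes: multiply by a large odd constant and keep the top hBits bits, which folds low-order input bits into the kept bits. Editor's sketch of the 4-byte variant (constant copied from above):

#include <assert.h>
#include <stdint.h>
static uint32_t hash4(uint32_t u, uint32_t h) { return (u * 2654435761U) >> (32 - h); }
int main(void)
{
    uint32_t const h = 17;                        /* a typical hashLog */
    assert(hash4(0x61626364, h) < (1u << h));     /* indexes a 2^17-entry table */
    assert(hash4(0x61626364, h) != hash4(0x61626365, h));  /* adjacent keys split */
    return 0;
}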
126105 +/** ZSTD_ipow() :
126106 + * Return base^exponent.
126107 + */
126108 +static U64 ZSTD_ipow(U64 base, U64 exponent)
126110 +    U64 power = 1;
126111 +    while (exponent) {
126112 +      if (exponent & 1) power *= base;
126113 +      exponent >>= 1;
126114 +      base *= base;
126115 +    }
126116 +    return power;
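This is square-and-multiply in C's natural mod-2^64 arithmetic, which is exactly what the rolling hash below needs (it computes prime8bytes^(length-1) mod 2^64). Editor's standalone sanity check:

#include <assert.h>
#include <stdint.h>
static uint64_t ipow(uint64_t base, uint64_t exp)
{
    uint64_t power = 1;
    while (exp) { if (exp & 1) power *= base; exp >>= 1; base *= base; }
    return power;
}
int main(void) { assert(ipow(3, 5) == 243); return 0; }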
126119 +#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
126121 +/** ZSTD_rollingHash_append() :
126122 + * Add the buffer to the hash value.
126123 + */
126124 +static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
126126 +    BYTE const* istart = (BYTE const*)buf;
126127 +    size_t pos;
126128 +    for (pos = 0; pos < size; ++pos) {
126129 +        hash *= prime8bytes;
126130 +        hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
126131 +    }
126132 +    return hash;
126135 +/** ZSTD_rollingHash_compute() :
126136 + * Compute the rolling hash value of the buffer.
126137 + */
126138 +MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
126140 +    return ZSTD_rollingHash_append(0, buf, size);
126143 +/** ZSTD_rollingHash_primePower() :
126144 + * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
126145 + * over a window of length bytes.
126146 + */
126147 +MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
126149 +    return ZSTD_ipow(prime8bytes, length - 1);
126152 +/** ZSTD_rollingHash_rotate() :
126153 + * Rotate the rolling hash by one byte.
126154 + */
126155 +MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
126157 +    hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
126158 +    hash *= prime8bytes;
126159 +    hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
126160 +    return hash;
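Rotation must agree with recomputing from scratch: for a window of length n, hash = sum over i of (byte_i + 10) * prime^(n-1-i) mod 2^64, so evicting the oldest byte costs (byte + 10) * prime^(n-1). Editor's standalone equivalence check for n = 3 (constants copied from above):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#define PRIME 0xCF1BBCDCB7A56463ULL
#define OFF   10
static uint64_t append(uint64_t h, const char* s, size_t n)
{
    size_t i;
    for (i = 0; i < n; i++) { h *= PRIME; h += (uint8_t)s[i] + OFF; }
    return h;
}
int main(void)
{
    uint64_t const primePower = PRIME * PRIME;   /* prime^(3-1) */
    uint64_t h = append(0, "abc", 3);
    h -= ((uint8_t)'a' + OFF) * primePower;      /* remove oldest byte 'a' */
    h *= PRIME; h += (uint8_t)'d' + OFF;         /* add newest byte 'd' */
    assert(h == append(0, "bcd", 3));
    return 0;
}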
126163 +/*-*************************************
126164 +*  Round buffer management
126165 +***************************************/
126166 +#if (ZSTD_WINDOWLOG_MAX_64 > 31)
126167 +# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
126168 +#endif
126169 +/* Max current allowed */
126170 +#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
126171 +/* Maximum chunk size before overflow correction needs to be called again */
126172 +#define ZSTD_CHUNKSIZE_MAX                                                     \
126173 +    ( ((U32)-1)                  /* Maximum ending current index */            \
126174 +    - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */
126177 + * ZSTD_window_clear():
126178 + * Clears the window containing the history by simply setting it to empty.
126179 + */
126180 +MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
126182 +    size_t const endT = (size_t)(window->nextSrc - window->base);
126183 +    U32 const end = (U32)endT;
126185 +    window->lowLimit = end;
126186 +    window->dictLimit = end;
126190 + * ZSTD_window_hasExtDict():
126191 + * Returns non-zero if the window has a non-empty extDict.
126192 + */
126193 +MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
126195 +    return window.lowLimit < window.dictLimit;
126199 + * ZSTD_matchState_dictMode():
126200 + * Inspects the provided matchState and figures out what dictMode should be
126201 + * passed to the compressor.
126202 + */
126203 +MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
126205 +    return ZSTD_window_hasExtDict(ms->window) ?
126206 +        ZSTD_extDict :
126207 +        ms->dictMatchState != NULL ?
126208 +            (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
126209 +            ZSTD_noDict;
126213 + * ZSTD_window_needOverflowCorrection():
126214 + * Returns non-zero if the indices are getting too large and need overflow
126215 + * protection.
126216 + */
126217 +MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
126218 +                                                  void const* srcEnd)
126220 +    U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
126221 +    return curr > ZSTD_CURRENT_MAX;
126225 + * ZSTD_window_correctOverflow():
126226 + * Reduces the indices to protect from index overflow.
126227 + * Returns the correction made to the indices, which must be applied to every
126228 + * stored index.
126230 + * The least significant cycleLog bits of the indices must remain the same,
126231 + * which may be 0. Every index up to maxDist in the past must be valid.
126232 + * NOTE: (maxDist & cycleMask) must be zero.
126233 + */
126234 +MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
126235 +                                           U32 maxDist, void const* src)
126237 +    /* preemptive overflow correction:
126238 +     * 1. correction is large enough:
126239 +     *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
126240 +     *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
126241 +     *
126242 +     *    current - newCurrent
126243 +     *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
126244 +     *    > (3<<29) - (1<<chainLog)
126245 +     *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)
126246 +     *    > 1<<29
126247 +     *
126248 +     * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
126249 +     *    After correction, current is less than (1<<chainLog + 1<<windowLog).
126250 +     *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
126251 +     *    In 32-bit mode we are safe, because (chainLog <= 29), so
126252 +     *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
126253 +     * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
126254 +     *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
126255 +     */
126256 +    U32 const cycleMask = (1U << cycleLog) - 1;
126257 +    U32 const curr = (U32)((BYTE const*)src - window->base);
126258 +    U32 const currentCycle0 = curr & cycleMask;
126259 +    /* Exclude zero so that newCurrent - maxDist >= 1. */
126260 +    U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
126261 +    U32 const newCurrent = currentCycle1 + maxDist;
126262 +    U32 const correction = curr - newCurrent;
126263 +    assert((maxDist & cycleMask) == 0);
126264 +    assert(curr > newCurrent);
126265 +    /* Loose bound, should be around 1<<29 (see above) */
126266 +    assert(correction > 1<<28);
126268 +    window->base += correction;
126269 +    window->dictBase += correction;
126270 +    if (window->lowLimit <= correction) window->lowLimit = 1;
126271 +    else window->lowLimit -= correction;
126272 +    if (window->dictLimit <= correction) window->dictLimit = 1;
126273 +    else window->dictLimit -= correction;
126275 +    /* Ensure we can still reference the full window. */
126276 +    assert(newCurrent >= maxDist);
126277 +    assert(newCurrent - maxDist >= 1);
126278 +    /* Ensure that lowLimit and dictLimit didn't underflow. */
126279 +    assert(window->lowLimit <= newCurrent);
126280 +    assert(window->dictLimit <= newCurrent);
126282 +    DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
126283 +             window->lowLimit);
126284 +    return correction;
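Editor's worked example of the correction arithmetic (hypothetical values: cycleLog = 20, maxDist = 1 << 20, current cycle nonzero so no zero-cycle adjustment is needed):

#include <assert.h>
#include <stdint.h>
int main(void)
{
    uint32_t const cycleMask = (1u << 20) - 1, maxDist = 1u << 20;
    uint32_t const curr = 0x30000123;
    uint32_t const newCurrent = (curr & cycleMask) + maxDist;   /* 0x100123 */
    uint32_t const correction = curr - newCurrent;              /* 0x2FF00000 */
    assert(correction == 0x2FF00000);
    assert((curr & cycleMask) == (newCurrent & cycleMask));     /* low bits kept */
    return 0;
}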
126288 + * ZSTD_window_enforceMaxDist():
126289 + * Updates lowLimit so that:
126290 + *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
126292 + * It ensures index is valid as long as index >= lowLimit.
126293 + * This must be called before a block compression call.
126295 + * loadedDictEnd is only defined if a dictionary is in use for current compression.
126296 + * As the name implies, loadedDictEnd represents the index at end of dictionary.
126297 + * The value lies within the context's referential; it can be directly compared to blockEndIdx.
126299 + * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
126300 + * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
126301 + * This is because dictionaries are allowed to be referenced fully
126302 + * as long as the last byte of the dictionary is in the window.
126303 + * Once input has progressed beyond window size, dictionary cannot be referenced anymore.
126305 + * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
126306 + * In dictMatchState mode, lowLimit and dictLimit are the same,
126307 + * and the dictionary is below them.
126308 + * forceWindow and dictMatchState are therefore incompatible.
126309 + */
126310 +MEM_STATIC void
126311 +ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
126312 +                     const void* blockEnd,
126313 +                           U32   maxDist,
126314 +                           U32*  loadedDictEndPtr,
126315 +                     const ZSTD_matchState_t** dictMatchStatePtr)
126317 +    U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
126318 +    U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
126319 +    DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
126320 +                (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
126322 +    /* - When there is no dictionary : loadedDictEnd == 0.
126323 +         In which case, the test (blockEndIdx > maxDist) is merely to avoid
126324 +         overflowing next operation `newLowLimit = blockEndIdx - maxDist`.
126325 +       - When there is a standard dictionary :
126326 +         Index referential is copied from the dictionary,
126327 +         which means it starts from 0.
126328 +         In which case, loadedDictEnd == dictSize,
126329 +         and it makes sense to compare `blockEndIdx > maxDist + dictSize`
126330 +         since `blockEndIdx` also starts from zero.
126331 +       - When there is an attached dictionary :
126332 +         loadedDictEnd is expressed within the referential of the context,
126333 +         so it can be directly compared against blockEndIdx.
126334 +    */
126335 +    if (blockEndIdx > maxDist + loadedDictEnd) {
126336 +        U32 const newLowLimit = blockEndIdx - maxDist;
126337 +        if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
126338 +        if (window->dictLimit < window->lowLimit) {
126339 +            DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
126340 +                        (unsigned)window->dictLimit, (unsigned)window->lowLimit);
126341 +            window->dictLimit = window->lowLimit;
126342 +        }
126343 +        /* On reaching window size, dictionaries are invalidated */
126344 +        if (loadedDictEndPtr) *loadedDictEndPtr = 0;
126345 +        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
126346 +    }
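Editor's numeric illustration of the update above (hypothetical values; no dictionary, so loadedDictEnd == 0):

#include <assert.h>
int main(void)
{
    unsigned const blockEndIdx = 5000000, maxDist = 1u << 20, loadedDictEnd = 0;
    unsigned lowLimit = 100;                                /* stale lower bound */
    if (blockEndIdx > maxDist + loadedDictEnd) {
        unsigned const newLowLimit = blockEndIdx - maxDist;
        if (lowLimit < newLowLimit) lowLimit = newLowLimit;
    }
    assert(lowLimit == 5000000 - 1048576);                  /* 3951424 */
    return 0;
}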
126349 +/* Similar to ZSTD_window_enforceMaxDist(),
126350 + * but only invalidates dictionary
126351 + * when input progresses beyond window size.
126352 + * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
126353 + *              loadedDictEnd uses same referential as window->base
126354 + *              maxDist is the window size */
126355 +MEM_STATIC void
126356 +ZSTD_checkDictValidity(const ZSTD_window_t* window,
126357 +                       const void* blockEnd,
126358 +                             U32   maxDist,
126359 +                             U32*  loadedDictEndPtr,
126360 +                       const ZSTD_matchState_t** dictMatchStatePtr)
126362 +    assert(loadedDictEndPtr != NULL);
126363 +    assert(dictMatchStatePtr != NULL);
126364 +    {   U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
126365 +        U32 const loadedDictEnd = *loadedDictEndPtr;
126366 +        DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
126367 +                    (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
126368 +        assert(blockEndIdx >= loadedDictEnd);
126370 +        if (blockEndIdx > loadedDictEnd + maxDist) {
126371 +            /* On reaching window size, dictionaries are invalidated.
126372 +             * For simplification, if window size is reached anywhere within next block,
126373 +             * the dictionary is invalidated for the full block.
126374 +             */
126375 +            DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
126376 +            *loadedDictEndPtr = 0;
126377 +            *dictMatchStatePtr = NULL;
126378 +        } else {
126379 +            if (*loadedDictEndPtr != 0) {
126380 +                DEBUGLOG(6, "dictionary considered valid for current block");
126381 +    }   }   }
126384 +MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
126385 +    ZSTD_memset(window, 0, sizeof(*window));
126386 +    window->base = (BYTE const*)"";
126387 +    window->dictBase = (BYTE const*)"";
126388 +    window->dictLimit = 1;    /* start from 1, so that 1st position is valid */
126389 +    window->lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
126390 +    window->nextSrc = window->base + 1;   /* see issue #1241 */
126394 + * ZSTD_window_update():
126395 + * Updates the window by appending [src, src + srcSize) to the window.
126396 + * If it is not contiguous, the current prefix becomes the extDict, and we
126397 + * forget about the extDict. Handles overlap of the prefix and extDict.
126398 + * Returns non-zero if the segment is contiguous.
126399 + */
126400 +MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
126401 +                                  void const* src, size_t srcSize)
126403 +    BYTE const* const ip = (BYTE const*)src;
126404 +    U32 contiguous = 1;
126405 +    DEBUGLOG(5, "ZSTD_window_update");
126406 +    if (srcSize == 0)
126407 +        return contiguous;
126408 +    assert(window->base != NULL);
126409 +    assert(window->dictBase != NULL);
126410 +    /* Check if blocks follow each other */
126411 +    if (src != window->nextSrc) {
126412 +        /* not contiguous */
126413 +        size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
126414 +        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
126415 +        window->lowLimit = window->dictLimit;
126416 +        assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
126417 +        window->dictLimit = (U32)distanceFromBase;
126418 +        window->dictBase = window->base;
126419 +        window->base = ip - distanceFromBase;
126420 +        /* ms->nextToUpdate = window->dictLimit; */
126421 +        if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit;   /* too small extDict */
126422 +        contiguous = 0;
126423 +    }
126424 +    window->nextSrc = ip + srcSize;
126425 +    /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
126426 +    if ( (ip+srcSize > window->dictBase + window->lowLimit)
126427 +       & (ip < window->dictBase + window->dictLimit)) {
126428 +        ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
126429 +        U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
126430 +        window->lowLimit = lowLimitMax;
126431 +        DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
126432 +    }
126433 +    return contiguous;
126437 + * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
126438 + */
126439 +MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
126441 +    U32    const maxDistance = 1U << windowLog;
126442 +    U32    const lowestValid = ms->window.lowLimit;
126443 +    U32    const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
126444 +    U32    const isDictionary = (ms->loadedDictEnd != 0);
126445 +    /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
126446 +     * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
126447 +     * valid for the entire block. So this check is sufficient to find the lowest valid match index.
126448 +     */
126449 +    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
126450 +    return matchLowest;
126454 + * Returns the lowest allowed match index in the prefix.
126455 + */
126456 +MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
126458 +    U32    const maxDistance = 1U << windowLog;
126459 +    U32    const lowestValid = ms->window.dictLimit;
126460 +    U32    const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
126461 +    U32    const isDictionary = (ms->loadedDictEnd != 0);
126462 +    /* When computing the lowest prefix index we need to take the dictionary into account to handle
126463 +     * the edge case where the dictionary and the source are contiguous in memory.
126464 +     */
126465 +    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
126466 +    return matchLowest;
126471 +/* debug functions */
126472 +#if (DEBUGLEVEL>=2)
126474 +MEM_STATIC double ZSTD_fWeight(U32 rawStat)
126476 +    U32 const fp_accuracy = 8;
126477 +    U32 const fp_multiplier = (1 << fp_accuracy);
126478 +    U32 const newStat = rawStat + 1;
126479 +    U32 const hb = ZSTD_highbit32(newStat);
126480 +    U32 const BWeight = hb * fp_multiplier;
126481 +    U32 const FWeight = (newStat << fp_accuracy) >> hb;
126482 +    U32 const weight = BWeight + FWeight;
126483 +    assert(hb + fp_accuracy < 31);
126484 +    return (double)weight / fp_multiplier;
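The weight is a fixed-point approximation of log2(rawStat + 1) + 1, exact at powers of two. Editor's standalone check (assumes GCC/Clang builtins): rawStat 255 gives newStat 256, hb 8, weight (8*256 + 256)/256 = 9.0.

#include <assert.h>
static double fWeight(unsigned rawStat)
{
    unsigned const acc = 8, mul = 1u << acc, newStat = rawStat + 1;
    unsigned const hb = 31 - (unsigned)__builtin_clz(newStat);
    return (double)(hb * mul + ((newStat << acc) >> hb)) / mul;
}
int main(void) { assert(fWeight(255) == 9.0); return 0; }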
126487 +/* display a table content,
126488 + * listing each element, its frequency, and its predicted bit cost */
126489 +MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
126491 +    unsigned u, sum;
126492 +    for (u=0, sum=0; u<=max; u++) sum += table[u];
126493 +    DEBUGLOG(2, "total nb elts: %u", sum);
126494 +    for (u=0; u<=max; u++) {
126495 +        DEBUGLOG(2, "%2u: %5u  (%.2f)",
126496 +                u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
126497 +    }
126500 +#endif
126504 +/* ===============================================================
126505 + * Shared internal declarations
126506 + * These prototypes may be called from sources not in lib/compress
126507 + * =============================================================== */
126509 +/* ZSTD_loadCEntropy() :
126510 + * dict : must point at beginning of a valid zstd dictionary.
126511 + * return : size of dictionary header (size of magic number + dict ID + entropy tables)
126512 + * assumptions : magic number supposed already checked
126513 + *               and dictSize >= 8 */
126514 +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
126515 +                         const void* const dict, size_t dictSize);
126517 +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
126519 +/* ==============================================================
126520 + * Private declarations
126521 + * These prototypes shall only be called from within lib/compress
126522 + * ============================================================== */
126524 +/* ZSTD_getCParamsFromCCtxParams() :
126525 + * cParams are built depending on compressionLevel, src size hints,
126526 + * LDM and manually set compression parameters.
126527 + * Note: srcSizeHint == 0 means 0!
126528 + */
126529 +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
126530 +        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
126532 +/*! ZSTD_initCStream_internal() :
126533 + *  Private use only. Init streaming operation.
126534 + *  expects params to be valid.
126535 + *  must receive dict, or cdict, or none, but not both.
126536 + *  @return : 0, or an error code */
126537 +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
126538 +                     const void* dict, size_t dictSize,
126539 +                     const ZSTD_CDict* cdict,
126540 +                     const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
126542 +void ZSTD_resetSeqStore(seqStore_t* ssPtr);
126544 +/*! ZSTD_getCParamsFromCDict() :
126545 + *  as the name implies */
126546 +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
126548 +/* ZSTD_compressBegin_advanced_internal() :
126549 + * Private use only. To be called from zstdmt_compress.c. */
126550 +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
126551 +                                    const void* dict, size_t dictSize,
126552 +                                    ZSTD_dictContentType_e dictContentType,
126553 +                                    ZSTD_dictTableLoadMethod_e dtlm,
126554 +                                    const ZSTD_CDict* cdict,
126555 +                                    const ZSTD_CCtx_params* params,
126556 +                                    unsigned long long pledgedSrcSize);
126558 +/* ZSTD_compress_advanced_internal() :
126559 + * Private use only. To be called from zstdmt_compress.c. */
126560 +size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
126561 +                                       void* dst, size_t dstCapacity,
126562 +                                 const void* src, size_t srcSize,
126563 +                                 const void* dict, size_t dictSize,
126564 +                                 const ZSTD_CCtx_params* params);
126567 +/* ZSTD_writeLastEmptyBlock() :
126568 + * output an empty Block with end-of-frame mark to complete a frame
126569 + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
126570 + *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
126571 + */
126572 +size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
126575 +/* ZSTD_referenceExternalSequences() :
126576 + * Must be called before starting a compression operation.
126577 + * seqs must parse a prefix of the source.
126578 + * This cannot be used when long range matching is enabled.
126579 + * Zstd will use these sequences, and pass the literals to a secondary block
126580 + * compressor.
126581 + * @return : An error code on failure.
126582 + * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
126583 + * access and data corruption.
126584 + */
126585 +size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
126587 +/** ZSTD_cycleLog() :
126588 + *  condition for correct operation : hashLog > 1 */
126589 +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
126591 +/** ZSTD_CCtx_trace() :
126592 + *  Trace the end of a compression call.
126593 + */
126594 +void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
126596 +#endif /* ZSTD_COMPRESS_H */
126597 diff --git a/lib/zstd/compress/zstd_compress_literals.c b/lib/zstd/compress/zstd_compress_literals.c
126598 new file mode 100644
126599 index 000000000000..655bcda4d1f1
126600 --- /dev/null
126601 +++ b/lib/zstd/compress/zstd_compress_literals.c
126602 @@ -0,0 +1,158 @@
126604 + * Copyright (c) Yann Collet, Facebook, Inc.
126605 + * All rights reserved.
126607 + * This source code is licensed under both the BSD-style license (found in the
126608 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
126609 + * in the COPYING file in the root directory of this source tree).
126610 + * You may select, at your option, one of the above-listed licenses.
126611 + */
126613 + /*-*************************************
126614 + *  Dependencies
126615 + ***************************************/
126616 +#include "zstd_compress_literals.h"
126618 +size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
126620 +    BYTE* const ostart = (BYTE*)dst;
126621 +    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
126623 +    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
126625 +    switch(flSize)
126626 +    {
126627 +        case 1: /* 2 - 1 - 5 */
126628 +            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
126629 +            break;
126630 +        case 2: /* 2 - 2 - 12 */
126631 +            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
126632 +            break;
126633 +        case 3: /* 2 - 2 - 20 */
126634 +            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
126635 +            break;
126636 +        default:   /* not necessary : flSize is {1,2,3} */
126637 +            assert(0);
126638 +    }
126640 +    ZSTD_memcpy(ostart + flSize, src, srcSize);
126641 +    DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
126642 +    return srcSize + flSize;
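Editor's worked example of the "2 - 1 - 5" one-byte header (assumes set_basic == 0, its value in zstd_internal.h): srcSize 10 is <= 31, so flSize is 1 and the header byte is (10 << 3) | set_basic = 0x50.

#include <assert.h>
int main(void)
{
    unsigned const set_basic = 0, srcSize = 10;
    unsigned const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
    assert(flSize == 1 && (set_basic + (srcSize << 3)) == 0x50);
    return 0;
}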
126645 +size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
126647 +    BYTE* const ostart = (BYTE*)dst;
126648 +    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
126650 +    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
126652 +    switch(flSize)
126653 +    {
126654 +        case 1: /* 2 - 1 - 5 */
126655 +            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
126656 +            break;
126657 +        case 2: /* 2 - 2 - 12 */
126658 +            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
126659 +            break;
126660 +        case 3: /* 2 - 2 - 20 */
126661 +            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
126662 +            break;
126663 +        default:   /* not necessary : flSize is {1,2,3} */
126664 +            assert(0);
126665 +    }
126667 +    ostart[flSize] = *(const BYTE*)src;
126668 +    DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
126669 +    return flSize+1;
126672 +size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
126673 +                              ZSTD_hufCTables_t* nextHuf,
126674 +                              ZSTD_strategy strategy, int disableLiteralCompression,
126675 +                              void* dst, size_t dstCapacity,
126676 +                        const void* src, size_t srcSize,
126677 +                              void* entropyWorkspace, size_t entropyWorkspaceSize,
126678 +                        const int bmi2)
126680 +    size_t const minGain = ZSTD_minGain(srcSize, strategy);
126681 +    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
126682 +    BYTE*  const ostart = (BYTE*)dst;
126683 +    U32 singleStream = srcSize < 256;
126684 +    symbolEncodingType_e hType = set_compressed;
126685 +    size_t cLitSize;
126687 +    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
126688 +                disableLiteralCompression, (U32)srcSize);
126690 +    /* Prepare nextEntropy assuming reusing the existing table */
126691 +    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
126693 +    if (disableLiteralCompression)
126694 +        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
126696 +    /* small ? don't even attempt compression (speed opt) */
126697 +#   define COMPRESS_LITERALS_SIZE_MIN 63
126698 +    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
126699 +        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
126700 +    }
126702 +    RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
126703 +    {   HUF_repeat repeat = prevHuf->repeatMode;
126704 +        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
126705 +        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
126706 +        cLitSize = singleStream ?
126707 +            HUF_compress1X_repeat(
126708 +                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
126709 +                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
126710 +                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
126711 +            HUF_compress4X_repeat(
126712 +                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
126713 +                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
126714 +                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
126715 +        if (repeat != HUF_repeat_none) {
126716 +            /* reused the existing table */
126717 +            DEBUGLOG(5, "Reusing previous huffman table");
126718 +            hType = set_repeat;
126719 +        }
126720 +    }
126722 +    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
126723 +        ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
126724 +        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
126725 +    }
126726 +    if (cLitSize==1) {
126727 +        ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
126728 +        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
126729 +    }
126731 +    if (hType == set_compressed) {
126732 +        /* using a newly constructed table */
126733 +        nextHuf->repeatMode = HUF_repeat_check;
126734 +    }
126736 +    /* Build header */
126737 +    switch(lhSize)
126738 +    {
126739 +    case 3: /* 2 - 2 - 10 - 10 */
126740 +        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
126741 +            MEM_writeLE24(ostart, lhc);
126742 +            break;
126743 +        }
126744 +    case 4: /* 2 - 2 - 14 - 14 */
126745 +        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
126746 +            MEM_writeLE32(ostart, lhc);
126747 +            break;
126748 +        }
126749 +    case 5: /* 2 - 2 - 18 - 18 */
126750 +        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
126751 +            MEM_writeLE32(ostart, lhc);
126752 +            ostart[4] = (BYTE)(cLitSize >> 10);
126753 +            break;
126754 +        }
126755 +    default:  /* not possible : lhSize is {3,4,5} */
126756 +        assert(0);
126757 +    }
126758 +    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
126759 +    return lhSize+cLitSize;
126761 diff --git a/lib/zstd/compress/zstd_compress_literals.h b/lib/zstd/compress/zstd_compress_literals.h
126762 new file mode 100644
126763 index 000000000000..9904c0cd30a0
126764 --- /dev/null
126765 +++ b/lib/zstd/compress/zstd_compress_literals.h
126766 @@ -0,0 +1,29 @@
126768 + * Copyright (c) Yann Collet, Facebook, Inc.
126769 + * All rights reserved.
126771 + * This source code is licensed under both the BSD-style license (found in the
126772 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
126773 + * in the COPYING file in the root directory of this source tree).
126774 + * You may select, at your option, one of the above-listed licenses.
126775 + */
126777 +#ifndef ZSTD_COMPRESS_LITERALS_H
126778 +#define ZSTD_COMPRESS_LITERALS_H
126780 +#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */
126783 +size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
126785 +size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
126787 +size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
126788 +                              ZSTD_hufCTables_t* nextHuf,
126789 +                              ZSTD_strategy strategy, int disableLiteralCompression,
126790 +                              void* dst, size_t dstCapacity,
126791 +                        const void* src, size_t srcSize,
126792 +                              void* entropyWorkspace, size_t entropyWorkspaceSize,
126793 +                        const int bmi2);
126795 +#endif /* ZSTD_COMPRESS_LITERALS_H */
126796 diff --git a/lib/zstd/compress/zstd_compress_sequences.c b/lib/zstd/compress/zstd_compress_sequences.c
126797 new file mode 100644
126798 index 000000000000..08a5b89019dd
126799 --- /dev/null
126800 +++ b/lib/zstd/compress/zstd_compress_sequences.c
126801 @@ -0,0 +1,439 @@
126803 + * Copyright (c) Yann Collet, Facebook, Inc.
126804 + * All rights reserved.
126806 + * This source code is licensed under both the BSD-style license (found in the
126807 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
126808 + * in the COPYING file in the root directory of this source tree).
126809 + * You may select, at your option, one of the above-listed licenses.
126810 + */
126812 + /*-*************************************
126813 + *  Dependencies
126814 + ***************************************/
126815 +#include "zstd_compress_sequences.h"
126818 + * -log2(x / 256) lookup table for x in [0, 256).
126819 + * If x == 0: Return 0
126820 + * Else: Return floor(-log2(x / 256) * 256)
126821 + */
126822 +static unsigned const kInverseProbabilityLog256[256] = {
126823 +    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
126824 +    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
126825 +    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
126826 +    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
126827 +    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
126828 +    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
126829 +    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
126830 +    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
126831 +    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
126832 +    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
126833 +    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
126834 +    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
126835 +    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
126836 +    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
126837 +    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
126838 +    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
126839 +    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
126840 +    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
126841 +    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
126842 +    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
126843 +    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
126844 +    5,    4,    2,    1,
126845 +};
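The table above can be regenerated and spot-checked with a short throwaway program; this generator is illustrative only and not part of the patch:

#include <math.h>
#include <stdio.h>

/* Hypothetical generator: entry[x] = x ? floor(-log2(x/256.0) * 256) : 0,
 * which reproduces kInverseProbabilityLog256 (e.g. x=16 -> 1024, x=128 -> 256). */
int main(void)
{
    unsigned x;
    for (x = 0; x < 256; ++x) {
        unsigned const v = (x == 0) ? 0
                         : (unsigned)floor(-log2((double)x / 256.0) * 256.0);
        printf("%4u,%c", v, (x % 12 == 11) ? '\n' : ' ');
    }
    return 0;
}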
126847 +static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
126848 +  void const* ptr = ctable;
126849 +  U16 const* u16ptr = (U16 const*)ptr;
126850 +  U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
126851 +  return maxSymbolValue;
126855 + * Returns true if we should use ncount=-1 for low-probability
126856 + * symbols, and false if ncount=1 should be used instead.
126857 + */
126858 +static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
126860 +    /* Heuristic: This should cover most blocks <= 16K and
126861 +     * start to fade out after 16K to about 32K depending on
126862 +     * compressibility.
126863 +     */
126864 +    return nbSeq >= 2048;
126868 + * Returns the cost in bytes of encoding the normalized count header.
126869 + * Returns an error if any of the helper functions return an error.
126870 + */
126871 +static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
126872 +                              size_t const nbSeq, unsigned const FSELog)
126874 +    BYTE wksp[FSE_NCOUNTBOUND];
126875 +    S16 norm[MaxSeq + 1];
126876 +    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
126877 +    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
126878 +    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
126882 + * Returns the cost in bits of encoding the distribution described by count
126883 + * using the entropy bound.
126884 + */
126885 +static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
126887 +    unsigned cost = 0;
126888 +    unsigned s;
126889 +    for (s = 0; s <= max; ++s) {
126890 +        unsigned norm = (unsigned)((256 * count[s]) / total);
126891 +        if (count[s] != 0 && norm == 0)
126892 +            norm = 1;
126893 +        assert(count[s] < total);
126894 +        cost += count[s] * kInverseProbabilityLog256[norm];
126895 +    }
126896 +    return cost >> 8;
126897 +}
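As a toy check of the fixed-point accumulation above (the helper is hypothetical): for a histogram {192, 64} with total 256, norm[s] equals count[s], and the relevant table entries are kInverseProbabilityLog256[192] = 106 and kInverseProbabilityLog256[64] = 512:

#include <assert.h>

/* Hypothetical worked example of ZSTD_entropyCost's arithmetic. */
static void entropy_cost_toy_check(void)
{
    /* (192*106 + 64*512) >> 8 == (20352 + 32768) / 256 == 207 bits,
     * close to the true entropy 192*0.415 + 64*2 ~= 207.7 bits. */
    assert(((192u * 106u + 64u * 512u) >> 8) == 207u);
}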
126900 + * Returns the cost in bits of encoding the distribution in count using ctable.
126901 + * Returns an error if ctable cannot represent all the symbols in count.
126902 + */
126903 +size_t ZSTD_fseBitCost(
126904 +    FSE_CTable const* ctable,
126905 +    unsigned const* count,
126906 +    unsigned const max)
126908 +    unsigned const kAccuracyLog = 8;
126909 +    size_t cost = 0;
126910 +    unsigned s;
126911 +    FSE_CState_t cstate;
126912 +    FSE_initCState(&cstate, ctable);
126913 +    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
126914 +        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
126915 +                    ZSTD_getFSEMaxSymbolValue(ctable), max);
126916 +        return ERROR(GENERIC);
126917 +    }
126918 +    for (s = 0; s <= max; ++s) {
126919 +        unsigned const tableLog = cstate.stateLog;
126920 +        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
126921 +        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
126922 +        if (count[s] == 0)
126923 +            continue;
126924 +        if (bitCost >= badCost) {
126925 +            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
126926 +            return ERROR(GENERIC);
126927 +        }
126928 +        cost += (size_t)count[s] * bitCost;
126929 +    }
126930 +    return cost >> kAccuracyLog;
126934 + * Returns the cost in bits of encoding the distribution in count using the
126935 + * table described by norm. The max symbol supported by norm is assumed >= max.
126936 + * norm must be valid for every symbol with non-zero probability in count.
126937 + */
126938 +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
126939 +                             unsigned const* count, unsigned const max)
126941 +    unsigned const shift = 8 - accuracyLog;
126942 +    size_t cost = 0;
126943 +    unsigned s;
126944 +    assert(accuracyLog <= 8);
126945 +    for (s = 0; s <= max; ++s) {
126946 +        unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
126947 +        unsigned const norm256 = normAcc << shift;
126948 +        assert(norm256 > 0);
126949 +        assert(norm256 < 256);
126950 +        cost += count[s] * kInverseProbabilityLog256[norm256];
126951 +    }
126952 +    return cost >> 8;
126955 +symbolEncodingType_e
126956 +ZSTD_selectEncodingType(
126957 +        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
126958 +        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
126959 +        FSE_CTable const* prevCTable,
126960 +        short const* defaultNorm, U32 defaultNormLog,
126961 +        ZSTD_defaultPolicy_e const isDefaultAllowed,
126962 +        ZSTD_strategy const strategy)
126964 +    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
126965 +    if (mostFrequent == nbSeq) {
126966 +        *repeatMode = FSE_repeat_none;
126967 +        if (isDefaultAllowed && nbSeq <= 2) {
126968 +            /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
126969 +             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
126970 +             * If basic encoding isn't possible, always choose RLE.
126971 +             */
126972 +            DEBUGLOG(5, "Selected set_basic");
126973 +            return set_basic;
126974 +        }
126975 +        DEBUGLOG(5, "Selected set_rle");
126976 +        return set_rle;
126977 +    }
126978 +    if (strategy < ZSTD_lazy) {
126979 +        if (isDefaultAllowed) {
126980 +            size_t const staticFse_nbSeq_max = 1000;
126981 +            size_t const mult = 10 - strategy;
126982 +            size_t const baseLog = 3;
126983 +            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
126984 +            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
126985 +            assert(mult <= 9 && mult >= 7);
126986 +            if ( (*repeatMode == FSE_repeat_valid)
126987 +              && (nbSeq < staticFse_nbSeq_max) ) {
126988 +                DEBUGLOG(5, "Selected set_repeat");
126989 +                return set_repeat;
126990 +            }
126991 +            if ( (nbSeq < dynamicFse_nbSeq_min)
126992 +              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
126993 +                DEBUGLOG(5, "Selected set_basic");
126994 +                /* The format allows default tables to be repeated, but it isn't useful.
126995 +                 * When using simple heuristics to select encoding type, we don't want
126996 +                 * to confuse these tables with dictionaries. When running more careful
126997 +                 * analysis, we don't need to waste time checking both repeating tables
126998 +                 * and default tables.
126999 +                 */
127000 +                *repeatMode = FSE_repeat_none;
127001 +                return set_basic;
127002 +            }
127003 +        }
127004 +    } else {
127005 +        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
127006 +        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
127007 +        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
127008 +        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
127010 +        if (isDefaultAllowed) {
127011 +            assert(!ZSTD_isError(basicCost));
127012 +            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
127013 +        }
127014 +        assert(!ZSTD_isError(NCountCost));
127015 +        assert(compressedCost < ERROR(maxCode));
127016 +        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
127017 +                    (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
127018 +        if (basicCost <= repeatCost && basicCost <= compressedCost) {
127019 +            DEBUGLOG(5, "Selected set_basic");
127020 +            assert(isDefaultAllowed);
127021 +            *repeatMode = FSE_repeat_none;
127022 +            return set_basic;
127023 +        }
127024 +        if (repeatCost <= compressedCost) {
127025 +            DEBUGLOG(5, "Selected set_repeat");
127026 +            assert(!ZSTD_isError(repeatCost));
127027 +            return set_repeat;
127028 +        }
127029 +        assert(compressedCost < basicCost && compressedCost < repeatCost);
127030 +    }
127031 +    DEBUGLOG(5, "Selected set_compressed");
127032 +    *repeatMode = FSE_repeat_check;
127033 +    return set_compressed;
127034 +}
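The fast-strategy heuristic above sizes its dynamic-FSE threshold as ((1 << defaultNormLog) * mult) >> 3 with mult = 10 - strategy. A hypothetical helper makes the quoted ranges easy to verify:

/* Hypothetical check of the dynamicFse_nbSeq_min arithmetic above:
 * offsets (defaultNormLog = 5), strategies 1..3 -> 36, 32, 28
 * lengths (defaultNormLog = 6), strategies 1..3 -> 72, 64, 56
 * matching the "28-36 for offset, 56-72 for lengths" note. */
static unsigned long dynamic_fse_min(unsigned defaultNormLog, unsigned strategy)
{
    return ((1UL << defaultNormLog) * (10u - strategy)) >> 3;
}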
127036 +typedef struct {
127037 +    S16 norm[MaxSeq + 1];
127038 +    U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
127039 +} ZSTD_BuildCTableWksp;
127041 +size_t
127042 +ZSTD_buildCTable(void* dst, size_t dstCapacity,
127043 +                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
127044 +                unsigned* count, U32 max,
127045 +                const BYTE* codeTable, size_t nbSeq,
127046 +                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
127047 +                const FSE_CTable* prevCTable, size_t prevCTableSize,
127048 +                void* entropyWorkspace, size_t entropyWorkspaceSize)
127050 +    BYTE* op = (BYTE*)dst;
127051 +    const BYTE* const oend = op + dstCapacity;
127052 +    DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
127054 +    switch (type) {
127055 +    case set_rle:
127056 +        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
127057 +        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
127058 +        *op = codeTable[0];
127059 +        return 1;
127060 +    case set_repeat:
127061 +        ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
127062 +        return 0;
127063 +    case set_basic:
127064 +        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), "");  /* note : could be pre-calculated */
127065 +        return 0;
127066 +    case set_compressed: {
127067 +        ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
127068 +        size_t nbSeq_1 = nbSeq;
127069 +        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
127070 +        if (count[codeTable[nbSeq-1]] > 1) {
127071 +            count[codeTable[nbSeq-1]]--;
127072 +            nbSeq_1--;
127073 +        }
127074 +        assert(nbSeq_1 > 1);
127075 +        assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
127076 +        (void)entropyWorkspaceSize;
127077 +        FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
127078 +        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog);   /* overflow protected */
127079 +            FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
127080 +            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "");
127081 +            return NCountSize;
127082 +        }
127083 +    }
127084 +    default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
127085 +    }
127088 +FORCE_INLINE_TEMPLATE size_t
127089 +ZSTD_encodeSequences_body(
127090 +            void* dst, size_t dstCapacity,
127091 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
127092 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
127093 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
127094 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
127096 +    BIT_CStream_t blockStream;
127097 +    FSE_CState_t  stateMatchLength;
127098 +    FSE_CState_t  stateOffsetBits;
127099 +    FSE_CState_t  stateLitLength;
127101 +    RETURN_ERROR_IF(
127102 +        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
127103 +        dstSize_tooSmall, "not enough space remaining");
127104 +    DEBUGLOG(6, "available space for bitstream : %i  (dstCapacity=%u)",
127105 +                (int)(blockStream.endPtr - blockStream.startPtr),
127106 +                (unsigned)dstCapacity);
127108 +    /* first symbols */
127109 +    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
127110 +    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
127111 +    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
127112 +    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
127113 +    if (MEM_32bits()) BIT_flushBits(&blockStream);
127114 +    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
127115 +    if (MEM_32bits()) BIT_flushBits(&blockStream);
127116 +    if (longOffsets) {
127117 +        U32 const ofBits = ofCodeTable[nbSeq-1];
127118 +        unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
127119 +        if (extraBits) {
127120 +            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
127121 +            BIT_flushBits(&blockStream);
127122 +        }
127123 +        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
127124 +                    ofBits - extraBits);
127125 +    } else {
127126 +        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
127127 +    }
127128 +    BIT_flushBits(&blockStream);
127130 +    {   size_t n;
127131 +        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
127132 +            BYTE const llCode = llCodeTable[n];
127133 +            BYTE const ofCode = ofCodeTable[n];
127134 +            BYTE const mlCode = mlCodeTable[n];
127135 +            U32  const llBits = LL_bits[llCode];
127136 +            U32  const ofBits = ofCode;
127137 +            U32  const mlBits = ML_bits[mlCode];
127138 +            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
127139 +                        (unsigned)sequences[n].litLength,
127140 +                        (unsigned)sequences[n].matchLength + MINMATCH,
127141 +                        (unsigned)sequences[n].offset);
127142 +                                                                            /* 32b*/  /* 64b*/
127143 +                                                                            /* (7)*/  /* (7)*/
127144 +            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
127145 +            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
127146 +            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
127147 +            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
127148 +            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
127149 +                BIT_flushBits(&blockStream);                                /* (7)*/
127150 +            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
127151 +            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
127152 +            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
127153 +            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
127154 +            if (longOffsets) {
127155 +                unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
127156 +                if (extraBits) {
127157 +                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
127158 +                    BIT_flushBits(&blockStream);                            /* (7)*/
127159 +                }
127160 +                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
127161 +                            ofBits - extraBits);                            /* 31 */
127162 +            } else {
127163 +                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
127164 +            }
127165 +            BIT_flushBits(&blockStream);                                    /* (7)*/
127166 +            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
127167 +    }   }
127169 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
127170 +    FSE_flushCState(&blockStream, &stateMatchLength);
127171 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
127172 +    FSE_flushCState(&blockStream, &stateOffsetBits);
127173 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
127174 +    FSE_flushCState(&blockStream, &stateLitLength);
127176 +    {   size_t const streamSize = BIT_closeCStream(&blockStream);
127177 +        RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
127178 +        return streamSize;
127179 +    }
127180 +}
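The right-margin /* 15 */ /* 24 */ annotations in the loop above track worst-case bit-accumulator occupancy. Assuming the usual zstd table logs (LLFSELog = 9, MLFSELog = 9, OffFSELog = 8), the 64-bit early-flush test can be restated as a hypothetical predicate:

/* Hypothetical restatement of the 64-bit flush condition above: after a
 * flush, up to 7 bits may remain in the accumulator, and the three FSE
 * state writes add at most 9 + 9 + 8 = 26 bits, leaving 64 - 7 - 26 = 31
 * bits for the raw litLength/matchLength/offset bits before a flush. */
enum { kLLFSELog = 9, kMLFSELog = 9, kOffFSELog = 8 };

static int needs_early_flush_64(unsigned llBits, unsigned mlBits, unsigned ofBits)
{
    return (ofBits + mlBits + llBits)
        >= (64u - 7u - (kLLFSELog + kMLFSELog + kOffFSELog));
}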
127182 +static size_t
127183 +ZSTD_encodeSequences_default(
127184 +            void* dst, size_t dstCapacity,
127185 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
127186 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
127187 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
127188 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
127190 +    return ZSTD_encodeSequences_body(dst, dstCapacity,
127191 +                                    CTable_MatchLength, mlCodeTable,
127192 +                                    CTable_OffsetBits, ofCodeTable,
127193 +                                    CTable_LitLength, llCodeTable,
127194 +                                    sequences, nbSeq, longOffsets);
127198 +#if DYNAMIC_BMI2
127200 +static TARGET_ATTRIBUTE("bmi2") size_t
127201 +ZSTD_encodeSequences_bmi2(
127202 +            void* dst, size_t dstCapacity,
127203 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
127204 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
127205 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
127206 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
127208 +    return ZSTD_encodeSequences_body(dst, dstCapacity,
127209 +                                    CTable_MatchLength, mlCodeTable,
127210 +                                    CTable_OffsetBits, ofCodeTable,
127211 +                                    CTable_LitLength, llCodeTable,
127212 +                                    sequences, nbSeq, longOffsets);
127215 +#endif
127217 +size_t ZSTD_encodeSequences(
127218 +            void* dst, size_t dstCapacity,
127219 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
127220 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
127221 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
127222 +            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
127224 +    DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
127225 +#if DYNAMIC_BMI2
127226 +    if (bmi2) {
127227 +        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
127228 +                                         CTable_MatchLength, mlCodeTable,
127229 +                                         CTable_OffsetBits, ofCodeTable,
127230 +                                         CTable_LitLength, llCodeTable,
127231 +                                         sequences, nbSeq, longOffsets);
127232 +    }
127233 +#endif
127234 +    (void)bmi2;
127235 +    return ZSTD_encodeSequences_default(dst, dstCapacity,
127236 +                                        CTable_MatchLength, mlCodeTable,
127237 +                                        CTable_OffsetBits, ofCodeTable,
127238 +                                        CTable_LitLength, llCodeTable,
127239 +                                        sequences, nbSeq, longOffsets);
127240 +}
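The function above is the runtime half of the DYNAMIC_BMI2 idiom: one force-inlined body compiled twice, selected by a CPU-feature flag. A hypothetical minimal form of the pattern (GCC/Clang target attribute assumed; names are illustrative):

/* Hypothetical minimal form of the DYNAMIC_BMI2 dispatch pattern. */
static inline int work_body(int x) { return x * 2; /* stands in for the real body */ }

static int work_default(int x) { return work_body(x); }

static __attribute__((target("bmi2"))) int work_bmi2(int x) { return work_body(x); }

static int work(int x, int cpu_has_bmi2)
{
    /* The body is inlined into each wrapper, so the bmi2 copy is compiled
     * with BMI2 instructions available while the default copy is not. */
    return cpu_has_bmi2 ? work_bmi2(x) : work_default(x);
}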
127241 diff --git a/lib/zstd/compress/zstd_compress_sequences.h b/lib/zstd/compress/zstd_compress_sequences.h
127242 new file mode 100644
127243 index 000000000000..7991364c2f71
127244 --- /dev/null
127245 +++ b/lib/zstd/compress/zstd_compress_sequences.h
127246 @@ -0,0 +1,54 @@
127248 + * Copyright (c) Yann Collet, Facebook, Inc.
127249 + * All rights reserved.
127251 + * This source code is licensed under both the BSD-style license (found in the
127252 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
127253 + * in the COPYING file in the root directory of this source tree).
127254 + * You may select, at your option, one of the above-listed licenses.
127255 + */
127257 +#ifndef ZSTD_COMPRESS_SEQUENCES_H
127258 +#define ZSTD_COMPRESS_SEQUENCES_H
127260 +#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
127261 +#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
127263 +typedef enum {
127264 +    ZSTD_defaultDisallowed = 0,
127265 +    ZSTD_defaultAllowed = 1
127266 +} ZSTD_defaultPolicy_e;
127268 +symbolEncodingType_e
127269 +ZSTD_selectEncodingType(
127270 +        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
127271 +        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
127272 +        FSE_CTable const* prevCTable,
127273 +        short const* defaultNorm, U32 defaultNormLog,
127274 +        ZSTD_defaultPolicy_e const isDefaultAllowed,
127275 +        ZSTD_strategy const strategy);
127277 +size_t
127278 +ZSTD_buildCTable(void* dst, size_t dstCapacity,
127279 +                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
127280 +                unsigned* count, U32 max,
127281 +                const BYTE* codeTable, size_t nbSeq,
127282 +                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
127283 +                const FSE_CTable* prevCTable, size_t prevCTableSize,
127284 +                void* entropyWorkspace, size_t entropyWorkspaceSize);
127286 +size_t ZSTD_encodeSequences(
127287 +            void* dst, size_t dstCapacity,
127288 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
127289 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
127290 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
127291 +            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
127293 +size_t ZSTD_fseBitCost(
127294 +    FSE_CTable const* ctable,
127295 +    unsigned const* count,
127296 +    unsigned const max);
127298 +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
127299 +                             unsigned const* count, unsigned const max);
127300 +#endif /* ZSTD_COMPRESS_SEQUENCES_H */
127301 diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c
127302 new file mode 100644
127303 index 000000000000..767f73f5bf3d
127304 --- /dev/null
127305 +++ b/lib/zstd/compress/zstd_compress_superblock.c
127306 @@ -0,0 +1,850 @@
127308 + * Copyright (c) Yann Collet, Facebook, Inc.
127309 + * All rights reserved.
127311 + * This source code is licensed under both the BSD-style license (found in the
127312 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
127313 + * in the COPYING file in the root directory of this source tree).
127314 + * You may select, at your option, one of the above-listed licenses.
127315 + */
127317 + /*-*************************************
127318 + *  Dependencies
127319 + ***************************************/
127320 +#include "zstd_compress_superblock.h"
127322 +#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
127323 +#include "hist.h"                     /* HIST_countFast_wksp */
127324 +#include "zstd_compress_internal.h"
127325 +#include "zstd_compress_sequences.h"
127326 +#include "zstd_compress_literals.h"
127328 +/*-*************************************
127329 +*  Superblock entropy buffer structs
127330 +***************************************/
127331 +/** ZSTD_hufCTablesMetadata_t :
127332 + *  Stores Literals Block Type for a super-block in hType, and
127333 + *  huffman tree description in hufDesBuffer.
127334 + *  hufDesSize refers to the size of huffman tree description in bytes.
127335 + *  This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
127336 +typedef struct {
127337 +    symbolEncodingType_e hType;
127338 +    BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
127339 +    size_t hufDesSize;
127340 +} ZSTD_hufCTablesMetadata_t;
127342 +/** ZSTD_fseCTablesMetadata_t :
127343 + *  Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
127344 + *  fse tables in fseTablesBuffer.
127345 + *  fseTablesSize refers to the size of fse tables in bytes.
127346 + *  This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */
127347 +typedef struct {
127348 +    symbolEncodingType_e llType;
127349 +    symbolEncodingType_e ofType;
127350 +    symbolEncodingType_e mlType;
127351 +    BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
127352 +    size_t fseTablesSize;
127353 +    size_t lastCountSize; /* This is to account for a bug in zstd 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
127354 +} ZSTD_fseCTablesMetadata_t;
127356 +typedef struct {
127357 +    ZSTD_hufCTablesMetadata_t hufMetadata;
127358 +    ZSTD_fseCTablesMetadata_t fseMetadata;
127359 +} ZSTD_entropyCTablesMetadata_t;
127362 +/** ZSTD_buildSuperBlockEntropy_literal() :
127363 + *  Builds entropy for the super-block literals.
127364 + *  Stores literals block type (raw, rle, compressed, repeat) and
127365 + *  huffman description table to hufMetadata.
127366 + *  @return : size of huffman description table or error code */
127367 +static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,
127368 +                                            const ZSTD_hufCTables_t* prevHuf,
127369 +                                                  ZSTD_hufCTables_t* nextHuf,
127370 +                                                  ZSTD_hufCTablesMetadata_t* hufMetadata,
127371 +                                                  const int disableLiteralsCompression,
127372 +                                                  void* workspace, size_t wkspSize)
127374 +    BYTE* const wkspStart = (BYTE*)workspace;
127375 +    BYTE* const wkspEnd = wkspStart + wkspSize;
127376 +    BYTE* const countWkspStart = wkspStart;
127377 +    unsigned* const countWksp = (unsigned*)workspace;
127378 +    const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
127379 +    BYTE* const nodeWksp = countWkspStart + countWkspSize;
127380 +    const size_t nodeWkspSize = wkspEnd-nodeWksp;
127381 +    unsigned maxSymbolValue = 255;
127382 +    unsigned huffLog = HUF_TABLELOG_DEFAULT;
127383 +    HUF_repeat repeat = prevHuf->repeatMode;
127385 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize);
127387 +    /* Prepare nextEntropy, assuming reuse of the existing table */
127388 +    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
127390 +    if (disableLiteralsCompression) {
127391 +        DEBUGLOG(5, "set_basic - disabled");
127392 +        hufMetadata->hType = set_basic;
127393 +        return 0;
127394 +    }
127396 +    /* small ? don't even attempt compression (speed opt) */
127397 +#   define COMPRESS_LITERALS_SIZE_MIN 63
127398 +    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
127399 +        if (srcSize <= minLitSize) {
127400 +            DEBUGLOG(5, "set_basic - too small");
127401 +            hufMetadata->hType = set_basic;
127402 +            return 0;
127403 +        }
127404 +    }
127406 +    /* Scan input and build symbol stats */
127407 +    {   size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
127408 +        FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
127409 +        if (largest == srcSize) {
127410 +            DEBUGLOG(5, "set_rle");
127411 +            hufMetadata->hType = set_rle;
127412 +            return 0;
127413 +        }
127414 +        if (largest <= (srcSize >> 7)+4) {
127415 +            DEBUGLOG(5, "set_basic - no gain");
127416 +            hufMetadata->hType = set_basic;
127417 +            return 0;
127418 +        }
127419 +    }
127421 +    /* Validate the previous Huffman table */
127422 +    if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
127423 +        repeat = HUF_repeat_none;
127424 +    }
127426 +    /* Build Huffman Tree */
127427 +    ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
127428 +    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
127429 +    {   size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
127430 +                                                    maxSymbolValue, huffLog,
127431 +                                                    nodeWksp, nodeWkspSize);
127432 +        FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
127433 +        huffLog = (U32)maxBits;
127434 +        {   /* Build and write the CTable */
127435 +            size_t const newCSize = HUF_estimateCompressedSize(
127436 +                    (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
127437 +            size_t const hSize = HUF_writeCTable_wksp(
127438 +                    hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
127439 +                    (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
127440 +                    nodeWksp, nodeWkspSize);
127441 +            /* Check against repeating the previous CTable */
127442 +            if (repeat != HUF_repeat_none) {
127443 +                size_t const oldCSize = HUF_estimateCompressedSize(
127444 +                        (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
127445 +                if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
127446 +                    DEBUGLOG(5, "set_repeat - smaller");
127447 +                    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
127448 +                    hufMetadata->hType = set_repeat;
127449 +                    return 0;
127450 +                }
127451 +            }
127452 +            if (newCSize + hSize >= srcSize) {
127453 +                DEBUGLOG(5, "set_basic - no gains");
127454 +                ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
127455 +                hufMetadata->hType = set_basic;
127456 +                return 0;
127457 +            }
127458 +            DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
127459 +            hufMetadata->hType = set_compressed;
127460 +            nextHuf->repeatMode = HUF_repeat_check;
127461 +            return hSize;
127462 +        }
127463 +    }
127464 +}
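The repeat-vs-rebuild choice near the end of this function compares the previous table's estimated output against the new table's output plus its description cost. A hypothetical helper with toy numbers shows the shape of the decision:

/* Hypothetical restatement of the set_repeat condition above. */
static int prefer_repeat(size_t oldCSize, size_t newCSize,
                         size_t hSize, size_t srcSize)
{
    return oldCSize < srcSize
        && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize);
}
/* e.g. prefer_repeat(620, 600, 40, 1000) == 1 : once the 40-byte table
 * description is counted, reusing the previous table (620 bytes) wins. */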
127466 +/** ZSTD_buildSuperBlockEntropy_sequences() :
127467 + *  Builds entropy for the super-block sequences.
127468 + *  Stores symbol compression modes and fse table to fseMetadata.
127469 + *  @return : size of fse tables or error code */
127470 +static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr,
127471 +                                              const ZSTD_fseCTables_t* prevEntropy,
127472 +                                                    ZSTD_fseCTables_t* nextEntropy,
127473 +                                              const ZSTD_CCtx_params* cctxParams,
127474 +                                                    ZSTD_fseCTablesMetadata_t* fseMetadata,
127475 +                                                    void* workspace, size_t wkspSize)
127477 +    BYTE* const wkspStart = (BYTE*)workspace;
127478 +    BYTE* const wkspEnd = wkspStart + wkspSize;
127479 +    BYTE* const countWkspStart = wkspStart;
127480 +    unsigned* const countWksp = (unsigned*)workspace;
127481 +    const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned);
127482 +    BYTE* const cTableWksp = countWkspStart + countWkspSize;
127483 +    const size_t cTableWkspSize = wkspEnd-cTableWksp;
127484 +    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
127485 +    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
127486 +    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
127487 +    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
127488 +    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
127489 +    const BYTE* const llCodeTable = seqStorePtr->llCode;
127490 +    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
127491 +    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
127492 +    BYTE* const ostart = fseMetadata->fseTablesBuffer;
127493 +    BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
127494 +    BYTE* op = ostart;
127496 +    assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE));
127497 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq);
127498 +    ZSTD_memset(workspace, 0, wkspSize);
127500 +    fseMetadata->lastCountSize = 0;
127501 +    /* convert length/distances into codes */
127502 +    ZSTD_seqToCodes(seqStorePtr);
127503 +    /* build CTable for Literal Lengths */
127504 +    {   U32 LLtype;
127505 +        unsigned max = MaxLL;
127506 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
127507 +        DEBUGLOG(5, "Building LL table");
127508 +        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
127509 +        LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
127510 +                                        countWksp, max, mostFrequent, nbSeq,
127511 +                                        LLFSELog, prevEntropy->litlengthCTable,
127512 +                                        LL_defaultNorm, LL_defaultNormLog,
127513 +                                        ZSTD_defaultAllowed, strategy);
127514 +        assert(set_basic < set_compressed && set_rle < set_compressed);
127515 +        assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
127516 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
127517 +                                                    countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
127518 +                                                    prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
127519 +                                                    cTableWksp, cTableWkspSize);
127520 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
127521 +            if (LLtype == set_compressed)
127522 +                fseMetadata->lastCountSize = countSize;
127523 +            op += countSize;
127524 +            fseMetadata->llType = (symbolEncodingType_e) LLtype;
127525 +    }   }
127526 +    /* build CTable for Offsets */
127527 +    {   U32 Offtype;
127528 +        unsigned max = MaxOff;
127529 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
127530 +        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
127531 +        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
127532 +        DEBUGLOG(5, "Building OF table");
127533 +        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
127534 +        Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
127535 +                                        countWksp, max, mostFrequent, nbSeq,
127536 +                                        OffFSELog, prevEntropy->offcodeCTable,
127537 +                                        OF_defaultNorm, OF_defaultNormLog,
127538 +                                        defaultPolicy, strategy);
127539 +        assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
127540 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
127541 +                                                    countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
127542 +                                                    prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
127543 +                                                    cTableWksp, cTableWkspSize);
127544 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
127545 +            if (Offtype == set_compressed)
127546 +                fseMetadata->lastCountSize = countSize;
127547 +            op += countSize;
127548 +            fseMetadata->ofType = (symbolEncodingType_e) Offtype;
127549 +    }   }
127550 +    /* build CTable for MatchLengths */
127551 +    {   U32 MLtype;
127552 +        unsigned max = MaxML;
127553 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
127554 +        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
127555 +        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
127556 +        MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
127557 +                                        countWksp, max, mostFrequent, nbSeq,
127558 +                                        MLFSELog, prevEntropy->matchlengthCTable,
127559 +                                        ML_defaultNorm, ML_defaultNormLog,
127560 +                                        ZSTD_defaultAllowed, strategy);
127561 +        assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
127562 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
127563 +                                                    countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
127564 +                                                    prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
127565 +                                                    cTableWksp, cTableWkspSize);
127566 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
127567 +            if (MLtype == set_compressed)
127568 +                fseMetadata->lastCountSize = countSize;
127569 +            op += countSize;
127570 +            fseMetadata->mlType = (symbolEncodingType_e) MLtype;
127571 +    }   }
127572 +    assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer));
127573 +    return op-ostart;
127577 +/** ZSTD_buildSuperBlockEntropy() :
127578 + *  Builds entropy for the super-block.
127579 + *  @return : 0 on success or error code */
127580 +static size_t
127581 +ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,
127582 +                      const ZSTD_entropyCTables_t* prevEntropy,
127583 +                            ZSTD_entropyCTables_t* nextEntropy,
127584 +                      const ZSTD_CCtx_params* cctxParams,
127585 +                            ZSTD_entropyCTablesMetadata_t* entropyMetadata,
127586 +                            void* workspace, size_t wkspSize)
127588 +    size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
127589 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy");
127590 +    entropyMetadata->hufMetadata.hufDesSize =
127591 +        ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize,
127592 +                                            &prevEntropy->huf, &nextEntropy->huf,
127593 +                                            &entropyMetadata->hufMetadata,
127594 +                                            ZSTD_disableLiteralsCompression(cctxParams),
127595 +                                            workspace, wkspSize);
127596 +    FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed");
127597 +    entropyMetadata->fseMetadata.fseTablesSize =
127598 +        ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr,
127599 +                                              &prevEntropy->fse, &nextEntropy->fse,
127600 +                                              cctxParams,
127601 +                                              &entropyMetadata->fseMetadata,
127602 +                                              workspace, wkspSize);
127603 +    FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed");
127604 +    return 0;
127607 +/** ZSTD_compressSubBlock_literal() :
127608 + *  Compresses literals section for a sub-block.
127609 + *  When we have to write the Huffman table we will sometimes choose a header
127610 + *  size larger than necessary. This is because we have to pick the header size
127611 + *  before we know the table size + compressed size, so we have a bound on the
127612 + *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
127614 + *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
127615 + *  in writing the header, otherwise it is set to 0.
127617 + *  hufMetadata->hType has literals block type info.
127618 + *      If it is set_basic, all sub-blocks' literals sections will be Raw_Literals_Block.
127619 + *      If it is set_rle, all sub-blocks' literals sections will be RLE_Literals_Block.
127620 + *      If it is set_compressed, the first sub-block's literals section will be
127621 + *      Compressed_Literals_Block, and the following sub-blocks' literals sections
127622 + *      will be Treeless_Literals_Block.
127623 + *  @return : compressed size of literals section of a sub-block
127624 + *            Or 0 if it is unable to compress.
127625 + *            Or error code */
127626 +static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
127627 +                                    const ZSTD_hufCTablesMetadata_t* hufMetadata,
127628 +                                    const BYTE* literals, size_t litSize,
127629 +                                    void* dst, size_t dstSize,
127630 +                                    const int bmi2, int writeEntropy, int* entropyWritten)
127632 +    size_t const header = writeEntropy ? 200 : 0;
127633 +    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
127634 +    BYTE* const ostart = (BYTE*)dst;
127635 +    BYTE* const oend = ostart + dstSize;
127636 +    BYTE* op = ostart + lhSize;
127637 +    U32 const singleStream = lhSize == 3;
127638 +    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
127639 +    size_t cLitSize = 0;
127641 +    (void)bmi2; /* TODO bmi2... */
127643 +    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
127645 +    *entropyWritten = 0;
127646 +    if (litSize == 0 || hufMetadata->hType == set_basic) {
127647 +      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
127648 +      return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
127649 +    } else if (hufMetadata->hType == set_rle) {
127650 +      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
127651 +      return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
127652 +    }
127654 +    assert(litSize > 0);
127655 +    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
127657 +    if (writeEntropy && hufMetadata->hType == set_compressed) {
127658 +        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
127659 +        op += hufMetadata->hufDesSize;
127660 +        cLitSize += hufMetadata->hufDesSize;
127661 +        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
127662 +    }
127664 +    /* TODO bmi2 */
127665 +    {   const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
127666 +                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
127667 +        op += cSize;
127668 +        cLitSize += cSize;
127669 +        if (cSize == 0 || ERR_isError(cSize)) {
127670 +            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
127671 +            return 0;
127672 +        }
127673 +        /* If we expand and we aren't writing a header then emit uncompressed */
127674 +        if (!writeEntropy && cLitSize >= litSize) {
127675 +            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because incompressible");
127676 +            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
127677 +        }
127678 +        /* If we are writing headers then allow expansion that doesn't change our header size. */
127679 +        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
127680 +            assert(cLitSize > litSize);
127681 +            DEBUGLOG(5, "Literals expanded beyond allowed header size");
127682 +            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
127683 +        }
127684 +        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
127685 +    }
127687 +    /* Build header */
127688 +    switch(lhSize)
127689 +    {
127690 +    case 3: /* 2 - 2 - 10 - 10 */
127691 +        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
127692 +            MEM_writeLE24(ostart, lhc);
127693 +            break;
127694 +        }
127695 +    case 4: /* 2 - 2 - 14 - 14 */
127696 +        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
127697 +            MEM_writeLE32(ostart, lhc);
127698 +            break;
127699 +        }
127700 +    case 5: /* 2 - 2 - 18 - 18 */
127701 +        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
127702 +            MEM_writeLE32(ostart, lhc);
127703 +            ostart[4] = (BYTE)(cLitSize >> 10);
127704 +            break;
127705 +        }
127706 +    default:  /* not possible : lhSize is {3,4,5} */
127707 +        assert(0);
127708 +    }
127709 +    *entropyWritten = 1;
127710 +    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
127711 +    return op-ostart;
127714 +static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
127715 +    const seqDef* const sstart = sequences;
127716 +    const seqDef* const send = sequences + nbSeq;
127717 +    const seqDef* sp = sstart;
127718 +    size_t matchLengthSum = 0;
127719 +    size_t litLengthSum = 0;
127720 +    while (send-sp > 0) {
127721 +        ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
127722 +        litLengthSum += seqLen.litLength;
127723 +        matchLengthSum += seqLen.matchLength;
127724 +        sp++;
127725 +    }
127726 +    assert(litLengthSum <= litSize);
127727 +    if (!lastSequence) {
127728 +        assert(litLengthSum == litSize);
127729 +    }
127730 +    return matchLengthSum + litSize;
127733 +/** ZSTD_compressSubBlock_sequences() :
127734 + *  Compresses sequences section for a sub-block.
127735 + *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
127736 + *  symbol compression modes for the super-block.
127737 + *  The first successfully compressed block will have these in its header.
127738 + *  We set entropyWritten=1 when we succeed in compressing the sequences.
127739 + *  The following sub-blocks will always have repeat mode.
127740 + *  @return : compressed size of sequences section of a sub-block
127741 + *            Or 0 if it is unable to compress
127742 + *            Or error code. */
127743 +static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
127744 +                                              const ZSTD_fseCTablesMetadata_t* fseMetadata,
127745 +                                              const seqDef* sequences, size_t nbSeq,
127746 +                                              const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
127747 +                                              const ZSTD_CCtx_params* cctxParams,
127748 +                                              void* dst, size_t dstCapacity,
127749 +                                              const int bmi2, int writeEntropy, int* entropyWritten)
127751 +    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
127752 +    BYTE* const ostart = (BYTE*)dst;
127753 +    BYTE* const oend = ostart + dstCapacity;
127754 +    BYTE* op = ostart;
127755 +    BYTE* seqHead;
127757 +    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
127759 +    *entropyWritten = 0;
127760 +    /* Sequences Header */
127761 +    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
127762 +                    dstSize_tooSmall, "");
127763 +    if (nbSeq < 0x7F)
127764 +        *op++ = (BYTE)nbSeq;
127765 +    else if (nbSeq < LONGNBSEQ)
127766 +        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
127767 +    else
127768 +        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
127769 +    if (nbSeq==0) {
127770 +        return op - ostart;
127771 +    }
127773 +    /* seqHead : flags for FSE encoding type */
127774 +    seqHead = op++;
127776 +    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
127778 +    if (writeEntropy) {
127779 +        const U32 LLtype = fseMetadata->llType;
127780 +        const U32 Offtype = fseMetadata->ofType;
127781 +        const U32 MLtype = fseMetadata->mlType;
127782 +        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
127783 +        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
127784 +        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
127785 +        op += fseMetadata->fseTablesSize;
127786 +    } else {
127787 +        const U32 repeat = set_repeat;
127788 +        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
127789 +    }
127791 +    {   size_t const bitstreamSize = ZSTD_encodeSequences(
127792 +                                        op, oend - op,
127793 +                                        fseTables->matchlengthCTable, mlCode,
127794 +                                        fseTables->offcodeCTable, ofCode,
127795 +                                        fseTables->litlengthCTable, llCode,
127796 +                                        sequences, nbSeq,
127797 +                                        longOffsets, bmi2);
127798 +        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
127799 +        op += bitstreamSize;
127800 +        /* zstd versions <= 1.3.4 mistakenly report corruption when
127801 +         * FSE_readNCount() receives a buffer < 4 bytes.
127802 +         * Fixed by https://github.com/facebook/zstd/pull/1146.
127803 +         * This can happen when the last set_compressed table present is 2
127804 +         * bytes and the bitstream is only one byte.
127805 +         * In this exceedingly rare case, we will simply emit an uncompressed
127806 +         * block, since it isn't worth optimizing.
127807 +         */
127808 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
127809 +        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
127810 +            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
127811 +            assert(fseMetadata->lastCountSize + bitstreamSize == 3);
127812 +            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
127813 +                        "emitting an uncompressed block.");
127814 +            return 0;
127815 +        }
127816 +#endif
127817 +        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
127818 +    }
127820 +    /* zstd versions <= 1.4.0 mistakenly report error when
127821 +     * sequences section body size is less than 3 bytes.
127822 +     * Fixed by https://github.com/facebook/zstd/pull/1664.
127823 +     * This can happen when the previous sequences section block is compressed
127824 +     * with rle mode and the current block's sequences section is compressed
127825 +     * with repeat mode where sequences section body size can be 1 byte.
127826 +     */
127827 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
127828 +    if (op-seqHead < 4) {
127829 +        DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
127830 +                    "an uncompressed block when sequences are < 4 bytes");
127831 +        return 0;
127832 +    }
127833 +#endif
127835 +    *entropyWritten = 1;
127836 +    return op - ostart;
127837 +}
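The nbSeq header written at the top of this function uses 1, 2, or 3 bytes. A hypothetical decoder-side mirror (assuming LONGNBSEQ = 0x7F00, as in zstd's internal headers) shows the three cases:

#include <stddef.h>

/* Hypothetical mirror of the nbSeq header encoding above. */
#define LONGNBSEQ 0x7F00

static size_t read_nbSeq(const unsigned char* ip, size_t* nbSeq)
{
    if (ip[0] < 0x80) {                      /* encoder: nbSeq < 0x7F */
        *nbSeq = ip[0];
        return 1;
    }
    if (ip[0] < 0xFF) {                      /* encoder: nbSeq < LONGNBSEQ */
        *nbSeq = ((size_t)(ip[0] - 0x80) << 8) | ip[1];
        return 2;
    }
    *nbSeq = LONGNBSEQ + ((size_t)ip[1] | ((size_t)ip[2] << 8));
    return 3;
}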
127839 +/** ZSTD_compressSubBlock() :
127840 + *  Compresses a single sub-block.
127841 + *  @return : compressed size of the sub-block
127842 + *            Or 0 if it failed to compress. */
127843 +static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
127844 +                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
127845 +                                    const seqDef* sequences, size_t nbSeq,
127846 +                                    const BYTE* literals, size_t litSize,
127847 +                                    const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
127848 +                                    const ZSTD_CCtx_params* cctxParams,
127849 +                                    void* dst, size_t dstCapacity,
127850 +                                    const int bmi2,
127851 +                                    int writeLitEntropy, int writeSeqEntropy,
127852 +                                    int* litEntropyWritten, int* seqEntropyWritten,
127853 +                                    U32 lastBlock)
127854 +{
127855 +    BYTE* const ostart = (BYTE*)dst;
127856 +    BYTE* const oend = ostart + dstCapacity;
127857 +    BYTE* op = ostart + ZSTD_blockHeaderSize;
127858 +    DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
127859 +                litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
127860 +    {   size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
127861 +                                                        &entropyMetadata->hufMetadata, literals, litSize,
127862 +                                                        op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
127863 +        FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
127864 +        if (cLitSize == 0) return 0;
127865 +        op += cLitSize;
127866 +    }
127867 +    {   size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
127868 +                                                  &entropyMetadata->fseMetadata,
127869 +                                                  sequences, nbSeq,
127870 +                                                  llCode, mlCode, ofCode,
127871 +                                                  cctxParams,
127872 +                                                  op, oend-op,
127873 +                                                  bmi2, writeSeqEntropy, seqEntropyWritten);
127874 +        FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
127875 +        if (cSeqSize == 0) return 0;
127876 +        op += cSeqSize;
127877 +    }
127878 +    /* Write block header */
127879 +    {   size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
127880 +        U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
127881 +        MEM_writeLE24(ostart, cBlockHeader24);
127882 +    }
127883 +    return op-ostart;
127884 +}
127886 +static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
127887 +                                                const ZSTD_hufCTables_t* huf,
127888 +                                                const ZSTD_hufCTablesMetadata_t* hufMetadata,
127889 +                                                void* workspace, size_t wkspSize,
127890 +                                                int writeEntropy)
127891 +{
127892 +    unsigned* const countWksp = (unsigned*)workspace;
127893 +    unsigned maxSymbolValue = 255;
127894 +    size_t literalSectionHeaderSize = 3; /* Use hard-coded size of 3 bytes */
127896 +    if (hufMetadata->hType == set_basic) return litSize;
127897 +    else if (hufMetadata->hType == set_rle) return 1;
127898 +    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
127899 +        size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
127900 +        if (ZSTD_isError(largest)) return litSize;
127901 +        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
127902 +            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
127903 +            return cLitSizeEstimate + literalSectionHeaderSize;
127904 +    }   }
127905 +    assert(0); /* impossible */
127906 +    return 0;
127907 +}
127909 +static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
127910 +                        const BYTE* codeTable, unsigned maxCode,
127911 +                        size_t nbSeq, const FSE_CTable* fseCTable,
127912 +                        const U32* additionalBits,
127913 +                        short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
127914 +                        void* workspace, size_t wkspSize)
127915 +{
127916 +    unsigned* const countWksp = (unsigned*)workspace;
127917 +    const BYTE* ctp = codeTable;
127918 +    const BYTE* const ctStart = ctp;
127919 +    const BYTE* const ctEnd = ctStart + nbSeq;
127920 +    size_t cSymbolTypeSizeEstimateInBits = 0;
127921 +    unsigned max = maxCode;
127923 +    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */
127924 +    if (type == set_basic) {
127925 +        /* We selected this encoding type, so it must be valid. */
127926 +        assert(max <= defaultMax);
127927 +        cSymbolTypeSizeEstimateInBits = max <= defaultMax
127928 +                ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
127929 +                : ERROR(GENERIC);
127930 +    } else if (type == set_rle) {
127931 +        cSymbolTypeSizeEstimateInBits = 0;
127932 +    } else if (type == set_compressed || type == set_repeat) {
127933 +        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
127934 +    }
127935 +    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
127936 +    while (ctp < ctEnd) {
127937 +        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
127938 +        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
127939 +        ctp++;
127940 +    }
127941 +    return cSymbolTypeSizeEstimateInBits / 8;
127942 +}
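As a worked example of the loop above (illustrative numbers only): for offsets the code byte itself is the number of extra bits, so 100 sequences whose offset codes average 10 would add roughly 1000 bits on top of the FSE symbol cost from ZSTD_fseBitCost(); the final division by 8 converts the accumulated bit total into the byte estimate the caller sums.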
127944 +static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
127945 +                                                  const BYTE* llCodeTable,
127946 +                                                  const BYTE* mlCodeTable,
127947 +                                                  size_t nbSeq,
127948 +                                                  const ZSTD_fseCTables_t* fseTables,
127949 +                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,
127950 +                                                  void* workspace, size_t wkspSize,
127951 +                                                  int writeEntropy)
127952 +{
127953 +    size_t sequencesSectionHeaderSize = 3; /* Use hard-coded size of 3 bytes */
127954 +    size_t cSeqSizeEstimate = 0;
127955 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
127956 +                                         nbSeq, fseTables->offcodeCTable, NULL,
127957 +                                         OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
127958 +                                         workspace, wkspSize);
127959 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
127960 +                                         nbSeq, fseTables->litlengthCTable, LL_bits,
127961 +                                         LL_defaultNorm, LL_defaultNormLog, MaxLL,
127962 +                                         workspace, wkspSize);
127963 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
127964 +                                         nbSeq, fseTables->matchlengthCTable, ML_bits,
127965 +                                         ML_defaultNorm, ML_defaultNormLog, MaxML,
127966 +                                         workspace, wkspSize);
127967 +    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
127968 +    return cSeqSizeEstimate + sequencesSectionHeaderSize;
127969 +}
127971 +static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
127972 +                                        const BYTE* ofCodeTable,
127973 +                                        const BYTE* llCodeTable,
127974 +                                        const BYTE* mlCodeTable,
127975 +                                        size_t nbSeq,
127976 +                                        const ZSTD_entropyCTables_t* entropy,
127977 +                                        const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
127978 +                                        void* workspace, size_t wkspSize,
127979 +                                        int writeLitEntropy, int writeSeqEntropy) {
127980 +    size_t cSizeEstimate = 0;
127981 +    cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
127982 +                                                         &entropy->huf, &entropyMetadata->hufMetadata,
127983 +                                                         workspace, wkspSize, writeLitEntropy);
127984 +    cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
127985 +                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
127986 +                                                         workspace, wkspSize, writeSeqEntropy);
127987 +    return cSizeEstimate + ZSTD_blockHeaderSize;
127988 +}
127990 +static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
127991 +{
127992 +    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
127993 +        return 1;
127994 +    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
127995 +        return 1;
127996 +    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
127997 +        return 1;
127998 +    return 0;
127999 +}
128001 +/** ZSTD_compressSubBlock_multi() :
128002 + *  Breaks super-block into multiple sub-blocks and compresses them.
128003 + *  Entropy will be written to the first block.
128004 + *  The following blocks will use repeat mode to compress.
128005 + *  All sub-blocks are compressed blocks (no raw or rle blocks).
128006 + *  @return : compressed size of the super block (which is multiple ZSTD blocks)
128007 + *            Or 0 if it failed to compress. */
128008 +static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
128009 +                            const ZSTD_compressedBlockState_t* prevCBlock,
128010 +                            ZSTD_compressedBlockState_t* nextCBlock,
128011 +                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
128012 +                            const ZSTD_CCtx_params* cctxParams,
128013 +                                  void* dst, size_t dstCapacity,
128014 +                            const void* src, size_t srcSize,
128015 +                            const int bmi2, U32 lastBlock,
128016 +                            void* workspace, size_t wkspSize)
128017 +{
128018 +    const seqDef* const sstart = seqStorePtr->sequencesStart;
128019 +    const seqDef* const send = seqStorePtr->sequences;
128020 +    const seqDef* sp = sstart;
128021 +    const BYTE* const lstart = seqStorePtr->litStart;
128022 +    const BYTE* const lend = seqStorePtr->lit;
128023 +    const BYTE* lp = lstart;
128024 +    BYTE const* ip = (BYTE const*)src;
128025 +    BYTE const* const iend = ip + srcSize;
128026 +    BYTE* const ostart = (BYTE*)dst;
128027 +    BYTE* const oend = ostart + dstCapacity;
128028 +    BYTE* op = ostart;
128029 +    const BYTE* llCodePtr = seqStorePtr->llCode;
128030 +    const BYTE* mlCodePtr = seqStorePtr->mlCode;
128031 +    const BYTE* ofCodePtr = seqStorePtr->ofCode;
128032 +    size_t targetCBlockSize = cctxParams->targetCBlockSize;
128033 +    size_t litSize, seqCount;
128034 +    int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
128035 +    int writeSeqEntropy = 1;
128036 +    int lastSequence = 0;
128038 +    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
128039 +                (unsigned)(lend-lp), (unsigned)(send-sstart));
128041 +    litSize = 0;
128042 +    seqCount = 0;
128043 +    do {
128044 +        size_t cBlockSizeEstimate = 0;
128045 +        if (sstart == send) {
128046 +            lastSequence = 1;
128047 +        } else {
128048 +            const seqDef* const sequence = sp + seqCount;
128049 +            lastSequence = sequence == send - 1;
128050 +            litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
128051 +            seqCount++;
128052 +        }
128053 +        if (lastSequence) {
128054 +            assert(lp <= lend);
128055 +            assert(litSize <= (size_t)(lend - lp));
128056 +            litSize = (size_t)(lend - lp);
128057 +        }
128058 +        /* I think there is an optimization opportunity here.
128059 +         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
128060 +         * since it recalculates the estimate from scratch.
128061 +         * For example, it would recount the literal distribution and symbol codes every time.
128062 +         */
128063 +        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
128064 +                                                       &nextCBlock->entropy, entropyMetadata,
128065 +                                                       workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
128066 +        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
128067 +            int litEntropyWritten = 0;
128068 +            int seqEntropyWritten = 0;
128069 +            const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
128070 +            const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
128071 +                                                       sp, seqCount,
128072 +                                                       lp, litSize,
128073 +                                                       llCodePtr, mlCodePtr, ofCodePtr,
128074 +                                                       cctxParams,
128075 +                                                       op, oend-op,
128076 +                                                       bmi2, writeLitEntropy, writeSeqEntropy,
128077 +                                                       &litEntropyWritten, &seqEntropyWritten,
128078 +                                                       lastBlock && lastSequence);
128079 +            FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
128080 +            if (cSize > 0 && cSize < decompressedSize) {
128081 +                DEBUGLOG(5, "Committed the sub-block");
128082 +                assert(ip + decompressedSize <= iend);
128083 +                ip += decompressedSize;
128084 +                sp += seqCount;
128085 +                lp += litSize;
128086 +                op += cSize;
128087 +                llCodePtr += seqCount;
128088 +                mlCodePtr += seqCount;
128089 +                ofCodePtr += seqCount;
128090 +                litSize = 0;
128091 +                seqCount = 0;
128092 +                /* Entropy only needs to be written once */
128093 +                if (litEntropyWritten) {
128094 +                    writeLitEntropy = 0;
128095 +                }
128096 +                if (seqEntropyWritten) {
128097 +                    writeSeqEntropy = 0;
128098 +                }
128099 +            }
128100 +        }
128101 +    } while (!lastSequence);
128102 +    if (writeLitEntropy) {
128103 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
128104 +        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
128105 +    }
128106 +    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
128107 +        /* If we haven't written our entropy tables, then we've violated our contract and
128108 +         * must emit an uncompressed block.
128109 +         */
128110 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
128111 +        return 0;
128112 +    }
128113 +    if (ip < iend) {
128114 +        size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
128115 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
128116 +        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
128117 +        assert(cSize != 0);
128118 +        op += cSize;
128119 +        /* We have to regenerate the repcodes because we've skipped some sequences */
128120 +        if (sp < send) {
128121 +            seqDef const* seq;
128122 +            repcodes_t rep;
128123 +            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
128124 +            for (seq = sstart; seq < sp; ++seq) {
128125 +                rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
128126 +            }
128127 +            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
128128 +        }
128129 +    }
128130 +    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
128131 +    return op-ostart;
128132 +}
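Stripped of the entropy bookkeeping, the loop above is a greedy splitter: it grows the current sub-block one sequence at a time and flushes whenever the running size estimate crosses targetCBlockSize. A compact sketch of just that control flow, with hypothetical estimate/flush callbacks standing in for ZSTD_estimateSubBlockSize() and ZSTD_compressSubBlock():

#include <stddef.h>

typedef size_t (*estimate_fn)(size_t nbSeq, void* ctx);            /* cost of pending seqs */
typedef void   (*flush_fn)(size_t first, size_t nbSeq, void* ctx); /* emit one sub-block  */

/* Greedy split of totalSeq sequences (totalSeq > 0 assumed) into
 * sub-blocks of roughly targetSize bytes each. */
static void split_into_subblocks(size_t totalSeq, size_t targetSize,
                                 estimate_fn estimate, flush_fn flush, void* ctx)
{
    size_t first = 0;
    size_t count = 0;
    size_t i;
    for (i = 0; i < totalSeq; i++) {
        int const last = (i == totalSeq - 1);
        count++;
        if (estimate(count, ctx) > targetSize || last) {
            flush(first, count, ctx);  /* one sub-block committed */
            first += count;
            count = 0;
        }
    }
}

The real loop differs in one way: a flush can be rejected (when the compressed sub-block would not be smaller than its source), in which case the sequences stay pending and any remainder is emitted as an uncompressed block at the end.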
128134 +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
128135 +                               void* dst, size_t dstCapacity,
128136 +                               void const* src, size_t srcSize,
128137 +                               unsigned lastBlock) {
128138 +    ZSTD_entropyCTablesMetadata_t entropyMetadata;
128140 +    FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore,
128141 +          &zc->blockState.prevCBlock->entropy,
128142 +          &zc->blockState.nextCBlock->entropy,
128143 +          &zc->appliedParams,
128144 +          &entropyMetadata,
128145 +          zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
128147 +    return ZSTD_compressSubBlock_multi(&zc->seqStore,
128148 +            zc->blockState.prevCBlock,
128149 +            zc->blockState.nextCBlock,
128150 +            &entropyMetadata,
128151 +            &zc->appliedParams,
128152 +            dst, dstCapacity,
128153 +            src, srcSize,
128154 +            zc->bmi2, lastBlock,
128155 +            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
128156 +}
128157 diff --git a/lib/zstd/compress/zstd_compress_superblock.h b/lib/zstd/compress/zstd_compress_superblock.h
128158 new file mode 100644
128159 index 000000000000..224ece79546e
128160 --- /dev/null
128161 +++ b/lib/zstd/compress/zstd_compress_superblock.h
128162 @@ -0,0 +1,32 @@
128163 +/*
128164 + * Copyright (c) Yann Collet, Facebook, Inc.
128165 + * All rights reserved.
128167 + * This source code is licensed under both the BSD-style license (found in the
128168 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
128169 + * in the COPYING file in the root directory of this source tree).
128170 + * You may select, at your option, one of the above-listed licenses.
128171 + */
128173 +#ifndef ZSTD_COMPRESS_ADVANCED_H
128174 +#define ZSTD_COMPRESS_ADVANCED_H
128176 +/*-*************************************
128177 +*  Dependencies
128178 +***************************************/
128180 +#include <linux/zstd.h> /* ZSTD_CCtx */
128182 +/*-*************************************
128183 +*  Target Compressed Block Size
128184 +***************************************/
128186 +/* ZSTD_compressSuperBlock() :
128187 + * Used to compress a super block when targetCBlockSize is being used.
128188 + * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */
128189 +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
128190 +                               void* dst, size_t dstCapacity,
128191 +                               void const* src, size_t srcSize,
128192 +                               unsigned lastBlock);
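For reference, in upstream userspace zstd this code path is driven by the advanced parameter ZSTD_c_targetCBlockSize (available since v1.4.1). A sketch against the userspace libzstd API, not this kernel header:

#include <zstd.h>

/* Userspace-zstd sketch: request ~1 KB compressed blocks so each block can
 * be decoded with bounded latency; the library then compresses each block
 * as a super block of sub-blocks near the target size. */
size_t compress_with_target_block_size(void* dst, size_t dstCap,
                                       const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 1024);
    cSize = ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return cSize;
}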
128194 +#endif /* ZSTD_COMPRESS_ADVANCED_H */
128195 diff --git a/lib/zstd/compress/zstd_cwksp.h b/lib/zstd/compress/zstd_cwksp.h
128196 new file mode 100644
128197 index 000000000000..c231cc500ef5
128198 --- /dev/null
128199 +++ b/lib/zstd/compress/zstd_cwksp.h
128200 @@ -0,0 +1,482 @@
128201 +/*
128202 + * Copyright (c) Yann Collet, Facebook, Inc.
128203 + * All rights reserved.
128205 + * This source code is licensed under both the BSD-style license (found in the
128206 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
128207 + * in the COPYING file in the root directory of this source tree).
128208 + * You may select, at your option, one of the above-listed licenses.
128209 + */
128211 +#ifndef ZSTD_CWKSP_H
128212 +#define ZSTD_CWKSP_H
128214 +/*-*************************************
128215 +*  Dependencies
128216 +***************************************/
128217 +#include "../common/zstd_internal.h"
128220 +/*-*************************************
128221 +*  Constants
128222 +***************************************/
128224 +/* Since the workspace is effectively its own little malloc implementation /
128225 + * arena, when we run under ASAN, we should similarly insert redzones between
128226 + * each internal element of the workspace, so ASAN will catch overruns that
128227 + * reach outside an object but that stay inside the workspace.
128229 + * This defines the size of that redzone.
128230 + */
128231 +#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
128232 +#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
128233 +#endif
128235 +/*-*************************************
128236 +*  Structures
128237 +***************************************/
128238 +typedef enum {
128239 +    ZSTD_cwksp_alloc_objects,
128240 +    ZSTD_cwksp_alloc_buffers,
128241 +    ZSTD_cwksp_alloc_aligned
128242 +} ZSTD_cwksp_alloc_phase_e;
128244 +/**
128245 + * Used to describe whether the workspace is statically allocated (and will not
128246 + * necessarily ever be freed), or if it's dynamically allocated and we can
128247 + * expect a well-formed caller to free this.
128248 + */
128249 +typedef enum {
128250 +    ZSTD_cwksp_dynamic_alloc,
128251 +    ZSTD_cwksp_static_alloc
128252 +} ZSTD_cwksp_static_alloc_e;
128254 +/**
128255 + * Zstd fits all its internal data structures into a single contiguous buffer,
128256 + * so that it only needs to perform a single OS allocation (or so that a buffer
128257 + * can be provided to it and it can perform no allocations at all). This buffer
128258 + * is called the workspace.
128260 + * Several optimizations complicate that process of allocating memory ranges
128261 + * from this workspace for each internal data structure:
128263 + * - These different internal data structures have different setup requirements:
128265 + *   - The static objects need to be cleared once and can then be trivially
128266 + *     reused for each compression.
128268 + *   - Various buffers don't need to be initialized at all--they are always
128269 + *     written into before they're read.
128271 + *   - The matchState tables have a unique requirement that they don't need
128272 + *     their memory to be totally cleared, but they do need the memory to have
128273 + *     some bound, i.e., a guarantee that all values in the memory they've been
128274 + *     allocated are less than some maximum value (which is the starting value
128275 + *     for the indices that they will then use for compression). When this
128276 + *     guarantee is provided to them, they can use the memory without any setup
128277 + *     work. When it can't, they have to clear the area.
128279 + * - These buffers also have different alignment requirements.
128281 + * - We would like to reuse the objects in the workspace for multiple
128282 + *   compressions without having to perform any expensive reallocation or
128283 + *   reinitialization work.
128285 + * - We would like to be able to efficiently reuse the workspace across
128286 + *   multiple compressions **even when the compression parameters change** and
128287 + *   we need to resize some of the objects (where possible).
128289 + * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
128290 + * abstraction was created. It works as follows:
128292 + * Workspace Layout:
128294 + * [                        ... workspace ...                         ]
128295 + * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
128297 + * The various objects that live in the workspace are divided into the
128298 + * following categories, and are allocated separately:
128300 + * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
128301 + *   so that literally everything fits in a single buffer. Note: if present,
128302 + *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
128303 + *   CDict}() rely on a pointer comparison to see whether one or two frees are
128304 + *   required.
128306 + * - Fixed size objects: these are fixed-size, fixed-count objects that are
128307 + *   nonetheless "dynamically" allocated in the workspace so that we can
128308 + *   control how they're initialized separately from the broader ZSTD_CCtx.
128309 + *   Examples:
128310 + *   - Entropy Workspace
128311 + *   - 2 x ZSTD_compressedBlockState_t
128312 + *   - CDict dictionary contents
128314 + * - Tables: these are any of several different data structures (hash tables,
128315 + *   chain tables, binary trees) that all respect a common format: they are
128316 + *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
128317 + *   Their sizes depend on the cparams.
128319 + * - Aligned: these buffers are used for various purposes that require 4 byte
128320 + *   alignment, but don't require any initialization before they're used.
128322 + * - Buffers: these buffers are used for various purposes that don't require
128323 + *   any alignment or initialization before they're used. This means they can
128324 + *   be moved around at no cost for a new compression.
128326 + * Allocating Memory:
128328 + * The various types of objects must be allocated in order, so they can be
128329 + * correctly packed into the workspace buffer. That order is:
128331 + * 1. Objects
128332 + * 2. Buffers
128333 + * 3. Aligned
128334 + * 4. Tables
128336 + * Attempts to reserve objects of different types out of order will fail.
128337 + */
128338 +typedef struct {
128339 +    void* workspace;
128340 +    void* workspaceEnd;
128342 +    void* objectEnd;
128343 +    void* tableEnd;
128344 +    void* tableValidEnd;
128345 +    void* allocStart;
128347 +    BYTE allocFailed;
128348 +    int workspaceOversizedDuration;
128349 +    ZSTD_cwksp_alloc_phase_e phase;
128350 +    ZSTD_cwksp_static_alloc_e isStatic;
128351 +} ZSTD_cwksp;
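A minimal sketch of the allocation-order contract described above, using the reserve functions defined later in this header (sizes here are arbitrary; a real caller derives them from the compression parameters):

/* Sketch only. Returns 0 on success, -1 if the buffer was too small. */
static int cwksp_usage_sketch(void* buffer, size_t bufferSize)
{
    ZSTD_cwksp ws;
    void* obj;
    BYTE* buf;
    void* aligned;
    void* table;

    ZSTD_cwksp_init(&ws, buffer, bufferSize, ZSTD_cwksp_static_alloc);
    obj     = ZSTD_cwksp_reserve_object(&ws, 64);   /* 1. objects first */
    buf     = ZSTD_cwksp_reserve_buffer(&ws, 1024); /* 2. then buffers  */
    aligned = ZSTD_cwksp_reserve_aligned(&ws, 256); /* 3. then aligned  */
    table   = ZSTD_cwksp_reserve_table(&ws, 4096);  /* 4. tables last   */
    (void)obj; (void)buf; (void)aligned; (void)table;
    return ZSTD_cwksp_reserve_failed(&ws) ? -1 : 0;
}

Reserving out of order fails rather than corrupting the layout; for example, ZSTD_cwksp_reserve_object() below refuses to allocate once the phase has advanced past ZSTD_cwksp_alloc_objects.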
128353 +/*-*************************************
128354 +*  Functions
128355 +***************************************/
128357 +MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
128359 +MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
128360 +    (void)ws;
128361 +    assert(ws->workspace <= ws->objectEnd);
128362 +    assert(ws->objectEnd <= ws->tableEnd);
128363 +    assert(ws->objectEnd <= ws->tableValidEnd);
128364 +    assert(ws->tableEnd <= ws->allocStart);
128365 +    assert(ws->tableValidEnd <= ws->allocStart);
128366 +    assert(ws->allocStart <= ws->workspaceEnd);
128367 +}
128369 +/**
128370 + * Align must be a power of 2.
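 * e.g. ZSTD_cwksp_align(10, 8) computes (10 + 7) & ~7 == 16, while an
 * already-aligned ZSTD_cwksp_align(16, 8) returns 16 unchanged.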
128371 + */
128372 +MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
128373 +    size_t const mask = align - 1;
128374 +    assert((align & mask) == 0);
128375 +    return (size + mask) & ~mask;
128376 +}
128378 +/**
128379 + * Use this to determine how much space in the workspace we will consume to
128380 + * allocate this object. (Normally it should be exactly the size of the object,
128381 + * but under special conditions, like ASAN, where we pad each object, it might
128382 + * be larger.)
128384 + * Since tables aren't currently redzoned, you don't need to call through this
128385 + * to figure out how much space you need for the matchState tables. Everything
128386 + * else is, though.
128387 + */
128388 +MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
128389 +    if (size == 0)
128390 +        return 0;
128391 +    return size;
128392 +}
128394 +MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
128395 +        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
128396 +    assert(phase >= ws->phase);
128397 +    if (phase > ws->phase) {
128398 +        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
128399 +                phase >= ZSTD_cwksp_alloc_buffers) {
128400 +            ws->tableValidEnd = ws->objectEnd;
128401 +        }
128402 +        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
128403 +                phase >= ZSTD_cwksp_alloc_aligned) {
128404 +            /* If unaligned allocations down from a too-large top have left us
128405 +             * unaligned, we need to realign our alloc ptr. Technically, this
128406 +             * can consume space that is unaccounted for in the neededSpace
128407 +             * calculation. However, I believe this can only happen when the
128408 +             * workspace is too large, and specifically when it is too large
128409 +             * by a larger margin than the space that will be consumed. */
128410 +            /* TODO: cleaner, compiler warning friendly way to do this??? */
128411 +            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
128412 +            if (ws->allocStart < ws->tableValidEnd) {
128413 +                ws->tableValidEnd = ws->allocStart;
128414 +            }
128415 +        }
128416 +        ws->phase = phase;
128417 +    }
128418 +}
128420 +/**
128421 + * Returns whether this object/buffer/etc was allocated in this workspace.
128422 + */
128423 +MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
128424 +    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
128425 +}
128427 +/**
128428 + * Internal function. Do not use directly.
128429 + */
128430 +MEM_STATIC void* ZSTD_cwksp_reserve_internal(
128431 +        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
128432 +    void* alloc;
128433 +    void* bottom = ws->tableEnd;
128434 +    ZSTD_cwksp_internal_advance_phase(ws, phase);
128435 +    alloc = (BYTE *)ws->allocStart - bytes;
128437 +    if (bytes == 0)
128438 +        return NULL;
128441 +    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
128442 +        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
128443 +    ZSTD_cwksp_assert_internal_consistency(ws);
128444 +    assert(alloc >= bottom);
128445 +    if (alloc < bottom) {
128446 +        DEBUGLOG(4, "cwksp: alloc failed!");
128447 +        ws->allocFailed = 1;
128448 +        return NULL;
128449 +    }
128450 +    if (alloc < ws->tableValidEnd) {
128451 +        ws->tableValidEnd = alloc;
128452 +    }
128453 +    ws->allocStart = alloc;
128456 +    return alloc;
128457 +}
128459 +/**
128460 + * Reserves and returns unaligned memory.
128461 + */
128462 +MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
128463 +    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
128464 +}
128466 +/**
128467 + * Reserves and returns memory sized and aligned on sizeof(unsigned).
128468 + */
128469 +MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
128470 +    assert((bytes & (sizeof(U32)-1)) == 0);
128471 +    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
128472 +}
128474 +/**
128475 + * Aligned on sizeof(unsigned). These buffers have the special property that
128476 + * their values remain constrained, allowing us to re-use them without
128477 + * memset()-ing them.
128478 + */
128479 +MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
128480 +    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
128481 +    void* alloc = ws->tableEnd;
128482 +    void* end = (BYTE *)alloc + bytes;
128483 +    void* top = ws->allocStart;
128485 +    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
128486 +        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
128487 +    assert((bytes & (sizeof(U32)-1)) == 0);
128488 +    ZSTD_cwksp_internal_advance_phase(ws, phase);
128489 +    ZSTD_cwksp_assert_internal_consistency(ws);
128490 +    assert(end <= top);
128491 +    if (end > top) {
128492 +        DEBUGLOG(4, "cwksp: table alloc failed!");
128493 +        ws->allocFailed = 1;
128494 +        return NULL;
128495 +    }
128496 +    ws->tableEnd = end;
128499 +    return alloc;
128500 +}
128502 +/**
128503 + * Aligned on sizeof(void*).
128504 + */
128505 +MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
128506 +    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
128507 +    void* alloc = ws->objectEnd;
128508 +    void* end = (BYTE*)alloc + roundedBytes;
128511 +    DEBUGLOG(5,
128512 +        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
128513 +        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
128514 +    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
128515 +    assert((bytes & (sizeof(void*)-1)) == 0);
128516 +    ZSTD_cwksp_assert_internal_consistency(ws);
128517 +    /* we must be in the first phase, no advance is possible */
128518 +    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
128519 +        DEBUGLOG(4, "cwksp: object alloc failed!");
128520 +        ws->allocFailed = 1;
128521 +        return NULL;
128522 +    }
128523 +    ws->objectEnd = end;
128524 +    ws->tableEnd = end;
128525 +    ws->tableValidEnd = end;
128528 +    return alloc;
128529 +}
128531 +MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
128532 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
128535 +    assert(ws->tableValidEnd >= ws->objectEnd);
128536 +    assert(ws->tableValidEnd <= ws->allocStart);
128537 +    ws->tableValidEnd = ws->objectEnd;
128538 +    ZSTD_cwksp_assert_internal_consistency(ws);
128539 +}
128541 +MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
128542 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
128543 +    assert(ws->tableValidEnd >= ws->objectEnd);
128544 +    assert(ws->tableValidEnd <= ws->allocStart);
128545 +    if (ws->tableValidEnd < ws->tableEnd) {
128546 +        ws->tableValidEnd = ws->tableEnd;
128547 +    }
128548 +    ZSTD_cwksp_assert_internal_consistency(ws);
128549 +}
128551 +/**
128552 + * Zero the part of the allocated tables not already marked clean.
128553 + */
128554 +MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
128555 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
128556 +    assert(ws->tableValidEnd >= ws->objectEnd);
128557 +    assert(ws->tableValidEnd <= ws->allocStart);
128558 +    if (ws->tableValidEnd < ws->tableEnd) {
128559 +        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
128560 +    }
128561 +    ZSTD_cwksp_mark_tables_clean(ws);
128562 +}
128564 +/**
128565 + * Invalidates table allocations.
128566 + * All other allocations remain valid.
128567 + */
128568 +MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
128569 +    DEBUGLOG(4, "cwksp: clearing tables!");
128572 +    ws->tableEnd = ws->objectEnd;
128573 +    ZSTD_cwksp_assert_internal_consistency(ws);
128574 +}
128576 +/**
128577 + * Invalidates all buffer, aligned, and table allocations.
128578 + * Object allocations remain valid.
128579 + */
128580 +MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
128581 +    DEBUGLOG(4, "cwksp: clearing!");
128585 +    ws->tableEnd = ws->objectEnd;
128586 +    ws->allocStart = ws->workspaceEnd;
128587 +    ws->allocFailed = 0;
128588 +    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
128589 +        ws->phase = ZSTD_cwksp_alloc_buffers;
128590 +    }
128591 +    ZSTD_cwksp_assert_internal_consistency(ws);
128592 +}
128594 +/**
128595 + * The provided workspace takes ownership of the buffer [start, start+size).
128596 + * Any existing values in the workspace are ignored (the previously managed
128597 + * buffer, if present, must be separately freed).
128598 + */
128599 +MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
128600 +    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
128601 +    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
128602 +    ws->workspace = start;
128603 +    ws->workspaceEnd = (BYTE*)start + size;
128604 +    ws->objectEnd = ws->workspace;
128605 +    ws->tableValidEnd = ws->objectEnd;
128606 +    ws->phase = ZSTD_cwksp_alloc_objects;
128607 +    ws->isStatic = isStatic;
128608 +    ZSTD_cwksp_clear(ws);
128609 +    ws->workspaceOversizedDuration = 0;
128610 +    ZSTD_cwksp_assert_internal_consistency(ws);
128611 +}
128613 +MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
128614 +    void* workspace = ZSTD_customMalloc(size, customMem);
128615 +    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
128616 +    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
128617 +    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
128618 +    return 0;
128619 +}
128621 +MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
128622 +    void *ptr = ws->workspace;
128623 +    DEBUGLOG(4, "cwksp: freeing workspace");
128624 +    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
128625 +    ZSTD_customFree(ptr, customMem);
128626 +}
128628 +/**
128629 + * Moves the management of a workspace from one cwksp to another. The src cwksp
128630 + * is left in an invalid state (src must be re-init()'ed before it's used again).
128631 + */
128632 +MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
128633 +    *dst = *src;
128634 +    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
128635 +}
128637 +MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
128638 +    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
128639 +}
128641 +MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
128642 +    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
128643 +         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
128644 +}
128646 +MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
128647 +    return ws->allocFailed;
128648 +}
128650 +/*-*************************************
128651 +*  Functions Checking Free Space
128652 +***************************************/
128654 +MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
128655 +    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
128656 +}
128658 +MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
128659 +    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
128660 +}
128662 +MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
128663 +    return ZSTD_cwksp_check_available(
128664 +        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
128665 +}
128667 +MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
128668 +    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
128669 +        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
128670 +}
128672 +MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
128673 +        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
128674 +    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
128675 +        ws->workspaceOversizedDuration++;
128676 +    } else {
128677 +        ws->workspaceOversizedDuration = 0;
128678 +    }
128679 +}
128682 +#endif /* ZSTD_CWKSP_H */
128683 diff --git a/lib/zstd/compress/zstd_double_fast.c b/lib/zstd/compress/zstd_double_fast.c
128684 new file mode 100644
128685 index 000000000000..b99172e9d2e4
128686 --- /dev/null
128687 +++ b/lib/zstd/compress/zstd_double_fast.c
128688 @@ -0,0 +1,521 @@
128689 +/*
128690 + * Copyright (c) Yann Collet, Facebook, Inc.
128691 + * All rights reserved.
128693 + * This source code is licensed under both the BSD-style license (found in the
128694 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
128695 + * in the COPYING file in the root directory of this source tree).
128696 + * You may select, at your option, one of the above-listed licenses.
128697 + */
128699 +#include "zstd_compress_internal.h"
128700 +#include "zstd_double_fast.h"
128703 +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
128704 +                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
128705 +{
128706 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
128707 +    U32* const hashLarge = ms->hashTable;
128708 +    U32  const hBitsL = cParams->hashLog;
128709 +    U32  const mls = cParams->minMatch;
128710 +    U32* const hashSmall = ms->chainTable;
128711 +    U32  const hBitsS = cParams->chainLog;
128712 +    const BYTE* const base = ms->window.base;
128713 +    const BYTE* ip = base + ms->nextToUpdate;
128714 +    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
128715 +    const U32 fastHashFillStep = 3;
128717 +    /* Always insert every fastHashFillStep position into the hash tables.
128718 +     * Insert the other positions into the large hash table if their entry
128719 +     * is empty.
128720 +     */
128721 +    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
128722 +        U32 const curr = (U32)(ip - base);
128723 +        U32 i;
128724 +        for (i = 0; i < fastHashFillStep; ++i) {
128725 +            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
128726 +            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
128727 +            if (i == 0)
128728 +                hashSmall[smHash] = curr + i;
128729 +            if (i == 0 || hashLarge[lgHash] == 0)
128730 +                hashLarge[lgHash] = curr + i;
128731 +            /* Only load extra positions for ZSTD_dtlm_full */
128732 +            if (dtlm == ZSTD_dtlm_fast)
128733 +                break;
128734 +    }   }
128735 +}
128738 +FORCE_INLINE_TEMPLATE
128739 +size_t ZSTD_compressBlock_doubleFast_generic(
128740 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128741 +        void const* src, size_t srcSize,
128742 +        U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
128743 +{
128744 +    ZSTD_compressionParameters const* cParams = &ms->cParams;
128745 +    U32* const hashLong = ms->hashTable;
128746 +    const U32 hBitsL = cParams->hashLog;
128747 +    U32* const hashSmall = ms->chainTable;
128748 +    const U32 hBitsS = cParams->chainLog;
128749 +    const BYTE* const base = ms->window.base;
128750 +    const BYTE* const istart = (const BYTE*)src;
128751 +    const BYTE* ip = istart;
128752 +    const BYTE* anchor = istart;
128753 +    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
128754 +    /* presumes that, if there is a dictionary, it must be using Attach mode */
128755 +    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
128756 +    const BYTE* const prefixLowest = base + prefixLowestIndex;
128757 +    const BYTE* const iend = istart + srcSize;
128758 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
128759 +    U32 offset_1=rep[0], offset_2=rep[1];
128760 +    U32 offsetSaved = 0;
128762 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
128763 +    const ZSTD_compressionParameters* const dictCParams =
128764 +                                     dictMode == ZSTD_dictMatchState ?
128765 +                                     &dms->cParams : NULL;
128766 +    const U32* const dictHashLong  = dictMode == ZSTD_dictMatchState ?
128767 +                                     dms->hashTable : NULL;
128768 +    const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
128769 +                                     dms->chainTable : NULL;
128770 +    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?
128771 +                                     dms->window.dictLimit : 0;
128772 +    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
128773 +                                     dms->window.base : NULL;
128774 +    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?
128775 +                                     dictBase + dictStartIndex : NULL;
128776 +    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
128777 +                                     dms->window.nextSrc : NULL;
128778 +    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
128779 +                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
128780 +                                     0;
128781 +    const U32 dictHBitsL           = dictMode == ZSTD_dictMatchState ?
128782 +                                     dictCParams->hashLog : hBitsL;
128783 +    const U32 dictHBitsS           = dictMode == ZSTD_dictMatchState ?
128784 +                                     dictCParams->chainLog : hBitsS;
128785 +    const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
128787 +    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
128789 +    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
128791 +    /* if a dictionary is attached, it must be within window range */
128792 +    if (dictMode == ZSTD_dictMatchState) {
128793 +        assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
128794 +    }
128796 +    /* init */
128797 +    ip += (dictAndPrefixLength == 0);
128798 +    if (dictMode == ZSTD_noDict) {
128799 +        U32 const curr = (U32)(ip - base);
128800 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
128801 +        U32 const maxRep = curr - windowLow;
128802 +        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
128803 +        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
128804 +    }
128805 +    if (dictMode == ZSTD_dictMatchState) {
128806 +        /* dictMatchState repCode checks don't currently handle repCode == 0
128807 +         * disabling. */
128808 +        assert(offset_1 <= dictAndPrefixLength);
128809 +        assert(offset_2 <= dictAndPrefixLength);
128810 +    }
128812 +    /* Main Search Loop */
128813 +    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
128814 +        size_t mLength;
128815 +        U32 offset;
128816 +        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
128817 +        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
128818 +        size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
128819 +        size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
128820 +        U32 const curr = (U32)(ip-base);
128821 +        U32 const matchIndexL = hashLong[h2];
128822 +        U32 matchIndexS = hashSmall[h];
128823 +        const BYTE* matchLong = base + matchIndexL;
128824 +        const BYTE* match = base + matchIndexS;
128825 +        const U32 repIndex = curr + 1 - offset_1;
128826 +        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
128827 +                            && repIndex < prefixLowestIndex) ?
128828 +                               dictBase + (repIndex - dictIndexDelta) :
128829 +                               base + repIndex;
128830 +        hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */
128832 +        /* check dictMatchState repcode */
128833 +        if (dictMode == ZSTD_dictMatchState
128834 +            && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
128835 +            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
128836 +            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
128837 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
128838 +            ip++;
128839 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
128840 +            goto _match_stored;
128841 +        }
128843 +        /* check noDict repcode */
128844 +        if ( dictMode == ZSTD_noDict
128845 +          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
128846 +            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
128847 +            ip++;
128848 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
128849 +            goto _match_stored;
128850 +        }
128852 +        if (matchIndexL > prefixLowestIndex) {
128853 +            /* check prefix long match */
128854 +            if (MEM_read64(matchLong) == MEM_read64(ip)) {
128855 +                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
128856 +                offset = (U32)(ip-matchLong);
128857 +                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
128858 +                goto _match_found;
128859 +            }
128860 +        } else if (dictMode == ZSTD_dictMatchState) {
128861 +            /* check dictMatchState long match */
128862 +            U32 const dictMatchIndexL = dictHashLong[dictHL];
128863 +            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
128864 +            assert(dictMatchL < dictEnd);
128866 +            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
128867 +                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
128868 +                offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
128869 +                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
128870 +                goto _match_found;
128871 +        }   }
128873 +        if (matchIndexS > prefixLowestIndex) {
128874 +            /* check prefix short match */
128875 +            if (MEM_read32(match) == MEM_read32(ip)) {
128876 +                goto _search_next_long;
128877 +            }
128878 +        } else if (dictMode == ZSTD_dictMatchState) {
128879 +            /* check dictMatchState short match */
128880 +            U32 const dictMatchIndexS = dictHashSmall[dictHS];
128881 +            match = dictBase + dictMatchIndexS;
128882 +            matchIndexS = dictMatchIndexS + dictIndexDelta;
128884 +            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
128885 +                goto _search_next_long;
128886 +        }   }
128888 +        ip += ((ip-anchor) >> kSearchStrength) + 1;
128889 +#if defined(__aarch64__)
128890 +        PREFETCH_L1(ip+256);
128891 +#endif
128892 +        continue;
128894 +_search_next_long:
128896 +        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
128897 +            size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
128898 +            U32 const matchIndexL3 = hashLong[hl3];
128899 +            const BYTE* matchL3 = base + matchIndexL3;
128900 +            hashLong[hl3] = curr + 1;
128902 +            /* check prefix long +1 match */
128903 +            if (matchIndexL3 > prefixLowestIndex) {
128904 +                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
128905 +                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
128906 +                    ip++;
128907 +                    offset = (U32)(ip-matchL3);
128908 +                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
128909 +                    goto _match_found;
128910 +                }
128911 +            } else if (dictMode == ZSTD_dictMatchState) {
128912 +                /* check dict long +1 match */
128913 +                U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
128914 +                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
128915 +                assert(dictMatchL3 < dictEnd);
128916 +                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
128917 +                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
128918 +                    ip++;
128919 +                    offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
128920 +                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
128921 +                    goto _match_found;
128922 +        }   }   }
128924 +        /* if no long +1 match, explore the short match we found */
128925 +        if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
128926 +            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
128927 +            offset = (U32)(curr - matchIndexS);
128928 +            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
128929 +        } else {
128930 +            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
128931 +            offset = (U32)(ip - match);
128932 +            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
128933 +        }
128935 +        /* fall-through */
128937 +_match_found:
128938 +        offset_2 = offset_1;
128939 +        offset_1 = offset;
128941 +        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
128943 +_match_stored:
128944 +        /* match found */
128945 +        ip += mLength;
128946 +        anchor = ip;
128948 +        if (ip <= ilimit) {
128949 +            /* Complementary insertion */
128950 +            /* done after iLimit test, as candidates could be > iend-8 */
128951 +            {   U32 const indexToInsert = curr+2;
128952 +                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
128953 +                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
128954 +                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
128955 +                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
128956 +            }
128958 +            /* check immediate repcode */
128959 +            if (dictMode == ZSTD_dictMatchState) {
128960 +                while (ip <= ilimit) {
128961 +                    U32 const current2 = (U32)(ip-base);
128962 +                    U32 const repIndex2 = current2 - offset_2;
128963 +                    const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
128964 +                        && repIndex2 < prefixLowestIndex ?
128965 +                            dictBase + repIndex2 - dictIndexDelta :
128966 +                            base + repIndex2;
128967 +                    if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional underflow */)
128968 +                       && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
128969 +                        const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
128970 +                        size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
128971 +                        U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
128972 +                        ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
128973 +                        hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
128974 +                        hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
128975 +                        ip += repLength2;
128976 +                        anchor = ip;
128977 +                        continue;
128978 +                    }
128979 +                    break;
128980 +            }   }
128982 +            if (dictMode == ZSTD_noDict) {
128983 +                while ( (ip <= ilimit)
128984 +                     && ( (offset_2>0)
128985 +                        & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
128986 +                    /* store sequence */
128987 +                    size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
128988 +                    U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
128989 +                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
128990 +                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
128991 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
128992 +                    ip += rLength;
128993 +                    anchor = ip;
128994 +                    continue;   /* faster when present ... (?) */
128995 +        }   }   }
128996 +    }   /* while (ip < ilimit) */
128998 +    /* save reps for next block */
128999 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
129000 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
129002 +    /* Return the last literals size */
129003 +    return (size_t)(iend - anchor);
129004 +}
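The "catch up" loops that recur above extend a just-found match backwards: while the byte before ip equals the byte before the match, and neither pointer crosses its lower bound (anchor, prefixLowest, or the segment start), the match grows leftwards and the pending literals shrink. A freestanding sketch of that backward extension, with an invented buffer:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint8_t buf[] = "xxABCDyyyyxxABCD";
    const uint8_t *anchor = buf;        /* start of unemitted literals */
    const uint8_t *match  = buf + 2;    /* earlier "ABCD" */
    const uint8_t *ip     = buf + 12;   /* later "ABCD", found by a 4-byte probe */
    size_t mLength = 4;
    /* catch up: grow the match backwards while the preceding bytes agree */
    while ((ip > anchor) && (match > buf) && (ip[-1] == match[-1])) {
        ip--; match--; mLength++;
    }
    printf("match starts at offset %td, length %zu\n", ip - buf, mLength);
    return 0;
}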
129007 +size_t ZSTD_compressBlock_doubleFast(
129008 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129009 +        void const* src, size_t srcSize)
129010 +{
129011 +    const U32 mls = ms->cParams.minMatch;
129012 +    switch(mls)
129013 +    {
129014 +    default: /* includes case 3 */
129015 +    case 4 :
129016 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
129017 +    case 5 :
129018 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
129019 +    case 6 :
129020 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
129021 +    case 7 :
129022 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
129023 +    }
129024 +}
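The switch just above is zstd's template-by-hand pattern: the _generic body is marked FORCE_INLINE_TEMPLATE, each case passes mls (and the dict mode) as a compile-time constant, and the compiler emits one specialized loop per case while the single runtime branch lives in the dispatcher. A minimal sketch of the same pattern, using a hypothetical sum_stride in place of the compressor:

#include <stdio.h>
#include <stddef.h>

static inline __attribute__((always_inline))
long sum_stride_generic(const int *v, size_t n, unsigned const stride)
{
    long s = 0;
    for (size_t i = 0; i < n; i += stride) s += v[i];  /* specialized per constant stride */
    return s;
}

long sum_stride(const int *v, size_t n, unsigned stride)
{
    switch (stride) {
    default: /* mirrors "default: includes case 3" above */
    case 1: return sum_stride_generic(v, n, 1);
    case 2: return sum_stride_generic(v, n, 2);
    case 4: return sum_stride_generic(v, n, 4);
    }
}

int main(void)
{
    int v[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    printf("%ld\n", sum_stride(v, 8, 2));   /* 1+3+5+7 = 16 */
    return 0;
}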
129027 +size_t ZSTD_compressBlock_doubleFast_dictMatchState(
129028 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129029 +        void const* src, size_t srcSize)
129030 +{
129031 +    const U32 mls = ms->cParams.minMatch;
129032 +    switch(mls)
129033 +    {
129034 +    default: /* includes case 3 */
129035 +    case 4 :
129036 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
129037 +    case 5 :
129038 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
129039 +    case 6 :
129040 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
129041 +    case 7 :
129042 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
129043 +    }
129044 +}
129047 +static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
129048 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129049 +        void const* src, size_t srcSize,
129050 +        U32 const mls /* template */)
129051 +{
129052 +    ZSTD_compressionParameters const* cParams = &ms->cParams;
129053 +    U32* const hashLong = ms->hashTable;
129054 +    U32  const hBitsL = cParams->hashLog;
129055 +    U32* const hashSmall = ms->chainTable;
129056 +    U32  const hBitsS = cParams->chainLog;
129057 +    const BYTE* const istart = (const BYTE*)src;
129058 +    const BYTE* ip = istart;
129059 +    const BYTE* anchor = istart;
129060 +    const BYTE* const iend = istart + srcSize;
129061 +    const BYTE* const ilimit = iend - 8;
129062 +    const BYTE* const base = ms->window.base;
129063 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
129064 +    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
129065 +    const U32   dictStartIndex = lowLimit;
129066 +    const U32   dictLimit = ms->window.dictLimit;
129067 +    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
129068 +    const BYTE* const prefixStart = base + prefixStartIndex;
129069 +    const BYTE* const dictBase = ms->window.dictBase;
129070 +    const BYTE* const dictStart = dictBase + dictStartIndex;
129071 +    const BYTE* const dictEnd = dictBase + prefixStartIndex;
129072 +    U32 offset_1=rep[0], offset_2=rep[1];
129074 +    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
129076 +    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
129077 +    if (prefixStartIndex == dictStartIndex)
129078 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);
129080 +    /* Search Loop */
129081 +    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
129082 +        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
129083 +        const U32 matchIndex = hashSmall[hSmall];
129084 +        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
129085 +        const BYTE* match = matchBase + matchIndex;
129087 +        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
129088 +        const U32 matchLongIndex = hashLong[hLong];
129089 +        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
129090 +        const BYTE* matchLong = matchLongBase + matchLongIndex;
129092 +        const U32 curr = (U32)(ip-base);
129093 +        const U32 repIndex = curr + 1 - offset_1;   /* offset_1 expected <= curr +1 */
129094 +        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
129095 +        const BYTE* const repMatch = repBase + repIndex;
129096 +        size_t mLength;
129097 +        hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */
129099 +        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
129100 +            & (repIndex > dictStartIndex))
129101 +          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
129102 +            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
129103 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
129104 +            ip++;
129105 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
129106 +        } else {
129107 +            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
129108 +                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
129109 +                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
129110 +                U32 offset;
129111 +                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
129112 +                offset = curr - matchLongIndex;
129113 +                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
129114 +                offset_2 = offset_1;
129115 +                offset_1 = offset;
129116 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
129118 +            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
129119 +                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
129120 +                U32 const matchIndex3 = hashLong[h3];
129121 +                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
129122 +                const BYTE* match3 = match3Base + matchIndex3;
129123 +                U32 offset;
129124 +                hashLong[h3] = curr + 1;
129125 +                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
129126 +                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
129127 +                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
129128 +                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
129129 +                    ip++;
129130 +                    offset = curr+1 - matchIndex3;
129131 +                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
129132 +                } else {
129133 +                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
129134 +                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
129135 +                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
129136 +                    offset = curr - matchIndex;
129137 +                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
129138 +                }
129139 +                offset_2 = offset_1;
129140 +                offset_1 = offset;
129141 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
129143 +            } else {
129144 +                ip += ((ip-anchor) >> kSearchStrength) + 1;
129145 +                continue;
129146 +        }   }
129148 +        /* move to next sequence start */
129149 +        ip += mLength;
129150 +        anchor = ip;
129152 +        if (ip <= ilimit) {
129153 +            /* Complementary insertion */
129154 +            /* done after iLimit test, as candidates could be > iend-8 */
129155 +            {   U32 const indexToInsert = curr+2;
129156 +                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
129157 +                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
129158 +                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
129159 +                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
129160 +            }
129162 +            /* check immediate repcode */
129163 +            while (ip <= ilimit) {
129164 +                U32 const current2 = (U32)(ip-base);
129165 +                U32 const repIndex2 = current2 - offset_2;
129166 +                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
129167 +                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
129168 +                    & (repIndex2 > dictStartIndex))
129169 +                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
129170 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
129171 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
129172 +                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
129173 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
129174 +                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
129175 +                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
129176 +                    ip += repLength2;
129177 +                    anchor = ip;
129178 +                    continue;
129179 +                }
129180 +                break;
129181 +    }   }   }
129183 +    /* save reps for next block */
129184 +    rep[0] = offset_1;
129185 +    rep[1] = offset_2;
129187 +    /* Return the last literals size */
129188 +    return (size_t)(iend - anchor);
129189 +}
129192 +size_t ZSTD_compressBlock_doubleFast_extDict(
129193 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129194 +        void const* src, size_t srcSize)
129195 +{
129196 +    U32 const mls = ms->cParams.minMatch;
129197 +    switch(mls)
129198 +    {
129199 +    default: /* includes case 3 */
129200 +    case 4 :
129201 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
129202 +    case 5 :
129203 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
129204 +    case 6 :
129205 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
129206 +    case 7 :
129207 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
129208 +    }
129209 +}
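zstd's double-fast keeps two tables over the same window: hashLong, keyed on 8 bytes, and hashSmall, keyed on mls bytes; the search above prefers a long-table hit because it promises a longer minimum match before any byte comparison. A toy illustration of the two-table idea; hash functions and sizes here are invented:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define TBL_BITS 12
static uint32_t hashLong[1u << TBL_BITS];   /* candidates probed with an 8-byte hash */
static uint32_t hashSmall[1u << TBL_BITS];  /* candidates probed with a 4-byte hash */

/* toy multiplicative hashes; the real ZSTD_hashPtr differs */
static uint32_t h8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return (uint32_t)((v * 0x9E3779B185EBCA87ULL) >> (64 - TBL_BITS)); }
static uint32_t h4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return (v * 2654435761u) >> (32 - TBL_BITS); }

int main(void)
{
    const uint8_t *buf = (const uint8_t *)"abcdefgh....abcdefgh";
    for (size_t pos = 0; pos + 8 <= 12; pos++) {   /* index the first occurrence */
        hashLong[h8(buf + pos)] = (uint32_t)pos;
        hashSmall[h4(buf + pos)] = (uint32_t)pos;
    }
    /* at the repeat, probe the long table first: a verified hit there
     * already guarantees 8 matching bytes instead of 4 */
    size_t pos = 12;
    uint32_t cand = hashLong[h8(buf + pos)];
    if (memcmp(buf + cand, buf + pos, 8) == 0)
        printf("long match: position %zu matches position %u\n", pos, cand);
    return 0;
}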
129210 diff --git a/lib/zstd/compress/zstd_double_fast.h b/lib/zstd/compress/zstd_double_fast.h
129211 new file mode 100644
129212 index 000000000000..6822bde65a1d
129213 --- /dev/null
129214 +++ b/lib/zstd/compress/zstd_double_fast.h
129215 @@ -0,0 +1,32 @@
129216 +/*
129217 + * Copyright (c) Yann Collet, Facebook, Inc.
129218 + * All rights reserved.
129220 + * This source code is licensed under both the BSD-style license (found in the
129221 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
129222 + * in the COPYING file in the root directory of this source tree).
129223 + * You may select, at your option, one of the above-listed licenses.
129224 + */
129226 +#ifndef ZSTD_DOUBLE_FAST_H
129227 +#define ZSTD_DOUBLE_FAST_H
129230 +#include "../common/mem.h"      /* U32 */
129231 +#include "zstd_compress_internal.h"     /* ZSTD_CCtx, size_t */
129233 +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
129234 +                              void const* end, ZSTD_dictTableLoadMethod_e dtlm);
129235 +size_t ZSTD_compressBlock_doubleFast(
129236 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129237 +        void const* src, size_t srcSize);
129238 +size_t ZSTD_compressBlock_doubleFast_dictMatchState(
129239 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129240 +        void const* src, size_t srcSize);
129241 +size_t ZSTD_compressBlock_doubleFast_extDict(
129242 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129243 +        void const* src, size_t srcSize);
129247 +#endif /* ZSTD_DOUBLE_FAST_H */
129248 diff --git a/lib/zstd/compress/zstd_fast.c b/lib/zstd/compress/zstd_fast.c
129249 new file mode 100644
129250 index 000000000000..96b7d48e2868
129251 --- /dev/null
129252 +++ b/lib/zstd/compress/zstd_fast.c
129253 @@ -0,0 +1,496 @@
129254 +/*
129255 + * Copyright (c) Yann Collet, Facebook, Inc.
129256 + * All rights reserved.
129258 + * This source code is licensed under both the BSD-style license (found in the
129259 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
129260 + * in the COPYING file in the root directory of this source tree).
129261 + * You may select, at your option, one of the above-listed licenses.
129262 + */
129264 +#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
129265 +#include "zstd_fast.h"
129268 +void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
129269 +                        const void* const end,
129270 +                        ZSTD_dictTableLoadMethod_e dtlm)
129271 +{
129272 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
129273 +    U32* const hashTable = ms->hashTable;
129274 +    U32  const hBits = cParams->hashLog;
129275 +    U32  const mls = cParams->minMatch;
129276 +    const BYTE* const base = ms->window.base;
129277 +    const BYTE* ip = base + ms->nextToUpdate;
129278 +    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
129279 +    const U32 fastHashFillStep = 3;
129281 +    /* Always insert every fastHashFillStep position into the hash table.
129282 +     * Insert the other positions if their hash entry is empty.
129283 +     */
129284 +    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
129285 +        U32 const curr = (U32)(ip - base);
129286 +        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
129287 +        hashTable[hash0] = curr;
129288 +        if (dtlm == ZSTD_dtlm_fast) continue;
129289 +        /* Only load extra positions for ZSTD_dtlm_full */
129290 +        {   U32 p;
129291 +            for (p = 1; p < fastHashFillStep; ++p) {
129292 +                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
129293 +                if (hashTable[hash] == 0) {  /* not yet filled */
129294 +                    hashTable[hash] = curr + p;
129295 +    }   }   }   }
129296 +}
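ZSTD_fillHashTable above loads content in strides: every fastHashFillStep-th position is always recorded, and with ZSTD_dtlm_full the in-between positions are written only when their slot is still empty, so indexing stays cheap without leaving large gaps. A stripped-down sketch of that fill policy; step, table size, and hash are invented:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BITS 10
#define STEP 3          /* stand-in for fastHashFillStep */
static uint32_t table[1u << BITS];

static uint32_t toyHash(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return (v * 2654435761u) >> (32 - BITS); }

static void fill(const uint8_t *base, size_t n, int full)
{
    for (size_t i = 0; i + STEP + 4 <= n; i += STEP) {
        table[toyHash(base + i)] = (uint32_t)i;   /* always record every STEP-th position */
        if (!full) continue;                      /* the "fast" load policy stops here */
        for (unsigned p = 1; p < STEP; p++) {     /* "full" load: fill only empty slots */
            uint32_t const h = toyHash(base + i + p);
            if (table[h] == 0) table[h] = (uint32_t)(i + p);
        }
    }
}

int main(void)
{
    const uint8_t buf[] = "the quick brown fox jumps over the lazy dog";
    fill(buf, sizeof(buf) - 1, 1);
    printf("'quic' was indexed at position %u\n", table[toyHash(buf + 4)]);
    return 0;
}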
129299 +FORCE_INLINE_TEMPLATE size_t
129300 +ZSTD_compressBlock_fast_generic(
129301 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129302 +        void const* src, size_t srcSize,
129303 +        U32 const mls)
129304 +{
129305 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
129306 +    U32* const hashTable = ms->hashTable;
129307 +    U32 const hlog = cParams->hashLog;
129308 +    /* support stepSize of 0 */
129309 +    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
129310 +    const BYTE* const base = ms->window.base;
129311 +    const BYTE* const istart = (const BYTE*)src;
129312 +    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
129313 +    const BYTE* ip0 = istart;
129314 +    const BYTE* ip1;
129315 +    const BYTE* anchor = istart;
129316 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
129317 +    const U32   prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
129318 +    const BYTE* const prefixStart = base + prefixStartIndex;
129319 +    const BYTE* const iend = istart + srcSize;
129320 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
129321 +    U32 offset_1=rep[0], offset_2=rep[1];
129322 +    U32 offsetSaved = 0;
129324 +    /* init */
129325 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
129326 +    ip0 += (ip0 == prefixStart);
129327 +    ip1 = ip0 + 1;
129328 +    {   U32 const curr = (U32)(ip0 - base);
129329 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
129330 +        U32 const maxRep = curr - windowLow;
129331 +        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
129332 +        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
129333 +    }
129335 +    /* Main Search Loop */
129336 +#ifdef __INTEL_COMPILER
129337 +    /* From Intel: 'The vector pragma indicates that the loop should be
129338 +     * vectorized if it is legal to do so.' It can be used together with
129339 +     * #pragma ivdep, but we have opted to exclude that because Intel
129340 +     * warns against using it. */
129341 +    #pragma vector always
129342 +#endif
129343 +    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
129344 +        size_t mLength;
129345 +        BYTE const* ip2 = ip0 + 2;
129346 +        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
129347 +        U32 const val0 = MEM_read32(ip0);
129348 +        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
129349 +        U32 const val1 = MEM_read32(ip1);
129350 +        U32 const current0 = (U32)(ip0-base);
129351 +        U32 const current1 = (U32)(ip1-base);
129352 +        U32 const matchIndex0 = hashTable[h0];
129353 +        U32 const matchIndex1 = hashTable[h1];
129354 +        BYTE const* repMatch = ip2 - offset_1;
129355 +        const BYTE* match0 = base + matchIndex0;
129356 +        const BYTE* match1 = base + matchIndex1;
129357 +        U32 offcode;
129359 +#if defined(__aarch64__)
129360 +        PREFETCH_L1(ip0+256);
129361 +#endif
129363 +        hashTable[h0] = current0;   /* update hash table */
129364 +        hashTable[h1] = current1;   /* update hash table */
129366 +        assert(ip0 + 1 == ip1);
129368 +        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
129369 +            mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
129370 +            ip0 = ip2 - mLength;
129371 +            match0 = repMatch - mLength;
129372 +            mLength += 4;
129373 +            offcode = 0;
129374 +            goto _match;
129375 +        }
129376 +        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
129377 +            /* found a regular match */
129378 +            goto _offset;
129379 +        }
129380 +        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
129381 +            /* found a regular match after one literal */
129382 +            ip0 = ip1;
129383 +            match0 = match1;
129384 +            goto _offset;
129385 +        }
129386 +        {   size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
129387 +            assert(step >= 2);
129388 +            ip0 += step;
129389 +            ip1 += step;
129390 +            continue;
129391 +        }
129392 +_offset: /* Requires: ip0, match0 */
129393 +        /* Compute the offset code */
129394 +        offset_2 = offset_1;
129395 +        offset_1 = (U32)(ip0-match0);
129396 +        offcode = offset_1 + ZSTD_REP_MOVE;
129397 +        mLength = 4;
129398 +        /* Count the backwards match length */
129399 +        while (((ip0>anchor) & (match0>prefixStart))
129400 +             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
129402 +_match: /* Requires: ip0, match0, offcode */
129403 +        /* Count the forward length */
129404 +        mLength += ZSTD_count(ip0+mLength, match0+mLength, iend);
129405 +        ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
129406 +        /* match found */
129407 +        ip0 += mLength;
129408 +        anchor = ip0;
129410 +        if (ip0 <= ilimit) {
129411 +            /* Fill Table */
129412 +            assert(base+current0+2 > istart);  /* check base overflow */
129413 +            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
129414 +            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
129416 +            if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
129417 +                while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
129418 +                    /* store sequence */
129419 +                    size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
129420 +                    { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
129421 +                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
129422 +                    ip0 += rLength;
129423 +                    ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
129424 +                    anchor = ip0;
129425 +                    continue;   /* faster when present (confirmed on gcc-8) ... (?) */
129426 +        }   }   }
129427 +        ip1 = ip0 + 1;
129428 +    }
129430 +    /* save reps for next block */
129431 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
129432 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
129434 +    /* Return the last literals size */
129435 +    return (size_t)(iend - anchor);
129436 +}
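A recurring motif in these loops is the repcode pair: offset_1 holds the most recent match offset and offset_2 the one before it; a fresh match pushes the pair down, a hit on offset_2 swaps the two, and rep[] carries the pair across blocks. A minimal sketch of just that bookkeeping; the starting values 1 and 4 follow zstd's conventional rep defaults:

#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t off1, off2; } RepHistory;

/* a regular match pushes the history down */
static void pushNewOffset(RepHistory *r, uint32_t off) { r->off2 = r->off1; r->off1 = off; }

/* an immediate repcode hit on offset_2 swaps the two entries */
static void useSecondRep(RepHistory *r) { uint32_t const t = r->off2; r->off2 = r->off1; r->off1 = t; }

int main(void)
{
    RepHistory r = { 1, 4 };
    pushNewOffset(&r, 42);
    printf("after new match:   off1=%u off2=%u\n", r.off1, r.off2);  /* 42 1 */
    useSecondRep(&r);
    printf("after repcode hit: off1=%u off2=%u\n", r.off1, r.off2);  /* 1 42 */
    return 0;
}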
129439 +size_t ZSTD_compressBlock_fast(
129440 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129441 +        void const* src, size_t srcSize)
129442 +{
129443 +    U32 const mls = ms->cParams.minMatch;
129444 +    assert(ms->dictMatchState == NULL);
129445 +    switch(mls)
129446 +    {
129447 +    default: /* includes case 3 */
129448 +    case 4 :
129449 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
129450 +    case 5 :
129451 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
129452 +    case 6 :
129453 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
129454 +    case 7 :
129455 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
129456 +    }
129457 +}
129459 +FORCE_INLINE_TEMPLATE
129460 +size_t ZSTD_compressBlock_fast_dictMatchState_generic(
129461 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129462 +        void const* src, size_t srcSize, U32 const mls)
129463 +{
129464 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
129465 +    U32* const hashTable = ms->hashTable;
129466 +    U32 const hlog = cParams->hashLog;
129467 +    /* support stepSize of 0 */
129468 +    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
129469 +    const BYTE* const base = ms->window.base;
129470 +    const BYTE* const istart = (const BYTE*)src;
129471 +    const BYTE* ip = istart;
129472 +    const BYTE* anchor = istart;
129473 +    const U32   prefixStartIndex = ms->window.dictLimit;
129474 +    const BYTE* const prefixStart = base + prefixStartIndex;
129475 +    const BYTE* const iend = istart + srcSize;
129476 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
129477 +    U32 offset_1=rep[0], offset_2=rep[1];
129478 +    U32 offsetSaved = 0;
129480 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
129481 +    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
129482 +    const U32* const dictHashTable = dms->hashTable;
129483 +    const U32 dictStartIndex       = dms->window.dictLimit;
129484 +    const BYTE* const dictBase     = dms->window.base;
129485 +    const BYTE* const dictStart    = dictBase + dictStartIndex;
129486 +    const BYTE* const dictEnd      = dms->window.nextSrc;
129487 +    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
129488 +    const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);
129489 +    const U32 dictHLog             = dictCParams->hashLog;
129491 +    /* if a dictionary is still attached, it necessarily means that
129492 +     * it is within window size. So we just check it. */
129493 +    const U32 maxDistance = 1U << cParams->windowLog;
129494 +    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
129495 +    assert(endIndex - prefixStartIndex <= maxDistance);
129496 +    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */
129498 +    /* ensure there will be no underflow
129499 +     * when translating a dict index into a local index */
129500 +    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
129502 +    /* init */
129503 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
129504 +    ip += (dictAndPrefixLength == 0);
129505 +    /* dictMatchState repCode checks don't currently handle repCode == 0
129506 +     * disabling. */
129507 +    assert(offset_1 <= dictAndPrefixLength);
129508 +    assert(offset_2 <= dictAndPrefixLength);
129510 +    /* Main Search Loop */
129511 +    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
129512 +        size_t mLength;
129513 +        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
129514 +        U32 const curr = (U32)(ip-base);
129515 +        U32 const matchIndex = hashTable[h];
129516 +        const BYTE* match = base + matchIndex;
129517 +        const U32 repIndex = curr + 1 - offset_1;
129518 +        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
129519 +                               dictBase + (repIndex - dictIndexDelta) :
129520 +                               base + repIndex;
129521 +        hashTable[h] = curr;   /* update hash table */
129523 +        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
129524 +          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
129525 +            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
129526 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
129527 +            ip++;
129528 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
129529 +        } else if ( (matchIndex <= prefixStartIndex) ) {
129530 +            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
129531 +            U32 const dictMatchIndex = dictHashTable[dictHash];
129532 +            const BYTE* dictMatch = dictBase + dictMatchIndex;
129533 +            if (dictMatchIndex <= dictStartIndex ||
129534 +                MEM_read32(dictMatch) != MEM_read32(ip)) {
129535 +                assert(stepSize >= 1);
129536 +                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
129537 +                continue;
129538 +            } else {
129539 +                /* found a dict match */
129540 +                U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
129541 +                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
129542 +                while (((ip>anchor) & (dictMatch>dictStart))
129543 +                     && (ip[-1] == dictMatch[-1])) {
129544 +                    ip--; dictMatch--; mLength++;
129545 +                } /* catch up */
129546 +                offset_2 = offset_1;
129547 +                offset_1 = offset;
129548 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
129549 +            }
129550 +        } else if (MEM_read32(match) != MEM_read32(ip)) {
129551 +            /* it's not a match, and we're not going to check the dictionary */
129552 +            assert(stepSize >= 1);
129553 +            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
129554 +            continue;
129555 +        } else {
129556 +            /* found a regular match */
129557 +            U32 const offset = (U32)(ip-match);
129558 +            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
129559 +            while (((ip>anchor) & (match>prefixStart))
129560 +                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
129561 +            offset_2 = offset_1;
129562 +            offset_1 = offset;
129563 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
129564 +        }
129566 +        /* match found */
129567 +        ip += mLength;
129568 +        anchor = ip;
129570 +        if (ip <= ilimit) {
129571 +            /* Fill Table */
129572 +            assert(base+curr+2 > istart);  /* check base overflow */
129573 +            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
129574 +            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
129576 +            /* check immediate repcode */
129577 +            while (ip <= ilimit) {
129578 +                U32 const current2 = (U32)(ip-base);
129579 +                U32 const repIndex2 = current2 - offset_2;
129580 +                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
129581 +                        dictBase - dictIndexDelta + repIndex2 :
129582 +                        base + repIndex2;
129583 +                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
129584 +                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
129585 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
129586 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
129587 +                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
129588 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
129589 +                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
129590 +                    ip += repLength2;
129591 +                    anchor = ip;
129592 +                    continue;
129593 +                }
129594 +                break;
129595 +            }
129596 +        }
129597 +    }
129599 +    /* save reps for next block */
129600 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
129601 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
129603 +    /* Return the last literals size */
129604 +    return (size_t)(iend - anchor);
129605 +}
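The dictMatchState variant above hinges on dictIndexDelta = prefixStartIndex - (dictEnd - dictBase): any match index below prefixStartIndex refers to dictionary content and is rebased by the delta, while anything at or above it lives in the current window. A small, self-contained sketch of that index translation; both buffers and prefixStartIndex are invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint8_t dict[]   = "dictionary-bytes";
    const uint8_t prefix[] = "current-window";
    uint32_t const dictContentSize  = sizeof(dict) - 1;
    uint32_t const prefixStartIndex = 100;   /* invented index of prefix[0] */
    /* same formula as above: rebases a global index into dictionary space */
    uint32_t const dictIndexDelta   = prefixStartIndex - dictContentSize;

    uint32_t const idx[2] = { 95, 104 };     /* one dictionary hit, one prefix hit */
    for (int i = 0; i < 2; i++) {
        const uint8_t *p = (idx[i] < prefixStartIndex)
            ? dict + (idx[i] - dictIndexDelta)        /* dictionary segment */
            : prefix + (idx[i] - prefixStartIndex);   /* current window */
        printf("index %u -> '%c'\n", idx[i], *p);
    }
    return 0;
}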
129607 +size_t ZSTD_compressBlock_fast_dictMatchState(
129608 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129609 +        void const* src, size_t srcSize)
129610 +{
129611 +    U32 const mls = ms->cParams.minMatch;
129612 +    assert(ms->dictMatchState != NULL);
129613 +    switch(mls)
129614 +    {
129615 +    default: /* includes case 3 */
129616 +    case 4 :
129617 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
129618 +    case 5 :
129619 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
129620 +    case 6 :
129621 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
129622 +    case 7 :
129623 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
129624 +    }
129625 +}
129628 +static size_t ZSTD_compressBlock_fast_extDict_generic(
129629 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129630 +        void const* src, size_t srcSize, U32 const mls)
129631 +{
129632 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
129633 +    U32* const hashTable = ms->hashTable;
129634 +    U32 const hlog = cParams->hashLog;
129635 +    /* support stepSize of 0 */
129636 +    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
129637 +    const BYTE* const base = ms->window.base;
129638 +    const BYTE* const dictBase = ms->window.dictBase;
129639 +    const BYTE* const istart = (const BYTE*)src;
129640 +    const BYTE* ip = istart;
129641 +    const BYTE* anchor = istart;
129642 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
129643 +    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
129644 +    const U32   dictStartIndex = lowLimit;
129645 +    const BYTE* const dictStart = dictBase + dictStartIndex;
129646 +    const U32   dictLimit = ms->window.dictLimit;
129647 +    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
129648 +    const BYTE* const prefixStart = base + prefixStartIndex;
129649 +    const BYTE* const dictEnd = dictBase + prefixStartIndex;
129650 +    const BYTE* const iend = istart + srcSize;
129651 +    const BYTE* const ilimit = iend - 8;
129652 +    U32 offset_1=rep[0], offset_2=rep[1];
129654 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
129656 +    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
129657 +    if (prefixStartIndex == dictStartIndex)
129658 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
129660 +    /* Search Loop */
129661 +    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
129662 +        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
129663 +        const U32    matchIndex = hashTable[h];
129664 +        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
129665 +        const BYTE*  match = matchBase + matchIndex;
129666 +        const U32    curr = (U32)(ip-base);
129667 +        const U32    repIndex = curr + 1 - offset_1;
129668 +        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
129669 +        const BYTE* const repMatch = repBase + repIndex;
129670 +        hashTable[h] = curr;   /* update hash table */
129671 +        DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
129672 +        assert(offset_1 <= curr +1);   /* check repIndex */
129674 +        if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
129675 +           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
129676 +            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
129677 +            size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
129678 +            ip++;
129679 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
129680 +            ip += rLength;
129681 +            anchor = ip;
129682 +        } else {
129683 +            if ( (matchIndex < dictStartIndex) ||
129684 +                 (MEM_read32(match) != MEM_read32(ip)) ) {
129685 +                assert(stepSize >= 1);
129686 +                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
129687 +                continue;
129688 +            }
129689 +            {   const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
129690 +                const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
129691 +                U32 const offset = curr - matchIndex;
129692 +                size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
129693 +                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
129694 +                offset_2 = offset_1; offset_1 = offset;  /* update offset history */
129695 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
129696 +                ip += mLength;
129697 +                anchor = ip;
129698 +        }   }
129700 +        if (ip <= ilimit) {
129701 +            /* Fill Table */
129702 +            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
129703 +            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
129704 +            /* check immediate repcode */
129705 +            while (ip <= ilimit) {
129706 +                U32 const current2 = (U32)(ip-base);
129707 +                U32 const repIndex2 = current2 - offset_2;
129708 +                const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
129709 +                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional overflow */
129710 +                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
129711 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
129712 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
129713 +                    { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
129714 +                    ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
129715 +                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
129716 +                    ip += repLength2;
129717 +                    anchor = ip;
129718 +                    continue;
129719 +                }
129720 +                break;
129721 +    }   }   }
129723 +    /* save reps for next block */
129724 +    rep[0] = offset_1;
129725 +    rep[1] = offset_2;
129727 +    /* Return the last literals size */
129728 +    return (size_t)(iend - anchor);
129729 +}
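In the extDict paths a match can begin in the old segment and, in the original stream, continue straight into the current one; that is what ZSTD_count_2segments measures by counting up to dictEnd and then resuming from prefixStart. A hedged re-creation of that idea with invented buffers (the real helper is optimized and bounds-checked differently):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* naive byte counter, standing in for ZSTD_count */
static size_t matchLen(const uint8_t *a, const uint8_t *b, size_t maxLen)
{
    size_t n = 0;
    while (n < maxLen && a[n] == b[n]) n++;
    return n;
}

/* if the match reaches the end of the old segment, its continuation in
 * the original stream is the start of the current prefix, so comparison
 * resumes from prefixStart */
static size_t count2seg(const uint8_t *ip, const uint8_t *iEnd,
                        const uint8_t *match, const uint8_t *mEnd,
                        const uint8_t *prefixStart)
{
    size_t const seg1Max = (size_t)(mEnd - match);
    size_t const inMax   = (size_t)(iEnd - ip);
    size_t n = matchLen(ip, match, seg1Max < inMax ? seg1Max : inMax);
    if (match + n != mEnd || ip + n == iEnd) return n;   /* ended inside segment 1 */
    return n + matchLen(ip + n, prefixStart, (size_t)(iEnd - (ip + n)));
}

int main(void)
{
    const uint8_t dictSeg[] = "..TO";           /* old segment; match is its last 2 bytes */
    const uint8_t prefix[]  = "KYOTO? TOKYO!";
    const uint8_t *ip = prefix + 7;             /* "TOKYO!" */
    size_t const len = count2seg(ip, prefix + sizeof(prefix) - 1,
                                 dictSeg + 2, dictSeg + 4, prefix);
    printf("match length across segments: %zu\n", len);   /* 5: "TOKYO" */
    return 0;
}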
129732 +size_t ZSTD_compressBlock_fast_extDict(
129733 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129734 +        void const* src, size_t srcSize)
129735 +{
129736 +    U32 const mls = ms->cParams.minMatch;
129737 +    switch(mls)
129738 +    {
129739 +    default: /* includes case 3 */
129740 +    case 4 :
129741 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
129742 +    case 5 :
129743 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
129744 +    case 6 :
129745 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
129746 +    case 7 :
129747 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
129748 +    }
129749 +}
129750 diff --git a/lib/zstd/compress/zstd_fast.h b/lib/zstd/compress/zstd_fast.h
129751 new file mode 100644
129752 index 000000000000..fddc2f532d21
129753 --- /dev/null
129754 +++ b/lib/zstd/compress/zstd_fast.h
129755 @@ -0,0 +1,31 @@
129756 +/*
129757 + * Copyright (c) Yann Collet, Facebook, Inc.
129758 + * All rights reserved.
129760 + * This source code is licensed under both the BSD-style license (found in the
129761 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
129762 + * in the COPYING file in the root directory of this source tree).
129763 + * You may select, at your option, one of the above-listed licenses.
129764 + */
129766 +#ifndef ZSTD_FAST_H
129767 +#define ZSTD_FAST_H
129770 +#include "../common/mem.h"      /* U32 */
129771 +#include "zstd_compress_internal.h"
129773 +void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
129774 +                        void const* end, ZSTD_dictTableLoadMethod_e dtlm);
129775 +size_t ZSTD_compressBlock_fast(
129776 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129777 +        void const* src, size_t srcSize);
129778 +size_t ZSTD_compressBlock_fast_dictMatchState(
129779 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129780 +        void const* src, size_t srcSize);
129781 +size_t ZSTD_compressBlock_fast_extDict(
129782 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129783 +        void const* src, size_t srcSize);
129786 +#endif /* ZSTD_FAST_H */
129787 diff --git a/lib/zstd/compress/zstd_lazy.c b/lib/zstd/compress/zstd_lazy.c
129788 new file mode 100644
129789 index 000000000000..39aa2569aabc
129790 --- /dev/null
129791 +++ b/lib/zstd/compress/zstd_lazy.c
129792 @@ -0,0 +1,1412 @@
129793 +/*
129794 + * Copyright (c) Yann Collet, Facebook, Inc.
129795 + * All rights reserved.
129797 + * This source code is licensed under both the BSD-style license (found in the
129798 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
129799 + * in the COPYING file in the root directory of this source tree).
129800 + * You may select, at your option, one of the above-listed licenses.
129801 + */
129803 +#include "zstd_compress_internal.h"
129804 +#include "zstd_lazy.h"
129807 +/*-*************************************
129808 +*  Binary Tree search
129809 +***************************************/
129811 +static void
129812 +ZSTD_updateDUBT(ZSTD_matchState_t* ms,
129813 +                const BYTE* ip, const BYTE* iend,
129814 +                U32 mls)
129815 +{
129816 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
129817 +    U32* const hashTable = ms->hashTable;
129818 +    U32  const hashLog = cParams->hashLog;
129820 +    U32* const bt = ms->chainTable;
129821 +    U32  const btLog  = cParams->chainLog - 1;
129822 +    U32  const btMask = (1 << btLog) - 1;
129824 +    const BYTE* const base = ms->window.base;
129825 +    U32 const target = (U32)(ip - base);
129826 +    U32 idx = ms->nextToUpdate;
129828 +    if (idx != target)
129829 +        DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
129830 +                    idx, target, ms->window.dictLimit);
129831 +    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
129832 +    (void)iend;
129834 +    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
129835 +    for ( ; idx < target ; idx++) {
129836 +        size_t const h  = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
129837 +        U32    const matchIndex = hashTable[h];
129839 +        U32*   const nextCandidatePtr = bt + 2*(idx&btMask);
129840 +        U32*   const sortMarkPtr  = nextCandidatePtr + 1;
129842 +        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
129843 +        hashTable[h] = idx;   /* Update Hash Table */
129844 +        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
129845 +        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
129846 +    }
129847 +    ms->nextToUpdate = target;
129848 +}
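ZSTD_updateDUBT above deliberately sorts nothing: each new position is pushed onto the hash bucket's chain with its companion slot set to ZSTD_DUBT_UNSORTED_MARK, and ZSTD_DUBT_findBestMatch later walks the marked run and batch-sorts it via ZSTD_insertDUBT1. A sketch of the flat two-slots-per-node layout and the deferred insert; sizes and the mark value are invented:

#include <stdio.h>
#include <stdint.h>

#define BT_LOG        4
#define BT_MASK       ((1u << BT_LOG) - 1)
#define UNSORTED_MARK 1u   /* stand-in for ZSTD_DUBT_UNSORTED_MARK */

/* two U32 slots per position, as in ms->chainTable above:
 * slot 0 chains to the previous candidate, slot 1 is the sort mark */
static uint32_t bt[2 * (1u << BT_LOG)];
static uint32_t hashHead;   /* single-bucket "hash table" for the sketch */

static void lazyInsert(uint32_t idx)
{
    uint32_t *const node = bt + 2 * (idx & BT_MASK);
    node[0] = hashHead;        /* update BT like a plain chain */
    node[1] = UNSORTED_MARK;   /* flag: not yet woven into the tree */
    hashHead = idx;
}

int main(void)
{
    for (uint32_t i = 3; i <= 6; i++) lazyInsert(i);
    /* a later search walks the marked run newest-to-oldest and batch-sorts it */
    for (uint32_t m = hashHead; m >= 3 && bt[2 * (m & BT_MASK) + 1] == UNSORTED_MARK;
         m = bt[2 * (m & BT_MASK)])
        printf("unsorted candidate: %u\n", m);
    return 0;
}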
129851 +/** ZSTD_insertDUBT1() :
129852 + *  sort one already inserted but unsorted position
129853 + *  assumption : curr >= btLow == (curr - btMask)
129854 + *  doesn't fail */
129855 +static void
129856 +ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
129857 +                 U32 curr, const BYTE* inputEnd,
129858 +                 U32 nbCompares, U32 btLow,
129859 +                 const ZSTD_dictMode_e dictMode)
129860 +{
129861 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
129862 +    U32* const bt = ms->chainTable;
129863 +    U32  const btLog  = cParams->chainLog - 1;
129864 +    U32  const btMask = (1 << btLog) - 1;
129865 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
129866 +    const BYTE* const base = ms->window.base;
129867 +    const BYTE* const dictBase = ms->window.dictBase;
129868 +    const U32 dictLimit = ms->window.dictLimit;
129869 +    const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
129870 +    const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
129871 +    const BYTE* const dictEnd = dictBase + dictLimit;
129872 +    const BYTE* const prefixStart = base + dictLimit;
129873 +    const BYTE* match;
129874 +    U32* smallerPtr = bt + 2*(curr&btMask);
129875 +    U32* largerPtr  = smallerPtr + 1;
129876 +    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
129877 +    U32 dummy32;   /* to be nullified at the end */
129878 +    U32 const windowValid = ms->window.lowLimit;
129879 +    U32 const maxDistance = 1U << cParams->windowLog;
129880 +    U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;
129883 +    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
129884 +                curr, dictLimit, windowLow);
129885 +    assert(curr >= btLow);
129886 +    assert(ip < iend);   /* condition for ZSTD_count */
129888 +    while (nbCompares-- && (matchIndex > windowLow)) {
129889 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
129890 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
129891 +        assert(matchIndex < curr);
129892 +        /* note : all candidates are now supposed sorted,
129893 +         * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
129894 +         * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
129896 +        if ( (dictMode != ZSTD_extDict)
129897 +          || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
129898 +          || (curr < dictLimit) /* both in extDict */) {
129899 +            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
129900 +                                     || (matchIndex+matchLength >= dictLimit)) ?
129901 +                                        base : dictBase;
129902 +            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
129903 +                 || (curr < dictLimit) );
129904 +            match = mBase + matchIndex;
129905 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
129906 +        } else {
129907 +            match = dictBase + matchIndex;
129908 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
129909 +            if (matchIndex+matchLength >= dictLimit)
129910 +                match = base + matchIndex;   /* preparation for next read of match[matchLength] */
129911 +        }
129913 +        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
129914 +                    curr, matchIndex, (U32)matchLength);
129916 +        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
129917 +            break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
129918 +        }
129920 +        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
129921 +            /* match is smaller than current */
129922 +            *smallerPtr = matchIndex;             /* update smaller idx */
129923 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
129924 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
129925 +            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
129926 +                        matchIndex, btLow, nextPtr[1]);
129927 +            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
129928 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
129929 +        } else {
129930 +            /* match is larger than current */
129931 +            *largerPtr = matchIndex;
129932 +            commonLengthLarger = matchLength;
129933 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
129934 +            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
129935 +                        matchIndex, btLow, nextPtr[0]);
129936 +            largerPtr = nextPtr;
129937 +            matchIndex = nextPtr[0];
129938 +    }   }
129940 +    *smallerPtr = *largerPtr = 0;
129941 +}
129944 +static size_t
129945 +ZSTD_DUBT_findBetterDictMatch (
129946 +        ZSTD_matchState_t* ms,
129947 +        const BYTE* const ip, const BYTE* const iend,
129948 +        size_t* offsetPtr,
129949 +        size_t bestLength,
129950 +        U32 nbCompares,
129951 +        U32 const mls,
129952 +        const ZSTD_dictMode_e dictMode)
129953 +{
129954 +    const ZSTD_matchState_t * const dms = ms->dictMatchState;
129955 +    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
129956 +    const U32 * const dictHashTable = dms->hashTable;
129957 +    U32         const hashLog = dmsCParams->hashLog;
129958 +    size_t      const h  = ZSTD_hashPtr(ip, hashLog, mls);
129959 +    U32               dictMatchIndex = dictHashTable[h];
129961 +    const BYTE* const base = ms->window.base;
129962 +    const BYTE* const prefixStart = base + ms->window.dictLimit;
129963 +    U32         const curr = (U32)(ip-base);
129964 +    const BYTE* const dictBase = dms->window.base;
129965 +    const BYTE* const dictEnd = dms->window.nextSrc;
129966 +    U32         const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
129967 +    U32         const dictLowLimit = dms->window.lowLimit;
129968 +    U32         const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
129970 +    U32*        const dictBt = dms->chainTable;
129971 +    U32         const btLog  = dmsCParams->chainLog - 1;
129972 +    U32         const btMask = (1 << btLog) - 1;
129973 +    U32         const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
129975 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
129977 +    (void)dictMode;
129978 +    assert(dictMode == ZSTD_dictMatchState);
129980 +    while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
129981 +        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
129982 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
129983 +        const BYTE* match = dictBase + dictMatchIndex;
129984 +        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
129985 +        if (dictMatchIndex+matchLength >= dictHighLimit)
129986 +            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */
129988 +        if (matchLength > bestLength) {
129989 +            U32 matchIndex = dictMatchIndex + dictIndexDelta;
129990 +            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
129991 +                DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
129992 +                    curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex);
129993 +                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
129994 +            }
129995 +            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
129996 +                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
129997 +            }
129998 +        }
130000 +        if (match[matchLength] < ip[matchLength]) {
130001 +            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
130002 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
130003 +            dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
130004 +        } else {
130005 +            /* match is larger than current */
130006 +            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
130007 +            commonLengthLarger = matchLength;
130008 +            dictMatchIndex = nextPtr[0];
130009 +        }
130010 +    }
130012 +    if (bestLength >= MINMATCH) {
130013 +        U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
130014 +        DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
130015 +                    curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
130016 +    }
130017 +    return bestLength;
130018 +}
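The acceptance test used above, 4*(matchLength-bestLength) > highbit32(curr-matchIndex+1) - highbit32(offset+1), reads as a cost model: an extra matched byte is worth roughly four units, while a more distant candidate pays roughly one unit per extra bit its offset needs. A standalone sketch of that trade-off; the sample numbers are invented:

#include <stdio.h>
#include <stdint.h>

/* position of the highest set bit, like ZSTD_highbit32 */
static unsigned highbit32(uint32_t v)
{
    unsigned n = 0;
    while (v >>= 1) n++;
    return n;
}

/* same shape as the test above: is (newLen, newDist) worth replacing
 * (bestLen, bestOff)? length gain at 4 units/byte vs. extra offset bits */
static int betterMatch(uint32_t newLen, uint32_t newDist, uint32_t bestLen, uint32_t bestOff)
{
    return 4 * (int)(newLen - bestLen) > (int)(highbit32(newDist + 1) - highbit32(bestOff + 1));
}

int main(void)
{
    printf("%d\n", betterMatch(9, 70000, 8, 512));    /* 0: 1 byte gained, ~7 extra bits */
    printf("%d\n", betterMatch(11, 70000, 8, 512));   /* 1: 3 bytes gained outweigh it */
    return 0;
}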
130022 +static size_t
130023 +ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
130024 +                        const BYTE* const ip, const BYTE* const iend,
130025 +                        size_t* offsetPtr,
130026 +                        U32 const mls,
130027 +                        const ZSTD_dictMode_e dictMode)
130028 +{
130029 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
130030 +    U32*   const hashTable = ms->hashTable;
130031 +    U32    const hashLog = cParams->hashLog;
130032 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
130033 +    U32          matchIndex  = hashTable[h];
130035 +    const BYTE* const base = ms->window.base;
130036 +    U32    const curr = (U32)(ip-base);
130037 +    U32    const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
130039 +    U32*   const bt = ms->chainTable;
130040 +    U32    const btLog  = cParams->chainLog - 1;
130041 +    U32    const btMask = (1 << btLog) - 1;
130042 +    U32    const btLow = (btMask >= curr) ? 0 : curr - btMask;
130043 +    U32    const unsortLimit = MAX(btLow, windowLow);
130045 +    U32*         nextCandidate = bt + 2*(matchIndex&btMask);
130046 +    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
130047 +    U32          nbCompares = 1U << cParams->searchLog;
130048 +    U32          nbCandidates = nbCompares;
130049 +    U32          previousCandidate = 0;
130051 +    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
130052 +    assert(ip <= iend-8);   /* required for h calculation */
130053 +    assert(dictMode != ZSTD_dedicatedDictSearch);
130055 +    /* reach end of unsorted candidates list */
130056 +    while ( (matchIndex > unsortLimit)
130057 +         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
130058 +         && (nbCandidates > 1) ) {
130059 +        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
130060 +                    matchIndex);
130061 +        *unsortedMark = previousCandidate;  /* the unsortedMark becomes a reversed chain, to move up back to original position */
130062 +        previousCandidate = matchIndex;
130063 +        matchIndex = *nextCandidate;
130064 +        nextCandidate = bt + 2*(matchIndex&btMask);
130065 +        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
130066 +        nbCandidates --;
130067 +    }
130069 +    /* nullify last candidate if it's still unsorted
130070 +     * simplification, detrimental to compression ratio, beneficial for speed */
130071 +    if ( (matchIndex > unsortLimit)
130072 +      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
130073 +        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
130074 +                    matchIndex);
130075 +        *nextCandidate = *unsortedMark = 0;
130076 +    }
130078 +    /* batch sort stacked candidates */
130079 +    matchIndex = previousCandidate;
130080 +    while (matchIndex) {  /* will end on matchIndex == 0 */
130081 +        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
130082 +        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
130083 +        ZSTD_insertDUBT1(ms, matchIndex, iend,
130084 +                         nbCandidates, unsortLimit, dictMode);
130085 +        matchIndex = nextCandidateIdx;
130086 +        nbCandidates++;
130087 +    }
130089 +    /* find longest match */
130090 +    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;
130091 +        const BYTE* const dictBase = ms->window.dictBase;
130092 +        const U32 dictLimit = ms->window.dictLimit;
130093 +        const BYTE* const dictEnd = dictBase + dictLimit;
130094 +        const BYTE* const prefixStart = base + dictLimit;
130095 +        U32* smallerPtr = bt + 2*(curr&btMask);
130096 +        U32* largerPtr  = bt + 2*(curr&btMask) + 1;
130097 +        U32 matchEndIdx = curr + 8 + 1;
130098 +        U32 dummy32;   /* to be nullified at the end */
130099 +        size_t bestLength = 0;
130101 +        matchIndex  = hashTable[h];
130102 +        hashTable[h] = curr;   /* Update Hash Table */
130104 +        while (nbCompares-- && (matchIndex > windowLow)) {
130105 +            U32* const nextPtr = bt + 2*(matchIndex & btMask);
130106 +            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
130107 +            const BYTE* match;
130109 +            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
130110 +                match = base + matchIndex;
130111 +                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
130112 +            } else {
130113 +                match = dictBase + matchIndex;
130114 +                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
130115 +                if (matchIndex+matchLength >= dictLimit)
130116 +                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
130117 +            }
130119 +            if (matchLength > bestLength) {
130120 +                if (matchLength > matchEndIdx - matchIndex)
130121 +                    matchEndIdx = matchIndex + (U32)matchLength;
130122 +                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
130123 +                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
130124 +                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
130125 +                    if (dictMode == ZSTD_dictMatchState) {
130126 +                        nbCompares = 0; /* in addition to avoiding checking any
130127 +                                         * further in this loop, make sure we
130128 +                                         * skip checking in the dictionary. */
130129 +                    }
130130 +                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
130131 +                }
130132 +            }
130134 +            if (match[matchLength] < ip[matchLength]) {
130135 +                /* match is smaller than current */
130136 +                *smallerPtr = matchIndex;             /* update smaller idx */
130137 +                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
130138 +                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
130139 +                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
130140 +                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
130141 +            } else {
130142 +                /* match is larger than current */
130143 +                *largerPtr = matchIndex;
130144 +                commonLengthLarger = matchLength;
130145 +                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
130146 +                largerPtr = nextPtr;
130147 +                matchIndex = nextPtr[0];
130148 +        }   }
130150 +        *smallerPtr = *largerPtr = 0;
130152 +        if (dictMode == ZSTD_dictMatchState && nbCompares) {
130153 +            bestLength = ZSTD_DUBT_findBetterDictMatch(
130154 +                    ms, ip, iend,
130155 +                    offsetPtr, bestLength, nbCompares,
130156 +                    mls, dictMode);
130157 +        }
130159 +        assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
130160 +        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
130161 +        if (bestLength >= MINMATCH) {
130162 +            U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
130163 +            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
130164 +                        curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
130165 +        }
130166 +        return bestLength;
130167 +    }
130168 +}
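The unsorted-mark walk above is easy to miss: ZSTD_updateDUBT pushes candidates newest-first with their mark slot set to ZSTD_DUBT_UNSORTED_MARK, and the loop overwrites each mark with the previously visited index, turning the next-pointers into a reversed chain that is then replayed oldest-first into ZSTD_insertDUBT1. A toy model of that reversal, with plain arrays standing in for the bt[] pairs (illustrative only, not the patch's code):

#include <stdio.h>

int main(void)
{
    unsigned next[8] = {0, 0, 1, 2, 3, 0, 0, 0};   /* chain: 4 -> 3 -> 2 -> 1 */
    unsigned mark[8] = {0};
    unsigned idx = 4, prev = 0;
    while (idx) {            /* walk newest -> oldest, recording the reverse */
        mark[idx] = prev;    /* the "unsortedMark becomes a reversed chain" step */
        prev = idx;
        idx = next[idx];
    }
    for (idx = prev; idx; idx = mark[idx])   /* replay oldest -> newest */
        printf("insert candidate %u\n", idx);  /* prints 1, 2, 3, 4 */
    return 0;
}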
130171 +/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
130172 +FORCE_INLINE_TEMPLATE size_t
130173 +ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
130174 +                const BYTE* const ip, const BYTE* const iLimit,
130175 +                      size_t* offsetPtr,
130176 +                const U32 mls /* template */,
130177 +                const ZSTD_dictMode_e dictMode)
130178 +{
130179 +    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
130180 +    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
130181 +    ZSTD_updateDUBT(ms, ip, iLimit, mls);
130182 +    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
130183 +}
130186 +static size_t
130187 +ZSTD_BtFindBestMatch_selectMLS (  ZSTD_matchState_t* ms,
130188 +                            const BYTE* ip, const BYTE* const iLimit,
130189 +                                  size_t* offsetPtr)
130190 +{
130191 +    switch(ms->cParams.minMatch)
130192 +    {
130193 +    default : /* includes case 3 */
130194 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
130195 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
130196 +    case 7 :
130197 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
130198 +    }
130199 +}
130202 +static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
130203 +                        ZSTD_matchState_t* ms,
130204 +                        const BYTE* ip, const BYTE* const iLimit,
130205 +                        size_t* offsetPtr)
130206 +{
130207 +    switch(ms->cParams.minMatch)
130208 +    {
130209 +    default : /* includes case 3 */
130210 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
130211 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
130212 +    case 7 :
130213 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
130214 +    }
130215 +}
130218 +static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
130219 +                        ZSTD_matchState_t* ms,
130220 +                        const BYTE* ip, const BYTE* const iLimit,
130221 +                        size_t* offsetPtr)
130222 +{
130223 +    switch(ms->cParams.minMatch)
130224 +    {
130225 +    default : /* includes case 3 */
130226 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
130227 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
130228 +    case 7 :
130229 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
130230 +    }
130231 +}
130235 +/* *********************************
130236 +*  Hash Chain
130237 +***********************************/
130238 +#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
130240 +/* Update chains up to ip (excluded)
130241 +   Assumption : always within prefix (i.e. not within extDict) */
130242 +FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
130243 +                        ZSTD_matchState_t* ms,
130244 +                        const ZSTD_compressionParameters* const cParams,
130245 +                        const BYTE* ip, U32 const mls)
130246 +{
130247 +    U32* const hashTable  = ms->hashTable;
130248 +    const U32 hashLog = cParams->hashLog;
130249 +    U32* const chainTable = ms->chainTable;
130250 +    const U32 chainMask = (1 << cParams->chainLog) - 1;
130251 +    const BYTE* const base = ms->window.base;
130252 +    const U32 target = (U32)(ip - base);
130253 +    U32 idx = ms->nextToUpdate;
130255 +    while(idx < target) { /* catch up */
130256 +        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
130257 +        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
130258 +        hashTable[h] = idx;
130259 +        idx++;
130260 +    }
130262 +    ms->nextToUpdate = target;
130263 +    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
130264 +}
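The chain table here is a rolling array of next-pointers indexed by position masked to the chain size, so entries older than one chain window are silently overwritten (hence the minChain cutoff in the callers). A minimal sketch of the same insert-and-walk step, with a made-up 16-entry table and the hash supplied directly instead of computed from bytes:

#include <stdint.h>

#define CHAIN_LOG 4
#define CHAIN_MASK ((1u << CHAIN_LOG) - 1)
#define NEXT_IN_CHAIN(d) chain[(d) & CHAIN_MASK]

static uint32_t head[16];               /* head[h]: most recent position for hash h */
static uint32_t chain[1u << CHAIN_LOG]; /* chain[p & mask]: previous position, same hash */

static void insert(uint32_t pos, uint32_t h)
{
    NEXT_IN_CHAIN(pos) = head[h];   /* link to the previous occurrence */
    head[h] = pos;                  /* new head of the chain */
}

int main(void)
{
    insert(1, 5); insert(9, 5);     /* two positions sharing hash bucket 5 */
    uint32_t m = head[5];           /* 9: newest first */
    m = NEXT_IN_CHAIN(m);           /* walk back to 1 */
    return (int)m - 1;              /* 0 on success */
}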
130266 +U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
130267 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
130268 +    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
130269 +}
130271 +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
130272 +{
130273 +    const BYTE* const base = ms->window.base;
130274 +    U32 const target = (U32)(ip - base);
130275 +    U32* const hashTable = ms->hashTable;
130276 +    U32* const chainTable = ms->chainTable;
130277 +    U32 const chainSize = 1 << ms->cParams.chainLog;
130278 +    U32 idx = ms->nextToUpdate;
130279 +    U32 const minChain = chainSize < target ? target - chainSize : idx;
130280 +    U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
130281 +    U32 const cacheSize = bucketSize - 1;
130282 +    U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
130283 +    U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;
130285 +    /* We know the hashtable is oversized by a factor of `bucketSize`.
130286 +     * We are going to temporarily pretend `bucketSize == 1`, keeping only a
130287 +     * single entry. We will use the rest of the space to construct a temporary
130288 +     * chaintable.
130289 +     */
130290 +    U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
130291 +    U32* const tmpHashTable = hashTable;
130292 +    U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
130293 +    U32 const tmpChainSize = ((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
130294 +    U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
130296 +    U32 hashIdx;
130298 +    assert(ms->cParams.chainLog <= 24);
130299 +    assert(ms->cParams.hashLog >= ms->cParams.chainLog);
130300 +    assert(idx != 0);
130301 +    assert(tmpMinChain <= minChain);
130303 +    /* fill conventional hash table and conventional chain table */
130304 +    for ( ; idx < target; idx++) {
130305 +        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
130306 +        if (idx >= tmpMinChain) {
130307 +            tmpChainTable[idx - tmpMinChain] = hashTable[h];
130308 +        }
130309 +        tmpHashTable[h] = idx;
130310 +    }
130312 +    /* sort chains into ddss chain table */
130313 +    {
130314 +        U32 chainPos = 0;
130315 +        for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
130316 +            U32 count;
130317 +            U32 countBeyondMinChain = 0;
130318 +            U32 i = tmpHashTable[hashIdx];
130319 +            for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
130320 +                /* skip through the chain to the first position that won't be
130321 +                 * in the hash cache bucket */
130322 +                if (i < minChain) {
130323 +                    countBeyondMinChain++;
130324 +                }
130325 +                i = tmpChainTable[i - tmpMinChain];
130326 +            }
130327 +            if (count == cacheSize) {
130328 +                for (count = 0; count < chainLimit;) {
130329 +                    if (i < minChain) {
130330 +                        if (!i || countBeyondMinChain++ > cacheSize) {
130331 +                            /* only allow pulling `cacheSize` number of entries
130332 +                             * into the cache or chainTable beyond `minChain`,
130333 +                             * to replace the entries pulled out of the
130334 +                             * chainTable into the cache. This lets us reach
130335 +                             * back further without increasing the total number
130336 +                             * of entries in the chainTable, guaranteeing the
130337 +                             * DDSS chain table will fit into the space
130338 +                             * allocated for the regular one. */
130339 +                            break;
130340 +                        }
130341 +                    }
130342 +                    chainTable[chainPos++] = i;
130343 +                    count++;
130344 +                    if (i < tmpMinChain) {
130345 +                        break;
130346 +                    }
130347 +                    i = tmpChainTable[i - tmpMinChain];
130348 +                }
130349 +            } else {
130350 +                count = 0;
130351 +            }
130352 +            if (count) {
130353 +                tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
130354 +            } else {
130355 +                tmpHashTable[hashIdx] = 0;
130356 +            }
130357 +        }
130358 +        assert(chainPos <= chainSize); /* I believe this is guaranteed... */
130359 +    }
130361 +    /* move chain pointers into the last entry of each hash bucket */
130362 +    for (hashIdx = (1 << hashLog); hashIdx; ) {
130363 +        U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
130364 +        U32 const chainPackedPointer = tmpHashTable[hashIdx];
130365 +        U32 i;
130366 +        for (i = 0; i < cacheSize; i++) {
130367 +            hashTable[bucketIdx + i] = 0;
130368 +        }
130369 +        hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
130370 +    }
130372 +    /* fill the buckets of the hash table */
130373 +    for (idx = ms->nextToUpdate; idx < target; idx++) {
130374 +        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
130375 +                   << ZSTD_LAZY_DDSS_BUCKET_LOG;
130376 +        U32 i;
130377 +        /* Shift hash cache down 1. */
130378 +        for (i = cacheSize - 1; i; i--)
130379 +            hashTable[h + i] = hashTable[h + i - 1];
130380 +        hashTable[h] = idx;
130381 +    }
130383 +    ms->nextToUpdate = target;
130384 +}
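Each bucket's last slot packs the overflow chain's start and length into one U32, matching the `((chainPos - count) << 8) + count` store above and the `>> 8` / `& 0xFF` reads in the search path (the length is capped at 255 via chainLimit, so it fits in the low byte). A small standalone sketch of that packing:

#include <assert.h>
#include <stdint.h>

/* chain start index in the high 24 bits, chain length in the low 8 bits */
static uint32_t pack_chain(uint32_t chainStart, uint32_t chainLength)
{
    assert(chainLength <= 0xFF);   /* guaranteed by chainLimit above */
    return (chainStart << 8) + chainLength;
}

int main(void)
{
    uint32_t const packed      = pack_chain(1234, 7);
    uint32_t const chainIndex  = packed >> 8;     /* as in the reader code */
    uint32_t const chainLength = packed & 0xFF;
    assert(chainIndex == 1234 && chainLength == 7);
    return 0;
}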
130387 +/* inlining is important to hardwire a hot branch (template emulation) */
130388 +FORCE_INLINE_TEMPLATE
130389 +size_t ZSTD_HcFindBestMatch_generic (
130390 +                        ZSTD_matchState_t* ms,
130391 +                        const BYTE* const ip, const BYTE* const iLimit,
130392 +                        size_t* offsetPtr,
130393 +                        const U32 mls, const ZSTD_dictMode_e dictMode)
130394 +{
130395 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
130396 +    U32* const chainTable = ms->chainTable;
130397 +    const U32 chainSize = (1 << cParams->chainLog);
130398 +    const U32 chainMask = chainSize-1;
130399 +    const BYTE* const base = ms->window.base;
130400 +    const BYTE* const dictBase = ms->window.dictBase;
130401 +    const U32 dictLimit = ms->window.dictLimit;
130402 +    const BYTE* const prefixStart = base + dictLimit;
130403 +    const BYTE* const dictEnd = dictBase + dictLimit;
130404 +    const U32 curr = (U32)(ip-base);
130405 +    const U32 maxDistance = 1U << cParams->windowLog;
130406 +    const U32 lowestValid = ms->window.lowLimit;
130407 +    const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
130408 +    const U32 isDictionary = (ms->loadedDictEnd != 0);
130409 +    const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
130410 +    const U32 minChain = curr > chainSize ? curr - chainSize : 0;
130411 +    U32 nbAttempts = 1U << cParams->searchLog;
130412 +    size_t ml=4-1;
130414 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
130415 +    const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
130416 +                         ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
130417 +    const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
130418 +                        ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
130420 +    U32 matchIndex;
130422 +    if (dictMode == ZSTD_dedicatedDictSearch) {
130423 +        const U32* entry = &dms->hashTable[ddsIdx];
130424 +        PREFETCH_L1(entry);
130425 +    }
130427 +    /* HC4 match finder */
130428 +    matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
130430 +    for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
130431 +        size_t currentMl=0;
130432 +        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
130433 +            const BYTE* const match = base + matchIndex;
130434 +            assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
130435 +            if (match[ml] == ip[ml])   /* potentially better */
130436 +                currentMl = ZSTD_count(ip, match, iLimit);
130437 +        } else {
130438 +            const BYTE* const match = dictBase + matchIndex;
130439 +            assert(match+4 <= dictEnd);
130440 +            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
130441 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
130442 +        }
130444 +        /* save best solution */
130445 +        if (currentMl > ml) {
130446 +            ml = currentMl;
130447 +            *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
130448 +            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
130449 +        }
130451 +        if (matchIndex <= minChain) break;
130452 +        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
130453 +    }
130455 +    if (dictMode == ZSTD_dedicatedDictSearch) {
130456 +        const U32 ddsLowestIndex  = dms->window.dictLimit;
130457 +        const BYTE* const ddsBase = dms->window.base;
130458 +        const BYTE* const ddsEnd  = dms->window.nextSrc;
130459 +        const U32 ddsSize         = (U32)(ddsEnd - ddsBase);
130460 +        const U32 ddsIndexDelta   = dictLimit - ddsSize;
130461 +        const U32 bucketSize      = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
130462 +        const U32 bucketLimit     = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
130463 +        U32 ddsAttempt;
130465 +        for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
130466 +            PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
130467 +        }
130469 +        {
130470 +            U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
130471 +            U32 const chainIndex = chainPackedPointer >> 8;
130473 +            PREFETCH_L1(&dms->chainTable[chainIndex]);
130474 +        }
130476 +        for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
130477 +            size_t currentMl=0;
130478 +            const BYTE* match;
130479 +            matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
130480 +            match = ddsBase + matchIndex;
130482 +            if (!matchIndex) {
130483 +                return ml;
130484 +            }
130486 +            /* guaranteed by table construction */
130487 +            (void)ddsLowestIndex;
130488 +            assert(matchIndex >= ddsLowestIndex);
130489 +            assert(match+4 <= ddsEnd);
130490 +            if (MEM_read32(match) == MEM_read32(ip)) {
130491 +                /* assumption : matchIndex <= dictLimit-4 (by table construction) */
130492 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
130493 +            }
130495 +            /* save best solution */
130496 +            if (currentMl > ml) {
130497 +                ml = currentMl;
130498 +                *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
130499 +                if (ip+currentMl == iLimit) {
130500 +                    /* best possible, avoids read overflow on next attempt */
130501 +                    return ml;
130502 +                }
130503 +            }
130504 +        }
130506 +        {
130507 +            U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
130508 +            U32 chainIndex = chainPackedPointer >> 8;
130509 +            U32 const chainLength = chainPackedPointer & 0xFF;
130510 +            U32 const chainAttempts = nbAttempts - ddsAttempt;
130511 +            U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
130512 +            U32 chainAttempt;
130514 +            for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
130515 +                PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
130516 +            }
130518 +            for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
130519 +                size_t currentMl=0;
130520 +                const BYTE* match;
130521 +                matchIndex = dms->chainTable[chainIndex];
130522 +                match = ddsBase + matchIndex;
130524 +                /* guaranteed by table construction */
130525 +                assert(matchIndex >= ddsLowestIndex);
130526 +                assert(match+4 <= ddsEnd);
130527 +                if (MEM_read32(match) == MEM_read32(ip)) {
130528 +                    /* assumption : matchIndex <= dictLimit-4 (by table construction) */
130529 +                    currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
130530 +                }
130532 +                /* save best solution */
130533 +                if (currentMl > ml) {
130534 +                    ml = currentMl;
130535 +                    *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
130536 +                    if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
130537 +                }
130538 +            }
130539 +        }
130540 +    } else if (dictMode == ZSTD_dictMatchState) {
130541 +        const U32* const dmsChainTable = dms->chainTable;
130542 +        const U32 dmsChainSize         = (1 << dms->cParams.chainLog);
130543 +        const U32 dmsChainMask         = dmsChainSize - 1;
130544 +        const U32 dmsLowestIndex       = dms->window.dictLimit;
130545 +        const BYTE* const dmsBase      = dms->window.base;
130546 +        const BYTE* const dmsEnd       = dms->window.nextSrc;
130547 +        const U32 dmsSize              = (U32)(dmsEnd - dmsBase);
130548 +        const U32 dmsIndexDelta        = dictLimit - dmsSize;
130549 +        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
130551 +        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
130553 +        for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
130554 +            size_t currentMl=0;
130555 +            const BYTE* const match = dmsBase + matchIndex;
130556 +            assert(match+4 <= dmsEnd);
130557 +            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
130558 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
130560 +            /* save best solution */
130561 +            if (currentMl > ml) {
130562 +                ml = currentMl;
130563 +                *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
130564 +                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
130565 +            }
130567 +            if (matchIndex <= dmsMinChain) break;
130569 +            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
130570 +        }
130571 +    }
130573 +    return ml;
130574 +}
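One detail worth noting in the prefix path above: a candidate can only improve on the current best length ml if it matches in at least ml+1 bytes, so comparing the single byte at offset ml is a cheap filter before the full ZSTD_count. A minimal sketch of that idea (hypothetical helper, not the patch's code):

#include <stddef.h>

static size_t count_if_promising(const char* ip, const char* match,
                                 const char* iend, size_t ml)
{
    if (match[ml] != ip[ml]) return 0;           /* cannot beat ml, skip */
    size_t n = 0;
    while (ip + n < iend && ip[n] == match[n]) n++;  /* full count only now */
    return n;
}

int main(void)
{
    const char buf[] = "abcdefabcdxy";
    /* candidate at buf matches 4 bytes of ip at buf+6; beats ml == 3 */
    return (int)count_if_promising(buf + 6, buf, buf + 12, 3) - 4;  /* 0 */
}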
130577 +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
130578 +                        ZSTD_matchState_t* ms,
130579 +                        const BYTE* ip, const BYTE* const iLimit,
130580 +                        size_t* offsetPtr)
130581 +{
130582 +    switch(ms->cParams.minMatch)
130583 +    {
130584 +    default : /* includes case 3 */
130585 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
130586 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
130587 +    case 7 :
130588 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
130589 +    }
130590 +}
130593 +static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
130594 +                        ZSTD_matchState_t* ms,
130595 +                        const BYTE* ip, const BYTE* const iLimit,
130596 +                        size_t* offsetPtr)
130597 +{
130598 +    switch(ms->cParams.minMatch)
130599 +    {
130600 +    default : /* includes case 3 */
130601 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
130602 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
130603 +    case 7 :
130604 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
130605 +    }
130606 +}
130609 +static size_t ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS (
130610 +                        ZSTD_matchState_t* ms,
130611 +                        const BYTE* ip, const BYTE* const iLimit,
130612 +                        size_t* offsetPtr)
130613 +{
130614 +    switch(ms->cParams.minMatch)
130615 +    {
130616 +    default : /* includes case 3 */
130617 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch);
130618 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch);
130619 +    case 7 :
130620 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch);
130621 +    }
130622 +}
130625 +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
130626 +                        ZSTD_matchState_t* ms,
130627 +                        const BYTE* ip, const BYTE* const iLimit,
130628 +                        size_t* offsetPtr)
130629 +{
130630 +    switch(ms->cParams.minMatch)
130631 +    {
130632 +    default : /* includes case 3 */
130633 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
130634 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
130635 +    case 7 :
130636 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
130637 +    }
130638 +}
130641 +/* *******************************
130642 +*  Common parser - lazy strategy
130643 +*********************************/
130644 +typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
130646 +FORCE_INLINE_TEMPLATE size_t
130647 +ZSTD_compressBlock_lazy_generic(
130648 +                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
130649 +                        U32 rep[ZSTD_REP_NUM],
130650 +                        const void* src, size_t srcSize,
130651 +                        const searchMethod_e searchMethod, const U32 depth,
130652 +                        ZSTD_dictMode_e const dictMode)
130653 +{
130654 +    const BYTE* const istart = (const BYTE*)src;
130655 +    const BYTE* ip = istart;
130656 +    const BYTE* anchor = istart;
130657 +    const BYTE* const iend = istart + srcSize;
130658 +    const BYTE* const ilimit = iend - 8;
130659 +    const BYTE* const base = ms->window.base;
130660 +    const U32 prefixLowestIndex = ms->window.dictLimit;
130661 +    const BYTE* const prefixLowest = base + prefixLowestIndex;
130663 +    typedef size_t (*searchMax_f)(
130664 +                        ZSTD_matchState_t* ms,
130665 +                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
130667 +    /**
130668 +     * This table is indexed first by the four ZSTD_dictMode_e values, and then
130669 +     * by the two searchMethod_e values. NULLs are placed for configurations
130670 +     * that should never occur (extDict modes go to the other implementation
130671 +     * below and there is no DDSS for binary tree search yet).
130672 +     */
130673 +    const searchMax_f searchFuncs[4][2] = {
130674 +        {
130675 +            ZSTD_HcFindBestMatch_selectMLS,
130676 +            ZSTD_BtFindBestMatch_selectMLS
130677 +        },
130678 +        {
130679 +            NULL,
130680 +            NULL
130681 +        },
130682 +        {
130683 +            ZSTD_HcFindBestMatch_dictMatchState_selectMLS,
130684 +            ZSTD_BtFindBestMatch_dictMatchState_selectMLS
130685 +        },
130686 +        {
130687 +            ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS,
130688 +            NULL
130689 +        }
130690 +    };
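    /* Illustration (editorial note, not upstream code): ZSTD_dictMode_e
     * enumerates ZSTD_noDict=0, ZSTD_extDict=1, ZSTD_dictMatchState=2,
     * ZSTD_dedicatedDictSearch=3, so dictMode==ZSTD_dictMatchState with
     * searchMethod==search_binaryTree selects searchFuncs[2][1], i.e.
     * ZSTD_BtFindBestMatch_dictMatchState_selectMLS; the extDict row is
     * all NULL because those modes use the extDict parser further below. */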
130692 +    searchMax_f const searchMax = searchFuncs[dictMode][searchMethod == search_binaryTree];
130693 +    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
130695 +    const int isDMS = dictMode == ZSTD_dictMatchState;
130696 +    const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
130697 +    const int isDxS = isDMS || isDDS;
130698 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
130699 +    const U32 dictLowestIndex      = isDxS ? dms->window.dictLimit : 0;
130700 +    const BYTE* const dictBase     = isDxS ? dms->window.base : NULL;
130701 +    const BYTE* const dictLowest   = isDxS ? dictBase + dictLowestIndex : NULL;
130702 +    const BYTE* const dictEnd      = isDxS ? dms->window.nextSrc : NULL;
130703 +    const U32 dictIndexDelta       = isDxS ?
130704 +                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
130705 +                                     0;
130706 +    const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
130708 +    assert(searchMax != NULL);
130710 +    DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode);
130712 +    /* init */
130713 +    ip += (dictAndPrefixLength == 0);
130714 +    if (dictMode == ZSTD_noDict) {
130715 +        U32 const curr = (U32)(ip - base);
130716 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
130717 +        U32 const maxRep = curr - windowLow;
130718 +        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
130719 +        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
130720 +    }
130721 +    if (isDxS) {
130722 +        /* dictMatchState repCode checks don't currently handle repCode == 0
130723 +         * disabling. */
130724 +        assert(offset_1 <= dictAndPrefixLength);
130725 +        assert(offset_2 <= dictAndPrefixLength);
130726 +    }
130728 +    /* Match Loop */
130729 +#if defined(__x86_64__)
130730 +    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
130731 +     * code alignment is perturbed. To fix the instability, align the loop on a 32-byte boundary.
130732 +     */
130733 +    __asm__(".p2align 5");
130734 +#endif
130735 +    while (ip < ilimit) {
130736 +        size_t matchLength=0;
130737 +        size_t offset=0;
130738 +        const BYTE* start=ip+1;
130740 +        /* check repCode */
130741 +        if (isDxS) {
130742 +            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
130743 +            const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
130744 +                                && repIndex < prefixLowestIndex) ?
130745 +                                   dictBase + (repIndex - dictIndexDelta) :
130746 +                                   base + repIndex;
130747 +            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
130748 +                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
130749 +                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
130750 +                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
130751 +                if (depth==0) goto _storeSequence;
130752 +            }
130753 +        }
130754 +        if ( dictMode == ZSTD_noDict
130755 +          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
130756 +            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
130757 +            if (depth==0) goto _storeSequence;
130758 +        }
130760 +        /* first search (depth 0) */
130761 +        {   size_t offsetFound = 999999999;
130762 +            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
130763 +            if (ml2 > matchLength)
130764 +                matchLength = ml2, start = ip, offset=offsetFound;
130765 +        }
130767 +        if (matchLength < 4) {
130768 +            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
130769 +            continue;
130770 +        }
130772 +        /* let's try to find a better solution */
130773 +        if (depth>=1)
130774 +        while (ip<ilimit) {
130775 +            ip ++;
130776 +            if ( (dictMode == ZSTD_noDict)
130777 +              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
130778 +                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
130779 +                int const gain2 = (int)(mlRep * 3);
130780 +                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
130781 +                if ((mlRep >= 4) && (gain2 > gain1))
130782 +                    matchLength = mlRep, offset = 0, start = ip;
130783 +            }
130784 +            if (isDxS) {
130785 +                const U32 repIndex = (U32)(ip - base) - offset_1;
130786 +                const BYTE* repMatch = repIndex < prefixLowestIndex ?
130787 +                               dictBase + (repIndex - dictIndexDelta) :
130788 +                               base + repIndex;
130789 +                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
130790 +                    && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
130791 +                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
130792 +                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
130793 +                    int const gain2 = (int)(mlRep * 3);
130794 +                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
130795 +                    if ((mlRep >= 4) && (gain2 > gain1))
130796 +                        matchLength = mlRep, offset = 0, start = ip;
130797 +                }
130798 +            }
130799 +            {   size_t offset2=999999999;
130800 +                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
130801 +                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
130802 +                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
130803 +                if ((ml2 >= 4) && (gain2 > gain1)) {
130804 +                    matchLength = ml2, offset = offset2, start = ip;
130805 +                    continue;   /* search a better one */
130806 +            }   }
130808 +            /* let's find an even better one */
130809 +            if ((depth==2) && (ip<ilimit)) {
130810 +                ip ++;
130811 +                if ( (dictMode == ZSTD_noDict)
130812 +                  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
130813 +                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
130814 +                    int const gain2 = (int)(mlRep * 4);
130815 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
130816 +                    if ((mlRep >= 4) && (gain2 > gain1))
130817 +                        matchLength = mlRep, offset = 0, start = ip;
130818 +                }
130819 +                if (isDxS) {
130820 +                    const U32 repIndex = (U32)(ip - base) - offset_1;
130821 +                    const BYTE* repMatch = repIndex < prefixLowestIndex ?
130822 +                                   dictBase + (repIndex - dictIndexDelta) :
130823 +                                   base + repIndex;
130824 +                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
130825 +                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
130826 +                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
130827 +                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
130828 +                        int const gain2 = (int)(mlRep * 4);
130829 +                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
130830 +                        if ((mlRep >= 4) && (gain2 > gain1))
130831 +                            matchLength = mlRep, offset = 0, start = ip;
130832 +                    }
130833 +                }
130834 +                {   size_t offset2=999999999;
130835 +                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
130836 +                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
130837 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
130838 +                    if ((ml2 >= 4) && (gain2 > gain1)) {
130839 +                        matchLength = ml2, offset = offset2, start = ip;
130840 +                        continue;
130841 +            }   }   }
130842 +            break;  /* nothing found : store previous solution */
130843 +        }
130845 +        /* NOTE:
130846 +         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
130847 +         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
130848 +         * overflows the pointer, which is undefined behavior.
130849 +         */
130850 +        /* catch up */
130851 +        if (offset) {
130852 +            if (dictMode == ZSTD_noDict) {
130853 +                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
130854 +                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
130855 +                    { start--; matchLength++; }
130856 +            }
130857 +            if (isDxS) {
130858 +                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
130859 +                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
130860 +                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
130861 +                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
130862 +            }
130863 +            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
130864 +        }
130865 +        /* store sequence */
130866 +_storeSequence:
130867 +        {   size_t const litLength = start - anchor;
130868 +            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
130869 +            anchor = ip = start + matchLength;
130870 +        }
130872 +        /* check immediate repcode */
130873 +        if (isDxS) {
130874 +            while (ip <= ilimit) {
130875 +                U32 const current2 = (U32)(ip-base);
130876 +                U32 const repIndex = current2 - offset_2;
130877 +                const BYTE* repMatch = repIndex < prefixLowestIndex ?
130878 +                        dictBase - dictIndexDelta + repIndex :
130879 +                        base + repIndex;
130880 +                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
130881 +                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
130882 +                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
130883 +                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
130884 +                    offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
130885 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
130886 +                    ip += matchLength;
130887 +                    anchor = ip;
130888 +                    continue;
130889 +                }
130890 +                break;
130891 +            }
130892 +        }
130894 +        if (dictMode == ZSTD_noDict) {
130895 +            while ( ((ip <= ilimit) & (offset_2>0))
130896 +                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
130897 +                /* store sequence */
130898 +                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
130899 +                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
130900 +                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
130901 +                ip += matchLength;
130902 +                anchor = ip;
130903 +                continue;   /* faster when present ... (?) */
130904 +    }   }   }
130906 +    /* Save reps for next block */
130907 +    rep[0] = offset_1 ? offset_1 : savedOffset;
130908 +    rep[1] = offset_2 ? offset_2 : savedOffset;
130910 +    /* Return the last literals size */
130911 +    return (size_t)(iend - anchor);
130912 +}
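The gain1/gain2 comparisons in the loop above trade match length against offset cost: roughly four "points" per matched byte minus log2 of the offset, with a constant bias favoring the match already in hand (the later search must clearly win to displace it). A worked example with made-up values (standalone sketch, not part of the patch):

#include <stdio.h>

static unsigned highbit32(unsigned v)   /* floor(log2(v)), like ZSTD_highbit32 */
{
    unsigned n = 0;
    while (v >>= 1) n++;
    return n;
}

int main(void)
{
    unsigned matchLength = 6, offset  = 1024;   /* match currently in hand */
    unsigned ml2         = 7, offset2 = 64;     /* candidate found one byte later */
    int gain1 = (int)(matchLength*4 - highbit32(offset+1)  + 4);  /* 24-10+4 = 18 */
    int gain2 = (int)(ml2*4         - highbit32(offset2+1));      /* 28-6   = 22 */
    printf("gain1=%d gain2=%d -> %s\n", gain1, gain2,
           gain2 > gain1 ? "switch to new match" : "keep previous");
    return 0;
}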
130915 +size_t ZSTD_compressBlock_btlazy2(
130916 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130917 +        void const* src, size_t srcSize)
130918 +{
130919 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
130920 +}
130922 +size_t ZSTD_compressBlock_lazy2(
130923 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130924 +        void const* src, size_t srcSize)
130925 +{
130926 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
130927 +}
130929 +size_t ZSTD_compressBlock_lazy(
130930 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130931 +        void const* src, size_t srcSize)
130932 +{
130933 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
130934 +}
130936 +size_t ZSTD_compressBlock_greedy(
130937 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130938 +        void const* src, size_t srcSize)
130939 +{
130940 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
130941 +}
130943 +size_t ZSTD_compressBlock_btlazy2_dictMatchState(
130944 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130945 +        void const* src, size_t srcSize)
130946 +{
130947 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
130948 +}
130950 +size_t ZSTD_compressBlock_lazy2_dictMatchState(
130951 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130952 +        void const* src, size_t srcSize)
130953 +{
130954 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
130955 +}
130957 +size_t ZSTD_compressBlock_lazy_dictMatchState(
130958 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130959 +        void const* src, size_t srcSize)
130960 +{
130961 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
130962 +}
130964 +size_t ZSTD_compressBlock_greedy_dictMatchState(
130965 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130966 +        void const* src, size_t srcSize)
130967 +{
130968 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
130969 +}
130972 +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
130973 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130974 +        void const* src, size_t srcSize)
130975 +{
130976 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
130977 +}
130979 +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
130980 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130981 +        void const* src, size_t srcSize)
130982 +{
130983 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
130984 +}
130986 +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
130987 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
130988 +        void const* src, size_t srcSize)
130989 +{
130990 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
130991 +}
130994 +FORCE_INLINE_TEMPLATE
130995 +size_t ZSTD_compressBlock_lazy_extDict_generic(
130996 +                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
130997 +                        U32 rep[ZSTD_REP_NUM],
130998 +                        const void* src, size_t srcSize,
130999 +                        const searchMethod_e searchMethod, const U32 depth)
131000 +{
131001 +    const BYTE* const istart = (const BYTE*)src;
131002 +    const BYTE* ip = istart;
131003 +    const BYTE* anchor = istart;
131004 +    const BYTE* const iend = istart + srcSize;
131005 +    const BYTE* const ilimit = iend - 8;
131006 +    const BYTE* const base = ms->window.base;
131007 +    const U32 dictLimit = ms->window.dictLimit;
131008 +    const BYTE* const prefixStart = base + dictLimit;
131009 +    const BYTE* const dictBase = ms->window.dictBase;
131010 +    const BYTE* const dictEnd  = dictBase + dictLimit;
131011 +    const BYTE* const dictStart  = dictBase + ms->window.lowLimit;
131012 +    const U32 windowLog = ms->cParams.windowLog;
131014 +    typedef size_t (*searchMax_f)(
131015 +                        ZSTD_matchState_t* ms,
131016 +                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
131017 +    searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
131019 +    U32 offset_1 = rep[0], offset_2 = rep[1];
131021 +    DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic");
131023 +    /* init */
131024 +    ip += (ip == prefixStart);
131026 +    /* Match Loop */
131027 +#if defined(__x86_64__)
131028 +    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
131029 +     * code alignment is perturbed. To fix the instability, align the loop on a 32-byte boundary.
131030 +     */
131031 +    __asm__(".p2align 5");
131032 +#endif
131033 +    while (ip < ilimit) {
131034 +        size_t matchLength=0;
131035 +        size_t offset=0;
131036 +        const BYTE* start=ip+1;
131037 +        U32 curr = (U32)(ip-base);
131039 +        /* check repCode */
131040 +        {   const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
131041 +            const U32 repIndex = (U32)(curr+1 - offset_1);
131042 +            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
131043 +            const BYTE* const repMatch = repBase + repIndex;
131044 +            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))   /* intentional overflow */
131045 +            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
131046 +                /* repcode detected we should take it */
131047 +                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
131048 +                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
131049 +                if (depth==0) goto _storeSequence;
131050 +        }   }
131052 +        /* first search (depth 0) */
131053 +        {   size_t offsetFound = 999999999;
131054 +            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
131055 +            if (ml2 > matchLength)
131056 +                matchLength = ml2, start = ip, offset=offsetFound;
131057 +        }
131059 +        if (matchLength < 4) {
131060 +            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
131061 +            continue;
131062 +        }
131064 +        /* let's try to find a better solution */
131065 +        if (depth>=1)
131066 +        while (ip<ilimit) {
131067 +            ip ++;
131068 +            curr++;
131069 +            /* check repCode */
131070 +            if (offset) {
131071 +                const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
131072 +                const U32 repIndex = (U32)(curr - offset_1);
131073 +                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
131074 +                const BYTE* const repMatch = repBase + repIndex;
131075 +                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
131076 +                if (MEM_read32(ip) == MEM_read32(repMatch)) {
131077 +                    /* repcode detected */
131078 +                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
131079 +                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
131080 +                    int const gain2 = (int)(repLength * 3);
131081 +                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
131082 +                    if ((repLength >= 4) && (gain2 > gain1))
131083 +                        matchLength = repLength, offset = 0, start = ip;
131084 +            }   }
131086 +            /* search match, depth 1 */
131087 +            {   size_t offset2=999999999;
131088 +                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
131089 +                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
131090 +                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
131091 +                if ((ml2 >= 4) && (gain2 > gain1)) {
131092 +                    matchLength = ml2, offset = offset2, start = ip;
131093 +                    continue;   /* search a better one */
131094 +            }   }
131096 +            /* let's find an even better one */
131097 +            if ((depth==2) && (ip<ilimit)) {
131098 +                ip ++;
131099 +                curr++;
131100 +                /* check repCode */
131101 +                if (offset) {
131102 +                    const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
131103 +                    const U32 repIndex = (U32)(curr - offset_1);
131104 +                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
131105 +                    const BYTE* const repMatch = repBase + repIndex;
131106 +                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
131107 +                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
131108 +                        /* repcode detected */
131109 +                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
131110 +                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
131111 +                        int const gain2 = (int)(repLength * 4);
131112 +                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
131113 +                        if ((repLength >= 4) && (gain2 > gain1))
131114 +                            matchLength = repLength, offset = 0, start = ip;
131115 +                }   }
131117 +                /* search match, depth 2 */
131118 +                {   size_t offset2=999999999;
131119 +                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
131120 +                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
131121 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
131122 +                    if ((ml2 >= 4) && (gain2 > gain1)) {
131123 +                        matchLength = ml2, offset = offset2, start = ip;
131124 +                        continue;
131125 +            }   }   }
131126 +            break;  /* nothing found : store previous solution */
131127 +        }
131129 +        /* catch up */
131130 +        if (offset) {
131131 +            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
131132 +            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
131133 +            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
131134 +            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
131135 +            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
131136 +        }
131138 +        /* store sequence */
131139 +_storeSequence:
131140 +        {   size_t const litLength = start - anchor;
131141 +            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
131142 +            anchor = ip = start + matchLength;
131143 +        }
131145 +        /* check immediate repcode */
131146 +        while (ip <= ilimit) {
131147 +            const U32 repCurrent = (U32)(ip-base);
131148 +            const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
131149 +            const U32 repIndex = repCurrent - offset_2;
131150 +            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
131151 +            const BYTE* const repMatch = repBase + repIndex;
131152 +            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
131153 +            if (MEM_read32(ip) == MEM_read32(repMatch)) {
131154 +                /* repcode detected we should take it */
131155 +                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
131156 +                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
131157 +                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
131158 +                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
131159 +                ip += matchLength;
131160 +                anchor = ip;
131161 +                continue;   /* faster when present ... (?) */
131162 +            }
131163 +            break;
131164 +    }   }
131166 +    /* Save reps for next block */
131167 +    rep[0] = offset_1;
131168 +    rep[1] = offset_2;
131170 +    /* Return the last literals size */
131171 +    return (size_t)(iend - anchor);
131172 +}
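The recurring `(U32)((dictLimit-1) - repIndex) >= 3` guard deserves a note: it accepts a repIndex only if the 4-byte MEM_read32 probe cannot run off the end of the extDict segment, while indices at or above dictLimit pass via deliberate unsigned wrap-around. A small sketch with an assumed dictLimit of 100:

#include <assert.h>
#include <stdint.h>

static int rep_is_safe(uint32_t dictLimit, uint32_t repIndex)
{
    /* true when repIndex <= dictLimit - 4 (4 readable bytes in the dict
     * segment) or repIndex >= dictLimit (subtraction wraps to a huge value,
     * i.e. the match lies in the prefix) */
    return (uint32_t)((dictLimit - 1) - repIndex) >= 3;
}

int main(void)
{
    uint32_t const dictLimit = 100;
    assert( rep_is_safe(dictLimit, 96));   /* 4 bytes left before dictEnd */
    assert(!rep_is_safe(dictLimit, 97));   /* MEM_read32 would overrun */
    assert(!rep_is_safe(dictLimit, 99));
    assert( rep_is_safe(dictLimit, 100));  /* wraps: match is in the prefix */
    return 0;
}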
131175 +size_t ZSTD_compressBlock_greedy_extDict(
131176 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131177 +        void const* src, size_t srcSize)
131178 +{
131179 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
131180 +}
131182 +size_t ZSTD_compressBlock_lazy_extDict(
131183 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131184 +        void const* src, size_t srcSize)
131186 +{
131187 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
131188 +}
131190 +size_t ZSTD_compressBlock_lazy2_extDict(
131191 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131192 +        void const* src, size_t srcSize)
131194 +{
131195 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
131196 +}
131198 +size_t ZSTD_compressBlock_btlazy2_extDict(
131199 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131200 +        void const* src, size_t srcSize)
131203 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
131205 diff --git a/lib/zstd/compress/zstd_lazy.h b/lib/zstd/compress/zstd_lazy.h
131206 new file mode 100644
131207 index 000000000000..1fb7621e6a88
131208 --- /dev/null
131209 +++ b/lib/zstd/compress/zstd_lazy.h
131210 @@ -0,0 +1,81 @@
131212 + * Copyright (c) Yann Collet, Facebook, Inc.
131213 + * All rights reserved.
131215 + * This source code is licensed under both the BSD-style license (found in the
131216 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
131217 + * in the COPYING file in the root directory of this source tree).
131218 + * You may select, at your option, one of the above-listed licenses.
131219 + */
131221 +#ifndef ZSTD_LAZY_H
131222 +#define ZSTD_LAZY_H
131225 +#include "zstd_compress_internal.h"
131228 + * Dedicated Dictionary Search Structure bucket log. In the
131229 + * ZSTD_dedicatedDictSearch mode, the hashTable has
131230 + * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just
131231 + * one.
131232 + */
131233 +#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
131235 +U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
131237 +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
131239 +void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
131241 +size_t ZSTD_compressBlock_btlazy2(
131242 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131243 +        void const* src, size_t srcSize);
131244 +size_t ZSTD_compressBlock_lazy2(
131245 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131246 +        void const* src, size_t srcSize);
131247 +size_t ZSTD_compressBlock_lazy(
131248 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131249 +        void const* src, size_t srcSize);
131250 +size_t ZSTD_compressBlock_greedy(
131251 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131252 +        void const* src, size_t srcSize);
131254 +size_t ZSTD_compressBlock_btlazy2_dictMatchState(
131255 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131256 +        void const* src, size_t srcSize);
131257 +size_t ZSTD_compressBlock_lazy2_dictMatchState(
131258 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131259 +        void const* src, size_t srcSize);
131260 +size_t ZSTD_compressBlock_lazy_dictMatchState(
131261 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131262 +        void const* src, size_t srcSize);
131263 +size_t ZSTD_compressBlock_greedy_dictMatchState(
131264 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131265 +        void const* src, size_t srcSize);
131267 +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
131268 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131269 +        void const* src, size_t srcSize);
131270 +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
131271 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131272 +        void const* src, size_t srcSize);
131273 +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
131274 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131275 +        void const* src, size_t srcSize);
131277 +size_t ZSTD_compressBlock_greedy_extDict(
131278 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131279 +        void const* src, size_t srcSize);
131280 +size_t ZSTD_compressBlock_lazy_extDict(
131281 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131282 +        void const* src, size_t srcSize);
131283 +size_t ZSTD_compressBlock_lazy2_extDict(
131284 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131285 +        void const* src, size_t srcSize);
131286 +size_t ZSTD_compressBlock_btlazy2_extDict(
131287 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131288 +        void const* src, size_t srcSize);
131291 +#endif /* ZSTD_LAZY_H */
131292 diff --git a/lib/zstd/compress/zstd_ldm.c b/lib/zstd/compress/zstd_ldm.c
131293 new file mode 100644
131294 index 000000000000..084fd24fdca8
131295 --- /dev/null
131296 +++ b/lib/zstd/compress/zstd_ldm.c
131297 @@ -0,0 +1,686 @@
131299 + * Copyright (c) Yann Collet, Facebook, Inc.
131300 + * All rights reserved.
131302 + * This source code is licensed under both the BSD-style license (found in the
131303 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
131304 + * in the COPYING file in the root directory of this source tree).
131305 + * You may select, at your option, one of the above-listed licenses.
131306 + */
131308 +#include "zstd_ldm.h"
131310 +#include "../common/debug.h"
131311 +#include <linux/xxhash.h>
131312 +#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
131313 +#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
131314 +#include "zstd_ldm_geartab.h"
131316 +#define LDM_BUCKET_SIZE_LOG 3
131317 +#define LDM_MIN_MATCH_LENGTH 64
131318 +#define LDM_HASH_RLOG 7
131320 +typedef struct {
131321 +    U64 rolling;
131322 +    U64 stopMask;
131323 +} ldmRollingHashState_t;
131325 +/** ZSTD_ldm_gear_init():
131327 + * Initializes the rolling hash state such that it will honor the
131328 + * settings in params. */
131329 +static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
131331 +    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
131332 +    unsigned hashRateLog = params->hashRateLog;
131334 +    state->rolling = ~(U32)0;
131336 +    /* The choice of the splitting criterion is subject to two conditions:
131337 +     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
131338 +     *   2. ideally, it has to depend on a window of minMatchLength bytes.
131339 +     *
131340 +     * In the gear hash algorithm, bit n depends on the last n bytes;
131341 +     * so in order to obtain a good quality splitting criterion it is
131342 +     * preferable to use bits with high weight.
131343 +     *
131344 +     * To match condition 1 we use a mask with hashRateLog bits set
131345 +     * and, because of the previous remark, we make sure these bits
131346 +     * have the highest possible weight while still respecting
131347 +     * condition 2.
131348 +     */
131349 +    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
131350 +        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
131351 +    } else {
131352 +        /* In this degenerate case we simply honor the hash rate. */
131353 +        state->stopMask = ((U64)1 << hashRateLog) - 1;
131354 +    }
131357 +/** ZSTD_ldm_gear_feed():
131359 + * Registers in the splits array all the split points found in the first
131360 + * size bytes following the data pointer. This function terminates when
131361 + * either all the data has been processed or LDM_BATCH_SIZE splits are
131362 + * present in the splits array.
131364 + * Precondition: The splits array must not be full.
131365 + * Returns: The number of bytes processed. */
131366 +static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
131367 +                                 BYTE const* data, size_t size,
131368 +                                 size_t* splits, unsigned* numSplits)
131370 +    size_t n;
131371 +    U64 hash, mask;
131373 +    hash = state->rolling;
131374 +    mask = state->stopMask;
131375 +    n = 0;
131377 +#define GEAR_ITER_ONCE() do { \
131378 +        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
131379 +        n += 1; \
131380 +        if (UNLIKELY((hash & mask) == 0)) { \
131381 +            splits[*numSplits] = n; \
131382 +            *numSplits += 1; \
131383 +            if (*numSplits == LDM_BATCH_SIZE) \
131384 +                goto done; \
131385 +        } \
131386 +    } while (0)
131388 +    while (n + 3 < size) {
131389 +        GEAR_ITER_ONCE();
131390 +        GEAR_ITER_ONCE();
131391 +        GEAR_ITER_ONCE();
131392 +        GEAR_ITER_ONCE();
131393 +    }
131394 +    while (n < size) {
131395 +        GEAR_ITER_ONCE();
131396 +    }
131398 +#undef GEAR_ITER_ONCE
131400 +done:
131401 +    state->rolling = hash;
131402 +    return n;
131405 +void ZSTD_ldm_adjustParameters(ldmParams_t* params,
131406 +                               ZSTD_compressionParameters const* cParams)
131408 +    params->windowLog = cParams->windowLog;
131409 +    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
131410 +    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
131411 +    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
131412 +    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
131413 +    if (params->hashLog == 0) {
131414 +        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
131415 +        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
131416 +    }
131417 +    if (params->hashRateLog == 0) {
131418 +        params->hashRateLog = params->windowLog < params->hashLog
131419 +                                   ? 0
131420 +                                   : params->windowLog - params->hashLog;
131421 +    }
131422 +    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
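+/* Worked example (illustrative): for a windowLog of 27 with all LDM fields
+ * left at 0, the defaults above give bucketSizeLog = 3, minMatchLength = 64,
+ * hashLog = MAX(ZSTD_HASHLOG_MIN, 27 - 7) = 20 and hashRateLog = 27 - 20 = 7,
+ * i.e. a split point is sampled roughly every 128 bytes of input. */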
131425 +size_t ZSTD_ldm_getTableSize(ldmParams_t params)
131427 +    size_t const ldmHSize = ((size_t)1) << params.hashLog;
131428 +    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
131429 +    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
131430 +    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
131431 +                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
131432 +    return params.enableLdm ? totalSize : 0;
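+/* Size sketch (illustrative, assuming ldmEntry_t is two U32 fields): with
+ * hashLog = 20 and bucketSizeLog = 3, the hash table needs 2^20 * 8 bytes
+ * (8 MiB) plus 2^(20-3) = 128 KiB of one-byte bucket offsets, before the
+ * per-allocation rounding done by ZSTD_cwksp_alloc_size(). */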
131435 +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
131437 +    return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
131440 +/** ZSTD_ldm_getBucket() :
131441 + *  Returns a pointer to the start of the bucket associated with hash. */
131442 +static ldmEntry_t* ZSTD_ldm_getBucket(
131443 +        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
131445 +    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
131448 +/** ZSTD_ldm_insertEntry() :
131449 + *  Insert the entry with corresponding hash into the hash table */
131450 +static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
131451 +                                 size_t const hash, const ldmEntry_t entry,
131452 +                                 ldmParams_t const ldmParams)
131454 +    BYTE* const pOffset = ldmState->bucketOffsets + hash;
131455 +    unsigned const offset = *pOffset;
131457 +    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
131458 +    *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
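+/* Illustrative note: with bucketSizeLog = 3, hash h owns the 8 consecutive
+ * slots hashTable[8*h .. 8*h + 7]. bucketOffsets[h] is a rotating write
+ * cursor, wrapped by the mask (1u << 3) - 1 == 7, so the 9th insertion into
+ * a bucket overwrites its oldest entry -- a small per-bucket FIFO. */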
131462 +/** ZSTD_ldm_countBackwardsMatch() :
131463 + *  Returns the number of bytes that match backwards before pIn and pMatch.
131465 + *  We count only bytes where pMatch >= pMatchBase and pIn >= pAnchor.
131466 +static size_t ZSTD_ldm_countBackwardsMatch(
131467 +            const BYTE* pIn, const BYTE* pAnchor,
131468 +            const BYTE* pMatch, const BYTE* pMatchBase)
131470 +    size_t matchLength = 0;
131471 +    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
131472 +        pIn--;
131473 +        pMatch--;
131474 +        matchLength++;
131475 +    }
131476 +    return matchLength;
131479 +/** ZSTD_ldm_countBackwardsMatch_2segments() :
131480 + *  Returns the number of bytes that match backwards from pMatch,
131481 + *  even with the backwards match spanning 2 different segments.
131483 + *  On reaching `pMatchBase`, start counting from `pExtDictEnd` */
131484 +static size_t ZSTD_ldm_countBackwardsMatch_2segments(
131485 +                    const BYTE* pIn, const BYTE* pAnchor,
131486 +                    const BYTE* pMatch, const BYTE* pMatchBase,
131487 +                    const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
131489 +    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
131490 +    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
131491 +        /* If backwards match is entirely in the extDict or prefix, immediately return */
131492 +        return matchLength;
131493 +    }
131494 +    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
131495 +    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
131496 +    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
131497 +    return matchLength;
131500 +/** ZSTD_ldm_fillFastTables() :
131502 + *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
131503 + *  This is similar to ZSTD_loadDictionaryContent.
131505 + *  The tables for the other strategies are filled within their
131506 + *  block compressors. */
131507 +static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
131508 +                                      void const* end)
131510 +    const BYTE* const iend = (const BYTE*)end;
131512 +    switch(ms->cParams.strategy)
131513 +    {
131514 +    case ZSTD_fast:
131515 +        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
131516 +        break;
131518 +    case ZSTD_dfast:
131519 +        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
131520 +        break;
131522 +    case ZSTD_greedy:
131523 +    case ZSTD_lazy:
131524 +    case ZSTD_lazy2:
131525 +    case ZSTD_btlazy2:
131526 +    case ZSTD_btopt:
131527 +    case ZSTD_btultra:
131528 +    case ZSTD_btultra2:
131529 +        break;
131530 +    default:
131531 +        assert(0);  /* not possible : not a valid strategy id */
131532 +    }
131534 +    return 0;
131537 +void ZSTD_ldm_fillHashTable(
131538 +            ldmState_t* ldmState, const BYTE* ip,
131539 +            const BYTE* iend, ldmParams_t const* params)
131541 +    U32 const minMatchLength = params->minMatchLength;
131542 +    U32 const hBits = params->hashLog - params->bucketSizeLog;
131543 +    BYTE const* const base = ldmState->window.base;
131544 +    BYTE const* const istart = ip;
131545 +    ldmRollingHashState_t hashState;
131546 +    size_t* const splits = ldmState->splitIndices;
131547 +    unsigned numSplits;
131549 +    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
131551 +    ZSTD_ldm_gear_init(&hashState, params);
131552 +    while (ip < iend) {
131553 +        size_t hashed;
131554 +        unsigned n;
131556 +        numSplits = 0;
131557 +        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
131559 +        for (n = 0; n < numSplits; n++) {
131560 +            if (ip + splits[n] >= istart + minMatchLength) {
131561 +                BYTE const* const split = ip + splits[n] - minMatchLength;
131562 +                U64 const xxhash = xxh64(split, minMatchLength, 0);
131563 +                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
131564 +                ldmEntry_t entry;
131566 +                entry.offset = (U32)(split - base);
131567 +                entry.checksum = (U32)(xxhash >> 32);
131568 +                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
131569 +            }
131570 +        }
131572 +        ip += hashed;
131573 +    }
131577 +/** ZSTD_ldm_limitTableUpdate() :
131579 + *  Sets ms->nextToUpdate to a position closer to anchor
131580 + *  if it is far away
131581 + *  (after a long match, only update tables a limited amount). */
131582 +static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
131584 +    U32 const curr = (U32)(anchor - ms->window.base);
131585 +    if (curr > ms->nextToUpdate + 1024) {
131586 +        ms->nextToUpdate =
131587 +            curr - MIN(512, curr - ms->nextToUpdate - 1024);
131588 +    }
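+/* Worked example (illustrative): if anchor is at index 10000 and
+ * ms->nextToUpdate is 0, then curr - ms->nextToUpdate - 1024 = 8976 and
+ * nextToUpdate becomes 10000 - MIN(512, 8976) = 9488, so at most 512 bytes
+ * just before the anchor get re-indexed after a long match skip. */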
131591 +static size_t ZSTD_ldm_generateSequences_internal(
131592 +        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
131593 +        ldmParams_t const* params, void const* src, size_t srcSize)
131595 +    /* LDM parameters */
131596 +    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
131597 +    U32 const minMatchLength = params->minMatchLength;
131598 +    U32 const entsPerBucket = 1U << params->bucketSizeLog;
131599 +    U32 const hBits = params->hashLog - params->bucketSizeLog;
131600 +    /* Prefix and extDict parameters */
131601 +    U32 const dictLimit = ldmState->window.dictLimit;
131602 +    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
131603 +    BYTE const* const base = ldmState->window.base;
131604 +    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
131605 +    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
131606 +    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
131607 +    BYTE const* const lowPrefixPtr = base + dictLimit;
131608 +    /* Input bounds */
131609 +    BYTE const* const istart = (BYTE const*)src;
131610 +    BYTE const* const iend = istart + srcSize;
131611 +    BYTE const* const ilimit = iend - HASH_READ_SIZE;
131612 +    /* Input positions */
131613 +    BYTE const* anchor = istart;
131614 +    BYTE const* ip = istart;
131615 +    /* Rolling hash state */
131616 +    ldmRollingHashState_t hashState;
131617 +    /* Arrays for staged-processing */
131618 +    size_t* const splits = ldmState->splitIndices;
131619 +    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
131620 +    unsigned numSplits;
131622 +    if (srcSize < minMatchLength)
131623 +        return iend - anchor;
131625 +    /* Initialize the rolling hash state with the first minMatchLength bytes */
131626 +    ZSTD_ldm_gear_init(&hashState, params);
131627 +    {
131628 +        size_t n = 0;
131630 +        while (n < minMatchLength) {
131631 +            numSplits = 0;
131632 +            n += ZSTD_ldm_gear_feed(&hashState, ip + n, minMatchLength - n,
131633 +                                    splits, &numSplits);
131634 +        }
131635 +        ip += minMatchLength;
131636 +    }
131638 +    while (ip < ilimit) {
131639 +        size_t hashed;
131640 +        unsigned n;
131642 +        numSplits = 0;
131643 +        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
131644 +                                    splits, &numSplits);
131646 +        for (n = 0; n < numSplits; n++) {
131647 +            BYTE const* const split = ip + splits[n] - minMatchLength;
131648 +            U64 const xxhash = xxh64(split, minMatchLength, 0);
131649 +            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
131651 +            candidates[n].split = split;
131652 +            candidates[n].hash = hash;
131653 +            candidates[n].checksum = (U32)(xxhash >> 32);
131654 +            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
131655 +            PREFETCH_L1(candidates[n].bucket);
131656 +        }
131658 +        for (n = 0; n < numSplits; n++) {
131659 +            size_t forwardMatchLength = 0, backwardMatchLength = 0,
131660 +                   bestMatchLength = 0, mLength;
131661 +            BYTE const* const split = candidates[n].split;
131662 +            U32 const checksum = candidates[n].checksum;
131663 +            U32 const hash = candidates[n].hash;
131664 +            ldmEntry_t* const bucket = candidates[n].bucket;
131665 +            ldmEntry_t const* cur;
131666 +            ldmEntry_t const* bestEntry = NULL;
131667 +            ldmEntry_t newEntry;
131669 +            newEntry.offset = (U32)(split - base);
131670 +            newEntry.checksum = checksum;
131672 +            /* If a split point would generate a sequence overlapping with
131673 +             * the previous one, we merely register it in the hash table and
131674 +             * move on */
131675 +            if (split < anchor) {
131676 +                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
131677 +                continue;
131678 +            }
131680 +            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
131681 +                size_t curForwardMatchLength, curBackwardMatchLength,
131682 +                       curTotalMatchLength;
131683 +                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
131684 +                    continue;
131685 +                }
131686 +                if (extDict) {
131687 +                    BYTE const* const curMatchBase =
131688 +                        cur->offset < dictLimit ? dictBase : base;
131689 +                    BYTE const* const pMatch = curMatchBase + cur->offset;
131690 +                    BYTE const* const matchEnd =
131691 +                        cur->offset < dictLimit ? dictEnd : iend;
131692 +                    BYTE const* const lowMatchPtr =
131693 +                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
131694 +                    curForwardMatchLength =
131695 +                        ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
131696 +                    if (curForwardMatchLength < minMatchLength) {
131697 +                        continue;
131698 +                    }
131699 +                    curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
131700 +                            split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
131701 +                } else { /* !extDict */
131702 +                    BYTE const* const pMatch = base + cur->offset;
131703 +                    curForwardMatchLength = ZSTD_count(split, pMatch, iend);
131704 +                    if (curForwardMatchLength < minMatchLength) {
131705 +                        continue;
131706 +                    }
131707 +                    curBackwardMatchLength =
131708 +                        ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
131709 +                }
131710 +                curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;
131712 +                if (curTotalMatchLength > bestMatchLength) {
131713 +                    bestMatchLength = curTotalMatchLength;
131714 +                    forwardMatchLength = curForwardMatchLength;
131715 +                    backwardMatchLength = curBackwardMatchLength;
131716 +                    bestEntry = cur;
131717 +                }
131718 +            }
131720 +            /* No match found -- insert an entry into the hash table
131721 +             * and process the next candidate match */
131722 +            if (bestEntry == NULL) {
131723 +                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
131724 +                continue;
131725 +            }
131727 +            /* Match found */
131728 +            mLength = forwardMatchLength + backwardMatchLength;
131729 +            {
131730 +                U32 const offset = (U32)(split - base) - bestEntry->offset;
131731 +                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
131733 +                /* Out of sequence storage */
131734 +                if (rawSeqStore->size == rawSeqStore->capacity)
131735 +                    return ERROR(dstSize_tooSmall);
131736 +                seq->litLength = (U32)(split - backwardMatchLength - anchor);
131737 +                seq->matchLength = (U32)mLength;
131738 +                seq->offset = offset;
131739 +                rawSeqStore->size++;
131740 +            }
131742 +            /* Insert the current entry into the hash table --- it must be
131743 +             * done after the previous block to avoid clobbering bestEntry */
131744 +            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
131746 +            anchor = split + forwardMatchLength;
131747 +        }
131749 +        ip += hashed;
131750 +    }
131752 +    return iend - anchor;
131755 +/*! ZSTD_ldm_reduceTable() :
131756 + *  reduce table indexes by `reducerValue` */
131757 +static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
131758 +                                 U32 const reducerValue)
131760 +    U32 u;
131761 +    for (u = 0; u < size; u++) {
131762 +        if (table[u].offset < reducerValue) table[u].offset = 0;
131763 +        else table[u].offset -= reducerValue;
131764 +    }
131767 +size_t ZSTD_ldm_generateSequences(
131768 +        ldmState_t* ldmState, rawSeqStore_t* sequences,
131769 +        ldmParams_t const* params, void const* src, size_t srcSize)
131771 +    U32 const maxDist = 1U << params->windowLog;
131772 +    BYTE const* const istart = (BYTE const*)src;
131773 +    BYTE const* const iend = istart + srcSize;
131774 +    size_t const kMaxChunkSize = 1 << 20;
131775 +    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
131776 +    size_t chunk;
131777 +    size_t leftoverSize = 0;
131779 +    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
131780 +    /* Check that ZSTD_window_update() has been called for this chunk prior
131781 +     * to passing it to this function.
131782 +     */
131783 +    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
131784 +    /* The input could be very large (in zstdmt), so it must be broken up into
131785 +     * chunks to enforce the maximum distance and handle overflow correction.
131786 +     */
131787 +    assert(sequences->pos <= sequences->size);
131788 +    assert(sequences->size <= sequences->capacity);
131789 +    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
131790 +        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
131791 +        size_t const remaining = (size_t)(iend - chunkStart);
131792 +        BYTE const *const chunkEnd =
131793 +            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
131794 +        size_t const chunkSize = chunkEnd - chunkStart;
131795 +        size_t newLeftoverSize;
131796 +        size_t const prevSize = sequences->size;
131798 +        assert(chunkStart < iend);
131799 +        /* 1. Perform overflow correction if necessary. */
131800 +        if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
131801 +            U32 const ldmHSize = 1U << params->hashLog;
131802 +            U32 const correction = ZSTD_window_correctOverflow(
131803 +                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
131804 +            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
131805 +            /* invalidate dictionaries on overflow correction */
131806 +            ldmState->loadedDictEnd = 0;
131807 +        }
131808 +        /* 2. We enforce the maximum offset allowed.
131809 +         *
131810 +         * kMaxChunkSize should be small enough that we don't lose too much of
131811 +         * the window through early invalidation.
131812 +         * TODO: * Test the chunk size.
131813 +         *       * Try invalidation after the sequence generation and test
131814 +         *         the offset against maxDist directly.
131815 +         *
131816 +         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
131817 +         * that any offset used is valid at the END of the sequence, since it may
131818 +         * be split into two sequences. This condition holds when using
131819 +         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
131820 +         * against maxDist directly, we'll have to carefully handle that case.
131821 +         */
131822 +        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
131823 +        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
131824 +        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
131825 +            ldmState, sequences, params, chunkStart, chunkSize);
131826 +        if (ZSTD_isError(newLeftoverSize))
131827 +            return newLeftoverSize;
131828 +        /* 4. We add the leftover literals from previous iterations to the first
131829 +         *    newly generated sequence, or add the `newLeftoverSize` if none are
131830 +         *    generated.
131831 +         */
131832 +        /* Prepend the leftover literals from the last call */
131833 +        if (prevSize < sequences->size) {
131834 +            sequences->seq[prevSize].litLength += (U32)leftoverSize;
131835 +            leftoverSize = newLeftoverSize;
131836 +        } else {
131837 +            assert(newLeftoverSize == chunkSize);
131838 +            leftoverSize += chunkSize;
131839 +        }
131840 +    }
131841 +    return 0;
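+/* Chunking arithmetic (illustrative): with kMaxChunkSize = 1 MiB, a 2.5 MiB
+ * input gives nbChunks = 2 + 1 = 3 (two full chunks plus the 0.5 MiB tail);
+ * leftover literals from one chunk are prepended to the first sequence
+ * generated in the next. */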
131844 +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
131845 +    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
131846 +        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
131847 +        if (srcSize <= seq->litLength) {
131848 +            /* Skip past srcSize literals */
131849 +            seq->litLength -= (U32)srcSize;
131850 +            return;
131851 +        }
131852 +        srcSize -= seq->litLength;
131853 +        seq->litLength = 0;
131854 +        if (srcSize < seq->matchLength) {
131855 +            /* Skip past the first srcSize of the match */
131856 +            seq->matchLength -= (U32)srcSize;
131857 +            if (seq->matchLength < minMatch) {
131858 +                /* The match is too short, omit it */
131859 +                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
131860 +                    seq[1].litLength += seq[0].matchLength;
131861 +                }
131862 +                rawSeqStore->pos++;
131863 +            }
131864 +            return;
131865 +        }
131866 +        srcSize -= seq->matchLength;
131867 +        seq->matchLength = 0;
131868 +        rawSeqStore->pos++;
131869 +    }
131873 + * If the sequence length is longer than remaining, then the sequence is split
131874 + * between this block and the next.
131876 + * Returns the current sequence to handle, or if the rest of the block should
131877 + * be literals, it returns a sequence with offset == 0.
131878 + */
131879 +static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
131880 +                                 U32 const remaining, U32 const minMatch)
131882 +    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
131883 +    assert(sequence.offset > 0);
131884 +    /* Likely: No partial sequence */
131885 +    if (remaining >= sequence.litLength + sequence.matchLength) {
131886 +        rawSeqStore->pos++;
131887 +        return sequence;
131888 +    }
131889 +    /* Cut the sequence short (offset == 0 ==> rest is literals). */
131890 +    if (remaining <= sequence.litLength) {
131891 +        sequence.offset = 0;
131892 +    } else if (remaining < sequence.litLength + sequence.matchLength) {
131893 +        sequence.matchLength = remaining - sequence.litLength;
131894 +        if (sequence.matchLength < minMatch) {
131895 +            sequence.offset = 0;
131896 +        }
131897 +    }
131898 +    /* Skip past `remaining` bytes for the future sequences. */
131899 +    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
131900 +    return sequence;
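+/* Worked example (illustrative): with remaining = 50 and a stored sequence
+ * {litLength = 10, matchLength = 100}, the code above returns a sequence
+ * with matchLength = 50 - 10 = 40 for this block (kept, since 40 exceeds any
+ * supported minMatch), and ZSTD_ldm_skipSequences() leaves the remaining
+ * 100 - 40 = 60 match bytes in the stored copy for the next block. */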
131903 +void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
131904 +    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
131905 +    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
131906 +        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
131907 +        if (currPos >= currSeq.litLength + currSeq.matchLength) {
131908 +            currPos -= currSeq.litLength + currSeq.matchLength;
131909 +            rawSeqStore->pos++;
131910 +        } else {
131911 +            rawSeqStore->posInSequence = currPos;
131912 +            break;
131913 +        }
131914 +    }
131915 +    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
131916 +        rawSeqStore->posInSequence = 0;
131917 +    }
131920 +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
131921 +    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
131922 +    void const* src, size_t srcSize)
131924 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
131925 +    unsigned const minMatch = cParams->minMatch;
131926 +    ZSTD_blockCompressor const blockCompressor =
131927 +        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
131928 +    /* Input bounds */
131929 +    BYTE const* const istart = (BYTE const*)src;
131930 +    BYTE const* const iend = istart + srcSize;
131931 +    /* Input positions */
131932 +    BYTE const* ip = istart;
131934 +    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
131935 +    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
131936 +    if (cParams->strategy >= ZSTD_btopt) {
131937 +        size_t lastLLSize;
131938 +        ms->ldmSeqStore = rawSeqStore;
131939 +        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
131940 +        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
131941 +        return lastLLSize;
131942 +    }
131944 +    assert(rawSeqStore->pos <= rawSeqStore->size);
131945 +    assert(rawSeqStore->size <= rawSeqStore->capacity);
131946 +    /* Loop through each sequence and apply the block compressor to the literals */
131947 +    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
131948 +        /* maybeSplitSequence updates rawSeqStore->pos */
131949 +        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
131950 +                                                   (U32)(iend - ip), minMatch);
131951 +        int i;
131952 +        /* End signal */
131953 +        if (sequence.offset == 0)
131954 +            break;
131956 +        assert(ip + sequence.litLength + sequence.matchLength <= iend);
131958 +        /* Fill tables for block compressor */
131959 +        ZSTD_ldm_limitTableUpdate(ms, ip);
131960 +        ZSTD_ldm_fillFastTables(ms, ip);
131961 +        /* Run the block compressor */
131962 +        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
131963 +        {
131964 +            size_t const newLitLength =
131965 +                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
131966 +            ip += sequence.litLength;
131967 +            /* Update the repcodes */
131968 +            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
131969 +                rep[i] = rep[i-1];
131970 +            rep[0] = sequence.offset;
131971 +            /* Store the sequence */
131972 +            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
131973 +                          sequence.offset + ZSTD_REP_MOVE,
131974 +                          sequence.matchLength - MINMATCH);
131975 +            ip += sequence.matchLength;
131976 +        }
131977 +    }
131978 +    /* Fill the tables for the block compressor */
131979 +    ZSTD_ldm_limitTableUpdate(ms, ip);
131980 +    ZSTD_ldm_fillFastTables(ms, ip);
131981 +    /* Compress the last literals */
131982 +    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
131984 diff --git a/lib/zstd/compress/zstd_ldm.h b/lib/zstd/compress/zstd_ldm.h
131985 new file mode 100644
131986 index 000000000000..5ee467eaca2e
131987 --- /dev/null
131988 +++ b/lib/zstd/compress/zstd_ldm.h
131989 @@ -0,0 +1,110 @@
131991 + * Copyright (c) Yann Collet, Facebook, Inc.
131992 + * All rights reserved.
131994 + * This source code is licensed under both the BSD-style license (found in the
131995 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
131996 + * in the COPYING file in the root directory of this source tree).
131997 + * You may select, at your option, one of the above-listed licenses.
131998 + */
132000 +#ifndef ZSTD_LDM_H
132001 +#define ZSTD_LDM_H
132004 +#include "zstd_compress_internal.h"   /* ldmParams_t, U32 */
132005 +#include <linux/zstd.h>   /* ZSTD_CCtx, size_t */
132007 +/*-*************************************
132008 +*  Long distance matching
132009 +***************************************/
132011 +#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
132013 +void ZSTD_ldm_fillHashTable(
132014 +            ldmState_t* state, const BYTE* ip,
132015 +            const BYTE* iend, ldmParams_t const* params);
132018 + * ZSTD_ldm_generateSequences():
132020 + * Generates the sequences using the long distance match finder.
132021 + * Generates long range matching sequences in `sequences`, which parse a prefix
132022 + * of the source. `sequences` must be large enough to store every sequence,
132023 + * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
132024 + * @returns 0 or an error code.
132026 + * NOTE: The user must have called ZSTD_window_update() for all of the input
132027 + * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
132028 + * NOTE: This function returns an error if it runs out of space to store
132029 + *       sequences.
132030 + */
132031 +size_t ZSTD_ldm_generateSequences(
132032 +            ldmState_t* ldms, rawSeqStore_t* sequences,
132033 +            ldmParams_t const* params, void const* src, size_t srcSize);
132036 + * ZSTD_ldm_blockCompress():
132038 + * Compresses a block using the predefined sequences, along with a secondary
132039 + * block compressor. The literals section of every sequence is passed to the
132040 + * secondary block compressor, and those sequences are interspersed with the
132041 + * predefined sequences. Returns the length of the last literals.
132042 + * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
132043 + * `rawSeqStore.seq` may also be updated to split the last sequence between two
132044 + * blocks.
132045 + * @return The length of the last literals.
132047 + * NOTE: The source must be at most the maximum block size, but the predefined
132048 + * sequences can be any size, and may be longer than the block. In the case that
132049 + * they are longer than the block, the last sequences may need to be split into
132050 + * two. We handle that case correctly, and update `rawSeqStore` appropriately.
132051 + * NOTE: This function does not return any errors.
132052 + */
132053 +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
132054 +            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
132055 +            void const* src, size_t srcSize);
132058 + * ZSTD_ldm_skipSequences():
132060 + * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
132061 + * Avoids emitting matches shorter than `minMatch` bytes.
132062 + * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
132063 + */
132064 +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
132065 +    U32 const minMatch);
132067 +/* ZSTD_ldm_skipRawSeqStoreBytes():
132068 + * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'.
132069 + * Not to be used in conjunction with ZSTD_ldm_skipSequences().
132070 + * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
132071 + */
132072 +void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
132074 +/** ZSTD_ldm_getTableSize() :
132075 + *  Estimate the space needed for long distance matching tables or 0 if LDM is
132076 + *  disabled.
132077 + */
132078 +size_t ZSTD_ldm_getTableSize(ldmParams_t params);
132080 +/** ZSTD_ldm_getMaxNbSeq() :
132081 + *  Return an upper bound on the number of sequences that can be produced by
132082 + *  the long distance matcher, or 0 if LDM is disabled.
132083 + */
132084 +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
132086 +/** ZSTD_ldm_adjustParameters() :
132087 + *  If params->hashRateLog is not set, set it to its default value based on
132088 + *  windowLog and params->hashLog.
132090 + *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
132091 + *  params->hashLog if it is not).
132093 + *  Ensures that the minMatchLength >= targetLength during optimal parsing.
132094 + */
132095 +void ZSTD_ldm_adjustParameters(ldmParams_t* params,
132096 +                               ZSTD_compressionParameters const* cParams);
132099 +#endif /* ZSTD_LDM_H */
132100 diff --git a/lib/zstd/compress/zstd_ldm_geartab.h b/lib/zstd/compress/zstd_ldm_geartab.h
132101 new file mode 100644
132102 index 000000000000..e5c24d856b0a
132103 --- /dev/null
132104 +++ b/lib/zstd/compress/zstd_ldm_geartab.h
132105 @@ -0,0 +1,103 @@
132107 + * Copyright (c) Yann Collet, Facebook, Inc.
132108 + * All rights reserved.
132110 + * This source code is licensed under both the BSD-style license (found in the
132111 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
132112 + * in the COPYING file in the root directory of this source tree).
132113 + * You may select, at your option, one of the above-listed licenses.
132114 + */
132116 +#ifndef ZSTD_LDM_GEARTAB_H
132117 +#define ZSTD_LDM_GEARTAB_H
132119 +static U64 ZSTD_ldm_gearTab[256] = {
132120 +    0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc,
132121 +    0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05,
132122 +    0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e,
132123 +    0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889,
132124 +    0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e,
132125 +    0x37b628620b628,    0x49a8d455d88caf5,  0x8556d711e6958140,
132126 +    0x4f7ae74fc605c1f,  0x829f0c3468bd3a20, 0x4ffdc885c625179e,
132127 +    0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f,
132128 +    0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391,
132129 +    0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210,
132130 +    0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be,
132131 +    0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a,
132132 +    0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b,
132133 +    0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4,
132134 +    0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb,
132135 +    0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312,
132136 +    0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01,
132137 +    0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc,
132138 +    0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967,
132139 +    0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553,
132140 +    0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f,
132141 +    0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2,
132142 +    0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d,
132143 +    0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a,
132144 +    0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74,
132145 +    0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3,
132146 +    0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1,
132147 +    0xff452823dbb010a,  0x9d42ed614f3dd267, 0x5b9313c06257c57b,
132148 +    0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568,
132149 +    0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a,
132150 +    0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1,
132151 +    0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9,
132152 +    0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463,
132153 +    0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba,
132154 +    0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9,
132155 +    0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61,
132156 +    0x24a5483879c453e3, 0x88026889192b4b9,  0x28da96671782dbec,
132157 +    0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6,
132158 +    0xbc135a0a704b70ba, 0x69cd868f7622ada,  0xbc37ba89e0b9c0ab,
132159 +    0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5,
132160 +    0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59,
132161 +    0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7,
132162 +    0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc,
132163 +    0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb,
132164 +    0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be,
132165 +    0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312,
132166 +    0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1,
132167 +    0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc,
132168 +    0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d,
132169 +    0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445,
132170 +    0x820d471e20b348e,  0x1874383cb83d46dc, 0x97edeec7a1efe11c,
132171 +    0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5,
132172 +    0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5,
132173 +    0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28,
132174 +    0xaf846af6ab7d0bf4, 0xe5af208eb666e49,  0x5e6622f73534cd6a,
132175 +    0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9,
132176 +    0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15,
132177 +    0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef,
132178 +    0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2,
132179 +    0x9f90e4c5fd508d8,  0xa34e5956fbaf3385, 0x2e2f8e151d3ef375,
132180 +    0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3,
132181 +    0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595,
132182 +    0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389,
132183 +    0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4,
132184 +    0x4228e364c5b5ed7,  0x9d7a3edf0da43911, 0x8edcfeda24686756,
132185 +    0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc,
132186 +    0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45,
132187 +    0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea,
132188 +    0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f,
132189 +    0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc,
132190 +    0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c,
132191 +    0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a,
132192 +    0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17,
132193 +    0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3,
132194 +    0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4,
132195 +    0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91,
132196 +    0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40,
132197 +    0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741,
132198 +    0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f,
132199 +    0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4,
132200 +    0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad,
132201 +    0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047,
132202 +    0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2,
132203 +    0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e,
132204 +    0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b,
132205 +    0x2b4da14f2613d8f4
132208 +#endif /* ZSTD_LDM_GEARTAB_H */
132209 diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c
132210 new file mode 100644
132211 index 000000000000..9ab92d4ef499
132212 --- /dev/null
132213 +++ b/lib/zstd/compress/zstd_opt.c
132214 @@ -0,0 +1,1345 @@
132216 + * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
132217 + * All rights reserved.
132219 + * This source code is licensed under both the BSD-style license (found in the
132220 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
132221 + * in the COPYING file in the root directory of this source tree).
132222 + * You may select, at your option, one of the above-listed licenses.
132223 + */
132225 +#include "zstd_compress_internal.h"
132226 +#include "hist.h"
132227 +#include "zstd_opt.h"
132230 +#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
132231 +#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
132232 +#define ZSTD_MAX_PRICE     (1<<30)
132234 +#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
132237 +/*-*************************************
132238 +*  Price functions for optimal parser
132239 +***************************************/
132241 +#if 0    /* approximation at bit level */
132242 +#  define BITCOST_ACCURACY 0
132243 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
132244 +#  define WEIGHT(stat,opt)  ((void)opt, ZSTD_bitWeight(stat))
132245 +#elif 0  /* fractional bit accuracy */
132246 +#  define BITCOST_ACCURACY 8
132247 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
132248 +#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
132249 +#else    /* opt==approx, ultra==accurate */
132250 +#  define BITCOST_ACCURACY 8
132251 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
132252 +#  define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
132253 +#endif
132255 +MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
132257 +    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
132260 +MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
132262 +    U32 const stat = rawStat + 1;
132263 +    U32 const hb = ZSTD_highbit32(stat);
132264 +    U32 const BWeight = hb * BITCOST_MULTIPLIER;
132265 +    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
132266 +    U32 const weight = BWeight + FWeight;
132267 +    assert(hb + BITCOST_ACCURACY < 31);
132268 +    return weight;
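+/* Worked example (illustrative): for rawStat = 5, stat = 6 and hb = 2, so
+ * BWeight = 2 * 256 = 512, FWeight = (6 << 8) >> 2 = 384, and weight = 896,
+ * i.e. 3.5 * BITCOST_MULTIPLIER. The FWeight term interpolates fractionally
+ * between the integer bit counts that ZSTD_bitWeight() jumps between. */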
132271 +#if (DEBUGLEVEL>=2)
132272 +/* debugging function,
132273 + * @return price in bytes as fractional value
132274 + * for debug messages only */
132275 +MEM_STATIC double ZSTD_fCost(U32 price)
132277 +    return (double)price / (BITCOST_MULTIPLIER*8);
132279 +#endif
132281 +static int ZSTD_compressedLiterals(optState_t const* const optPtr)
132283 +    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
132286 +static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
132288 +    if (ZSTD_compressedLiterals(optPtr))
132289 +        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
132290 +    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
132291 +    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
132292 +    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
132296 +/* ZSTD_downscaleStat() :
132297 + * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
132298 + * return the resulting sum of elements */
132299 +static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
132301 +    U32 s, sum=0;
132302 +    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
132303 +    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
132304 +    for (s=0; s<lastEltIndex+1; s++) {
132305 +        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
132306 +        sum += table[s];
132307 +    }
132308 +    return sum;
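+/* Worked example (illustrative): with ZSTD_FREQ_DIV = 4 and malus = 0, each
+ * count becomes 1 + (count >> 4), e.g. 160 -> 11 and 0 -> 1; every symbol
+ * keeps a non-zero frequency, so its price stays finite in the next block. */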
132311 +/* ZSTD_rescaleFreqs() :
132312 + * if first block (detected by optPtr->litLengthSum == 0) : init statistics
132313 + *    take hints from dictionary if there is one
132314 + *    or init from zero, using src for literals stats, or flat 1 for match symbols
132315 + * otherwise downscale existing stats, to be used as seed for next block.
132316 + */
132317 +static void
132318 +ZSTD_rescaleFreqs(optState_t* const optPtr,
132319 +            const BYTE* const src, size_t const srcSize,
132320 +                  int const optLevel)
132322 +    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
132323 +    DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
132324 +    optPtr->priceType = zop_dynamic;
132326 +    if (optPtr->litLengthSum == 0) {  /* first block : init */
132327 +        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */
132328 +            DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
132329 +            optPtr->priceType = zop_predef;
132330 +        }
132332 +        assert(optPtr->symbolCosts != NULL);
132333 +        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
132334 +            /* huffman table presumed generated by dictionary */
132335 +            optPtr->priceType = zop_dynamic;
132337 +            if (compressedLiterals) {
132338 +                unsigned lit;
132339 +                assert(optPtr->litFreq != NULL);
132340 +                optPtr->litSum = 0;
132341 +                for (lit=0; lit<=MaxLit; lit++) {
132342 +                    U32 const scaleLog = 11;   /* scale to 2K */
132343 +                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
132344 +                    assert(bitCost <= scaleLog);
132345 +                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
132346 +                    optPtr->litSum += optPtr->litFreq[lit];
132347 +            }   }
132349 +            {   unsigned ll;
132350 +                FSE_CState_t llstate;
132351 +                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
132352 +                optPtr->litLengthSum = 0;
132353 +                for (ll=0; ll<=MaxLL; ll++) {
132354 +                    U32 const scaleLog = 10;   /* scale to 1K */
132355 +                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
132356 +                    assert(bitCost < scaleLog);
132357 +                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
132358 +                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
132359 +            }   }
132361 +            {   unsigned ml;
132362 +                FSE_CState_t mlstate;
132363 +                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
132364 +                optPtr->matchLengthSum = 0;
132365 +                for (ml=0; ml<=MaxML; ml++) {
132366 +                    U32 const scaleLog = 10;
132367 +                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
132368 +                    assert(bitCost < scaleLog);
132369 +                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
132370 +                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
132371 +            }   }
132373 +            {   unsigned of;
132374 +                FSE_CState_t ofstate;
132375 +                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
132376 +                optPtr->offCodeSum = 0;
132377 +                for (of=0; of<=MaxOff; of++) {
132378 +                    U32 const scaleLog = 10;
132379 +                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
132380 +                    assert(bitCost < scaleLog);
132381 +                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
132382 +                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
132383 +            }   }
132385 +        } else {  /* not a dictionary */
132387 +            assert(optPtr->litFreq != NULL);
132388 +            if (compressedLiterals) {
132389 +                unsigned lit = MaxLit;
132390 +                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
132391 +                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
132392 +            }
132394 +            {   unsigned ll;
132395 +                for (ll=0; ll<=MaxLL; ll++)
132396 +                    optPtr->litLengthFreq[ll] = 1;
132397 +            }
132398 +            optPtr->litLengthSum = MaxLL+1;
132400 +            {   unsigned ml;
132401 +                for (ml=0; ml<=MaxML; ml++)
132402 +                    optPtr->matchLengthFreq[ml] = 1;
132403 +            }
132404 +            optPtr->matchLengthSum = MaxML+1;
132406 +            {   unsigned of;
132407 +                for (of=0; of<=MaxOff; of++)
132408 +                    optPtr->offCodeFreq[of] = 1;
132409 +            }
132410 +            optPtr->offCodeSum = MaxOff+1;
132412 +        }
132414 +    } else {   /* new block : re-use previous statistics, scaled down */
132416 +        if (compressedLiterals)
132417 +            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
132418 +        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
132419 +        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
132420 +        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
132421 +    }
132423 +    ZSTD_setBasePrices(optPtr, optLevel);
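+/* note : the seeding above converts entropy-table bit costs into
+ * pseudo-frequencies : a symbol coded in `bitCost` bits receives
+ * freq = 2^(scaleLog - bitCost), so cheaper symbols look more frequent.
+ * For example, with scaleLog=11, a literal costing 3 bits gets
+ * freq = 1<<8 = 256, while one costing the full 11 bits gets freq = 1.
+ * WEIGHT() later maps these frequencies back to approximate bit costs,
+ * so prices seeded from a dictionary track its tables. */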
132426 +/* ZSTD_rawLiteralsCost() :
132427 + * price of literals (only) in the specified segment (whose length can be 0).
132428 + * does not include price of literalLength symbol */
132429 +static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
132430 +                                const optState_t* const optPtr,
132431 +                                int optLevel)
132433 +    if (litLength == 0) return 0;
132435 +    if (!ZSTD_compressedLiterals(optPtr))
132436 +        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bits per literal. */
132438 +    if (optPtr->priceType == zop_predef)
132439 +        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bits per literal - no statistics used */
132441 +    /* dynamic statistics */
132442 +    {   U32 price = litLength * optPtr->litSumBasePrice;
132443 +        U32 u;
132444 +        for (u=0; u < litLength; u++) {
132445 +            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
132446 +            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
132447 +        }
132448 +        return price;
132449 +    }
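+/* rough cost model used above : each literal `l` costs about
+ * log2(litSum) - log2(litFreq[l]) bits, i.e. -log2(p(l)), expressed in
+ * BITCOST_MULTIPLIER fixed-point units; litSumBasePrice (computed by
+ * ZSTD_setBasePrices(), earlier in this file) carries the log2(litSum)
+ * part, so the loop only subtracts per-symbol weights. */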
132452 +/* ZSTD_litLengthPrice() :
132453 + * cost of literalLength symbol */
132454 +static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
132456 +    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
132458 +    /* dynamic statistics */
132459 +    {   U32 const llCode = ZSTD_LLcode(litLength);
132460 +        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
132461 +             + optPtr->litLengthSumBasePrice
132462 +             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
132463 +    }
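+/* the literalLength cost decomposes into the raw extra bits of the LL
+ * code (LL_bits[llCode]) plus the entropy cost of the code itself,
+ * estimated as log2(litLengthSum) - log2(litLengthFreq[llCode]). */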
132466 +/* ZSTD_getMatchPrice() :
132467 + * Provides the cost of the match part (offset + matchLength) of a sequence
132468 + * Must be combined with the literals cost (ZSTD_rawLiteralsCost() + ZSTD_litLengthPrice()) to get the full cost of a sequence.
132469 + * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
132470 +FORCE_INLINE_TEMPLATE U32
132471 +ZSTD_getMatchPrice(U32 const offset,
132472 +                   U32 const matchLength,
132473 +             const optState_t* const optPtr,
132474 +                   int const optLevel)
132476 +    U32 price;
132477 +    U32 const offCode = ZSTD_highbit32(offset+1);
132478 +    U32 const mlBase = matchLength - MINMATCH;
132479 +    assert(matchLength >= MINMATCH);
132481 +    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */
132482 +        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
132484 +    /* dynamic statistics */
132485 +    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
132486 +    if ((optLevel<2) /*static*/ && offCode >= 20)
132487 +        price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
132489 +    /* match Length */
132490 +    {   U32 const mlCode = ZSTD_MLcode(mlBase);
132491 +        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
132492 +    }
132494 +    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */
132496 +    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
132497 +    return price;
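+/* worked example for the offset cost above : offset 1024 gives
+ * offCode = ZSTD_highbit32(1025) = 10, i.e. 10 extra offset bits plus
+ * the entropy cost of code 10. At optLevel<2, an offCode of 24 would
+ * add a further (24-19)*2 = 10 weighted bits, steering the parser
+ * toward closer matches that decompress faster. */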
132500 +/* ZSTD_updateStats() :
132501 + * assumption : literals + litLength <= iend */
132502 +static void ZSTD_updateStats(optState_t* const optPtr,
132503 +                             U32 litLength, const BYTE* literals,
132504 +                             U32 offsetCode, U32 matchLength)
132506 +    /* literals */
132507 +    if (ZSTD_compressedLiterals(optPtr)) {
132508 +        U32 u;
132509 +        for (u=0; u < litLength; u++)
132510 +            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
132511 +        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
132512 +    }
132514 +    /* literal Length */
132515 +    {   U32 const llCode = ZSTD_LLcode(litLength);
132516 +        optPtr->litLengthFreq[llCode]++;
132517 +        optPtr->litLengthSum++;
132518 +    }
132520 +    /* match offset code (0-2=>repCode; 3+=>offset+2) */
132521 +    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
132522 +        assert(offCode <= MaxOff);
132523 +        optPtr->offCodeFreq[offCode]++;
132524 +        optPtr->offCodeSum++;
132525 +    }
132527 +    /* match Length */
132528 +    {   U32 const mlBase = matchLength - MINMATCH;
132529 +        U32 const mlCode = ZSTD_MLcode(mlBase);
132530 +        optPtr->matchLengthFreq[mlCode]++;
132531 +        optPtr->matchLengthSum++;
132532 +    }
132536 +/* ZSTD_readMINMATCH() :
132537 + * function safe only for comparisons
132538 + * assumption : memPtr must be at least 4 bytes before end of buffer */
132539 +MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
132541 +    switch (length)
132542 +    {
132543 +    default :
132544 +    case 4 : return MEM_read32(memPtr);
132545 +    case 3 : if (MEM_isLittleEndian())
132546 +                return MEM_read32(memPtr)<<8;
132547 +             else
132548 +                return MEM_read32(memPtr)>>8;
132549 +    }
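+/* for length==3, the shift discards one byte of the 32-bit read (the top
+ * byte on little-endian, the low byte on big-endian), so two positions
+ * compare equal iff their first 3 bytes match. The result is only
+ * meaningful for equality tests, as stated above. */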
132553 +/* Update hashTable3 up to ip (excluded)
132554 +   Assumption : always within prefix (i.e. not within extDict) */
132555 +static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
132556 +                                              U32* nextToUpdate3,
132557 +                                              const BYTE* const ip)
132559 +    U32* const hashTable3 = ms->hashTable3;
132560 +    U32 const hashLog3 = ms->hashLog3;
132561 +    const BYTE* const base = ms->window.base;
132562 +    U32 idx = *nextToUpdate3;
132563 +    U32 const target = (U32)(ip - base);
132564 +    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
132565 +    assert(hashLog3 > 0);
132567 +    while(idx < target) {
132568 +        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
132569 +        idx++;
132570 +    }
132572 +    *nextToUpdate3 = target;
132573 +    return hashTable3[hash3];
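+/* hashTable3 is a plain (single-entry, non-chained) hash of 3-byte
+ * prefixes : the loop rolls it forward up to ip (excluded), then the
+ * return value is the most recent earlier position sharing ip's 3-byte
+ * hash (0 when no such position was recorded). */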
132577 +/*-*************************************
132578 +*  Binary Tree search
132579 +***************************************/
132580 +/** ZSTD_insertBt1() : add one or multiple positions to tree.
132581 + *  ip : assumed <= iend-8 .
132582 + * @return : nb of positions added */
132583 +static U32 ZSTD_insertBt1(
132584 +                ZSTD_matchState_t* ms,
132585 +                const BYTE* const ip, const BYTE* const iend,
132586 +                U32 const mls, const int extDict)
132588 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
132589 +    U32*   const hashTable = ms->hashTable;
132590 +    U32    const hashLog = cParams->hashLog;
132591 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
132592 +    U32*   const bt = ms->chainTable;
132593 +    U32    const btLog  = cParams->chainLog - 1;
132594 +    U32    const btMask = (1 << btLog) - 1;
132595 +    U32 matchIndex = hashTable[h];
132596 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
132597 +    const BYTE* const base = ms->window.base;
132598 +    const BYTE* const dictBase = ms->window.dictBase;
132599 +    const U32 dictLimit = ms->window.dictLimit;
132600 +    const BYTE* const dictEnd = dictBase + dictLimit;
132601 +    const BYTE* const prefixStart = base + dictLimit;
132602 +    const BYTE* match;
132603 +    const U32 curr = (U32)(ip-base);
132604 +    const U32 btLow = btMask >= curr ? 0 : curr - btMask;
132605 +    U32* smallerPtr = bt + 2*(curr&btMask);
132606 +    U32* largerPtr  = smallerPtr + 1;
132607 +    U32 dummy32;   /* to be nullified at the end */
132608 +    U32 const windowLow = ms->window.lowLimit;
132609 +    U32 matchEndIdx = curr+8+1;
132610 +    size_t bestLength = 8;
132611 +    U32 nbCompares = 1U << cParams->searchLog;
132612 +#ifdef ZSTD_C_PREDICT
132613 +    U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
132614 +    U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
132615 +    predictedSmall += (predictedSmall>0);
132616 +    predictedLarge += (predictedLarge>0);
132617 +#endif /* ZSTD_C_PREDICT */
132619 +    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
132621 +    assert(ip <= iend-8);   /* required for h calculation */
132622 +    hashTable[h] = curr;   /* Update Hash Table */
132624 +    assert(windowLow > 0);
132625 +    while (nbCompares-- && (matchIndex >= windowLow)) {
132626 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
132627 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
132628 +        assert(matchIndex < curr);
132630 +#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
132631 +        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
132632 +        if (matchIndex == predictedSmall) {
132633 +            /* no need to check length, result known */
132634 +            *smallerPtr = matchIndex;
132635 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
132636 +            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
132637 +            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
132638 +            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
132639 +            continue;
132640 +        }
132641 +        if (matchIndex == predictedLarge) {
132642 +            *largerPtr = matchIndex;
132643 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
132644 +            largerPtr = nextPtr;
132645 +            matchIndex = nextPtr[0];
132646 +            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
132647 +            continue;
132648 +        }
132649 +#endif
132651 +        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
132652 +            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
132653 +            match = base + matchIndex;
132654 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
132655 +        } else {
132656 +            match = dictBase + matchIndex;
132657 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
132658 +            if (matchIndex+matchLength >= dictLimit)
132659 +                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
132660 +        }
132662 +        if (matchLength > bestLength) {
132663 +            bestLength = matchLength;
132664 +            if (matchLength > matchEndIdx - matchIndex)
132665 +                matchEndIdx = matchIndex + (U32)matchLength;
132666 +        }
132668 +        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
132669 +            break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
132670 +        }
132672 +        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
132673 +            /* match is smaller than current */
132674 +            *smallerPtr = matchIndex;             /* update smaller idx */
132675 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
132676 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
132677 +            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
132678 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
132679 +        } else {
132680 +            /* match is larger than current */
132681 +            *largerPtr = matchIndex;
132682 +            commonLengthLarger = matchLength;
132683 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
132684 +            largerPtr = nextPtr;
132685 +            matchIndex = nextPtr[0];
132686 +    }   }
132688 +    *smallerPtr = *largerPtr = 0;
132689 +    {   U32 positions = 0;
132690 +        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
132691 +        assert(matchEndIdx > curr + 8);
132692 +        return MAX(positions, matchEndIdx - (curr + 8));
132693 +    }
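+/* tree layout sketch : bt[] keeps two links per position,
+ * bt[2*(pos&btMask)] heading the subtree of suffixes sorting smaller and
+ * bt[2*(pos&btMask)+1] the subtree sorting larger. Each inserted position
+ * becomes the root for its slot, and the walk above re-threads older
+ * nodes beneath it, using commonLengthSmaller / commonLengthLarger to
+ * skip bytes already known equal on each branch. */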
132696 +FORCE_INLINE_TEMPLATE
132697 +void ZSTD_updateTree_internal(
132698 +                ZSTD_matchState_t* ms,
132699 +                const BYTE* const ip, const BYTE* const iend,
132700 +                const U32 mls, const ZSTD_dictMode_e dictMode)
132702 +    const BYTE* const base = ms->window.base;
132703 +    U32 const target = (U32)(ip - base);
132704 +    U32 idx = ms->nextToUpdate;
132705 +    DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)",
132706 +                idx, target, dictMode);
132708 +    while(idx < target) {
132709 +        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
132710 +        assert(idx < (U32)(idx + forward));
132711 +        idx += forward;
132712 +    }
132713 +    assert((size_t)(ip - base) <= (size_t)(U32)(-1));
132714 +    assert((size_t)(iend - base) <= (size_t)(U32)(-1));
132715 +    ms->nextToUpdate = target;
132718 +void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
132719 +    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
132722 +FORCE_INLINE_TEMPLATE
132723 +U32 ZSTD_insertBtAndGetAllMatches (
132724 +                    ZSTD_match_t* matches,   /* store result (found matches) in this table (presumed large enough) */
132725 +                    ZSTD_matchState_t* ms,
132726 +                    U32* nextToUpdate3,
132727 +                    const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
132728 +                    const U32 rep[ZSTD_REP_NUM],
132729 +                    U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
132730 +                    const U32 lengthToBeat,
132731 +                    U32 const mls /* template */)
132733 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
132734 +    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
132735 +    const BYTE* const base = ms->window.base;
132736 +    U32 const curr = (U32)(ip-base);
132737 +    U32 const hashLog = cParams->hashLog;
132738 +    U32 const minMatch = (mls==3) ? 3 : 4;
132739 +    U32* const hashTable = ms->hashTable;
132740 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
132741 +    U32 matchIndex  = hashTable[h];
132742 +    U32* const bt   = ms->chainTable;
132743 +    U32 const btLog = cParams->chainLog - 1;
132744 +    U32 const btMask= (1U << btLog) - 1;
132745 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
132746 +    const BYTE* const dictBase = ms->window.dictBase;
132747 +    U32 const dictLimit = ms->window.dictLimit;
132748 +    const BYTE* const dictEnd = dictBase + dictLimit;
132749 +    const BYTE* const prefixStart = base + dictLimit;
132750 +    U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
132751 +    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
132752 +    U32 const matchLow = windowLow ? windowLow : 1;
132753 +    U32* smallerPtr = bt + 2*(curr&btMask);
132754 +    U32* largerPtr  = bt + 2*(curr&btMask) + 1;
132755 +    U32 matchEndIdx = curr+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
132756 +    U32 dummy32;   /* to be nullified at the end */
132757 +    U32 mnum = 0;
132758 +    U32 nbCompares = 1U << cParams->searchLog;
132760 +    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
132761 +    const ZSTD_compressionParameters* const dmsCParams =
132762 +                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
132763 +    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
132764 +    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
132765 +    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
132766 +    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
132767 +    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
132768 +    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
132769 +    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
132770 +    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
132771 +    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
132773 +    size_t bestLength = lengthToBeat-1;
132774 +    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
132776 +    /* check repCode */
132777 +    assert(ll0 <= 1);   /* necessarily 1 or 0 */
132778 +    {   U32 const lastR = ZSTD_REP_NUM + ll0;
132779 +        U32 repCode;
132780 +        for (repCode = ll0; repCode < lastR; repCode++) {
132781 +            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
132782 +            U32 const repIndex = curr - repOffset;
132783 +            U32 repLen = 0;
132784 +            assert(curr >= dictLimit);
132785 +            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) {  /* equivalent to `curr > repIndex >= dictLimit` */
132786 +                /* We must validate the repcode offset because when we're using a dictionary the
132787 +                 * valid offset range shrinks when the dictionary goes out of bounds.
132788 +                 */
132789 +                if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
132790 +                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
132791 +                }
132792 +            } else {  /* repIndex < dictLimit || repIndex >= curr */
132793 +                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
132794 +                                             dmsBase + repIndex - dmsIndexDelta :
132795 +                                             dictBase + repIndex;
132796 +                assert(curr >= windowLow);
132797 +                if ( dictMode == ZSTD_extDict
132798 +                  && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow)  /* equivalent to `curr > repIndex >= windowLow` */
132799 +                     & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
132800 +                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
132801 +                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
132802 +                }
132803 +                if (dictMode == ZSTD_dictMatchState
132804 +                  && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `curr > repIndex >= dmsLowLimit` */
132805 +                     & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
132806 +                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
132807 +                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
132808 +            }   }
132809 +            /* save longer solution */
132810 +            if (repLen > bestLength) {
132811 +                DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
132812 +                            repCode, ll0, repOffset, repLen);
132813 +                bestLength = repLen;
132814 +                matches[mnum].off = repCode - ll0;
132815 +                matches[mnum].len = (U32)repLen;
132816 +                mnum++;
132817 +                if ( (repLen > sufficient_len)
132818 +                   | (ip+repLen == iLimit) ) {  /* best possible */
132819 +                    return mnum;
132820 +    }   }   }   }
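+    /* recap of the repcode scan above : candidates rep[0..2] are tried
+     * (plus rep[0]-1 when ll0==1, i.e. repCode==ZSTD_REP_NUM), and a hit
+     * is stored with off = repCode - ll0, the small-value convention that
+     * distinguishes repcodes from real matches, which are stored below as
+     * (distance + ZSTD_REP_MOVE). */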
132822 +    /* HC3 match finder */
132823 +    if ((mls == 3) /*static*/ && (bestLength < mls)) {
132824 +        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
132825 +        if ((matchIndex3 >= matchLow)
132826 +          & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
132827 +            size_t mlen;
132828 +            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
132829 +                const BYTE* const match = base + matchIndex3;
132830 +                mlen = ZSTD_count(ip, match, iLimit);
132831 +            } else {
132832 +                const BYTE* const match = dictBase + matchIndex3;
132833 +                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
132834 +            }
132836 +            /* save best solution */
132837 +            if (mlen >= mls /* == 3 > bestLength */) {
132838 +                DEBUGLOG(8, "found small match with hlog3, of length %u",
132839 +                            (U32)mlen);
132840 +                bestLength = mlen;
132841 +                assert(curr > matchIndex3);
132842 +                assert(mnum==0);  /* no prior solution */
132843 +                matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE;
132844 +                matches[0].len = (U32)mlen;
132845 +                mnum = 1;
132846 +                if ( (mlen > sufficient_len) |
132847 +                     (ip+mlen == iLimit) ) {  /* best possible length */
132848 +                    ms->nextToUpdate = curr+1;  /* skip insertion */
132849 +                    return 1;
132850 +        }   }   }
132851 +        /* no dictMatchState lookup: dicts don't have a populated HC3 table */
132852 +    }
132854 +    hashTable[h] = curr;   /* Update Hash Table */
132856 +    while (nbCompares-- && (matchIndex >= matchLow)) {
132857 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
132858 +        const BYTE* match;
132859 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
132860 +        assert(curr > matchIndex);
132862 +        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
132863 +            assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
132864 +            match = base + matchIndex;
132865 +            if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
132866 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
132867 +        } else {
132868 +            match = dictBase + matchIndex;
132869 +            assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
132870 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
132871 +            if (matchIndex+matchLength >= dictLimit)
132872 +                match = base + matchIndex;   /* prepare for match[matchLength] read */
132873 +        }
132875 +        if (matchLength > bestLength) {
132876 +            DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
132877 +                    (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
132878 +            assert(matchEndIdx > matchIndex);
132879 +            if (matchLength > matchEndIdx - matchIndex)
132880 +                matchEndIdx = matchIndex + (U32)matchLength;
132881 +            bestLength = matchLength;
132882 +            matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
132883 +            matches[mnum].len = (U32)matchLength;
132884 +            mnum++;
132885 +            if ( (matchLength > ZSTD_OPT_NUM)
132886 +               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
132887 +                if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
132888 +                break; /* drop, to preserve bt consistency (miss a little bit of compression) */
132889 +            }
132890 +        }
132892 +        if (match[matchLength] < ip[matchLength]) {
132893 +            /* match smaller than current */
132894 +            *smallerPtr = matchIndex;             /* update smaller idx */
132895 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
132896 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
132897 +            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
132898 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */
132899 +        } else {
132900 +            *largerPtr = matchIndex;
132901 +            commonLengthLarger = matchLength;
132902 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
132903 +            largerPtr = nextPtr;
132904 +            matchIndex = nextPtr[0];
132905 +    }   }
132907 +    *smallerPtr = *largerPtr = 0;
132909 +    if (dictMode == ZSTD_dictMatchState && nbCompares) {
132910 +        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
132911 +        U32 dictMatchIndex = dms->hashTable[dmsH];
132912 +        const U32* const dmsBt = dms->chainTable;
132913 +        commonLengthSmaller = commonLengthLarger = 0;
132914 +        while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
132915 +            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
132916 +            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
132917 +            const BYTE* match = dmsBase + dictMatchIndex;
132918 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
132919 +            if (dictMatchIndex+matchLength >= dmsHighLimit)
132920 +                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */
132922 +            if (matchLength > bestLength) {
132923 +                matchIndex = dictMatchIndex + dmsIndexDelta;
132924 +                DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
132925 +                        (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
132926 +                if (matchLength > matchEndIdx - matchIndex)
132927 +                    matchEndIdx = matchIndex + (U32)matchLength;
132928 +                bestLength = matchLength;
132929 +                matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
132930 +                matches[mnum].len = (U32)matchLength;
132931 +                mnum++;
132932 +                if ( (matchLength > ZSTD_OPT_NUM)
132933 +                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
132934 +                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
132935 +                }
132936 +            }
132938 +            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
132939 +            if (match[matchLength] < ip[matchLength]) {
132940 +                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
132941 +                dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
132942 +            } else {
132943 +                /* match is larger than current */
132944 +                commonLengthLarger = matchLength;
132945 +                dictMatchIndex = nextPtr[0];
132946 +            }
132947 +        }
132948 +    }
132950 +    assert(matchEndIdx > curr+8);
132951 +    ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
132952 +    return mnum;
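+/* on return, matches[0..mnum-1] is ordered by strictly increasing length
+ * (a candidate is only appended when it beats bestLength), which lets the
+ * optimal parser read matches[nbMatches-1] as the longest match available.
+ * Setting nextToUpdate to matchEndIdx-8 avoids re-inserting positions
+ * inside long repetitive matches. */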
132956 +FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
132957 +                        ZSTD_match_t* matches,   /* store result (match found, increasing size) in this table */
132958 +                        ZSTD_matchState_t* ms,
132959 +                        U32* nextToUpdate3,
132960 +                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
132961 +                        const U32 rep[ZSTD_REP_NUM],
132962 +                        U32 const ll0,
132963 +                        U32 const lengthToBeat)
132965 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
132966 +    U32 const matchLengthSearch = cParams->minMatch;
132967 +    DEBUGLOG(8, "ZSTD_BtGetAllMatches");
132968 +    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
132969 +    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
132970 +    switch(matchLengthSearch)
132971 +    {
132972 +    case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
132973 +    default :
132974 +    case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
132975 +    case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
132976 +    case 7 :
132977 +    case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
132978 +    }
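+/* the switch above instantiates one specialized copy of
+ * ZSTD_insertBtAndGetAllMatches() per `mls` template value; mls==7
+ * intentionally falls through to the mls==6 instance, and any other
+ * value resolves to 4 via the `default` label. */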
132981 +/*************************
132982 +*  LDM helper functions  *
132983 +*************************/
132985 +/* Struct containing info needed to make decision about ldm inclusion */
132986 +typedef struct {
132987 +    rawSeqStore_t seqStore;         /* External match candidates store for this block */
132988 +    U32 startPosInBlock;            /* Start position of the current match candidate */
132989 +    U32 endPosInBlock;              /* End position of the current match candidate */
132990 +    U32 offset;                     /* Offset of the match candidate */
132991 +} ZSTD_optLdm_t;
132993 +/* ZSTD_optLdm_skipRawSeqStoreBytes():
132994 + * Moves forward in rawSeqStore by nbBytes, which will update the fields 'pos' and 'posInSequence'.
132995 + */
132996 +static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
132997 +    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
132998 +    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
132999 +        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
133000 +        if (currPos >= currSeq.litLength + currSeq.matchLength) {
133001 +            currPos -= currSeq.litLength + currSeq.matchLength;
133002 +            rawSeqStore->pos++;
133003 +        } else {
133004 +            rawSeqStore->posInSequence = currPos;
133005 +            break;
133006 +        }
133007 +    }
133008 +    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
133009 +        rawSeqStore->posInSequence = 0;
133010 +    }
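+/* walk-through : skipping 15 bytes over a store whose head sequence has
+ * litLength=4 and matchLength=8 (12 bytes total) consumes that sequence
+ * entirely (pos++), then leaves posInSequence=3 inside the next one;
+ * fully consuming the store resets posInSequence to 0. */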
133013 +/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
133014 + * Calculates the beginning and end of the next match in the current block.
133015 + * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
133016 + */
133017 +static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
133018 +                                                   U32 blockBytesRemaining) {
133019 +    rawSeq currSeq;
133020 +    U32 currBlockEndPos;
133021 +    U32 literalsBytesRemaining;
133022 +    U32 matchBytesRemaining;
133024 +    /* Setting match end position to MAX to ensure we never use an LDM during this block */
133025 +    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
133026 +        optLdm->startPosInBlock = UINT_MAX;
133027 +        optLdm->endPosInBlock = UINT_MAX;
133028 +        return;
133029 +    }
133030 +    /* Calculate appropriate bytes left in matchLength and litLength after adjusting
133031 +       based on ldmSeqStore->posInSequence */
133032 +    currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
133033 +    assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
133034 +    currBlockEndPos = currPosInBlock + blockBytesRemaining;
133035 +    literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
133036 +            currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
133037 +            0;
133038 +    matchBytesRemaining = (literalsBytesRemaining == 0) ?
133039 +            currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
133040 +            currSeq.matchLength;
133042 +    /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
133043 +    if (literalsBytesRemaining >= blockBytesRemaining) {
133044 +        optLdm->startPosInBlock = UINT_MAX;
133045 +        optLdm->endPosInBlock = UINT_MAX;
133046 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
133047 +        return;
133048 +    }
133050 +    /* Matches may be < MINMATCH by this process. In that case, we will reject them
133051 +       when we are deciding whether or not to add the ldm */
133052 +    optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
133053 +    optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
133054 +    optLdm->offset = currSeq.offset;
133056 +    if (optLdm->endPosInBlock > currBlockEndPos) {
133057 +        /* Match ends after the block ends, we can't use the whole match */
133058 +        optLdm->endPosInBlock = currBlockEndPos;
133059 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
133060 +    } else {
133061 +        /* Consume nb of bytes equal to size of sequence left */
133062 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
133063 +    }
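+/* the ldm sequence is split relative to the block : its leading literals
+ * occupy [currPosInBlock, startPosInBlock) and its match part
+ * [startPosInBlock, endPosInBlock), clamped to the block end. The store
+ * cursor advances by exactly the bytes accounted for, so the next call
+ * resumes at the following candidate. */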
133066 +/* ZSTD_optLdm_maybeAddMatch():
133067 + * Adds a match if it's long enough, based on its 'startPosInBlock'
133068 + * and 'endPosInBlock', into 'matches'. Maintains the correct ordering of 'matches'
133069 + */
133070 +static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
133071 +                                      ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
133072 +    U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
133073 +    /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
133074 +    U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
133075 +    U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;
133077 +    /* Ensure that current block position is not outside of the match */
133078 +    if (currPosInBlock < optLdm->startPosInBlock
133079 +      || currPosInBlock >= optLdm->endPosInBlock
133080 +      || candidateMatchLength < MINMATCH) {
133081 +        return;
133082 +    }
133084 +    if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
133085 +        DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
133086 +                 candidateOffCode, candidateMatchLength, currPosInBlock);
133087 +        matches[*nbMatches].len = candidateMatchLength;
133088 +        matches[*nbMatches].off = candidateOffCode;
133089 +        (*nbMatches)++;
133090 +    }
133093 +/* ZSTD_optLdm_processMatchCandidate():
133094 + * Wrapper function to update ldm seq store and call ldm functions as necessary.
133095 + */
133096 +static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
133097 +                                              U32 currPosInBlock, U32 remainingBytes) {
133098 +    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
133099 +        return;
133100 +    }
133102 +    if (currPosInBlock >= optLdm->endPosInBlock) {
133103 +        if (currPosInBlock > optLdm->endPosInBlock) {
133104 +            /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
133105 +             * at the end of a match from the ldm seq store, and will often be some bytes
133106 +             * beyond 'endPosInBlock'. As such, we need to correct for these "overshoots"
133107 +             */
133108 +            U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock;
133109 +            ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
133110 +        }
133111 +        ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
133112 +    }
133113 +    ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
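+/* overall flow : once the parser's position passes the current ldm
+ * candidate, any overshoot is skipped in the raw seq store, the next
+ * candidate is fetched, and only a candidate overlapping the current
+ * position is offered to the match list. */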
133116 +/*-*******************************
133117 +*  Optimal parser
133118 +*********************************/
133121 +static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
133123 +    return sol.litlen + sol.mlen;
133126 +#if 0 /* debug */
133128 +static void
133129 +listStats(const U32* table, int lastEltID)
133131 +    int const nbElts = lastEltID + 1;
133132 +    int enb;
133133 +    for (enb=0; enb < nbElts; enb++) {
133134 +        (void)table;
133135 +        /* RAWLOG(2, "%3i:%3i,  ", enb, table[enb]); */
133136 +        RAWLOG(2, "%4i,", table[enb]);
133137 +    }
133138 +    RAWLOG(2, " \n");
133141 +#endif
133143 +FORCE_INLINE_TEMPLATE size_t
133144 +ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
133145 +                               seqStore_t* seqStore,
133146 +                               U32 rep[ZSTD_REP_NUM],
133147 +                         const void* src, size_t srcSize,
133148 +                         const int optLevel,
133149 +                         const ZSTD_dictMode_e dictMode)
133151 +    optState_t* const optStatePtr = &ms->opt;
133152 +    const BYTE* const istart = (const BYTE*)src;
133153 +    const BYTE* ip = istart;
133154 +    const BYTE* anchor = istart;
133155 +    const BYTE* const iend = istart + srcSize;
133156 +    const BYTE* const ilimit = iend - 8;
133157 +    const BYTE* const base = ms->window.base;
133158 +    const BYTE* const prefixStart = base + ms->window.dictLimit;
133159 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
133161 +    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
133162 +    U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
133163 +    U32 nextToUpdate3 = ms->nextToUpdate;
133165 +    ZSTD_optimal_t* const opt = optStatePtr->priceTable;
133166 +    ZSTD_match_t* const matches = optStatePtr->matchTable;
133167 +    ZSTD_optimal_t lastSequence;
133168 +    ZSTD_optLdm_t optLdm;
133170 +    optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
133171 +    optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
133172 +    ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
133174 +    /* init */
133175 +    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
133176 +                (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
133177 +    assert(optLevel <= 2);
133178 +    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
133179 +    ip += (ip==prefixStart);
133181 +    /* Match Loop */
133182 +    while (ip < ilimit) {
133183 +        U32 cur, last_pos = 0;
133185 +        /* find first match */
133186 +        {   U32 const litlen = (U32)(ip - anchor);
133187 +            U32 const ll0 = !litlen;
133188 +            U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
133189 +            ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
133190 +                                              (U32)(ip-istart), (U32)(iend - ip));
133191 +            if (!nbMatches) { ip++; continue; }
133193 +            /* initialize opt[0] */
133194 +            { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
133195 +            opt[0].mlen = 0;  /* means is_a_literal */
133196 +            opt[0].litlen = litlen;
133197 +            /* We don't need to include the actual price of the literals because
133198 +             * it is static for the duration of the forward pass, and is included
133199 +             * in every price. We include the literal length to avoid negative
133200 +             * prices when we subtract the previous literal length.
133201 +             */
133202 +            opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
133204 +            /* large match -> immediate encoding */
133205 +            {   U32 const maxML = matches[nbMatches-1].len;
133206 +                U32 const maxOffset = matches[nbMatches-1].off;
133207 +                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
133208 +                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
133210 +                if (maxML > sufficient_len) {
133211 +                    lastSequence.litlen = litlen;
133212 +                    lastSequence.mlen = maxML;
133213 +                    lastSequence.off = maxOffset;
133214 +                    DEBUGLOG(6, "large match (%u>%u), immediate encoding",
133215 +                                maxML, sufficient_len);
133216 +                    cur = 0;
133217 +                    last_pos = ZSTD_totalLen(lastSequence);
133218 +                    goto _shortestPath;
133219 +            }   }
133221 +            /* set prices for first matches starting position == 0 */
133222 +            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
133223 +                U32 pos;
133224 +                U32 matchNb;
133225 +                for (pos = 1; pos < minMatch; pos++) {
133226 +                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
133227 +                }
133228 +                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
133229 +                    U32 const offset = matches[matchNb].off;
133230 +                    U32 const end = matches[matchNb].len;
133231 +                    for ( ; pos <= end ; pos++ ) {
133232 +                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
133233 +                        U32 const sequencePrice = literalsPrice + matchPrice;
133234 +                        DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
133235 +                                    pos, ZSTD_fCost(sequencePrice));
133236 +                        opt[pos].mlen = pos;
133237 +                        opt[pos].off = offset;
133238 +                        opt[pos].litlen = litlen;
133239 +                        opt[pos].price = sequencePrice;
133240 +                }   }
133241 +                last_pos = pos-1;
133242 +            }
133243 +        }
133245 +        /* check further positions */
133246 +        for (cur = 1; cur <= last_pos; cur++) {
133247 +            const BYTE* const inr = ip + cur;
133248 +            assert(cur < ZSTD_OPT_NUM);
133249 +            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
133251 +            /* Fix current position with one literal if cheaper */
133252 +            {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
133253 +                int const price = opt[cur-1].price
133254 +                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
133255 +                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
133256 +                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
133257 +                assert(price < 1000000000); /* overflow check */
133258 +                if (price <= opt[cur].price) {
133259 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
133260 +                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
133261 +                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
133262 +                    opt[cur].mlen = 0;
133263 +                    opt[cur].off = 0;
133264 +                    opt[cur].litlen = litlen;
133265 +                    opt[cur].price = price;
133266 +                } else {
133267 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
133268 +                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
133269 +                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
133270 +                }
133271 +            }
133273 +            /* Set the repcodes of the current position. We must do it here
133274 +             * because we rely on the repcodes of the 2nd to last sequence being
133275 +             * correct to set the next chunks repcodes during the backward
133276 +             * traversal.
133277 +             */
133278 +            ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
133279 +            assert(cur >= opt[cur].mlen);
133280 +            if (opt[cur].mlen != 0) {
133281 +                U32 const prev = cur - opt[cur].mlen;
133282 +                repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
133283 +                ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
133284 +            } else {
133285 +                ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
133286 +            }
133288 +            /* last match must start at a minimum distance of 8 from iend */
133289 +            if (inr > ilimit) continue;
133291 +            if (cur == last_pos) break;
133293 +            if ( (optLevel==0) /*static_test*/
133294 +              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
133295 +                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
133296 +                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
133297 +            }
133299 +            {   U32 const ll0 = (opt[cur].mlen != 0);
133300 +                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
133301 +                U32 const previousPrice = opt[cur].price;
133302 +                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
133303 +                U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
133304 +                U32 matchNb;
133306 +                ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
133307 +                                                  (U32)(inr-istart), (U32)(iend-inr));
133309 +                if (!nbMatches) {
133310 +                    DEBUGLOG(7, "rPos:%u : no match found", cur);
133311 +                    continue;
133312 +                }
133314 +                {   U32 const maxML = matches[nbMatches-1].len;
133315 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
133316 +                                inr-istart, cur, nbMatches, maxML);
133318 +                    if ( (maxML > sufficient_len)
133319 +                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
133320 +                        lastSequence.mlen = maxML;
133321 +                        lastSequence.off = matches[nbMatches-1].off;
133322 +                        lastSequence.litlen = litlen;
133323 +                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* last sequence is actually only literals; fix cur to the last match. note : may underflow, in which case it's the first sequence, and that's okay */
133324 +                        last_pos = cur + ZSTD_totalLen(lastSequence);
133325 +                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
133326 +                        goto _shortestPath;
133327 +                }   }
133329 +                /* set prices using matches found at position == cur */
133330 +                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
133331 +                    U32 const offset = matches[matchNb].off;
133332 +                    U32 const lastML = matches[matchNb].len;
133333 +                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
133334 +                    U32 mlen;
133336 +                    DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
133337 +                                matchNb, matches[matchNb].off, lastML, litlen);
133339 +                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
133340 +                        U32 const pos = cur + mlen;
133341 +                        int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
133343 +                        if ((pos > last_pos) || (price < opt[pos].price)) {
133344 +                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
133345 +                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
133346 +                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
133347 +                            opt[pos].mlen = mlen;
133348 +                            opt[pos].off = offset;
133349 +                            opt[pos].litlen = litlen;
133350 +                            opt[pos].price = price;
133351 +                        } else {
133352 +                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
133353 +                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
133354 +                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
133355 +                        }
133356 +            }   }   }
133357 +        }  /* for (cur = 1; cur <= last_pos; cur++) */
133359 +        lastSequence = opt[last_pos];
133360 +        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */
133361 +        assert(cur < ZSTD_OPT_NUM);  /* control overflow */
133363 +_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
133364 +        assert(opt[0].mlen == 0);
133366 +        /* Set the next chunk's repcodes based on the repcodes of the beginning
133367 +         * of the last match, and the last sequence. This avoids us having to
133368 +         * update them while traversing the sequences.
133369 +         */
133370 +        if (lastSequence.mlen != 0) {
133371 +            repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
133372 +            ZSTD_memcpy(rep, &reps, sizeof(reps));
133373 +        } else {
133374 +            ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
133375 +        }
133377 +        {   U32 const storeEnd = cur + 1;
133378 +            U32 storeStart = storeEnd;
133379 +            U32 seqPos = cur;
133381 +            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
133382 +                        last_pos, cur); (void)last_pos;
133383 +            assert(storeEnd < ZSTD_OPT_NUM);
133384 +            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
133385 +                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
133386 +            opt[storeEnd] = lastSequence;
133387 +            while (seqPos > 0) {
133388 +                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
133389 +                storeStart--;
133390 +                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
133391 +                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
133392 +                opt[storeStart] = opt[seqPos];
133393 +                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
133394 +            }
133396 +            /* save sequences */
133397 +            DEBUGLOG(6, "sending selected sequences into seqStore")
133398 +            {   U32 storePos;
133399 +                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
133400 +                    U32 const llen = opt[storePos].litlen;
133401 +                    U32 const mlen = opt[storePos].mlen;
133402 +                    U32 const offCode = opt[storePos].off;
133403 +                    U32 const advance = llen + mlen;
133404 +                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
133405 +                                anchor - istart, (unsigned)llen, (unsigned)mlen);
133407 +                    if (mlen==0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
133408 +                        assert(storePos == storeEnd);   /* must be last sequence */
133409 +                        ip = anchor + llen;     /* last "sequence" is a bunch of literals => don't progress anchor */
133410 +                        continue;   /* will finish */
133411 +                    }
133413 +                    assert(anchor + llen <= iend);
133414 +                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
133415 +                    ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
133416 +                    anchor += advance;
133417 +                    ip = anchor;
133418 +            }   }
133419 +            ZSTD_setBasePrices(optStatePtr, optLevel);
133420 +        }
133421 +    }   /* while (ip < ilimit) */
133423 +    /* Return the last literals size */
133424 +    return (size_t)(iend - anchor);
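+/* parser recap : opt[pos] holds the cheapest known way to reach ip+pos
+ * (price) plus how it got there (mlen/off/litlen and rep history). The
+ * forward pass relaxes positions like a shortest-path scan over a graph
+ * of literal steps and match edges; _shortestPath then walks backward
+ * through mlen+litlen back-links, re-orders the chosen sequences
+ * forward, and emits them into seqStore. */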
133428 +size_t ZSTD_compressBlock_btopt(
133429 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133430 +        const void* src, size_t srcSize)
133432 +    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
133433 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
133437 +/* used in 2-pass strategy */
133438 +static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
133440 +    U32 s, sum=0;
133441 +    assert(ZSTD_FREQ_DIV+bonus >= 0);
133442 +    for (s=0; s<lastEltIndex+1; s++) {
133443 +        table[s] <<= ZSTD_FREQ_DIV+bonus;
133444 +        table[s]--;
133445 +        sum += table[s];
133446 +    }
133447 +    return sum;
133448 +}
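[Editor's note] ZSTD_upscaleStat() multiplies each first-pass count by 2^(ZSTD_FREQ_DIV+bonus) and subtracts one, so the seeded statistics dominate the light smoothing applied when prices are recomputed later. Assuming ZSTD_FREQ_DIV is 4 (its value in upstream zstd at the time), a count of 3 with bonus 0 becomes (3 << 4) - 1 = 47. Isolated as a sketch:

    /* Editor's illustration of the per-entry re-weighting above. */
    static unsigned upscaled(unsigned count, int freqDiv, int bonus)
    {
        return (count << (freqDiv + bonus)) - 1;  /* e.g. upscaled(3, 4, 0) == 47 */
    }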
133450 +/* used in 2-pass strategy */
133451 +MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
133452 +{
133453 +    if (ZSTD_compressedLiterals(optPtr))
133454 +        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
133455 +    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
133456 +    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
133457 +    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
133458 +}
133460 +/* ZSTD_initStats_ultra():
133461 + * make a first compression pass, just to seed stats with more accurate starting values.
133462 + * only works on first block, with no dictionary and no ldm.
133463 + * this function cannot error, hence its contract must be respected.
133464 + */
133465 +static void
133466 +ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
133467 +                     seqStore_t* seqStore,
133468 +                     U32 rep[ZSTD_REP_NUM],
133469 +               const void* src, size_t srcSize)
133470 +{
133471 +    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here */
133472 +    ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
133474 +    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
133475 +    assert(ms->opt.litLengthSum == 0);    /* first block */
133476 +    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */
133477 +    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
133478 +    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);  /* no prefix (note: intentional overflow, defined as 2-complement) */
133480 +    ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt*/
133482 +    /* invalidate first scan from history */
133483 +    ZSTD_resetSeqStore(seqStore);
133484 +    ms->window.base -= srcSize;
133485 +    ms->window.dictLimit += (U32)srcSize;
133486 +    ms->window.lowLimit = ms->window.dictLimit;
133487 +    ms->nextToUpdate = ms->window.dictLimit;
133489 +    /* reinforce weight of collected statistics */
133490 +    ZSTD_upscaleStats(&ms->opt);
133491 +}
133493 +size_t ZSTD_compressBlock_btultra(
133494 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133495 +        const void* src, size_t srcSize)
133496 +{
133497 +    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
133498 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
133499 +}
133501 +size_t ZSTD_compressBlock_btultra2(
133502 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133503 +        const void* src, size_t srcSize)
133504 +{
133505 +    U32 const curr = (U32)((const BYTE*)src - ms->window.base);
133506 +    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
133508 +    /* 2-pass strategy:
133509 +     * this strategy makes a first pass over first block to collect statistics
133510 +     * and seed next round's statistics with it.
133511 +     * After 1st pass, function forgets everything, and starts a new block.
133512 +     * Consequently, this can only work if no data has been previously loaded in tables,
133513 +     * aka, no dictionary, no prefix, no ldm preprocessing.
133514 +     * The compression ratio gain is generally small (~0.5% on first block),
133515 +     * the cost is 2x cpu time on first block. */
133516 +    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
133517 +    if ( (ms->opt.litLengthSum==0)   /* first block */
133518 +      && (seqStore->sequences == seqStore->sequencesStart)  /* no ldm */
133519 +      && (ms->window.dictLimit == ms->window.lowLimit)   /* no dictionary */
133520 +      && (curr == ms->window.dictLimit)   /* start of frame, nothing already loaded nor skipped */
133521 +      && (srcSize > ZSTD_PREDEF_THRESHOLD)
133522 +      ) {
133523 +        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
133524 +    }
133526 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
133527 +}
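[Editor's note] The gate for the seeding pass reads as a single predicate over the match state; every condition must hold because the pass cannot roll back dictionary, prefix, or ldm state. A hedged sketch using the same fields as above (hypothetical helper, not part of the patch):

    static int can_seed_stats(const ZSTD_matchState_t *ms,
                              const seqStore_t *seqStore,
                              U32 curr, size_t srcSize)
    {
        return (ms->opt.litLengthSum == 0)                       /* first block */
            && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
            && (ms->window.dictLimit == ms->window.lowLimit)     /* no dictionary */
            && (curr == ms->window.dictLimit)                    /* start of frame */
            && (srcSize > ZSTD_PREDEF_THRESHOLD);                /* worth the 2x cpu */
    }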
133529 +size_t ZSTD_compressBlock_btopt_dictMatchState(
133530 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133531 +        const void* src, size_t srcSize)
133532 +{
133533 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
133534 +}
133536 +size_t ZSTD_compressBlock_btultra_dictMatchState(
133537 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133538 +        const void* src, size_t srcSize)
133539 +{
133540 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
133541 +}
133543 +size_t ZSTD_compressBlock_btopt_extDict(
133544 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133545 +        const void* src, size_t srcSize)
133546 +{
133547 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
133548 +}
133550 +size_t ZSTD_compressBlock_btultra_extDict(
133551 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133552 +        const void* src, size_t srcSize)
133553 +{
133554 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
133555 +}
133557 +/* note : no btultra2 variant for extDict nor dictMatchState,
133558 + * because btultra2 is not meant to work with dictionaries
133559 + * and is only specific for the first block (no prefix) */
133560 diff --git a/lib/zstd/compress/zstd_opt.h b/lib/zstd/compress/zstd_opt.h
133561 new file mode 100644
133562 index 000000000000..22b862858ba7
133563 --- /dev/null
133564 +++ b/lib/zstd/compress/zstd_opt.h
133565 @@ -0,0 +1,50 @@
133566 +/*
133567 + * Copyright (c) Yann Collet, Facebook, Inc.
133568 + * All rights reserved.
133570 + * This source code is licensed under both the BSD-style license (found in the
133571 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
133572 + * in the COPYING file in the root directory of this source tree).
133573 + * You may select, at your option, one of the above-listed licenses.
133574 + */
133576 +#ifndef ZSTD_OPT_H
133577 +#define ZSTD_OPT_H
133580 +#include "zstd_compress_internal.h"
133582 +/* used in ZSTD_loadDictionaryContent() */
133583 +void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
133585 +size_t ZSTD_compressBlock_btopt(
133586 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133587 +        void const* src, size_t srcSize);
133588 +size_t ZSTD_compressBlock_btultra(
133589 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133590 +        void const* src, size_t srcSize);
133591 +size_t ZSTD_compressBlock_btultra2(
133592 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133593 +        void const* src, size_t srcSize);
133596 +size_t ZSTD_compressBlock_btopt_dictMatchState(
133597 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133598 +        void const* src, size_t srcSize);
133599 +size_t ZSTD_compressBlock_btultra_dictMatchState(
133600 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133601 +        void const* src, size_t srcSize);
133603 +size_t ZSTD_compressBlock_btopt_extDict(
133604 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133605 +        void const* src, size_t srcSize);
133606 +size_t ZSTD_compressBlock_btultra_extDict(
133607 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
133608 +        void const* src, size_t srcSize);
133610 +        /* note : no btultra2 variant for extDict nor dictMatchState,
133611 +         * because btultra2 is not meant to work with dictionaries
133612 +         * and is only specific for the first block (no prefix) */
133615 +#endif /* ZSTD_OPT_H */
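[Editor's note] How a caller selects among these entry points is not shown in this hunk; in upstream zstd a table keyed by strategy and dictionary mode does the dispatch (an assumption here, verify against zstd_compress.c in the tree). A plausible dispatcher, purely as an editor's sketch:

    typedef size_t (*blockCompressor)(ZSTD_matchState_t*, seqStore_t*,
                                      U32 rep[ZSTD_REP_NUM],
                                      const void*, size_t);

    /* Pick an opt-family compressor by level and dictionary mode. */
    static blockCompressor pick_opt_compressor(int ultra, int extDict)
    {
        if (extDict)
            return ultra ? ZSTD_compressBlock_btultra_extDict
                         : ZSTD_compressBlock_btopt_extDict;
        return ultra ? ZSTD_compressBlock_btultra
                     : ZSTD_compressBlock_btopt;
    }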
133616 diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c
133617 deleted file mode 100644
133618 index 66cd487a326a..000000000000
133619 --- a/lib/zstd/decompress.c
133620 +++ /dev/null
133621 @@ -1,2531 +0,0 @@
133622 -/**
133623 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
133624 - * All rights reserved.
133626 - * This source code is licensed under the BSD-style license found in the
133627 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
133628 - * An additional grant of patent rights can be found in the PATENTS file in the
133629 - * same directory.
133631 - * This program is free software; you can redistribute it and/or modify it under
133632 - * the terms of the GNU General Public License version 2 as published by the
133633 - * Free Software Foundation. This program is dual-licensed; you may select
133634 - * either version 2 of the GNU General Public License ("GPL") or BSD license
133635 - * ("BSD").
133636 - */
133638 -/* ***************************************************************
133639 -*  Tuning parameters
133640 -*****************************************************************/
133641 -/*!
133642 -*  MAXWINDOWSIZE_DEFAULT :
133643 -*  maximum window size accepted by DStream, by default.
133644 -*  Frames requiring more memory will be rejected.
133645 -*/
133646 -#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
133647 -#define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */
133648 -#endif
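[Editor's note] In other words, a frame whose declared window exceeds this default is refused before any buffer allocation. A one-line sketch of the guard a streaming decoder would apply (hypothetical helper; the real check sits in the DStream setup, an assumption here):

    static int window_accepted(unsigned long long windowSize)
    {
        return windowSize <= ZSTD_MAXWINDOWSIZE_DEFAULT;
    }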
133650 -/*-*******************************************************
133651 -*  Dependencies
133652 -*********************************************************/
133653 -#include "fse.h"
133654 -#include "huf.h"
133655 -#include "mem.h" /* low level memory routines */
133656 -#include "zstd_internal.h"
133657 -#include <linux/kernel.h>
133658 -#include <linux/module.h>
133659 -#include <linux/string.h> /* memcpy, memmove, memset */
133661 -#define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0)
133663 -/*-*************************************
133664 -*  Macros
133665 -***************************************/
133666 -#define ZSTD_isError ERR_isError /* for inlining */
133667 -#define FSE_isError ERR_isError
133668 -#define HUF_isError ERR_isError
133670 -/*_*******************************************************
133671 -*  Memory operations
133672 -**********************************************************/
133673 -static void ZSTD_copy4(void *dst, const void *src) { memcpy(dst, src, 4); }
133675 -/*-*************************************************************
133676 -*   Context management
133677 -***************************************************************/
133678 -typedef enum {
133679 -       ZSTDds_getFrameHeaderSize,
133680 -       ZSTDds_decodeFrameHeader,
133681 -       ZSTDds_decodeBlockHeader,
133682 -       ZSTDds_decompressBlock,
133683 -       ZSTDds_decompressLastBlock,
133684 -       ZSTDds_checkChecksum,
133685 -       ZSTDds_decodeSkippableHeader,
133686 -       ZSTDds_skipFrame
133687 -} ZSTD_dStage;
133689 -typedef struct {
133690 -       FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
133691 -       FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
133692 -       FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
133693 -       HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
133694 -       U64 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32 / 2];
133695 -       U32 rep[ZSTD_REP_NUM];
133696 -} ZSTD_entropyTables_t;
133698 -struct ZSTD_DCtx_s {
133699 -       const FSE_DTable *LLTptr;
133700 -       const FSE_DTable *MLTptr;
133701 -       const FSE_DTable *OFTptr;
133702 -       const HUF_DTable *HUFptr;
133703 -       ZSTD_entropyTables_t entropy;
133704 -       const void *previousDstEnd; /* detect continuity */
133705 -       const void *base;          /* start of curr segment */
133706 -       const void *vBase;        /* virtual start of previous segment if it was just before curr one */
133707 -       const void *dictEnd;    /* end of previous segment */
133708 -       size_t expected;
133709 -       ZSTD_frameParams fParams;
133710 -       blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
133711 -       ZSTD_dStage stage;
133712 -       U32 litEntropy;
133713 -       U32 fseEntropy;
133714 -       struct xxh64_state xxhState;
133715 -       size_t headerSize;
133716 -       U32 dictID;
133717 -       const BYTE *litPtr;
133718 -       ZSTD_customMem customMem;
133719 -       size_t litSize;
133720 -       size_t rleSize;
133721 -       BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
133722 -       BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
133723 -}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
133725 -size_t ZSTD_DCtxWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx)); }
133727 -size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx)
133728 -{
133729 -       dctx->expected = ZSTD_frameHeaderSize_prefix;
133730 -       dctx->stage = ZSTDds_getFrameHeaderSize;
133731 -       dctx->previousDstEnd = NULL;
133732 -       dctx->base = NULL;
133733 -       dctx->vBase = NULL;
133734 -       dctx->dictEnd = NULL;
133735 -       dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
133736 -       dctx->litEntropy = dctx->fseEntropy = 0;
133737 -       dctx->dictID = 0;
133738 -       ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
133739 -       memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
133740 -       dctx->LLTptr = dctx->entropy.LLTable;
133741 -       dctx->MLTptr = dctx->entropy.MLTable;
133742 -       dctx->OFTptr = dctx->entropy.OFTable;
133743 -       dctx->HUFptr = dctx->entropy.hufTable;
133744 -       return 0;
133745 -}
133747 -ZSTD_DCtx *ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
133748 -{
133749 -       ZSTD_DCtx *dctx;
133751 -       if (!customMem.customAlloc || !customMem.customFree)
133752 -               return NULL;
133754 -       dctx = (ZSTD_DCtx *)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem);
133755 -       if (!dctx)
133756 -               return NULL;
133757 -       memcpy(&dctx->customMem, &customMem, sizeof(customMem));
133758 -       ZSTD_decompressBegin(dctx);
133759 -       return dctx;
133760 -}
133762 -ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize)
133763 -{
133764 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
133765 -       return ZSTD_createDCtx_advanced(stackMem);
133766 -}
133768 -size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx)
133769 -{
133770 -       if (dctx == NULL)
133771 -               return 0; /* support free on NULL */
133772 -       ZSTD_free(dctx, dctx->customMem);
133773 -       return 0; /* reserved as a potential error code in the future */
133774 -}
133776 -void ZSTD_copyDCtx(ZSTD_DCtx *dstDCtx, const ZSTD_DCtx *srcDCtx)
133777 -{
133778 -       size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max;
133779 -       memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */
133780 -}
133782 -static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict);
133784 -/*-*************************************************************
133785 -*   Decompression section
133786 -***************************************************************/
133788 -/*! ZSTD_isFrame() :
133789 - *  Tells if the content of `buffer` starts with a valid Frame Identifier.
133790 - *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
133791 - *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
133792 - *  Note 3 : Skippable Frame Identifiers are considered valid. */
133793 -unsigned ZSTD_isFrame(const void *buffer, size_t size)
133794 -{
133795 -       if (size < 4)
133796 -               return 0;
133797 -       {
133798 -               U32 const magic = ZSTD_readLE32(buffer);
133799 -               if (magic == ZSTD_MAGICNUMBER)
133800 -                       return 1;
133801 -               if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START)
133802 -                       return 1;
133803 -       }
133804 -       return 0;
133805 -}
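[Editor's note] Concretely, ZSTD_MAGICNUMBER is 0xFD2FB528 and skippable frames use the magic range 0x184D2A50..0x184D2A5F, which is why the low nibble is masked off above. The same probe in isolation (editor's sketch):

    /* Returns 1 for a normal or skippable zstd frame start; caller checks size >= 4. */
    static int probe_magic(const void *buf)
    {
        U32 const magic = ZSTD_readLE32(buf);
        return (magic == 0xFD2FB528U)                  /* ZSTD_MAGICNUMBER */
            || ((magic & 0xFFFFFFF0U) == 0x184D2A50U); /* skippable frame */
    }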
133807 -/** ZSTD_frameHeaderSize() :
133808 -*   srcSize must be >= ZSTD_frameHeaderSize_prefix.
133809 -*   @return : size of the Frame Header */
133810 -static size_t ZSTD_frameHeaderSize(const void *src, size_t srcSize)
133811 -{
133812 -       if (srcSize < ZSTD_frameHeaderSize_prefix)
133813 -               return ERROR(srcSize_wrong);
133814 -       {
133815 -               BYTE const fhd = ((const BYTE *)src)[4];
133816 -               U32 const dictID = fhd & 3;
133817 -               U32 const singleSegment = (fhd >> 5) & 1;
133818 -               U32 const fcsId = fhd >> 6;
133819 -               return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId);
133820 -       }
133821 -}
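[Editor's worked example] With fhd = 0x20, the fields decode to dictID = 0x20 & 3 = 0, singleSegment = (0x20 >> 5) & 1 = 1 and fcsId = 0x20 >> 6 = 0, so the header size is 5 (prefix) + 0 (!singleSegment) + 0 (dictID field) + 0 (fcs field) + 1 (content-size byte forced when singleSegment && !fcsId) = 6 bytes.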
133823 -/** ZSTD_getFrameParams() :
133824 -*   decode Frame Header, or require larger `srcSize`.
133825 -*   @return : 0, `fparamsPtr` is correctly filled,
133826 -*            >0, `srcSize` is too small, result is expected `srcSize`,
133827 -*             or an error code, which can be tested using ZSTD_isError() */
133828 -size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, size_t srcSize)
133829 -{
133830 -       const BYTE *ip = (const BYTE *)src;
133832 -       if (srcSize < ZSTD_frameHeaderSize_prefix)
133833 -               return ZSTD_frameHeaderSize_prefix;
133834 -       if (ZSTD_readLE32(src) != ZSTD_MAGICNUMBER) {
133835 -               if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
133836 -                       if (srcSize < ZSTD_skippableHeaderSize)
133837 -                               return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */
133838 -                       memset(fparamsPtr, 0, sizeof(*fparamsPtr));
133839 -                       fparamsPtr->frameContentSize = ZSTD_readLE32((const char *)src + 4);
133840 -                       fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
133841 -                       return 0;
133842 -               }
133843 -               return ERROR(prefix_unknown);
133844 -       }
133846 -       /* ensure there is enough `srcSize` to fully read/decode frame header */
133847 -       {
133848 -               size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
133849 -               if (srcSize < fhsize)
133850 -                       return fhsize;
133851 -       }
133853 -       {
133854 -               BYTE const fhdByte = ip[4];
133855 -               size_t pos = 5;
133856 -               U32 const dictIDSizeCode = fhdByte & 3;
133857 -               U32 const checksumFlag = (fhdByte >> 2) & 1;
133858 -               U32 const singleSegment = (fhdByte >> 5) & 1;
133859 -               U32 const fcsID = fhdByte >> 6;
133860 -               U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;
133861 -               U32 windowSize = 0;
133862 -               U32 dictID = 0;
133863 -               U64 frameContentSize = 0;
133864 -               if ((fhdByte & 0x08) != 0)
133865 -                       return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */
133866 -               if (!singleSegment) {
133867 -                       BYTE const wlByte = ip[pos++];
133868 -                       U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
133869 -                       if (windowLog > ZSTD_WINDOWLOG_MAX)
133870 -                               return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */
133871 -                       windowSize = (1U << windowLog);
133872 -                       windowSize += (windowSize >> 3) * (wlByte & 7);
133873 -               }
133875 -               switch (dictIDSizeCode) {
133876 -               default: /* impossible */
133877 -               case 0: break;
133878 -               case 1:
133879 -                       dictID = ip[pos];
133880 -                       pos++;
133881 -                       break;
133882 -               case 2:
133883 -                       dictID = ZSTD_readLE16(ip + pos);
133884 -                       pos += 2;
133885 -                       break;
133886 -               case 3:
133887 -                       dictID = ZSTD_readLE32(ip + pos);
133888 -                       pos += 4;
133889 -                       break;
133890 -               }
133891 -               switch (fcsID) {
133892 -               default: /* impossible */
133893 -               case 0:
133894 -                       if (singleSegment)
133895 -                               frameContentSize = ip[pos];
133896 -                       break;
133897 -               case 1: frameContentSize = ZSTD_readLE16(ip + pos) + 256; break;
133898 -               case 2: frameContentSize = ZSTD_readLE32(ip + pos); break;
133899 -               case 3: frameContentSize = ZSTD_readLE64(ip + pos); break;
133900 -               }
133901 -               if (!windowSize)
133902 -                       windowSize = (U32)frameContentSize;
133903 -               if (windowSize > windowSizeMax)
133904 -                       return ERROR(frameParameter_windowTooLarge);
133905 -               fparamsPtr->frameContentSize = frameContentSize;
133906 -               fparamsPtr->windowSize = windowSize;
133907 -               fparamsPtr->dictID = dictID;
133908 -               fparamsPtr->checksumFlag = checksumFlag;
133909 -       }
133910 -       return 0;
133911 -}
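[Editor's note] The three-way contract documented above (0 on success, a required size when the input is too short, or an error code) is easiest to see from the caller's side. A hedged usage sketch (hypothetical helper, not part of the patch):

    static int probe_frame(const void *src, size_t have, ZSTD_frameParams *fp)
    {
        size_t const need = ZSTD_getFrameParams(fp, src, have);
        if (ZSTD_isError(need)) return -1;  /* corrupt input or unknown magic */
        if (need > 0) return (int)need;     /* supply at least `need` bytes, retry */
        return 0;                           /* fp filled; windowSize==0 => skippable */
    }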
133913 -/** ZSTD_getFrameContentSize() :
133914 -*   compatible with legacy mode
133915 -*   @return : decompressed size of the single frame pointed to by `src` if known, otherwise
133916 -*             - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
133917 -*             - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
133918 -unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
133919 -{
133920 -       {
133921 -               ZSTD_frameParams fParams;
133922 -               if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0)
133923 -                       return ZSTD_CONTENTSIZE_ERROR;
133924 -               if (fParams.windowSize == 0) {
133925 -                       /* Either skippable or empty frame, size == 0 either way */
133926 -                       return 0;
133927 -               } else if (fParams.frameContentSize != 0) {
133928 -                       return fParams.frameContentSize;
133929 -               } else {
133930 -                       return ZSTD_CONTENTSIZE_UNKNOWN;
133931 -               }
133932 -       }
133933 -}
133935 -/** ZSTD_findDecompressedSize() :
133936 - *  compatible with legacy mode
133937 - *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
133938 - *      skippable frames
133939 - *  @return : decompressed size of the frames contained */
133940 -unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize)
133941 -{
133942 -       {
133943 -               unsigned long long totalDstSize = 0;
133944 -               while (srcSize >= ZSTD_frameHeaderSize_prefix) {
133945 -                       const U32 magicNumber = ZSTD_readLE32(src);
133947 -                       if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
133948 -                               size_t skippableSize;
133949 -                               if (srcSize < ZSTD_skippableHeaderSize)
133950 -                                       return ERROR(srcSize_wrong);
133951 -                               skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
133952 -                               if (srcSize < skippableSize) {
133953 -                                       return ZSTD_CONTENTSIZE_ERROR;
133954 -                               }
133956 -                               src = (const BYTE *)src + skippableSize;
133957 -                               srcSize -= skippableSize;
133958 -                               continue;
133959 -                       }
133961 -                       {
133962 -                               unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
133963 -                               if (ret >= ZSTD_CONTENTSIZE_ERROR)
133964 -                                       return ret;
133966 -                               /* check for overflow */
133967 -                               if (totalDstSize + ret < totalDstSize)
133968 -                                       return ZSTD_CONTENTSIZE_ERROR;
133969 -                               totalDstSize += ret;
133970 -                       }
133971 -                       {
133972 -                               size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
133973 -                               if (ZSTD_isError(frameSrcSize)) {
133974 -                                       return ZSTD_CONTENTSIZE_ERROR;
133975 -                               }
133977 -                               src = (const BYTE *)src + frameSrcSize;
133978 -                               srcSize -= frameSrcSize;
133979 -                       }
133980 -               }
133982 -               if (srcSize) {
133983 -                       return ZSTD_CONTENTSIZE_ERROR;
133984 -               }
133986 -               return totalDstSize;
133987 -       }
133988 -}
133990 -/** ZSTD_decodeFrameHeader() :
133991 -*   `headerSize` must be the size provided by ZSTD_frameHeaderSize().
133992 -*   @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
133993 -static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx *dctx, const void *src, size_t headerSize)
133994 -{
133995 -       size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize);
133996 -       if (ZSTD_isError(result))
133997 -               return result; /* invalid header */
133998 -       if (result > 0)
133999 -               return ERROR(srcSize_wrong); /* headerSize too small */
134000 -       if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
134001 -               return ERROR(dictionary_wrong);
134002 -       if (dctx->fParams.checksumFlag)
134003 -               xxh64_reset(&dctx->xxhState, 0);
134004 -       return 0;
134005 -}
134007 -typedef struct {
134008 -       blockType_e blockType;
134009 -       U32 lastBlock;
134010 -       U32 origSize;
134011 -} blockProperties_t;
134013 -/*! ZSTD_getcBlockSize() :
134014 -*   Provides the size of compressed block from block header `src` */
134015 -size_t ZSTD_getcBlockSize(const void *src, size_t srcSize, blockProperties_t *bpPtr)
134016 -{
134017 -       if (srcSize < ZSTD_blockHeaderSize)
134018 -               return ERROR(srcSize_wrong);
134019 -       {
134020 -               U32 const cBlockHeader = ZSTD_readLE24(src);
134021 -               U32 const cSize = cBlockHeader >> 3;
134022 -               bpPtr->lastBlock = cBlockHeader & 1;
134023 -               bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
134024 -               bpPtr->origSize = cSize; /* only useful for RLE */
134025 -               if (bpPtr->blockType == bt_rle)
134026 -                       return 1;
134027 -               if (bpPtr->blockType == bt_reserved)
134028 -                       return ERROR(corruption_detected);
134029 -               return cSize;
134030 -       }
134031 -}
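[Editor's worked example] Header bytes {0x25, 0x00, 0x00} read as the little-endian 24-bit value 0x000025: lastBlock = 0x25 & 1 = 1, blockType = (0x25 >> 1) & 3 = 2 (bt_compressed), and cSize = 0x25 >> 3 = 4 bytes of compressed payload follow.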
134033 -static size_t ZSTD_copyRawBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
134034 -{
134035 -       if (srcSize > dstCapacity)
134036 -               return ERROR(dstSize_tooSmall);
134037 -       memcpy(dst, src, srcSize);
134038 -       return srcSize;
134039 -}
134041 -static size_t ZSTD_setRleBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize, size_t regenSize)
134042 -{
134043 -       if (srcSize != 1)
134044 -               return ERROR(srcSize_wrong);
134045 -       if (regenSize > dstCapacity)
134046 -               return ERROR(dstSize_tooSmall);
134047 -       memset(dst, *(const BYTE *)src, regenSize);
134048 -       return regenSize;
134049 -}
134051 -/*! ZSTD_decodeLiteralsBlock() :
134052 -       @return : nb of bytes read from src (< srcSize ) */
134053 -size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
134054 -{
134055 -       if (srcSize < MIN_CBLOCK_SIZE)
134056 -               return ERROR(corruption_detected);
134058 -       {
134059 -               const BYTE *const istart = (const BYTE *)src;
134060 -               symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
134062 -               switch (litEncType) {
134063 -               case set_repeat:
134064 -                       if (dctx->litEntropy == 0)
134065 -                               return ERROR(dictionary_corrupted);
134066 -                       fallthrough;
134067 -               case set_compressed:
134068 -                       if (srcSize < 5)
134069 -                               return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
134070 -                       {
134071 -                               size_t lhSize, litSize, litCSize;
134072 -                               U32 singleStream = 0;
134073 -                               U32 const lhlCode = (istart[0] >> 2) & 3;
134074 -                               U32 const lhc = ZSTD_readLE32(istart);
134075 -                               switch (lhlCode) {
134076 -                               case 0:
134077 -                               case 1:
134078 -                               default: /* note : default is impossible, since lhlCode into [0..3] */
134079 -                                       /* 2 - 2 - 10 - 10 */
134080 -                                       singleStream = !lhlCode;
134081 -                                       lhSize = 3;
134082 -                                       litSize = (lhc >> 4) & 0x3FF;
134083 -                                       litCSize = (lhc >> 14) & 0x3FF;
134084 -                                       break;
134085 -                               case 2:
134086 -                                       /* 2 - 2 - 14 - 14 */
134087 -                                       lhSize = 4;
134088 -                                       litSize = (lhc >> 4) & 0x3FFF;
134089 -                                       litCSize = lhc >> 18;
134090 -                                       break;
134091 -                               case 3:
134092 -                                       /* 2 - 2 - 18 - 18 */
134093 -                                       lhSize = 5;
134094 -                                       litSize = (lhc >> 4) & 0x3FFFF;
134095 -                                       litCSize = (lhc >> 22) + (istart[4] << 10);
134096 -                                       break;
134097 -                               }
134098 -                               if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
134099 -                                       return ERROR(corruption_detected);
134100 -                               if (litCSize + lhSize > srcSize)
134101 -                                       return ERROR(corruption_detected);
134103 -                               if (HUF_isError(
134104 -                                       (litEncType == set_repeat)
134105 -                                           ? (singleStream ? HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr)
134106 -                                                           : HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr))
134107 -                                           : (singleStream
134108 -                                                  ? HUF_decompress1X2_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
134109 -                                                                                dctx->entropy.workspace, sizeof(dctx->entropy.workspace))
134110 -                                                  : HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
134111 -                                                                                  dctx->entropy.workspace, sizeof(dctx->entropy.workspace)))))
134112 -                                       return ERROR(corruption_detected);
134114 -                               dctx->litPtr = dctx->litBuffer;
134115 -                               dctx->litSize = litSize;
134116 -                               dctx->litEntropy = 1;
134117 -                               if (litEncType == set_compressed)
134118 -                                       dctx->HUFptr = dctx->entropy.hufTable;
134119 -                               memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
134120 -                               return litCSize + lhSize;
134121 -                       }
134123 -               case set_basic: {
134124 -                       size_t litSize, lhSize;
134125 -                       U32 const lhlCode = ((istart[0]) >> 2) & 3;
134126 -                       switch (lhlCode) {
134127 -                       case 0:
134128 -                       case 2:
134129 -                       default: /* note : default is impossible, since lhlCode into [0..3] */
134130 -                               lhSize = 1;
134131 -                               litSize = istart[0] >> 3;
134132 -                               break;
134133 -                       case 1:
134134 -                               lhSize = 2;
134135 -                               litSize = ZSTD_readLE16(istart) >> 4;
134136 -                               break;
134137 -                       case 3:
134138 -                               lhSize = 3;
134139 -                               litSize = ZSTD_readLE24(istart) >> 4;
134140 -                               break;
134141 -                       }
134143 -                       if (lhSize + litSize + WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
134144 -                               if (litSize + lhSize > srcSize)
134145 -                                       return ERROR(corruption_detected);
134146 -                               memcpy(dctx->litBuffer, istart + lhSize, litSize);
134147 -                               dctx->litPtr = dctx->litBuffer;
134148 -                               dctx->litSize = litSize;
134149 -                               memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
134150 -                               return lhSize + litSize;
134151 -                       }
134152 -                       /* direct reference into compressed stream */
134153 -                       dctx->litPtr = istart + lhSize;
134154 -                       dctx->litSize = litSize;
134155 -                       return lhSize + litSize;
134156 -               }
134158 -               case set_rle: {
134159 -                       U32 const lhlCode = ((istart[0]) >> 2) & 3;
134160 -                       size_t litSize, lhSize;
134161 -                       switch (lhlCode) {
134162 -                       case 0:
134163 -                       case 2:
134164 -                       default: /* note : default is impossible, since lhlCode into [0..3] */
134165 -                               lhSize = 1;
134166 -                               litSize = istart[0] >> 3;
134167 -                               break;
134168 -                       case 1:
134169 -                               lhSize = 2;
134170 -                               litSize = ZSTD_readLE16(istart) >> 4;
134171 -                               break;
134172 -                       case 3:
134173 -                               lhSize = 3;
134174 -                               litSize = ZSTD_readLE24(istart) >> 4;
134175 -                               if (srcSize < 4)
134176 -                                       return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
134177 -                               break;
134178 -                       }
134179 -                       if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
134180 -                               return ERROR(corruption_detected);
134181 -                       memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
134182 -                       dctx->litPtr = dctx->litBuffer;
134183 -                       dctx->litSize = litSize;
134184 -                       return lhSize + 1;
134185 -               }
134186 -               default:
134187 -                       return ERROR(corruption_detected); /* impossible */
134188 -               }
134189 -       }
134190 -}
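[Editor's worked example] For set_compressed with lhlCode == 3, the "2 - 2 - 18 - 18" layout packs 40 bits, hence lhSize = 5: litSize comes from bits 4..21 of the LE32 read ((lhc >> 4) & 0x3FFFF), and litCSize is split across the reads, (lhc >> 22) supplying its low 10 bits and istart[4] << 10 the high 8.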
134192 -typedef union {
134193 -       FSE_decode_t realData;
134194 -       U32 alignedBy4;
134195 -} FSE_decode_t4;
134197 -static const FSE_decode_t4 LL_defaultDTable[(1 << LL_DEFAULTNORMLOG) + 1] = {
134198 -    {{LL_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
134199 -    {{0, 0, 4}},                /* 0 : base, symbol, bits */
134200 -    {{16, 0, 4}},
134201 -    {{32, 1, 5}},
134202 -    {{0, 3, 5}},
134203 -    {{0, 4, 5}},
134204 -    {{0, 6, 5}},
134205 -    {{0, 7, 5}},
134206 -    {{0, 9, 5}},
134207 -    {{0, 10, 5}},
134208 -    {{0, 12, 5}},
134209 -    {{0, 14, 6}},
134210 -    {{0, 16, 5}},
134211 -    {{0, 18, 5}},
134212 -    {{0, 19, 5}},
134213 -    {{0, 21, 5}},
134214 -    {{0, 22, 5}},
134215 -    {{0, 24, 5}},
134216 -    {{32, 25, 5}},
134217 -    {{0, 26, 5}},
134218 -    {{0, 27, 6}},
134219 -    {{0, 29, 6}},
134220 -    {{0, 31, 6}},
134221 -    {{32, 0, 4}},
134222 -    {{0, 1, 4}},
134223 -    {{0, 2, 5}},
134224 -    {{32, 4, 5}},
134225 -    {{0, 5, 5}},
134226 -    {{32, 7, 5}},
134227 -    {{0, 8, 5}},
134228 -    {{32, 10, 5}},
134229 -    {{0, 11, 5}},
134230 -    {{0, 13, 6}},
134231 -    {{32, 16, 5}},
134232 -    {{0, 17, 5}},
134233 -    {{32, 19, 5}},
134234 -    {{0, 20, 5}},
134235 -    {{32, 22, 5}},
134236 -    {{0, 23, 5}},
134237 -    {{0, 25, 4}},
134238 -    {{16, 25, 4}},
134239 -    {{32, 26, 5}},
134240 -    {{0, 28, 6}},
134241 -    {{0, 30, 6}},
134242 -    {{48, 0, 4}},
134243 -    {{16, 1, 4}},
134244 -    {{32, 2, 5}},
134245 -    {{32, 3, 5}},
134246 -    {{32, 5, 5}},
134247 -    {{32, 6, 5}},
134248 -    {{32, 8, 5}},
134249 -    {{32, 9, 5}},
134250 -    {{32, 11, 5}},
134251 -    {{32, 12, 5}},
134252 -    {{0, 15, 6}},
134253 -    {{32, 17, 5}},
134254 -    {{32, 18, 5}},
134255 -    {{32, 20, 5}},
134256 -    {{32, 21, 5}},
134257 -    {{32, 23, 5}},
134258 -    {{32, 24, 5}},
134259 -    {{0, 35, 6}},
134260 -    {{0, 34, 6}},
134261 -    {{0, 33, 6}},
134262 -    {{0, 32, 6}},
134263 -}; /* LL_defaultDTable */
134265 -static const FSE_decode_t4 ML_defaultDTable[(1 << ML_DEFAULTNORMLOG) + 1] = {
134266 -    {{ML_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
134267 -    {{0, 0, 6}},                /* 0 : base, symbol, bits */
134268 -    {{0, 1, 4}},
134269 -    {{32, 2, 5}},
134270 -    {{0, 3, 5}},
134271 -    {{0, 5, 5}},
134272 -    {{0, 6, 5}},
134273 -    {{0, 8, 5}},
134274 -    {{0, 10, 6}},
134275 -    {{0, 13, 6}},
134276 -    {{0, 16, 6}},
134277 -    {{0, 19, 6}},
134278 -    {{0, 22, 6}},
134279 -    {{0, 25, 6}},
134280 -    {{0, 28, 6}},
134281 -    {{0, 31, 6}},
134282 -    {{0, 33, 6}},
134283 -    {{0, 35, 6}},
134284 -    {{0, 37, 6}},
134285 -    {{0, 39, 6}},
134286 -    {{0, 41, 6}},
134287 -    {{0, 43, 6}},
134288 -    {{0, 45, 6}},
134289 -    {{16, 1, 4}},
134290 -    {{0, 2, 4}},
134291 -    {{32, 3, 5}},
134292 -    {{0, 4, 5}},
134293 -    {{32, 6, 5}},
134294 -    {{0, 7, 5}},
134295 -    {{0, 9, 6}},
134296 -    {{0, 12, 6}},
134297 -    {{0, 15, 6}},
134298 -    {{0, 18, 6}},
134299 -    {{0, 21, 6}},
134300 -    {{0, 24, 6}},
134301 -    {{0, 27, 6}},
134302 -    {{0, 30, 6}},
134303 -    {{0, 32, 6}},
134304 -    {{0, 34, 6}},
134305 -    {{0, 36, 6}},
134306 -    {{0, 38, 6}},
134307 -    {{0, 40, 6}},
134308 -    {{0, 42, 6}},
134309 -    {{0, 44, 6}},
134310 -    {{32, 1, 4}},
134311 -    {{48, 1, 4}},
134312 -    {{16, 2, 4}},
134313 -    {{32, 4, 5}},
134314 -    {{32, 5, 5}},
134315 -    {{32, 7, 5}},
134316 -    {{32, 8, 5}},
134317 -    {{0, 11, 6}},
134318 -    {{0, 14, 6}},
134319 -    {{0, 17, 6}},
134320 -    {{0, 20, 6}},
134321 -    {{0, 23, 6}},
134322 -    {{0, 26, 6}},
134323 -    {{0, 29, 6}},
134324 -    {{0, 52, 6}},
134325 -    {{0, 51, 6}},
134326 -    {{0, 50, 6}},
134327 -    {{0, 49, 6}},
134328 -    {{0, 48, 6}},
134329 -    {{0, 47, 6}},
134330 -    {{0, 46, 6}},
134331 -}; /* ML_defaultDTable */
134333 -static const FSE_decode_t4 OF_defaultDTable[(1 << OF_DEFAULTNORMLOG) + 1] = {
134334 -    {{OF_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
134335 -    {{0, 0, 5}},                /* 0 : base, symbol, bits */
134336 -    {{0, 6, 4}},
134337 -    {{0, 9, 5}},
134338 -    {{0, 15, 5}},
134339 -    {{0, 21, 5}},
134340 -    {{0, 3, 5}},
134341 -    {{0, 7, 4}},
134342 -    {{0, 12, 5}},
134343 -    {{0, 18, 5}},
134344 -    {{0, 23, 5}},
134345 -    {{0, 5, 5}},
134346 -    {{0, 8, 4}},
134347 -    {{0, 14, 5}},
134348 -    {{0, 20, 5}},
134349 -    {{0, 2, 5}},
134350 -    {{16, 7, 4}},
134351 -    {{0, 11, 5}},
134352 -    {{0, 17, 5}},
134353 -    {{0, 22, 5}},
134354 -    {{0, 4, 5}},
134355 -    {{16, 8, 4}},
134356 -    {{0, 13, 5}},
134357 -    {{0, 19, 5}},
134358 -    {{0, 1, 5}},
134359 -    {{16, 6, 4}},
134360 -    {{0, 10, 5}},
134361 -    {{0, 16, 5}},
134362 -    {{0, 28, 5}},
134363 -    {{0, 27, 5}},
134364 -    {{0, 26, 5}},
134365 -    {{0, 25, 5}},
134366 -    {{0, 24, 5}},
134367 -}; /* OF_defaultDTable */
134369 -/*! ZSTD_buildSeqTable() :
134370 -       @return : nb bytes read from src,
134371 -                         or an error code if it fails, testable with ZSTD_isError()
134372 -*/
134373 -static size_t ZSTD_buildSeqTable(FSE_DTable *DTableSpace, const FSE_DTable **DTablePtr, symbolEncodingType_e type, U32 max, U32 maxLog, const void *src,
134374 -                                size_t srcSize, const FSE_decode_t4 *defaultTable, U32 flagRepeatTable, void *workspace, size_t workspaceSize)
134375 -{
134376 -       const void *const tmpPtr = defaultTable; /* bypass strict aliasing */
134377 -       switch (type) {
134378 -       case set_rle:
134379 -               if (!srcSize)
134380 -                       return ERROR(srcSize_wrong);
134381 -               if ((*(const BYTE *)src) > max)
134382 -                       return ERROR(corruption_detected);
134383 -               FSE_buildDTable_rle(DTableSpace, *(const BYTE *)src);
134384 -               *DTablePtr = DTableSpace;
134385 -               return 1;
134386 -       case set_basic: *DTablePtr = (const FSE_DTable *)tmpPtr; return 0;
134387 -       case set_repeat:
134388 -               if (!flagRepeatTable)
134389 -                       return ERROR(corruption_detected);
134390 -               return 0;
134391 -       default: /* impossible */
134392 -       case set_compressed: {
134393 -               U32 tableLog;
134394 -               S16 *norm = (S16 *)workspace;
134395 -               size_t const spaceUsed32 = ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
134397 -               if ((spaceUsed32 << 2) > workspaceSize)
134398 -                       return ERROR(GENERIC);
134399 -               workspace = (U32 *)workspace + spaceUsed32;
134400 -               workspaceSize -= (spaceUsed32 << 2);
134401 -               {
134402 -                       size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
134403 -                       if (FSE_isError(headerSize))
134404 -                               return ERROR(corruption_detected);
134405 -                       if (tableLog > maxLog)
134406 -                               return ERROR(corruption_detected);
134407 -                       FSE_buildDTable_wksp(DTableSpace, norm, max, tableLog, workspace, workspaceSize);
134408 -                       *DTablePtr = DTableSpace;
134409 -                       return headerSize;
134410 -               }
134411 -       }
134412 -       }
134413 -}
134415 -size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize)
134416 -{
134417 -       const BYTE *const istart = (const BYTE *const)src;
134418 -       const BYTE *const iend = istart + srcSize;
134419 -       const BYTE *ip = istart;
134421 -       /* check */
134422 -       if (srcSize < MIN_SEQUENCES_SIZE)
134423 -               return ERROR(srcSize_wrong);
134425 -       /* SeqHead */
134426 -       {
134427 -               int nbSeq = *ip++;
134428 -               if (!nbSeq) {
134429 -                       *nbSeqPtr = 0;
134430 -                       return 1;
134431 -               }
134432 -               if (nbSeq > 0x7F) {
134433 -                       if (nbSeq == 0xFF) {
134434 -                               if (ip + 2 > iend)
134435 -                                       return ERROR(srcSize_wrong);
134436 -                               nbSeq = ZSTD_readLE16(ip) + LONGNBSEQ, ip += 2;
134437 -                       } else {
134438 -                               if (ip >= iend)
134439 -                                       return ERROR(srcSize_wrong);
134440 -                               nbSeq = ((nbSeq - 0x80) << 8) + *ip++;
134441 -                       }
134442 -               }
134443 -               *nbSeqPtr = nbSeq;
134444 -       }
134446 -       /* FSE table descriptors */
134447 -       if (ip + 4 > iend)
134448 -               return ERROR(srcSize_wrong); /* minimum possible size */
134449 -       {
134450 -               symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
134451 -               symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
134452 -               symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
134453 -               ip++;
134455 -               /* Build DTables */
134456 -               {
134457 -                       size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend - ip,
134458 -                                                                 LL_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
134459 -                       if (ZSTD_isError(llhSize))
134460 -                               return ERROR(corruption_detected);
134461 -                       ip += llhSize;
134462 -               }
134463 -               {
134464 -                       size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend - ip,
134465 -                                                                 OF_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
134466 -                       if (ZSTD_isError(ofhSize))
134467 -                               return ERROR(corruption_detected);
134468 -                       ip += ofhSize;
134469 -               }
134470 -               {
134471 -                       size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend - ip,
134472 -                                                                 ML_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
134473 -                       if (ZSTD_isError(mlhSize))
134474 -                               return ERROR(corruption_detected);
134475 -                       ip += mlhSize;
134476 -               }
134477 -       }
134479 -       return ip - istart;
134480 -}
134482 -typedef struct {
134483 -       size_t litLength;
134484 -       size_t matchLength;
134485 -       size_t offset;
134486 -       const BYTE *match;
134487 -} seq_t;
134489 -typedef struct {
134490 -       BIT_DStream_t DStream;
134491 -       FSE_DState_t stateLL;
134492 -       FSE_DState_t stateOffb;
134493 -       FSE_DState_t stateML;
134494 -       size_t prevOffset[ZSTD_REP_NUM];
134495 -       const BYTE *base;
134496 -       size_t pos;
134497 -       uPtrDiff gotoDict;
134498 -} seqState_t;
134500 -FORCE_NOINLINE
134501 -size_t ZSTD_execSequenceLast7(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
134502 -                             const BYTE *const vBase, const BYTE *const dictEnd)
134503 -{
134504 -       BYTE *const oLitEnd = op + sequence.litLength;
134505 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
134506 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
134507 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
134508 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
134509 -       const BYTE *match = oLitEnd - sequence.offset;
134511 -       /* check */
134512 -       if (oMatchEnd > oend)
134513 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
134514 -       if (iLitEnd > litLimit)
134515 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
134516 -       if (oLitEnd <= oend_w)
134517 -               return ERROR(GENERIC); /* Precondition */
134519 -       /* copy literals */
134520 -       if (op < oend_w) {
134521 -               ZSTD_wildcopy(op, *litPtr, oend_w - op);
134522 -               *litPtr += oend_w - op;
134523 -               op = oend_w;
134524 -       }
134525 -       while (op < oLitEnd)
134526 -               *op++ = *(*litPtr)++;
134528 -       /* copy Match */
134529 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
134530 -               /* offset beyond prefix */
134531 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
134532 -                       return ERROR(corruption_detected);
134533 -               match = dictEnd - (base - match);
134534 -               if (match + sequence.matchLength <= dictEnd) {
134535 -                       memmove(oLitEnd, match, sequence.matchLength);
134536 -                       return sequenceLength;
134537 -               }
134538 -               /* span extDict & currPrefixSegment */
134539 -               {
134540 -                       size_t const length1 = dictEnd - match;
134541 -                       memmove(oLitEnd, match, length1);
134542 -                       op = oLitEnd + length1;
134543 -                       sequence.matchLength -= length1;
134544 -                       match = base;
134545 -               }
134546 -       }
134547 -       while (op < oMatchEnd)
134548 -               *op++ = *match++;
134549 -       return sequenceLength;
134550 -}
134552 -static seq_t ZSTD_decodeSequence(seqState_t *seqState)
134553 -{
134554 -       seq_t seq;
134556 -       U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
134557 -       U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
134558 -       U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
134560 -       U32 const llBits = LL_bits[llCode];
134561 -       U32 const mlBits = ML_bits[mlCode];
134562 -       U32 const ofBits = ofCode;
134563 -       U32 const totalBits = llBits + mlBits + ofBits;
134565 -       static const U32 LL_base[MaxLL + 1] = {0,  1,  2,  3,  4,  5,  6,  7,  8,    9,     10,    11,    12,    13,     14,     15,     16,     18,
134566 -                                              20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
134568 -       static const U32 ML_base[MaxML + 1] = {3,  4,  5,  6,  7,  8,  9,  10,   11,    12,    13,    14,    15,     16,     17,     18,     19,     20,
134569 -                                              21, 22, 23, 24, 25, 26, 27, 28,   29,    30,    31,    32,    33,     34,     35,     37,     39,     41,
134570 -                                              43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
134572 -       static const U32 OF_base[MaxOff + 1] = {0,       1,     1,      5,      0xD,      0x1D,      0x3D,      0x7D,      0xFD,     0x1FD,
134573 -                                               0x3FD,   0x7FD,    0xFFD,    0x1FFD,   0x3FFD,   0x7FFD,    0xFFFD,    0x1FFFD,   0x3FFFD,  0x7FFFD,
134574 -                                               0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
134576 -       /* sequence */
134577 -       {
134578 -               size_t offset;
134579 -               if (!ofCode)
134580 -                       offset = 0;
134581 -               else {
134582 -                       offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
134583 -                       if (ZSTD_32bits())
134584 -                               BIT_reloadDStream(&seqState->DStream);
134585 -               }
134587 -               if (ofCode <= 1) {
134588 -                       offset += (llCode == 0);
134589 -                       if (offset) {
134590 -                               size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
134591 -                               temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
134592 -                               if (offset != 1)
134593 -                                       seqState->prevOffset[2] = seqState->prevOffset[1];
134594 -                               seqState->prevOffset[1] = seqState->prevOffset[0];
134595 -                               seqState->prevOffset[0] = offset = temp;
134596 -                       } else {
134597 -                               offset = seqState->prevOffset[0];
134598 -                       }
134599 -               } else {
134600 -                       seqState->prevOffset[2] = seqState->prevOffset[1];
134601 -                       seqState->prevOffset[1] = seqState->prevOffset[0];
134602 -                       seqState->prevOffset[0] = offset;
134603 -               }
134604 -               seq.offset = offset;
134605 -       }
134607 -       seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <=  16 bits */
134608 -       if (ZSTD_32bits() && (mlBits + llBits > 24))
134609 -               BIT_reloadDStream(&seqState->DStream);
134611 -       seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <=  16 bits */
134612 -       if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
134613 -               BIT_reloadDStream(&seqState->DStream);
134615 -       /* ANS state update */
134616 -       FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <=  9 bits */
134617 -       FSE_updateState(&seqState->stateML, &seqState->DStream); /* <=  9 bits */
134618 -       if (ZSTD_32bits())
134619 -               BIT_reloadDStream(&seqState->DStream);             /* <= 18 bits */
134620 -       FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <=  8 bits */
134622 -       seq.match = NULL;
134624 -       return seq;
134625 -}
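[Editor's note] The branchy offset block above maintains a 3-slot most-recently-used repeat-offset history: for ofCode 0 or 1, the raw value plus one when litLength is zero gives an index where 0 repeats prevOffset[0], 1 and 2 read prevOffset[1] and prevOffset[2], and 3 means prevOffset[0] - 1; the selected offset is promoted to the front, and any larger code pushes a fresh offset. The push itself, isolated as a sketch:

    /* MRU insert into the 3-entry repeat-offset history. */
    static void push_offset(size_t prev[3], size_t newOffset)
    {
        prev[2] = prev[1];
        prev[1] = prev[0];
        prev[0] = newOffset;   /* most recent first */
    }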
134627 -FORCE_INLINE
134628 -size_t ZSTD_execSequence(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
134629 -                        const BYTE *const vBase, const BYTE *const dictEnd)
134630 -{
134631 -       BYTE *const oLitEnd = op + sequence.litLength;
134632 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
134633 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
134634 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
134635 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
134636 -       const BYTE *match = oLitEnd - sequence.offset;
134638 -       /* check */
134639 -       if (oMatchEnd > oend)
134640 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
134641 -       if (iLitEnd > litLimit)
134642 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
134643 -       if (oLitEnd > oend_w)
134644 -               return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
134646 -       /* copy Literals */
134647 -       ZSTD_copy8(op, *litPtr);
134648 -       if (sequence.litLength > 8)
134649 -               ZSTD_wildcopy(op + 8, (*litPtr) + 8,
134650 -                             sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
134651 -       op = oLitEnd;
134652 -       *litPtr = iLitEnd; /* update for next sequence */
134654 -       /* copy Match */
134655 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
134656 -               /* offset beyond prefix */
134657 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
134658 -                       return ERROR(corruption_detected);
134659 -               match = dictEnd + (match - base);
134660 -               if (match + sequence.matchLength <= dictEnd) {
134661 -                       memmove(oLitEnd, match, sequence.matchLength);
134662 -                       return sequenceLength;
134663 -               }
134664 -               /* span extDict & currPrefixSegment */
134665 -               {
134666 -                       size_t const length1 = dictEnd - match;
134667 -                       memmove(oLitEnd, match, length1);
134668 -                       op = oLitEnd + length1;
134669 -                       sequence.matchLength -= length1;
134670 -                       match = base;
134671 -                       if (op > oend_w || sequence.matchLength < MINMATCH) {
134672 -                               U32 i;
134673 -                               for (i = 0; i < sequence.matchLength; ++i)
134674 -                                       op[i] = match[i];
134675 -                               return sequenceLength;
134676 -                       }
134677 -               }
134678 -       }
134679 -       /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
134681 -       /* match within prefix */
134682 -       if (sequence.offset < 8) {
134683 -               /* close range match, overlap */
134684 -               static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
134685 -               static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
134686 -               int const sub2 = dec64table[sequence.offset];
134687 -               op[0] = match[0];
134688 -               op[1] = match[1];
134689 -               op[2] = match[2];
134690 -               op[3] = match[3];
134691 -               match += dec32table[sequence.offset];
134692 -               ZSTD_copy4(op + 4, match);
134693 -               match -= sub2;
134694 -       } else {
134695 -               ZSTD_copy8(op, match);
134696 -       }
134697 -       op += 8;
134698 -       match += 8;
134700 -       if (oMatchEnd > oend - (16 - MINMATCH)) {
134701 -               if (op < oend_w) {
134702 -                       ZSTD_wildcopy(op, match, oend_w - op);
134703 -                       match += oend_w - op;
134704 -                       op = oend_w;
134705 -               }
134706 -               while (op < oMatchEnd)
134707 -                       *op++ = *match++;
134708 -       } else {
134709 -               ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
134710 -       }
134711 -       return sequenceLength;
134712 -}
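/*
 * For offsets < 8 the 8-byte wildcopy above would read bytes it has not yet
 * written, so dec32table/dec64table first widen the effective offset. Any
 * such fast path must preserve this reference semantics: an LZ77 match copy
 * is a strictly forward byte-by-byte copy, so a match may overlap its own
 * output and replicate data. A standalone sketch (the helper name is
 * illustrative, not part of this file):
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static void copy_match_ref(unsigned char *op, size_t offset, size_t length)
{
        const unsigned char *match = op - offset;
        size_t i;
        for (i = 0; i < length; i++)
                op[i] = match[i];          /* forward order makes overlap safe */
}

int main(void)
{
        unsigned char buf[32] = "abc";
        copy_match_ref(buf + 3, 3, 12);    /* offset 3 < length 12: replication */
        assert(memcmp(buf, "abcabcabcabcabc", 15) == 0);
        printf("%.15s\n", buf);
        return 0;
}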
134714 -static size_t ZSTD_decompressSequences(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
134715 -{
134716 -       const BYTE *ip = (const BYTE *)seqStart;
134717 -       const BYTE *const iend = ip + seqSize;
134718 -       BYTE *const ostart = (BYTE * const)dst;
134719 -       BYTE *const oend = ostart + maxDstSize;
134720 -       BYTE *op = ostart;
134721 -       const BYTE *litPtr = dctx->litPtr;
134722 -       const BYTE *const litEnd = litPtr + dctx->litSize;
134723 -       const BYTE *const base = (const BYTE *)(dctx->base);
134724 -       const BYTE *const vBase = (const BYTE *)(dctx->vBase);
134725 -       const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
134726 -       int nbSeq;
134728 -       /* Build Decoding Tables */
134729 -       {
134730 -               size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
134731 -               if (ZSTD_isError(seqHSize))
134732 -                       return seqHSize;
134733 -               ip += seqHSize;
134734 -       }
134736 -       /* Regen sequences */
134737 -       if (nbSeq) {
134738 -               seqState_t seqState;
134739 -               dctx->fseEntropy = 1;
134740 -               {
134741 -                       U32 i;
134742 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
134743 -                               seqState.prevOffset[i] = dctx->entropy.rep[i];
134744 -               }
134745 -               CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
134746 -               FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
134747 -               FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
134748 -               FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
134750 -               for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq;) {
134751 -                       nbSeq--;
134752 -                       {
134753 -                               seq_t const sequence = ZSTD_decodeSequence(&seqState);
134754 -                               size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
134755 -                               if (ZSTD_isError(oneSeqSize))
134756 -                                       return oneSeqSize;
134757 -                               op += oneSeqSize;
134758 -                       }
134759 -               }
134761 -               /* check if reached exact end */
134762 -               if (nbSeq)
134763 -                       return ERROR(corruption_detected);
134764 -               /* save reps for next block */
134765 -               {
134766 -                       U32 i;
134767 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
134768 -                               dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
134769 -               }
134770 -       }
134772 -       /* last literal segment */
134773 -       {
134774 -               size_t const lastLLSize = litEnd - litPtr;
134775 -               if (lastLLSize > (size_t)(oend - op))
134776 -                       return ERROR(dstSize_tooSmall);
134777 -               memcpy(op, litPtr, lastLLSize);
134778 -               op += lastLLSize;
134779 -       }
134781 -       return op - ostart;
134782 -}
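/*
 * Shape of a block decode, as implemented above: read the sequence-section
 * header and build the three FSE decoding tables, initialize the bitstream
 * (consumed backwards from the end of the section) and the LL/OF/ML states,
 * then decode and execute one sequence at a time. The loop must end with
 * nbSeq == 0 exactly when the bitstream completes, otherwise the input is
 * corrupt; literals left over after the last sequence are copied verbatim.
 */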
134784 -FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t *seqState, int const longOffsets)
134785 -{
134786 -       seq_t seq;
134788 -       U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
134789 -       U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
134790 -       U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
134792 -       U32 const llBits = LL_bits[llCode];
134793 -       U32 const mlBits = ML_bits[mlCode];
134794 -       U32 const ofBits = ofCode;
134795 -       U32 const totalBits = llBits + mlBits + ofBits;
134797 -       static const U32 LL_base[MaxLL + 1] = {0,  1,  2,  3,  4,  5,  6,  7,  8,    9,     10,    11,    12,    13,     14,     15,     16,     18,
134798 -                                              20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
134800 -       static const U32 ML_base[MaxML + 1] = {3,  4,  5,  6,  7,  8,  9,  10,   11,    12,    13,    14,    15,     16,     17,     18,     19,     20,
134801 -                                              21, 22, 23, 24, 25, 26, 27, 28,   29,    30,    31,    32,    33,     34,     35,     37,     39,     41,
134802 -                                              43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
134804 -       static const U32 OF_base[MaxOff + 1] = {0,       1,        1,        5,        0xD,      0x1D,      0x3D,      0x7D,      0xFD,     0x1FD,
134805 -                                               0x3FD,   0x7FD,    0xFFD,    0x1FFD,   0x3FFD,   0x7FFD,    0xFFFD,    0x1FFFD,   0x3FFFD,  0x7FFFD,
134806 -                                               0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
134808 -       /* sequence */
134809 -       {
134810 -               size_t offset;
134811 -               if (!ofCode)
134812 -                       offset = 0;
134813 -               else {
134814 -                       if (longOffsets) {
134815 -                               int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN);
134816 -                               offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
134817 -                               if (ZSTD_32bits() || extraBits)
134818 -                                       BIT_reloadDStream(&seqState->DStream);
134819 -                               if (extraBits)
134820 -                                       offset += BIT_readBitsFast(&seqState->DStream, extraBits);
134821 -                       } else {
134822 -                               offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
134823 -                               if (ZSTD_32bits())
134824 -                                       BIT_reloadDStream(&seqState->DStream);
134825 -                       }
134826 -               }
134828 -               if (ofCode <= 1) {
134829 -                       offset += (llCode == 0);
134830 -                       if (offset) {
134831 -                               size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
134832 -                               temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
134833 -                               if (offset != 1)
134834 -                                       seqState->prevOffset[2] = seqState->prevOffset[1];
134835 -                               seqState->prevOffset[1] = seqState->prevOffset[0];
134836 -                               seqState->prevOffset[0] = offset = temp;
134837 -                       } else {
134838 -                               offset = seqState->prevOffset[0];
134839 -                       }
134840 -               } else {
134841 -                       seqState->prevOffset[2] = seqState->prevOffset[1];
134842 -                       seqState->prevOffset[1] = seqState->prevOffset[0];
134843 -                       seqState->prevOffset[0] = offset;
134844 -               }
134845 -               seq.offset = offset;
134846 -       }
134848 -       seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <=  16 bits */
134849 -       if (ZSTD_32bits() && (mlBits + llBits > 24))
134850 -               BIT_reloadDStream(&seqState->DStream);
134852 -       seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <=  16 bits */
134853 -       if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
134854 -               BIT_reloadDStream(&seqState->DStream);
134856 -       {
134857 -               size_t const pos = seqState->pos + seq.litLength;
134858 -               seq.match = seqState->base + pos - seq.offset; /* single memory segment */
134859 -               if (seq.offset > pos)
134860 -                       seq.match += seqState->gotoDict; /* separate memory segment */
134861 -               seqState->pos = pos + seq.matchLength;
134862 -       }
134864 -       /* ANS state update */
134865 -       FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <=  9 bits */
134866 -       FSE_updateState(&seqState->stateML, &seqState->DStream); /* <=  9 bits */
134867 -       if (ZSTD_32bits())
134868 -               BIT_reloadDStream(&seqState->DStream);             /* <= 18 bits */
134869 -       FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <=  8 bits */
134871 -       return seq;
134872 -}
134874 -static seq_t ZSTD_decodeSequenceLong(seqState_t *seqState, unsigned const windowSize)
134875 -{
134876 -       if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) {
134877 -               return ZSTD_decodeSequenceLong_generic(seqState, 1);
134878 -       } else {
134879 -               return ZSTD_decodeSequenceLong_generic(seqState, 0);
134880 -       }
134881 -}
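/*
 * The longOffsets path exists because a single bitstream read can only
 * deliver STREAM_ACCUMULATOR_MIN bits safely between reloads (25 on 32-bit
 * targets, 57 on 64-bit). When the window is large enough that ofBits can
 * exceed that budget, the offset is read in two chunks with a
 * BIT_reloadDStream() in between; the ZSTD_highbit32(windowSize) test above
 * selects the specialized variant once per block rather than per sequence.
 */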
134883 -FORCE_INLINE
134884 -size_t ZSTD_execSequenceLong(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
134885 -                            const BYTE *const vBase, const BYTE *const dictEnd)
134886 -{
134887 -       BYTE *const oLitEnd = op + sequence.litLength;
134888 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
134889 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
134890 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
134891 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
134892 -       const BYTE *match = sequence.match;
134894 -       /* check */
134895 -       if (oMatchEnd > oend)
134896 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
134897 -       if (iLitEnd > litLimit)
134898 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
134899 -       if (oLitEnd > oend_w)
134900 -               return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
134902 -       /* copy Literals */
134903 -       ZSTD_copy8(op, *litPtr);
134904 -       if (sequence.litLength > 8)
134905 -               ZSTD_wildcopy(op + 8, (*litPtr) + 8,
134906 -                             sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
134907 -       op = oLitEnd;
134908 -       *litPtr = iLitEnd; /* update for next sequence */
134910 -       /* copy Match */
134911 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
134912 -               /* offset beyond prefix */
134913 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
134914 -                       return ERROR(corruption_detected);
134915 -               if (match + sequence.matchLength <= dictEnd) {
134916 -                       memmove(oLitEnd, match, sequence.matchLength);
134917 -                       return sequenceLength;
134918 -               }
134919 -               /* span extDict & currPrefixSegment */
134920 -               {
134921 -                       size_t const length1 = dictEnd - match;
134922 -                       memmove(oLitEnd, match, length1);
134923 -                       op = oLitEnd + length1;
134924 -                       sequence.matchLength -= length1;
134925 -                       match = base;
134926 -                       if (op > oend_w || sequence.matchLength < MINMATCH) {
134927 -                               U32 i;
134928 -                               for (i = 0; i < sequence.matchLength; ++i)
134929 -                                       op[i] = match[i];
134930 -                               return sequenceLength;
134931 -                       }
134932 -               }
134933 -       }
134934 -       /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
134936 -       /* match within prefix */
134937 -       if (sequence.offset < 8) {
134938 -               /* close range match, overlap */
134939 -               static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
134940 -               static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
134941 -               int const sub2 = dec64table[sequence.offset];
134942 -               op[0] = match[0];
134943 -               op[1] = match[1];
134944 -               op[2] = match[2];
134945 -               op[3] = match[3];
134946 -               match += dec32table[sequence.offset];
134947 -               ZSTD_copy4(op + 4, match);
134948 -               match -= sub2;
134949 -       } else {
134950 -               ZSTD_copy8(op, match);
134951 -       }
134952 -       op += 8;
134953 -       match += 8;
134955 -       if (oMatchEnd > oend - (16 - MINMATCH)) {
134956 -               if (op < oend_w) {
134957 -                       ZSTD_wildcopy(op, match, oend_w - op);
134958 -                       match += oend_w - op;
134959 -                       op = oend_w;
134960 -               }
134961 -               while (op < oMatchEnd)
134962 -                       *op++ = *match++;
134963 -       } else {
134964 -               ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
134965 -       }
134966 -       return sequenceLength;
134967 -}
134969 -static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
134970 -{
134971 -       const BYTE *ip = (const BYTE *)seqStart;
134972 -       const BYTE *const iend = ip + seqSize;
134973 -       BYTE *const ostart = (BYTE * const)dst;
134974 -       BYTE *const oend = ostart + maxDstSize;
134975 -       BYTE *op = ostart;
134976 -       const BYTE *litPtr = dctx->litPtr;
134977 -       const BYTE *const litEnd = litPtr + dctx->litSize;
134978 -       const BYTE *const base = (const BYTE *)(dctx->base);
134979 -       const BYTE *const vBase = (const BYTE *)(dctx->vBase);
134980 -       const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
134981 -       unsigned const windowSize = dctx->fParams.windowSize;
134982 -       int nbSeq;
134984 -       /* Build Decoding Tables */
134985 -       {
134986 -               size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
134987 -               if (ZSTD_isError(seqHSize))
134988 -                       return seqHSize;
134989 -               ip += seqHSize;
134990 -       }
134992 -       /* Regen sequences */
134993 -       if (nbSeq) {
134994 -#define STORED_SEQS 4
134995 -#define STOSEQ_MASK (STORED_SEQS - 1)
134996 -#define ADVANCED_SEQS 4
134997 -               seq_t *sequences = (seq_t *)dctx->entropy.workspace;
134998 -               int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
134999 -               seqState_t seqState;
135000 -               int seqNb;
135001 -               ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.workspace) >= sizeof(seq_t) * STORED_SEQS);
135002 -               dctx->fseEntropy = 1;
135003 -               {
135004 -                       U32 i;
135005 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
135006 -                               seqState.prevOffset[i] = dctx->entropy.rep[i];
135007 -               }
135008 -               seqState.base = base;
135009 -               seqState.pos = (size_t)(op - base);
135010 -               seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */
135011 -               CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
135012 -               FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
135013 -               FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
135014 -               FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
135016 -               /* prepare in advance */
135017 -               for (seqNb = 0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb < seqAdvance; seqNb++) {
135018 -                       sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize);
135019 -               }
135020 -               if (seqNb < seqAdvance)
135021 -                       return ERROR(corruption_detected);
135023 -               /* decode and decompress */
135024 -               for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb < nbSeq; seqNb++) {
135025 -                       seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize);
135026 -                       size_t const oneSeqSize =
135027 -                           ZSTD_execSequenceLong(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
135028 -                       if (ZSTD_isError(oneSeqSize))
135029 -                               return oneSeqSize;
135030 -                       ZSTD_PREFETCH(sequence.match);
135031 -                       sequences[seqNb & STOSEQ_MASK] = sequence;
135032 -                       op += oneSeqSize;
135033 -               }
135034 -               if (seqNb < nbSeq)
135035 -                       return ERROR(corruption_detected);
135037 -               /* finish queue */
135038 -               seqNb -= seqAdvance;
135039 -               for (; seqNb < nbSeq; seqNb++) {
135040 -                       size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
135041 -                       if (ZSTD_isError(oneSeqSize))
135042 -                               return oneSeqSize;
135043 -                       op += oneSeqSize;
135044 -               }
135046 -               /* save reps for next block */
135047 -               {
135048 -                       U32 i;
135049 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
135050 -                               dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
135051 -               }
135052 -       }
135054 -       /* last literal segment */
135055 -       {
135056 -               size_t const lastLLSize = litEnd - litPtr;
135057 -               if (lastLLSize > (size_t)(oend - op))
135058 -                       return ERROR(dstSize_tooSmall);
135059 -               memcpy(op, litPtr, lastLLSize);
135060 -               op += lastLLSize;
135061 -       }
135063 -       return op - ostart;
135064 -}
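/*
 * The STORED_SEQS ring above is a small software pipeline: sequences are
 * decoded ADVANCED_SEQS ahead of execution so that ZSTD_PREFETCH can pull
 * each match source into cache before it is copied. The scheduling pattern,
 * reduced to a standalone sketch (decode/execute are stand-ins, not part of
 * this file):
 */
#include <stdio.h>

#define STORED 4
#define MASK (STORED - 1)

static int decode(int i) { return i * 10; }            /* stands in for ZSTD_decodeSequenceLong */
static void execute(int v) { printf("exec %d\n", v); } /* stands in for ZSTD_execSequenceLong */

int main(void)
{
        int ring[STORED];
        int n = 10, i;

        for (i = 0; i < STORED && i < n; i++)   /* prime: decode ahead */
                ring[i & MASK] = decode(i);
        for (; i < n; i++) {                    /* steady state: execute with lag */
                int next = decode(i);
                execute(ring[i & MASK]);        /* slot i & MASK holds item i - 4 */
                ring[i & MASK] = next;
        }
        for (i -= STORED; i < n; i++)           /* drain the queue */
                execute(ring[i & MASK]);
        return 0;
}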
135066 -static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
135067 -{ /* blockType == blockCompressed */
135068 -       const BYTE *ip = (const BYTE *)src;
135070 -       if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX)
135071 -               return ERROR(srcSize_wrong);
135073 -       /* Decode literals section */
135074 -       {
135075 -               size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
135076 -               if (ZSTD_isError(litCSize))
135077 -                       return litCSize;
135078 -               ip += litCSize;
135079 -               srcSize -= litCSize;
135080 -       }
135081 -       if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bit x86, as it is detrimental to performance, */
135082 -                               /* likely because of register pressure */
135083 -                               /* if that is the correct cause, then 32-bit ARM should be affected differently; */
135084 -                               /* it would be good to test this on real ARM hardware, to see if the prefetch version improves speed */
135085 -               if (dctx->fParams.windowSize > (1 << 23))
135086 -                       return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
135087 -       return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
135088 -}
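/*
 * Decoder selection heuristic, as coded above: the prefetching variant is
 * only used on 64-bit builds (sizeof(size_t) > 4) and for frames whose
 * window exceeds 1 << 23 = 8 MiB, i.e. when matches are likely to miss the
 * cache; everything else takes the plain ZSTD_decompressSequences() path.
 */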
135090 -static void ZSTD_checkContinuity(ZSTD_DCtx *dctx, const void *dst)
135091 -{
135092 -       if (dst != dctx->previousDstEnd) { /* not contiguous */
135093 -               dctx->dictEnd = dctx->previousDstEnd;
135094 -               dctx->vBase = (const char *)dst - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
135095 -               dctx->base = dst;
135096 -               dctx->previousDstEnd = dst;
135097 -       }
135098 -}
135100 -size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
135101 -{
135102 -       size_t dSize;
135103 -       ZSTD_checkContinuity(dctx, dst);
135104 -       dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
135105 -       dctx->previousDstEnd = (char *)dst + dSize;
135106 -       return dSize;
135107 -}
135109 -/** ZSTD_insertBlock() :
135110 -       insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
135111 -size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize)
135112 -{
135113 -       ZSTD_checkContinuity(dctx, blockStart);
135114 -       dctx->previousDstEnd = (const char *)blockStart + blockSize;
135115 -       return blockSize;
135116 -}
135118 -size_t ZSTD_generateNxBytes(void *dst, size_t dstCapacity, BYTE byte, size_t length)
135119 -{
135120 -       if (length > dstCapacity)
135121 -               return ERROR(dstSize_tooSmall);
135122 -       memset(dst, byte, length);
135123 -       return length;
135124 -}
135126 -/** ZSTD_findFrameCompressedSize() :
135127 - *  compatible with legacy mode
135128 - *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
135129 - *  `srcSize` must be at least as large as the frame contained
135130 - *  @return : the compressed size of the frame starting at `src` */
135131 -size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
135132 -{
135133 -       if (srcSize >= ZSTD_skippableHeaderSize && (ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
135134 -               return ZSTD_skippableHeaderSize + ZSTD_readLE32((const BYTE *)src + 4);
135135 -       } else {
135136 -               const BYTE *ip = (const BYTE *)src;
135137 -               const BYTE *const ipstart = ip;
135138 -               size_t remainingSize = srcSize;
135139 -               ZSTD_frameParams fParams;
135141 -               size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize);
135142 -               if (ZSTD_isError(headerSize))
135143 -                       return headerSize;
135145 -               /* Frame Header */
135146 -               {
135147 -                       size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize);
135148 -                       if (ZSTD_isError(ret))
135149 -                               return ret;
135150 -                       if (ret > 0)
135151 -                               return ERROR(srcSize_wrong);
135152 -               }
135154 -               ip += headerSize;
135155 -               remainingSize -= headerSize;
135157 -               /* Loop on each block */
135158 -               while (1) {
135159 -                       blockProperties_t blockProperties;
135160 -                       size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
135161 -                       if (ZSTD_isError(cBlockSize))
135162 -                               return cBlockSize;
135164 -                       if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
135165 -                               return ERROR(srcSize_wrong);
135167 -                       ip += ZSTD_blockHeaderSize + cBlockSize;
135168 -                       remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
135170 -                       if (blockProperties.lastBlock)
135171 -                               break;
135172 -               }
135174 -               if (fParams.checksumFlag) { /* Frame content checksum */
135175 -                       if (remainingSize < 4)
135176 -                               return ERROR(srcSize_wrong);
135177 -                       ip += 4;
135178 -                       remainingSize -= 4;
135179 -               }
135181 -               return ip - ipstart;
135182 -       }
135183 -}
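/*
 * Skippable frames are measured without being decoded: 4 bytes of magic
 * (0x184D2A50..0x184D2A5F), a 4-byte little-endian content size, then that
 * many opaque bytes. A standalone sketch of the size computation above
 * (read_le32 is an illustrative stand-in for ZSTD_readLE32):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t read_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        /* magic 0x184D2A50, content size 3, three payload bytes */
        const uint8_t frame[] = { 0x50, 0x2A, 0x4D, 0x18, 3, 0, 0, 0, 'a', 'b', 'c' };

        if ((read_le32(frame) & 0xFFFFFFF0u) == 0x184D2A50u)
                printf("skippable, total %u bytes\n", 8 + read_le32(frame + 4)); /* 11 */
        return 0;
}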
135185 -/*! ZSTD_decompressFrame() :
135186 -*   @dctx must be properly initialized */
135187 -static size_t ZSTD_decompressFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void **srcPtr, size_t *srcSizePtr)
135188 -{
135189 -       const BYTE *ip = (const BYTE *)(*srcPtr);
135190 -       BYTE *const ostart = (BYTE * const)dst;
135191 -       BYTE *const oend = ostart + dstCapacity;
135192 -       BYTE *op = ostart;
135193 -       size_t remainingSize = *srcSizePtr;
135195 -       /* check */
135196 -       if (remainingSize < ZSTD_frameHeaderSize_min + ZSTD_blockHeaderSize)
135197 -               return ERROR(srcSize_wrong);
135199 -       /* Frame Header */
135200 -       {
135201 -               size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
135202 -               if (ZSTD_isError(frameHeaderSize))
135203 -                       return frameHeaderSize;
135204 -               if (remainingSize < frameHeaderSize + ZSTD_blockHeaderSize)
135205 -                       return ERROR(srcSize_wrong);
135206 -               CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize));
135207 -               ip += frameHeaderSize;
135208 -               remainingSize -= frameHeaderSize;
135209 -       }
135211 -       /* Loop on each block */
135212 -       while (1) {
135213 -               size_t decodedSize;
135214 -               blockProperties_t blockProperties;
135215 -               size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
135216 -               if (ZSTD_isError(cBlockSize))
135217 -                       return cBlockSize;
135219 -               ip += ZSTD_blockHeaderSize;
135220 -               remainingSize -= ZSTD_blockHeaderSize;
135221 -               if (cBlockSize > remainingSize)
135222 -                       return ERROR(srcSize_wrong);
135224 -               switch (blockProperties.blockType) {
135225 -               case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend - op, ip, cBlockSize); break;
135226 -               case bt_raw: decodedSize = ZSTD_copyRawBlock(op, oend - op, ip, cBlockSize); break;
135227 -               case bt_rle: decodedSize = ZSTD_generateNxBytes(op, oend - op, *ip, blockProperties.origSize); break;
135228 -               case bt_reserved:
135229 -               default: return ERROR(corruption_detected);
135230 -               }
135232 -               if (ZSTD_isError(decodedSize))
135233 -                       return decodedSize;
135234 -               if (dctx->fParams.checksumFlag)
135235 -                       xxh64_update(&dctx->xxhState, op, decodedSize);
135236 -               op += decodedSize;
135237 -               ip += cBlockSize;
135238 -               remainingSize -= cBlockSize;
135239 -               if (blockProperties.lastBlock)
135240 -                       break;
135241 -       }
135243 -       if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
135244 -               U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
135245 -               U32 checkRead;
135246 -               if (remainingSize < 4)
135247 -                       return ERROR(checksum_wrong);
135248 -               checkRead = ZSTD_readLE32(ip);
135249 -               if (checkRead != checkCalc)
135250 -                       return ERROR(checksum_wrong);
135251 -               ip += 4;
135252 -               remainingSize -= 4;
135253 -       }
135255 -       /* Allow caller to get size read */
135256 -       *srcPtr = ip;
135257 -       *srcSizePtr = remainingSize;
135258 -       return op - ostart;
135259 -}
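/*
 * Frame integrity, as verified above: when fParams.checksumFlag is set, the
 * frame ends with 4 bytes holding the low 32 bits of an XXH64 digest (the
 * state is seeded with 0 when the frame starts) over all decompressed bytes.
 * A mismatch fails the whole frame with checksum_wrong rather than returning
 * partially verified output.
 */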
135261 -static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict);
135262 -static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict);
135264 -static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
135265 -                                       const ZSTD_DDict *ddict)
135266 -{
135267 -       void *const dststart = dst;
135269 -       if (ddict) {
135270 -               if (dict) {
135271 -                       /* programmer error, these two cases should be mutually exclusive */
135272 -                       return ERROR(GENERIC);
135273 -               }
135275 -               dict = ZSTD_DDictDictContent(ddict);
135276 -               dictSize = ZSTD_DDictDictSize(ddict);
135277 -       }
135279 -       while (srcSize >= ZSTD_frameHeaderSize_prefix) {
135280 -               U32 magicNumber;
135282 -               magicNumber = ZSTD_readLE32(src);
135283 -               if (magicNumber != ZSTD_MAGICNUMBER) {
135284 -                       if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
135285 -                               size_t skippableSize;
135286 -                               if (srcSize < ZSTD_skippableHeaderSize)
135287 -                                       return ERROR(srcSize_wrong);
135288 -                               skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
135289 -                               if (srcSize < skippableSize) {
135290 -                                       return ERROR(srcSize_wrong);
135291 -                               }
135293 -                               src = (const BYTE *)src + skippableSize;
135294 -                               srcSize -= skippableSize;
135295 -                               continue;
135296 -                       } else {
135297 -                               return ERROR(prefix_unknown);
135298 -                       }
135299 -               }
135301 -               if (ddict) {
135302 -                       /* we were called from ZSTD_decompress_usingDDict */
135303 -                       ZSTD_refDDict(dctx, ddict);
135304 -               } else {
135305 -                       /* this will initialize correctly with no dict if dict == NULL, so
135306 -                        * use this in all cases but ddict */
135307 -                       CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
135308 -               }
135309 -               ZSTD_checkContinuity(dctx, dst);
135311 -               {
135312 -                       const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize);
135313 -                       if (ZSTD_isError(res))
135314 -                               return res;
135315 -                       /* no need to bounds-check this; ZSTD_decompressFrame
135316 -                        * has already done so */
135317 -                       dst = (BYTE *)dst + res;
135318 -                       dstCapacity -= res;
135319 -               }
135320 -       }
135322 -       if (srcSize)
135323 -               return ERROR(srcSize_wrong); /* input not entirely consumed */
135325 -       return (BYTE *)dst - (BYTE *)dststart;
135326 -}
135328 -size_t ZSTD_decompress_usingDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize)
135329 -{
135330 -       return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
135331 -}
135333 -size_t ZSTD_decompressDCtx(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
135334 -{
135335 -       return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
135336 -}
135338 -/*-**************************************
135339 -*   Advanced Streaming Decompression API
135340 -*   Bufferless and synchronous
135341 -****************************************/
135342 -size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx) { return dctx->expected; }
135344 -ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx)
135345 -{
135346 -       switch (dctx->stage) {
135347 -       default: /* should not happen */
135348 -       case ZSTDds_getFrameHeaderSize:
135349 -       case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader;
135350 -       case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader;
135351 -       case ZSTDds_decompressBlock: return ZSTDnit_block;
135352 -       case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock;
135353 -       case ZSTDds_checkChecksum: return ZSTDnit_checksum;
135354 -       case ZSTDds_decodeSkippableHeader:
135355 -       case ZSTDds_skipFrame: return ZSTDnit_skippableFrame;
135356 -       }
135357 -}
135359 -int ZSTD_isSkipFrame(ZSTD_DCtx *dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */
135361 -/** ZSTD_decompressContinue() :
135362 -*   @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
135363 -*             or an error code, which can be tested using ZSTD_isError() */
135364 -size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
135365 -{
135366 -       /* Sanity check */
135367 -       if (srcSize != dctx->expected)
135368 -               return ERROR(srcSize_wrong);
135369 -       if (dstCapacity)
135370 -               ZSTD_checkContinuity(dctx, dst);
135372 -       switch (dctx->stage) {
135373 -       case ZSTDds_getFrameHeaderSize:
135374 -               if (srcSize != ZSTD_frameHeaderSize_prefix)
135375 -                       return ERROR(srcSize_wrong);                                    /* impossible */
135376 -               if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
135377 -                       memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
135378 -                       dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */
135379 -                       dctx->stage = ZSTDds_decodeSkippableHeader;
135380 -                       return 0;
135381 -               }
135382 -               dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix);
135383 -               if (ZSTD_isError(dctx->headerSize))
135384 -                       return dctx->headerSize;
135385 -               memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
135386 -               if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) {
135387 -                       dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix;
135388 -                       dctx->stage = ZSTDds_decodeFrameHeader;
135389 -                       return 0;
135390 -               }
135391 -               dctx->expected = 0; /* not necessary to copy more */
135392 -               fallthrough;
135394 -       case ZSTDds_decodeFrameHeader:
135395 -               memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
135396 -               CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
135397 -               dctx->expected = ZSTD_blockHeaderSize;
135398 -               dctx->stage = ZSTDds_decodeBlockHeader;
135399 -               return 0;
135401 -       case ZSTDds_decodeBlockHeader: {
135402 -               blockProperties_t bp;
135403 -               size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
135404 -               if (ZSTD_isError(cBlockSize))
135405 -                       return cBlockSize;
135406 -               dctx->expected = cBlockSize;
135407 -               dctx->bType = bp.blockType;
135408 -               dctx->rleSize = bp.origSize;
135409 -               if (cBlockSize) {
135410 -                       dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
135411 -                       return 0;
135412 -               }
135413 -               /* empty block */
135414 -               if (bp.lastBlock) {
135415 -                       if (dctx->fParams.checksumFlag) {
135416 -                               dctx->expected = 4;
135417 -                               dctx->stage = ZSTDds_checkChecksum;
135418 -                       } else {
135419 -                               dctx->expected = 0; /* end of frame */
135420 -                               dctx->stage = ZSTDds_getFrameHeaderSize;
135421 -                       }
135422 -               } else {
135423 -                       dctx->expected = 3; /* go directly to next header */
135424 -                       dctx->stage = ZSTDds_decodeBlockHeader;
135425 -               }
135426 -               return 0;
135427 -       }
135428 -       case ZSTDds_decompressLastBlock:
135429 -       case ZSTDds_decompressBlock: {
135430 -               size_t rSize;
135431 -               switch (dctx->bType) {
135432 -               case bt_compressed: rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break;
135433 -               case bt_raw: rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); break;
135434 -               case bt_rle: rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); break;
135435 -               case bt_reserved: /* should never happen */
135436 -               default: return ERROR(corruption_detected);
135437 -               }
135438 -               if (ZSTD_isError(rSize))
135439 -                       return rSize;
135440 -               if (dctx->fParams.checksumFlag)
135441 -                       xxh64_update(&dctx->xxhState, dst, rSize);
135443 -               if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
135444 -                       if (dctx->fParams.checksumFlag) {       /* another round for frame checksum */
135445 -                               dctx->expected = 4;
135446 -                               dctx->stage = ZSTDds_checkChecksum;
135447 -                       } else {
135448 -                               dctx->expected = 0; /* ends here */
135449 -                               dctx->stage = ZSTDds_getFrameHeaderSize;
135450 -                       }
135451 -               } else {
135452 -                       dctx->stage = ZSTDds_decodeBlockHeader;
135453 -                       dctx->expected = ZSTD_blockHeaderSize;
135454 -                       dctx->previousDstEnd = (char *)dst + rSize;
135455 -               }
135456 -               return rSize;
135457 -       }
135458 -       case ZSTDds_checkChecksum: {
135459 -               U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
135460 -               U32 const check32 = ZSTD_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */
135461 -               if (check32 != h32)
135462 -                       return ERROR(checksum_wrong);
135463 -               dctx->expected = 0;
135464 -               dctx->stage = ZSTDds_getFrameHeaderSize;
135465 -               return 0;
135466 -       }
135467 -       case ZSTDds_decodeSkippableHeader: {
135468 -               memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
135469 -               dctx->expected = ZSTD_readLE32(dctx->headerBuffer + 4);
135470 -               dctx->stage = ZSTDds_skipFrame;
135471 -               return 0;
135472 -       }
135473 -       case ZSTDds_skipFrame: {
135474 -               dctx->expected = 0;
135475 -               dctx->stage = ZSTDds_getFrameHeaderSize;
135476 -               return 0;
135477 -       }
135478 -       default:
135479 -               return ERROR(GENERIC); /* impossible */
135480 -       }
135481 -}
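/*
 * A sketch of how a caller drives the bufferless API above, assuming a dctx
 * already primed with ZSTD_decompressBegin(); the wrapper name and its error
 * handling are illustrative, not part of this file. The contract: feed
 * exactly the number of bytes the context asks for, every round.
 */
static size_t decompress_bufferless_sketch(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
                                           const void *src, size_t srcSize)
{
        const char *ip = (const char *)src;
        const char *const iend = ip + srcSize;
        char *op = (char *)dst;
        char *const oend = op + dstCapacity;
        size_t need;

        while ((need = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
                size_t got;
                if (need > (size_t)(iend - ip))
                        return ERROR(srcSize_wrong);    /* input exhausted early */
                got = ZSTD_decompressContinue(dctx, op, oend - op, ip, need);
                if (ZSTD_isError(got))
                        return got;
                ip += need;                             /* input is always fully consumed */
                op += got;                              /* output grows only on block stages */
        }
        return (size_t)(op - (char *)dst);
}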
135483 -static size_t ZSTD_refDictContent(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
135484 -{
135485 -       dctx->dictEnd = dctx->previousDstEnd;
135486 -       dctx->vBase = (const char *)dict - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
135487 -       dctx->base = dict;
135488 -       dctx->previousDstEnd = (const char *)dict + dictSize;
135489 -       return 0;
135490 -}
135492 -/* ZSTD_loadEntropy() :
135493 - * dict : must point at beginning of a valid zstd dictionary
135494 - * @return : size of entropy tables read */
135495 -static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t *entropy, const void *const dict, size_t const dictSize)
135496 -{
135497 -       const BYTE *dictPtr = (const BYTE *)dict;
135498 -       const BYTE *const dictEnd = dictPtr + dictSize;
135500 -       if (dictSize <= 8)
135501 -               return ERROR(dictionary_corrupted);
135502 -       dictPtr += 8; /* skip header = magic + dictID */
135504 -       {
135505 -               size_t const hSize = HUF_readDTableX4_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, entropy->workspace, sizeof(entropy->workspace));
135506 -               if (HUF_isError(hSize))
135507 -                       return ERROR(dictionary_corrupted);
135508 -               dictPtr += hSize;
135509 -       }
135511 -       {
135512 -               short offcodeNCount[MaxOff + 1];
135513 -               U32 offcodeMaxValue = MaxOff, offcodeLog;
135514 -               size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
135515 -               if (FSE_isError(offcodeHeaderSize))
135516 -                       return ERROR(dictionary_corrupted);
135517 -               if (offcodeLog > OffFSELog)
135518 -                       return ERROR(dictionary_corrupted);
135519 -               CHECK_E(FSE_buildDTable_wksp(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
135520 -               dictPtr += offcodeHeaderSize;
135521 -       }
135523 -       {
135524 -               short matchlengthNCount[MaxML + 1];
135525 -               unsigned matchlengthMaxValue = MaxML, matchlengthLog;
135526 -               size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
135527 -               if (FSE_isError(matchlengthHeaderSize))
135528 -                       return ERROR(dictionary_corrupted);
135529 -               if (matchlengthLog > MLFSELog)
135530 -                       return ERROR(dictionary_corrupted);
135531 -               CHECK_E(FSE_buildDTable_wksp(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
135532 -               dictPtr += matchlengthHeaderSize;
135533 -       }
135535 -       {
135536 -               short litlengthNCount[MaxLL + 1];
135537 -               unsigned litlengthMaxValue = MaxLL, litlengthLog;
135538 -               size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
135539 -               if (FSE_isError(litlengthHeaderSize))
135540 -                       return ERROR(dictionary_corrupted);
135541 -               if (litlengthLog > LLFSELog)
135542 -                       return ERROR(dictionary_corrupted);
135543 -               CHECK_E(FSE_buildDTable_wksp(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
135544 -               dictPtr += litlengthHeaderSize;
135545 -       }
135547 -       if (dictPtr + 12 > dictEnd)
135548 -               return ERROR(dictionary_corrupted);
135549 -       {
135550 -               int i;
135551 -               size_t const dictContentSize = (size_t)(dictEnd - (dictPtr + 12));
135552 -               for (i = 0; i < 3; i++) {
135553 -                       U32 const rep = ZSTD_readLE32(dictPtr);
135554 -                       dictPtr += 4;
135555 -                       if (rep == 0 || rep >= dictContentSize)
135556 -                               return ERROR(dictionary_corrupted);
135557 -                       entropy->rep[i] = rep;
135558 -               }
135559 -       }
135561 -       return dictPtr - (const BYTE *)dict;
135562 -}
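/*
 * Dictionary layout parsed above: 4-byte magic (ZSTD_DICT_MAGIC), 4-byte
 * dictID, a Huffman table for literals, FSE tables for offsets, match
 * lengths and literal lengths, three 4-byte starting repcodes (each must be
 * nonzero and smaller than the remaining content size), and finally the raw
 * content that seeds the match window.
 */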
135564 -static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
135565 -{
135566 -       if (dictSize < 8)
135567 -               return ZSTD_refDictContent(dctx, dict, dictSize);
135568 -       {
135569 -               U32 const magic = ZSTD_readLE32(dict);
135570 -               if (magic != ZSTD_DICT_MAGIC) {
135571 -                       return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
135572 -               }
135573 -       }
135574 -       dctx->dictID = ZSTD_readLE32((const char *)dict + 4);
135576 -       /* load entropy tables */
135577 -       {
135578 -               size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
135579 -               if (ZSTD_isError(eSize))
135580 -                       return ERROR(dictionary_corrupted);
135581 -               dict = (const char *)dict + eSize;
135582 -               dictSize -= eSize;
135583 -       }
135584 -       dctx->litEntropy = dctx->fseEntropy = 1;
135586 -       /* reference dictionary content */
135587 -       return ZSTD_refDictContent(dctx, dict, dictSize);
135588 -}
135590 -size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
135591 -{
135592 -       CHECK_F(ZSTD_decompressBegin(dctx));
135593 -       if (dict && dictSize)
135594 -               CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
135595 -       return 0;
135596 -}
135598 -/* ======   ZSTD_DDict   ====== */
135600 -struct ZSTD_DDict_s {
135601 -       void *dictBuffer;
135602 -       const void *dictContent;
135603 -       size_t dictSize;
135604 -       ZSTD_entropyTables_t entropy;
135605 -       U32 dictID;
135606 -       U32 entropyPresent;
135607 -       ZSTD_customMem cMem;
135608 -}; /* typedef'd to ZSTD_DDict within "zstd.h" */
135610 -size_t ZSTD_DDictWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); }
135612 -static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict) { return ddict->dictContent; }
135614 -static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict) { return ddict->dictSize; }
135616 -static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict)
135617 -{
135618 -       ZSTD_decompressBegin(dstDCtx); /* init */
135619 -       if (ddict) {                   /* support refDDict on NULL */
135620 -               dstDCtx->dictID = ddict->dictID;
135621 -               dstDCtx->base = ddict->dictContent;
135622 -               dstDCtx->vBase = ddict->dictContent;
135623 -               dstDCtx->dictEnd = (const BYTE *)ddict->dictContent + ddict->dictSize;
135624 -               dstDCtx->previousDstEnd = dstDCtx->dictEnd;
135625 -               if (ddict->entropyPresent) {
135626 -                       dstDCtx->litEntropy = 1;
135627 -                       dstDCtx->fseEntropy = 1;
135628 -                       dstDCtx->LLTptr = ddict->entropy.LLTable;
135629 -                       dstDCtx->MLTptr = ddict->entropy.MLTable;
135630 -                       dstDCtx->OFTptr = ddict->entropy.OFTable;
135631 -                       dstDCtx->HUFptr = ddict->entropy.hufTable;
135632 -                       dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
135633 -                       dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
135634 -                       dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
135635 -               } else {
135636 -                       dstDCtx->litEntropy = 0;
135637 -                       dstDCtx->fseEntropy = 0;
135638 -               }
135639 -       }
135640 -}
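/*
 * Referencing a DDict is cheap by design: instead of re-running
 * ZSTD_loadEntropy() for every frame, the dctx simply aliases the tables
 * digested once at DDict creation (LLTptr/MLTptr/OFTptr/HUFptr point into
 * the ddict), so only pointers and the three repcodes are copied here.
 */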
135642 -static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict *ddict)
135643 -{
135644 -       ddict->dictID = 0;
135645 -       ddict->entropyPresent = 0;
135646 -       if (ddict->dictSize < 8)
135647 -               return 0;
135648 -       {
135649 -               U32 const magic = ZSTD_readLE32(ddict->dictContent);
135650 -               if (magic != ZSTD_DICT_MAGIC)
135651 -                       return 0; /* pure content mode */
135652 -       }
135653 -       ddict->dictID = ZSTD_readLE32((const char *)ddict->dictContent + 4);
135655 -       /* load entropy tables */
135656 -       CHECK_E(ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted);
135657 -       ddict->entropyPresent = 1;
135658 -       return 0;
135659 -}
135661 -static ZSTD_DDict *ZSTD_createDDict_advanced(const void *dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem)
135662 -{
135663 -       if (!customMem.customAlloc || !customMem.customFree)
135664 -               return NULL;
135666 -       {
135667 -               ZSTD_DDict *const ddict = (ZSTD_DDict *)ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
135668 -               if (!ddict)
135669 -                       return NULL;
135670 -               ddict->cMem = customMem;
135672 -               if ((byReference) || (!dict) || (!dictSize)) {
135673 -                       ddict->dictBuffer = NULL;
135674 -                       ddict->dictContent = dict;
135675 -               } else {
135676 -                       void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
135677 -                       if (!internalBuffer) {
135678 -                               ZSTD_freeDDict(ddict);
135679 -                               return NULL;
135680 -                       }
135681 -                       memcpy(internalBuffer, dict, dictSize);
135682 -                       ddict->dictBuffer = internalBuffer;
135683 -                       ddict->dictContent = internalBuffer;
135684 -               }
135685 -               ddict->dictSize = dictSize;
135686 -               ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
135687 -               /* parse dictionary content */
135688 -               {
135689 -                       size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict);
135690 -                       if (ZSTD_isError(errorCode)) {
135691 -                               ZSTD_freeDDict(ddict);
135692 -                               return NULL;
135693 -                       }
135694 -               }
135696 -               return ddict;
135697 -       }
135698 -}
135700 -/*! ZSTD_initDDict() :
135701 -*   Create a digested dictionary, to start decompression without startup delay.
135702 -*   `dict` content is referenced, not copied (byReference == 1 below), so
135703 -*   `dict` must remain valid for the lifetime of the `ZSTD_DDict` */
135704 -ZSTD_DDict *ZSTD_initDDict(const void *dict, size_t dictSize, void *workspace, size_t workspaceSize)
135705 -{
135706 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
135707 -       return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem);
135708 -}
135710 -size_t ZSTD_freeDDict(ZSTD_DDict *ddict)
135711 -{
135712 -       if (ddict == NULL)
135713 -               return 0; /* support free on NULL */
135714 -       {
135715 -               ZSTD_customMem const cMem = ddict->cMem;
135716 -               ZSTD_free(ddict->dictBuffer, cMem);
135717 -               ZSTD_free(ddict, cMem);
135718 -               return 0;
135719 -       }
135720 -}
135722 -/*! ZSTD_getDictID_fromDict() :
135723 - *  Provides the dictID stored within dictionary.
135724 - *  If @return == 0, the dictionary is not conformant with the Zstandard specification.
135725 - *  It can still be loaded, but as a content-only dictionary. */
135726 -unsigned ZSTD_getDictID_fromDict(const void *dict, size_t dictSize)
135727 -{
135728 -       if (dictSize < 8)
135729 -               return 0;
135730 -       if (ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC)
135731 -               return 0;
135732 -       return ZSTD_readLE32((const char *)dict + 4);
135733 -}
135735 -/*! ZSTD_getDictID_fromDDict() :
135736 - *  Provides the dictID of the dictionary loaded into `ddict`.
135737 - *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
135738 - *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
135739 -unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict)
135740 -{
135741 -       if (ddict == NULL)
135742 -               return 0;
135743 -       return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
135744 -}
135746 -/*! ZSTD_getDictID_fromFrame() :
135747 - *  Provides the dictID required to decompress the frame stored within `src`.
135748 - *  If @return == 0, the dictID could not be decoded.
135749 - *  This could be for one of the following reasons :
135750 - *  - The frame does not require a dictionary to be decoded (most common case).
135751 - *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
135752 - *    Note : this use case also happens when using a non-conformant dictionary.
135753 - *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
135754 - *  - This is not a Zstandard frame.
135755 - *  To identify the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code.
135756 -unsigned ZSTD_getDictID_fromFrame(const void *src, size_t srcSize)
135757 -{
135758 -       ZSTD_frameParams zfp = {0, 0, 0, 0};
135759 -       size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize);
135760 -       if (ZSTD_isError(hError))
135761 -               return 0;
135762 -       return zfp.dictID;
135763 -}
135765 -/*! ZSTD_decompress_usingDDict() :
135766 -*   Decompression using a pre-digested dictionary.
135767 -*   Uses the dictionary without significant overhead. */
135768 -size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict)
135770 -       /* pass content and size in case legacy frames are encountered */
135771 -       return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict);
135774 -/*=====================================
135775 -*   Streaming decompression
135776 -*====================================*/
135778 -typedef enum { zdss_init, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
135780 -/* *** Resource management *** */
135781 -struct ZSTD_DStream_s {
135782 -       ZSTD_DCtx *dctx;
135783 -       ZSTD_DDict *ddictLocal;
135784 -       const ZSTD_DDict *ddict;
135785 -       ZSTD_frameParams fParams;
135786 -       ZSTD_dStreamStage stage;
135787 -       char *inBuff;
135788 -       size_t inBuffSize;
135789 -       size_t inPos;
135790 -       size_t maxWindowSize;
135791 -       char *outBuff;
135792 -       size_t outBuffSize;
135793 -       size_t outStart;
135794 -       size_t outEnd;
135795 -       size_t blockSize;
135796 -       BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */
135797 -       size_t lhSize;
135798 -       ZSTD_customMem customMem;
135799 -       void *legacyContext;
135800 -       U32 previousLegacyVersion;
135801 -       U32 legacyVersion;
135802 -       U32 hostageByte;
135803 -}; /* typedef'd to ZSTD_DStream within "zstd.h" */
135805 -size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize)
135807 -       size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
135808 -       size_t const inBuffSize = blockSize;
135809 -       size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
135810 -       return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
135813 -static ZSTD_DStream *ZSTD_createDStream_advanced(ZSTD_customMem customMem)
135815 -       ZSTD_DStream *zds;
135817 -       if (!customMem.customAlloc || !customMem.customFree)
135818 -               return NULL;
135820 -       zds = (ZSTD_DStream *)ZSTD_malloc(sizeof(ZSTD_DStream), customMem);
135821 -       if (zds == NULL)
135822 -               return NULL;
135823 -       memset(zds, 0, sizeof(ZSTD_DStream));
135824 -       memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem));
135825 -       zds->dctx = ZSTD_createDCtx_advanced(customMem);
135826 -       if (zds->dctx == NULL) {
135827 -               ZSTD_freeDStream(zds);
135828 -               return NULL;
135829 -       }
135830 -       zds->stage = zdss_init;
135831 -       zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
135832 -       return zds;
135835 -ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, size_t workspaceSize)
135837 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
135838 -       ZSTD_DStream *zds = ZSTD_createDStream_advanced(stackMem);
135839 -       if (!zds) {
135840 -               return NULL;
135841 -       }
135843 -       zds->maxWindowSize = maxWindowSize;
135844 -       zds->stage = zdss_loadHeader;
135845 -       zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
135846 -       ZSTD_freeDDict(zds->ddictLocal);
135847 -       zds->ddictLocal = NULL;
135848 -       zds->ddict = zds->ddictLocal;
135849 -       zds->legacyVersion = 0;
135850 -       zds->hostageByte = 0;
135852 -       {
135853 -               size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
135854 -               size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
135856 -               zds->inBuff = (char *)ZSTD_malloc(blockSize, zds->customMem);
135857 -               zds->inBuffSize = blockSize;
135858 -               zds->outBuff = (char *)ZSTD_malloc(neededOutSize, zds->customMem);
135859 -               zds->outBuffSize = neededOutSize;
135860 -               if (zds->inBuff == NULL || zds->outBuff == NULL) {
135861 -                       ZSTD_freeDStream(zds);
135862 -                       return NULL;
135863 -               }
135864 -       }
135865 -       return zds;
135868 -ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize)
135870 -       ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize);
135871 -       if (zds) {
135872 -               zds->ddict = ddict;
135873 -       }
135874 -       return zds;
135877 -size_t ZSTD_freeDStream(ZSTD_DStream *zds)
135879 -       if (zds == NULL)
135880 -               return 0; /* support free on null */
135881 -       {
135882 -               ZSTD_customMem const cMem = zds->customMem;
135883 -               ZSTD_freeDCtx(zds->dctx);
135884 -               zds->dctx = NULL;
135885 -               ZSTD_freeDDict(zds->ddictLocal);
135886 -               zds->ddictLocal = NULL;
135887 -               ZSTD_free(zds->inBuff, cMem);
135888 -               zds->inBuff = NULL;
135889 -               ZSTD_free(zds->outBuff, cMem);
135890 -               zds->outBuff = NULL;
135891 -               ZSTD_free(zds, cMem);
135892 -               return 0;
135893 -       }
135896 -/* *** Initialization *** */
135898 -size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; }
135899 -size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
135901 -size_t ZSTD_resetDStream(ZSTD_DStream *zds)
135903 -       zds->stage = zdss_loadHeader;
135904 -       zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
135905 -       zds->legacyVersion = 0;
135906 -       zds->hostageByte = 0;
135907 -       return ZSTD_frameHeaderSize_prefix;
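/* Editor's note: a hedged sketch (not part of this patch) of the calling
 * sequence the streaming API above expects. The allocator choice, the window
 * size argument and the error mapping are illustrative assumptions, not
 * values mandated by this code. */
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>

static ssize_t sketch_decompress(void *dst, size_t dstLen,
				 const void *src, size_t srcLen,
				 size_t maxWindowSize)
{
	size_t const wkspLen = ZSTD_DStreamWorkspaceBound(maxWindowSize);
	void *wksp = vmalloc(wkspLen);
	ZSTD_DStream *zds;
	ZSTD_inBuffer in = { src, srcLen, 0 };
	ZSTD_outBuffer out = { dst, dstLen, 0 };
	size_t ret = 1;

	if (!wksp)
		return -ENOMEM;
	zds = ZSTD_initDStream(maxWindowSize, wksp, wkspLen);
	if (!zds) {
		vfree(wksp);
		return -EINVAL;
	}
	/* 0 => frame fully decoded; otherwise a hint of bytes to feed next */
	while (ret != 0 && !ZSTD_isError(ret) && in.pos < in.size)
		ret = ZSTD_decompressStream(zds, &out, &in);
	vfree(wksp);
	return ZSTD_isError(ret) ? -EIO : (ssize_t)out.pos;
}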
135910 -/* *****   Decompression   ***** */
135912 -ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
135914 -       size_t const length = MIN(dstCapacity, srcSize);
135915 -       memcpy(dst, src, length);
135916 -       return length;
135919 -size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
135921 -       const char *const istart = (const char *)(input->src) + input->pos;
135922 -       const char *const iend = (const char *)(input->src) + input->size;
135923 -       const char *ip = istart;
135924 -       char *const ostart = (char *)(output->dst) + output->pos;
135925 -       char *const oend = (char *)(output->dst) + output->size;
135926 -       char *op = ostart;
135927 -       U32 someMoreWork = 1;
135929 -       while (someMoreWork) {
135930 -               switch (zds->stage) {
135931 -               case zdss_init:
135932 -                       ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
135933 -                       fallthrough;
135935 -               case zdss_loadHeader: {
135936 -                       size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
135937 -                       if (ZSTD_isError(hSize))
135938 -                               return hSize;
135939 -                       if (hSize != 0) {                                  /* need more input */
135940 -                               size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
135941 -                               if (toLoad > (size_t)(iend - ip)) {     /* not enough input to load full header */
135942 -                                       memcpy(zds->headerBuffer + zds->lhSize, ip, iend - ip);
135943 -                                       zds->lhSize += iend - ip;
135944 -                                       input->pos = input->size;
135945 -                                       return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) +
135946 -                                              ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
135947 -                               }
135948 -                               memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad);
135949 -                               zds->lhSize = hSize;
135950 -                               ip += toLoad;
135951 -                               break;
135952 -                       }
135954 -                       /* check for single-pass mode opportunity */
135955 -                       if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
135956 -                           && (U64)(size_t)(oend - op) >= zds->fParams.frameContentSize) {
135957 -                               size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend - istart);
135958 -                               if (cSize <= (size_t)(iend - istart)) {
135959 -                                       size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend - op, istart, cSize, zds->ddict);
135960 -                                       if (ZSTD_isError(decompressedSize))
135961 -                                               return decompressedSize;
135962 -                                       ip = istart + cSize;
135963 -                                       op += decompressedSize;
135964 -                                       zds->dctx->expected = 0;
135965 -                                       zds->stage = zdss_init;
135966 -                                       someMoreWork = 0;
135967 -                                       break;
135968 -                               }
135969 -                       }
135971 -                       /* Consume header */
135972 -                       ZSTD_refDDict(zds->dctx, zds->ddict);
135973 -                       {
135974 -                               size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */
135975 -                               CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size));
135976 -                               {
135977 -                                       size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);
135978 -                                       CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer + h1Size, h2Size));
135979 -                               }
135980 -                       }
135982 -                       zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
135983 -                       if (zds->fParams.windowSize > zds->maxWindowSize)
135984 -                               return ERROR(frameParameter_windowTooLarge);
135986 -                       /* Buffers are preallocated, but double check */
135987 -                       {
135988 -                               size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
135989 -                               size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
135990 -                               if (zds->inBuffSize < blockSize) {
135991 -                                       return ERROR(GENERIC);
135992 -                               }
135993 -                               if (zds->outBuffSize < neededOutSize) {
135994 -                                       return ERROR(GENERIC);
135995 -                               }
135996 -                               zds->blockSize = blockSize;
135997 -                       }
135998 -                       zds->stage = zdss_read;
135999 -               }
136000 -                       fallthrough;
136002 -               case zdss_read: {
136003 -                       size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
136004 -                       if (neededInSize == 0) { /* end of frame */
136005 -                               zds->stage = zdss_init;
136006 -                               someMoreWork = 0;
136007 -                               break;
136008 -                       }
136009 -                       if ((size_t)(iend - ip) >= neededInSize) { /* decode directly from src */
136010 -                               const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
136011 -                               size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart,
136012 -                                                                                  (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), ip, neededInSize);
136013 -                               if (ZSTD_isError(decodedSize))
136014 -                                       return decodedSize;
136015 -                               ip += neededInSize;
136016 -                               if (!decodedSize && !isSkipFrame)
136017 -                                       break; /* this was just a header */
136018 -                               zds->outEnd = zds->outStart + decodedSize;
136019 -                               zds->stage = zdss_flush;
136020 -                               break;
136021 -                       }
136022 -                       if (ip == iend) {
136023 -                               someMoreWork = 0;
136024 -                               break;
136025 -                       } /* no more input */
136026 -                       zds->stage = zdss_load;
136027 -                       /* pass-through */
136028 -               }
136029 -                       fallthrough;
136031 -               case zdss_load: {
136032 -                       size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
136033 -                       size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */
136034 -                       size_t loadedSize;
136035 -                       if (toLoad > zds->inBuffSize - zds->inPos)
136036 -                               return ERROR(corruption_detected); /* should never happen */
136037 -                       loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend - ip);
136038 -                       ip += loadedSize;
136039 -                       zds->inPos += loadedSize;
136040 -                       if (loadedSize < toLoad) {
136041 -                               someMoreWork = 0;
136042 -                               break;
136043 -                       } /* not enough input, wait for more */
136045 -                       /* decode loaded input */
136046 -                       {
136047 -                               const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
136048 -                               size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
136049 -                                                                                  zds->inBuff, neededInSize);
136050 -                               if (ZSTD_isError(decodedSize))
136051 -                                       return decodedSize;
136052 -                               zds->inPos = 0; /* input is consumed */
136053 -                               if (!decodedSize && !isSkipFrame) {
136054 -                                       zds->stage = zdss_read;
136055 -                                       break;
136056 -                               } /* this was just a header */
136057 -                               zds->outEnd = zds->outStart + decodedSize;
136058 -                               zds->stage = zdss_flush;
136059 -                               /* pass-through */
136060 -                       }
136061 -               }
136062 -                       fallthrough;
136064 -               case zdss_flush: {
136065 -                       size_t const toFlushSize = zds->outEnd - zds->outStart;
136066 -                       size_t const flushedSize = ZSTD_limitCopy(op, oend - op, zds->outBuff + zds->outStart, toFlushSize);
136067 -                       op += flushedSize;
136068 -                       zds->outStart += flushedSize;
136069 -                       if (flushedSize == toFlushSize) { /* flush completed */
136070 -                               zds->stage = zdss_read;
136071 -                               if (zds->outStart + zds->blockSize > zds->outBuffSize)
136072 -                                       zds->outStart = zds->outEnd = 0;
136073 -                               break;
136074 -                       }
136075 -                       /* cannot complete flush */
136076 -                       someMoreWork = 0;
136077 -                       break;
136078 -               }
136079 -               default:
136080 -                       return ERROR(GENERIC); /* impossible */
136081 -               }
136082 -       }
136084 -       /* result */
136085 -       input->pos += (size_t)(ip - istart);
136086 -       output->pos += (size_t)(op - ostart);
136087 -       {
136088 -               size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx);
136089 -               if (!nextSrcSizeHint) {                     /* frame fully decoded */
136090 -                       if (zds->outEnd == zds->outStart) { /* output fully flushed */
136091 -                               if (zds->hostageByte) {
136092 -                                       if (input->pos >= input->size) {
136093 -                                               zds->stage = zdss_read;
136094 -                                               return 1;
136095 -                                       }            /* can't release hostage (not present) */
136096 -                                       input->pos++; /* release hostage */
136097 -                               }
136098 -                               return 0;
136099 -                       }
136100 -                       if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
136101 -                               input->pos--;    /* note : pos > 0, otherwise, impossible to finish reading last block */
136102 -                               zds->hostageByte = 1;
136103 -                       }
136104 -                       return 1;
136105 -               }
136106 -               nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */
136107 -               if (zds->inPos > nextSrcSizeHint)
136108 -                       return ERROR(GENERIC); /* should never happen */
136109 -               nextSrcSizeHint -= zds->inPos; /* already loaded */
136110 -               return nextSrcSizeHint;
136111 -       }
136114 -EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound);
136115 -EXPORT_SYMBOL(ZSTD_initDCtx);
136116 -EXPORT_SYMBOL(ZSTD_decompressDCtx);
136117 -EXPORT_SYMBOL(ZSTD_decompress_usingDict);
136119 -EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound);
136120 -EXPORT_SYMBOL(ZSTD_initDDict);
136121 -EXPORT_SYMBOL(ZSTD_decompress_usingDDict);
136123 -EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound);
136124 -EXPORT_SYMBOL(ZSTD_initDStream);
136125 -EXPORT_SYMBOL(ZSTD_initDStream_usingDDict);
136126 -EXPORT_SYMBOL(ZSTD_resetDStream);
136127 -EXPORT_SYMBOL(ZSTD_decompressStream);
136128 -EXPORT_SYMBOL(ZSTD_DStreamInSize);
136129 -EXPORT_SYMBOL(ZSTD_DStreamOutSize);
136131 -EXPORT_SYMBOL(ZSTD_findFrameCompressedSize);
136132 -EXPORT_SYMBOL(ZSTD_getFrameContentSize);
136133 -EXPORT_SYMBOL(ZSTD_findDecompressedSize);
136135 -EXPORT_SYMBOL(ZSTD_isFrame);
136136 -EXPORT_SYMBOL(ZSTD_getDictID_fromDict);
136137 -EXPORT_SYMBOL(ZSTD_getDictID_fromDDict);
136138 -EXPORT_SYMBOL(ZSTD_getDictID_fromFrame);
136140 -EXPORT_SYMBOL(ZSTD_getFrameParams);
136141 -EXPORT_SYMBOL(ZSTD_decompressBegin);
136142 -EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict);
136143 -EXPORT_SYMBOL(ZSTD_copyDCtx);
136144 -EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress);
136145 -EXPORT_SYMBOL(ZSTD_decompressContinue);
136146 -EXPORT_SYMBOL(ZSTD_nextInputType);
136148 -EXPORT_SYMBOL(ZSTD_decompressBlock);
136149 -EXPORT_SYMBOL(ZSTD_insertBlock);
136151 -MODULE_LICENSE("Dual BSD/GPL");
136152 -MODULE_DESCRIPTION("Zstd Decompressor");
136153 diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c
136154 new file mode 100644
136155 index 000000000000..dee939434873
136156 --- /dev/null
136157 +++ b/lib/zstd/decompress/huf_decompress.c
136158 @@ -0,0 +1,1205 @@
136159 +/* ******************************************************************
136160 + * huff0 Huffman decoder,
136161 + * part of Finite State Entropy library
136162 + * Copyright (c) Yann Collet, Facebook, Inc.
136164 + *  You can contact the author at :
136165 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
136167 + * This source code is licensed under both the BSD-style license (found in the
136168 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
136169 + * in the COPYING file in the root directory of this source tree).
136170 + * You may select, at your option, one of the above-listed licenses.
136171 +****************************************************************** */
136173 +/* **************************************************************
136174 +*  Dependencies
136175 +****************************************************************/
136176 +#include "../common/zstd_deps.h"  /* ZSTD_memcpy, ZSTD_memset */
136177 +#include "../common/compiler.h"
136178 +#include "../common/bitstream.h"  /* BIT_* */
136179 +#include "../common/fse.h"        /* to compress headers */
136180 +#define HUF_STATIC_LINKING_ONLY
136181 +#include "../common/huf.h"
136182 +#include "../common/error_private.h"
136184 +/* **************************************************************
136185 +*  Macros
136186 +****************************************************************/
136188 +/* Each of these two optional macros forces the use of one of the two
136189 + * Huffman decompression implementations. You can't force both at the
136190 + * same time.
136191 + */
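/* Editor's note: e.g. building with -DHUF_FORCE_DECOMPRESS_X1 compiles out
 * the double-symbol (X2) sections guarded below, while
 * -DHUF_FORCE_DECOMPRESS_X2 compiles out the single-symbol (X1) ones;
 * defining both trips the #error that follows. */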
136192 +#if defined(HUF_FORCE_DECOMPRESS_X1) && \
136193 +    defined(HUF_FORCE_DECOMPRESS_X2)
136194 +#error "Cannot force the use of the X1 and X2 decoders at the same time!"
136195 +#endif
136198 +/* **************************************************************
136199 +*  Error Management
136200 +****************************************************************/
136201 +#define HUF_isError ERR_isError
136204 +/* **************************************************************
136205 +*  Byte alignment for workSpace management
136206 +****************************************************************/
136207 +#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)
136208 +#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
136211 +/* **************************************************************
136212 +*  BMI2 Variant Wrappers
136213 +****************************************************************/
136214 +#if DYNAMIC_BMI2
136216 +#define HUF_DGEN(fn)                                                        \
136217 +                                                                            \
136218 +    static size_t fn##_default(                                             \
136219 +                  void* dst,  size_t dstSize,                               \
136220 +            const void* cSrc, size_t cSrcSize,                              \
136221 +            const HUF_DTable* DTable)                                       \
136222 +    {                                                                       \
136223 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
136224 +    }                                                                       \
136225 +                                                                            \
136226 +    static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2(                       \
136227 +                  void* dst,  size_t dstSize,                               \
136228 +            const void* cSrc, size_t cSrcSize,                              \
136229 +            const HUF_DTable* DTable)                                       \
136230 +    {                                                                       \
136231 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
136232 +    }                                                                       \
136233 +                                                                            \
136234 +    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
136235 +                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
136236 +    {                                                                       \
136237 +        if (bmi2) {                                                         \
136238 +            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);         \
136239 +        }                                                                   \
136240 +        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable);          \
136241 +    }
136243 +#else
136245 +#define HUF_DGEN(fn)                                                        \
136246 +    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
136247 +                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
136248 +    {                                                                       \
136249 +        (void)bmi2;                                                         \
136250 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
136251 +    }
136253 +#endif
136256 +/*-***************************/
136257 +/*  generic DTableDesc       */
136258 +/*-***************************/
136259 +typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
136261 +static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
136263 +    DTableDesc dtd;
136264 +    ZSTD_memcpy(&dtd, table, sizeof(dtd));
136265 +    return dtd;
136269 +#ifndef HUF_FORCE_DECOMPRESS_X2
136271 +/*-***************************/
136272 +/*  single-symbol decoding   */
136273 +/*-***************************/
136274 +typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1;   /* single-symbol decoding */
136277 + * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
136278 + * a time.
136279 + */
136280 +static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
136281 +    U64 D4;
136282 +    if (MEM_isLittleEndian()) {
136283 +        D4 = symbol + (nbBits << 8);
136284 +    } else {
136285 +        D4 = (symbol << 8) + nbBits;
136286 +    }
136287 +    D4 *= 0x0001000100010001ULL;
136288 +    return D4;
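/* Editor's note, worked example: on a little-endian target, symbol 0x41 with
 * nbBits 3 gives D4 = 0x0341 * 0x0001000100010001 = 0x0341034103410341, i.e.
 * four adjacent HUF_DEltX1 entries { .byte = 0x41, .nbBits = 3 } laid down by
 * a single 64-bit store. */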
136291 +typedef struct {
136292 +        U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
136293 +        U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
136294 +        U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
136295 +        BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
136296 +        BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
136297 +} HUF_ReadDTableX1_Workspace;
136300 +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
136302 +    return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
136305 +size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
136307 +    U32 tableLog = 0;
136308 +    U32 nbSymbols = 0;
136309 +    size_t iSize;
136310 +    void* const dtPtr = DTable + 1;
136311 +    HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
136312 +    HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;
136314 +    DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
136315 +    if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
136317 +    DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
136318 +    /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzers complain ... */
136320 +    iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
136321 +    if (HUF_isError(iSize)) return iSize;
136323 +    /* Table header */
136324 +    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
136325 +        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit in */
136326 +        dtd.tableType = 0;
136327 +        dtd.tableLog = (BYTE)tableLog;
136328 +        ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
136329 +    }
136331 +    /* Compute symbols and rankStart given rankVal:
136332 +     *
136333 +     * rankVal already contains the number of values of each weight.
136334 +     *
136335 +     * symbols contains the symbols ordered by weight. First are the rankVal[0]
136336 +     * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
136337 +     * symbols[0] is filled (but unused) to avoid a branch.
136338 +     *
136339 +     * rankStart contains the offset where each rank belongs in the DTable.
136340 +     * rankStart[0] is not filled because there are no entries in the table for
136341 +     * weight 0.
136342 +     */
136343 +    {
136344 +        int n;
136345 +        int nextRankStart = 0;
136346 +        int const unroll = 4;
136347 +        int const nLimit = (int)nbSymbols - unroll + 1;
136348 +        for (n=0; n<(int)tableLog+1; n++) {
136349 +            U32 const curr = nextRankStart;
136350 +            nextRankStart += wksp->rankVal[n];
136351 +            wksp->rankStart[n] = curr;
136352 +        }
136353 +        for (n=0; n < nLimit; n += unroll) {
136354 +            int u;
136355 +            for (u=0; u < unroll; ++u) {
136356 +                size_t const w = wksp->huffWeight[n+u];
136357 +                wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
136358 +            }
136359 +        }
136360 +        for (; n < (int)nbSymbols; ++n) {
136361 +            size_t const w = wksp->huffWeight[n];
136362 +            wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
136363 +        }
136364 +    }
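/* Editor's note, worked example (hypothetical stats): with tableLog = 2 and
 * rankVal = {1, 2, 1}, the first loop produces rankStart = {0, 1, 3}; the
 * fill loops then write the two weight-1 symbols at symbols[1..2] and the
 * weight-2 symbol at symbols[3], each rankStart[w] ending up one past the
 * last symbol of its weight. */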
136366 +    /* fill DTable
136367 +     * We fill all entries of each weight in order.
136368 +     * That way length is a constant for each iteration of the outer loop.
136369 +     * We can switch based on the length to a different inner loop which is
136370 +     * optimized for that particular case.
136371 +     */
136372 +    {
136373 +        U32 w;
136374 +        int symbol=wksp->rankVal[0];
136375 +        int rankStart=0;
136376 +        for (w=1; w<tableLog+1; ++w) {
136377 +            int const symbolCount = wksp->rankVal[w];
136378 +            int const length = (1 << w) >> 1;
136379 +            int uStart = rankStart;
136380 +            BYTE const nbBits = (BYTE)(tableLog + 1 - w);
136381 +            int s;
136382 +            int u;
136383 +            switch (length) {
136384 +            case 1:
136385 +                for (s=0; s<symbolCount; ++s) {
136386 +                    HUF_DEltX1 D;
136387 +                    D.byte = wksp->symbols[symbol + s];
136388 +                    D.nbBits = nbBits;
136389 +                    dt[uStart] = D;
136390 +                    uStart += 1;
136391 +                }
136392 +                break;
136393 +            case 2:
136394 +                for (s=0; s<symbolCount; ++s) {
136395 +                    HUF_DEltX1 D;
136396 +                    D.byte = wksp->symbols[symbol + s];
136397 +                    D.nbBits = nbBits;
136398 +                    dt[uStart+0] = D;
136399 +                    dt[uStart+1] = D;
136400 +                    uStart += 2;
136401 +                }
136402 +                break;
136403 +            case 4:
136404 +                for (s=0; s<symbolCount; ++s) {
136405 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
136406 +                    MEM_write64(dt + uStart, D4);
136407 +                    uStart += 4;
136408 +                }
136409 +                break;
136410 +            case 8:
136411 +                for (s=0; s<symbolCount; ++s) {
136412 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
136413 +                    MEM_write64(dt + uStart, D4);
136414 +                    MEM_write64(dt + uStart + 4, D4);
136415 +                    uStart += 8;
136416 +                }
136417 +                break;
136418 +            default:
136419 +                for (s=0; s<symbolCount; ++s) {
136420 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
136421 +                    for (u=0; u < length; u += 16) {
136422 +                        MEM_write64(dt + uStart + u + 0, D4);
136423 +                        MEM_write64(dt + uStart + u + 4, D4);
136424 +                        MEM_write64(dt + uStart + u + 8, D4);
136425 +                        MEM_write64(dt + uStart + u + 12, D4);
136426 +                    }
136427 +                    assert(u == length);
136428 +                    uStart += length;
136429 +                }
136430 +                break;
136431 +            }
136432 +            symbol += symbolCount;
136433 +            rankStart += symbolCount * length;
136434 +        }
136435 +    }
136436 +    return iSize;
136439 +FORCE_INLINE_TEMPLATE BYTE
136440 +HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
136442 +    size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
136443 +    BYTE const c = dt[val].byte;
136444 +    BIT_skipBits(Dstream, dt[val].nbBits);
136445 +    return c;
136448 +#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
136449 +    *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
136451 +#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr)  \
136452 +    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
136453 +        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
136455 +#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
136456 +    if (MEM_64bits()) \
136457 +        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
136459 +HINT_INLINE size_t
136460 +HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
136462 +    BYTE* const pStart = p;
136464 +    /* up to 4 symbols at a time */
136465 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
136466 +        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
136467 +        HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
136468 +        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
136469 +        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
136470 +    }
136472 +    /* [0-3] symbols remaining */
136473 +    if (MEM_32bits())
136474 +        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
136475 +            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
136477 +    /* no more data to retrieve from bitstream, no need to reload */
136478 +    while (p < pEnd)
136479 +        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
136481 +    return pEnd-pStart;
136484 +FORCE_INLINE_TEMPLATE size_t
136485 +HUF_decompress1X1_usingDTable_internal_body(
136486 +          void* dst,  size_t dstSize,
136487 +    const void* cSrc, size_t cSrcSize,
136488 +    const HUF_DTable* DTable)
136490 +    BYTE* op = (BYTE*)dst;
136491 +    BYTE* const oend = op + dstSize;
136492 +    const void* dtPtr = DTable + 1;
136493 +    const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
136494 +    BIT_DStream_t bitD;
136495 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
136496 +    U32 const dtLog = dtd.tableLog;
136498 +    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
136500 +    HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
136502 +    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
136504 +    return dstSize;
136507 +FORCE_INLINE_TEMPLATE size_t
136508 +HUF_decompress4X1_usingDTable_internal_body(
136509 +          void* dst,  size_t dstSize,
136510 +    const void* cSrc, size_t cSrcSize,
136511 +    const HUF_DTable* DTable)
136513 +    /* Check */
136514 +    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */
136516 +    {   const BYTE* const istart = (const BYTE*) cSrc;
136517 +        BYTE* const ostart = (BYTE*) dst;
136518 +        BYTE* const oend = ostart + dstSize;
136519 +        BYTE* const olimit = oend - 3;
136520 +        const void* const dtPtr = DTable + 1;
136521 +        const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
136523 +        /* Init */
136524 +        BIT_DStream_t bitD1;
136525 +        BIT_DStream_t bitD2;
136526 +        BIT_DStream_t bitD3;
136527 +        BIT_DStream_t bitD4;
136528 +        size_t const length1 = MEM_readLE16(istart);
136529 +        size_t const length2 = MEM_readLE16(istart+2);
136530 +        size_t const length3 = MEM_readLE16(istart+4);
136531 +        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
136532 +        const BYTE* const istart1 = istart + 6;  /* jumpTable */
136533 +        const BYTE* const istart2 = istart1 + length1;
136534 +        const BYTE* const istart3 = istart2 + length2;
136535 +        const BYTE* const istart4 = istart3 + length3;
136536 +        const size_t segmentSize = (dstSize+3) / 4;
136537 +        BYTE* const opStart2 = ostart + segmentSize;
136538 +        BYTE* const opStart3 = opStart2 + segmentSize;
136539 +        BYTE* const opStart4 = opStart3 + segmentSize;
136540 +        BYTE* op1 = ostart;
136541 +        BYTE* op2 = opStart2;
136542 +        BYTE* op3 = opStart3;
136543 +        BYTE* op4 = opStart4;
136544 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
136545 +        U32 const dtLog = dtd.tableLog;
136546 +        U32 endSignal = 1;
136548 +        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
136549 +        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
136550 +        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
136551 +        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
136552 +        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
136554 +        /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
136555 +        for ( ; (endSignal) & (op4 < olimit) ; ) {
136556 +            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
136557 +            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
136558 +            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
136559 +            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
136560 +            HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
136561 +            HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
136562 +            HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
136563 +            HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
136564 +            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
136565 +            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
136566 +            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
136567 +            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
136568 +            HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
136569 +            HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
136570 +            HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
136571 +            HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
136572 +            endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
136573 +            endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
136574 +            endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
136575 +            endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
136576 +        }
136578 +        /* check corruption */
136579 +        /* note : should not be necessary : op# advance in lock step, and we control op4.
136580 +         *        but curiously, binaries generated by gcc 7.2 & 7.3 with -mbmi2 run faster when >=1 test is present */
136581 +        if (op1 > opStart2) return ERROR(corruption_detected);
136582 +        if (op2 > opStart3) return ERROR(corruption_detected);
136583 +        if (op3 > opStart4) return ERROR(corruption_detected);
136584 +        /* note : op4 supposed already verified within main loop */
136586 +        /* finish bitStreams one by one */
136587 +        HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
136588 +        HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
136589 +        HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
136590 +        HUF_decodeStreamX1(op4, &bitD4, oend,     dt, dtLog);
136592 +        /* check */
136593 +        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
136594 +          if (!endCheck) return ERROR(corruption_detected); }
136596 +        /* decoded size */
136597 +        return dstSize;
136598 +    }
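/* Editor's note, worked example: for dstSize = 1000, segmentSize =
 * (1000 + 3) / 4 = 250, so the four streams decode to dst[0..249],
 * dst[250..499], dst[500..749] and dst[750..999]; on the input side the
 * 6-byte jump table stores length1..length3 as little-endian 16-bit values,
 * and length4 is whatever remains of cSrcSize. */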
136602 +typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
136603 +                                               const void *cSrc,
136604 +                                               size_t cSrcSize,
136605 +                                               const HUF_DTable *DTable);
136607 +HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
136608 +HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
136612 +size_t HUF_decompress1X1_usingDTable(
136613 +          void* dst,  size_t dstSize,
136614 +    const void* cSrc, size_t cSrcSize,
136615 +    const HUF_DTable* DTable)
136617 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
136618 +    if (dtd.tableType != 0) return ERROR(GENERIC);
136619 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
136622 +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
136623 +                                   const void* cSrc, size_t cSrcSize,
136624 +                                   void* workSpace, size_t wkspSize)
136626 +    const BYTE* ip = (const BYTE*) cSrc;
136628 +    size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
136629 +    if (HUF_isError(hSize)) return hSize;
136630 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
136631 +    ip += hSize; cSrcSize -= hSize;
136633 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
136637 +size_t HUF_decompress4X1_usingDTable(
136638 +          void* dst,  size_t dstSize,
136639 +    const void* cSrc, size_t cSrcSize,
136640 +    const HUF_DTable* DTable)
136642 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
136643 +    if (dtd.tableType != 0) return ERROR(GENERIC);
136644 +    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
136647 +static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
136648 +                                   const void* cSrc, size_t cSrcSize,
136649 +                                   void* workSpace, size_t wkspSize, int bmi2)
136651 +    const BYTE* ip = (const BYTE*) cSrc;
136653 +    size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
136654 +    if (HUF_isError(hSize)) return hSize;
136655 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
136656 +    ip += hSize; cSrcSize -= hSize;
136658 +    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
136661 +size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
136662 +                                   const void* cSrc, size_t cSrcSize,
136663 +                                   void* workSpace, size_t wkspSize)
136665 +    return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
136669 +#endif /* HUF_FORCE_DECOMPRESS_X2 */
136672 +#ifndef HUF_FORCE_DECOMPRESS_X1
136674 +/* *************************/
136675 +/* double-symbols decoding */
136676 +/* *************************/
136678 +typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2;  /* double-symbols decoding */
136679 +typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
136680 +typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
136681 +typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
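/* Editor's note: each HUF_DEltX2 entry packs up to two decoded symbols into
 * `sequence` (written little-endian), the total bits consumed into `nbBits`,
 * and how many of the two bytes are valid into `length` (1 or 2). */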
136684 +/* HUF_fillDTableX2Level2() :
136685 + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
136686 +static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
136687 +                           const U32* rankValOrigin, const int minWeight,
136688 +                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
136689 +                           U32 nbBitsBaseline, U16 baseSeq, U32* wksp, size_t wkspSize)
136691 +    HUF_DEltX2 DElt;
136692 +    U32* rankVal = wksp;
136694 +    assert(wkspSize >= HUF_TABLELOG_MAX + 1);
136695 +    (void)wkspSize;
136696 +    /* get pre-calculated rankVal */
136697 +    ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
136699 +    /* fill skipped values */
136700 +    if (minWeight>1) {
136701 +        U32 i, skipSize = rankVal[minWeight];
136702 +        MEM_writeLE16(&(DElt.sequence), baseSeq);
136703 +        DElt.nbBits   = (BYTE)(consumed);
136704 +        DElt.length   = 1;
136705 +        for (i = 0; i < skipSize; i++)
136706 +            DTable[i] = DElt;
136707 +    }
136709 +    /* fill DTable */
136710 +    {   U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
136711 +            const U32 symbol = sortedSymbols[s].symbol;
136712 +            const U32 weight = sortedSymbols[s].weight;
136713 +            const U32 nbBits = nbBitsBaseline - weight;
136714 +            const U32 length = 1 << (sizeLog-nbBits);
136715 +            const U32 start = rankVal[weight];
136716 +            U32 i = start;
136717 +            const U32 end = start + length;
136719 +            MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
136720 +            DElt.nbBits = (BYTE)(nbBits + consumed);
136721 +            DElt.length = 2;
136722 +            do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
136724 +            rankVal[weight] += length;
136725 +    }   }
136729 +static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
136730 +                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
136731 +                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
136732 +                           const U32 nbBitsBaseline, U32* wksp, size_t wkspSize)
136734 +    U32* rankVal = wksp;
136735 +    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
136736 +    const U32 minBits  = nbBitsBaseline - maxWeight;
136737 +    U32 s;
136739 +    assert(wkspSize >= HUF_TABLELOG_MAX + 1);
136740 +    wksp += HUF_TABLELOG_MAX + 1;
136741 +    wkspSize -= HUF_TABLELOG_MAX + 1;
136743 +    ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
136745 +    /* fill DTable */
136746 +    for (s=0; s<sortedListSize; s++) {
136747 +        const U16 symbol = sortedList[s].symbol;
136748 +        const U32 weight = sortedList[s].weight;
136749 +        const U32 nbBits = nbBitsBaseline - weight;
136750 +        const U32 start = rankVal[weight];
136751 +        const U32 length = 1 << (targetLog-nbBits);
136753 +        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
136754 +            U32 sortedRank;
136755 +            int minWeight = nbBits + scaleLog;
136756 +            if (minWeight < 1) minWeight = 1;
136757 +            sortedRank = rankStart[minWeight];
136758 +            HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
136759 +                           rankValOrigin[nbBits], minWeight,
136760 +                           sortedList+sortedRank, sortedListSize-sortedRank,
136761 +                           nbBitsBaseline, symbol, wksp, wkspSize);
136762 +        } else {
136763 +            HUF_DEltX2 DElt;
136764 +            MEM_writeLE16(&(DElt.sequence), symbol);
136765 +            DElt.nbBits = (BYTE)(nbBits);
136766 +            DElt.length = 1;
136767 +            {   U32 const end = start + length;
136768 +                U32 u;
136769 +                for (u = start; u < end; u++) DTable[u] = DElt;
136770 +        }   }
136771 +        rankVal[weight] += length;
136772 +    }
136775 +typedef struct {
136776 +    rankValCol_t rankVal[HUF_TABLELOG_MAX];
136777 +    U32 rankStats[HUF_TABLELOG_MAX + 1];
136778 +    U32 rankStart0[HUF_TABLELOG_MAX + 2];
136779 +    sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
136780 +    BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
136781 +    U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
136782 +} HUF_ReadDTableX2_Workspace;
136784 +size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
136785 +                       const void* src, size_t srcSize,
136786 +                             void* workSpace, size_t wkspSize)
136788 +    U32 tableLog, maxW, sizeOfSort, nbSymbols;
136789 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
136790 +    U32 const maxTableLog = dtd.maxTableLog;
136791 +    size_t iSize;
136792 +    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
136793 +    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
136794 +    U32 *rankStart;
136796 +    HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
136798 +    if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
136800 +    rankStart = wksp->rankStart0 + 1;
136801 +    ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
136802 +    ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
136804 +    DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
136805 +    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
136806 +    /* ZSTD_memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzers complain ... */
136808 +    iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), /* bmi2 */ 0);
136809 +    if (HUF_isError(iSize)) return iSize;
136811 +    /* check result */
136812 +    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
136814 +    /* find maxWeight */
136815 +    for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
136817 +    /* Get start index of each weight */
136818 +    {   U32 w, nextRankStart = 0;
136819 +        for (w=1; w<maxW+1; w++) {
136820 +            U32 curr = nextRankStart;
136821 +            nextRankStart += wksp->rankStats[w];
136822 +            rankStart[w] = curr;
136823 +        }
136824 +        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of the sorted list */
136825 +        sizeOfSort = nextRankStart;
136826 +    }
136828 +    /* sort symbols by weight */
136829 +    {   U32 s;
136830 +        for (s=0; s<nbSymbols; s++) {
136831 +            U32 const w = wksp->weightList[s];
136832 +            U32 const r = rankStart[w]++;
136833 +            wksp->sortedSymbol[r].symbol = (BYTE)s;
136834 +            wksp->sortedSymbol[r].weight = (BYTE)w;
136835 +        }
136836 +        rankStart[0] = 0;   /* forget 0w symbols; this is the beginning of weight(1) */
136837 +    }
136839 +    /* Build rankVal */
136840 +    {   U32* const rankVal0 = wksp->rankVal[0];
136841 +        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
136842 +            U32 nextRankVal = 0;
136843 +            U32 w;
136844 +            for (w=1; w<maxW+1; w++) {
136845 +                U32 curr = nextRankVal;
136846 +                nextRankVal += wksp->rankStats[w] << (w+rescale);
136847 +                rankVal0[w] = curr;
136848 +        }   }
136849 +        {   U32 const minBits = tableLog+1 - maxW;
136850 +            U32 consumed;
136851 +            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
136852 +                U32* const rankValPtr = wksp->rankVal[consumed];
136853 +                U32 w;
136854 +                for (w = 1; w < maxW+1; w++) {
136855 +                    rankValPtr[w] = rankVal0[w] >> consumed;
136856 +    }   }   }   }
136858 +    HUF_fillDTableX2(dt, maxTableLog,
136859 +                   wksp->sortedSymbol, sizeOfSort,
136860 +                   wksp->rankStart0, wksp->rankVal, maxW,
136861 +                   tableLog+1,
136862 +                   wksp->calleeWksp, sizeof(wksp->calleeWksp) / sizeof(U32));
136864 +    dtd.tableLog = (BYTE)maxTableLog;
136865 +    dtd.tableType = 1;
136866 +    ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
136867 +    return iSize;
136871 +FORCE_INLINE_TEMPLATE U32
136872 +HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
136874 +    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
136875 +    ZSTD_memcpy(op, dt+val, 2);
136876 +    BIT_skipBits(DStream, dt[val].nbBits);
136877 +    return dt[val].length;
136880 +FORCE_INLINE_TEMPLATE U32
136881 +HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
136883 +    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
136884 +    ZSTD_memcpy(op, dt+val, 1);
136885 +    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
136886 +    else {
136887 +        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
136888 +            BIT_skipBits(DStream, dt[val].nbBits);
136889 +            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
136890 +                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
136891 +                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
136892 +    }   }
136893 +    return 1;
136896 +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
136897 +    ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
136899 +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
136900 +    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
136901 +        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
136903 +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
136904 +    if (MEM_64bits()) \
136905 +        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
136907 +HINT_INLINE size_t
136908 +HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
136909 +                const HUF_DEltX2* const dt, const U32 dtLog)
136911 +    BYTE* const pStart = p;
136913 +    /* up to 8 symbols at a time */
136914 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
136915 +        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
136916 +        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
136917 +        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
136918 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
136919 +    }
136921 +    /* closer to end : up to 2 symbols at a time */
136922 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
136923 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
136925 +    while (p <= pEnd-2)
136926 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
136928 +    if (p < pEnd)
136929 +        p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
136931 +    return p-pStart;
136934 +FORCE_INLINE_TEMPLATE size_t
136935 +HUF_decompress1X2_usingDTable_internal_body(
136936 +          void* dst,  size_t dstSize,
136937 +    const void* cSrc, size_t cSrcSize,
136938 +    const HUF_DTable* DTable)
136940 +    BIT_DStream_t bitD;
136942 +    /* Init */
136943 +    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
136945 +    /* decode */
136946 +    {   BYTE* const ostart = (BYTE*) dst;
136947 +        BYTE* const oend = ostart + dstSize;
136948 +        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
136949 +        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
136950 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
136951 +        HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
136952 +    }
136954 +    /* check */
136955 +    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
136957 +    /* decoded size */
136958 +    return dstSize;
136961 +FORCE_INLINE_TEMPLATE size_t
136962 +HUF_decompress4X2_usingDTable_internal_body(
136963 +          void* dst,  size_t dstSize,
136964 +    const void* cSrc, size_t cSrcSize,
136965 +    const HUF_DTable* DTable)
136967 +    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
136969 +    {   const BYTE* const istart = (const BYTE*) cSrc;
136970 +        BYTE* const ostart = (BYTE*) dst;
136971 +        BYTE* const oend = ostart + dstSize;
136972 +        BYTE* const olimit = oend - (sizeof(size_t)-1);
136973 +        const void* const dtPtr = DTable+1;
136974 +        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
136976 +        /* Init */
136977 +        BIT_DStream_t bitD1;
136978 +        BIT_DStream_t bitD2;
136979 +        BIT_DStream_t bitD3;
136980 +        BIT_DStream_t bitD4;
136981 +        size_t const length1 = MEM_readLE16(istart);
136982 +        size_t const length2 = MEM_readLE16(istart+2);
136983 +        size_t const length3 = MEM_readLE16(istart+4);
136984 +        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
136985 +        const BYTE* const istart1 = istart + 6;  /* jumpTable */
136986 +        const BYTE* const istart2 = istart1 + length1;
136987 +        const BYTE* const istart3 = istart2 + length2;
136988 +        const BYTE* const istart4 = istart3 + length3;
136989 +        size_t const segmentSize = (dstSize+3) / 4;
136990 +        BYTE* const opStart2 = ostart + segmentSize;
136991 +        BYTE* const opStart3 = opStart2 + segmentSize;
136992 +        BYTE* const opStart4 = opStart3 + segmentSize;
136993 +        BYTE* op1 = ostart;
136994 +        BYTE* op2 = opStart2;
136995 +        BYTE* op3 = opStart3;
136996 +        BYTE* op4 = opStart4;
136997 +        U32 endSignal = 1;
136998 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
136999 +        U32 const dtLog = dtd.tableLog;
137001 +        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
137002 +        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
137003 +        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
137004 +        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
137005 +        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
137007 +        /* 16-32 symbols per loop (4-8 symbols per stream) */
137008 +        for ( ; (endSignal) & (op4 < olimit); ) {
137009 +#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
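            /* [Editor's note] Interleaving the reloads of streams 1-2 and 3-4
             * between the decode groups (instead of reloading all four at the
             * end, as the #else branch does) reportedly yields better
             * instruction scheduling from clang on x86; the decoded output is
             * identical either way. */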
137010 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
137011 +            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
137012 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
137013 +            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
137014 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
137015 +            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
137016 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
137017 +            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
137018 +            endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
137019 +            endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
137020 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
137021 +            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
137022 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
137023 +            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
137024 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
137025 +            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
137026 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
137027 +            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
137028 +            endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
137029 +            endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
137030 +#else
137031 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
137032 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
137033 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
137034 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
137035 +            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
137036 +            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
137037 +            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
137038 +            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
137039 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
137040 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
137041 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
137042 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
137043 +            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
137044 +            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
137045 +            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
137046 +            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
137047 +            endSignal = (U32)LIKELY(
137048 +                        (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
137049 +                      & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
137050 +                      & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
137051 +                      & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
137052 +#endif
137053 +        }
137055 +        /* check corruption */
137056 +        if (op1 > opStart2) return ERROR(corruption_detected);
137057 +        if (op2 > opStart3) return ERROR(corruption_detected);
137058 +        if (op3 > opStart4) return ERROR(corruption_detected);
137059 +        /* note : op4 already verified within main loop */
137061 +        /* finish bitStreams one by one */
137062 +        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
137063 +        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
137064 +        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
137065 +        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
137067 +        /* check */
137068 +        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
137069 +          if (!endCheck) return ERROR(corruption_detected); }
137071 +        /* decoded size */
137072 +        return dstSize;
137073 +    }
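/* [Editor's illustrative sketch, not part of the patch] How the 6-byte jump
 * table at the head of a 4-stream section is decoded: three little-endian
 * 16-bit stream sizes, with the fourth size inferred from the remainder.
 * Names are hypothetical and error handling is reduced to a boolean.
 */
#include <stddef.h>

static unsigned read_le16_sketch(const unsigned char* p)
{
    return (unsigned)p[0] | ((unsigned)p[1] << 8);
}

static int huf_split_4streams(const unsigned char* src, size_t srcSize,
                              const unsigned char* streams[4], size_t sizes[4])
{
    if (srcSize < 10) return 0;   /* jump table + at least 1 byte per stream */
    sizes[0] = read_le16_sketch(src);
    sizes[1] = read_le16_sketch(src + 2);
    sizes[2] = read_le16_sketch(src + 4);
    if (sizes[0] + sizes[1] + sizes[2] + 6 > srcSize) return 0;   /* lengths overflow the input */
    sizes[3] = srcSize - 6 - sizes[0] - sizes[1] - sizes[2];
    streams[0] = src + 6;
    streams[1] = streams[0] + sizes[0];
    streams[2] = streams[1] + sizes[1];
    streams[3] = streams[2] + sizes[2];
    return 1;
}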
137076 +HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
137077 +HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
137079 +size_t HUF_decompress1X2_usingDTable(
137080 +          void* dst,  size_t dstSize,
137081 +    const void* cSrc, size_t cSrcSize,
137082 +    const HUF_DTable* DTable)
137084 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
137085 +    if (dtd.tableType != 1) return ERROR(GENERIC);
137086 +    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
137089 +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
137090 +                                   const void* cSrc, size_t cSrcSize,
137091 +                                   void* workSpace, size_t wkspSize)
137093 +    const BYTE* ip = (const BYTE*) cSrc;
137095 +    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
137096 +                                               workSpace, wkspSize);
137097 +    if (HUF_isError(hSize)) return hSize;
137098 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
137099 +    ip += hSize; cSrcSize -= hSize;
137101 +    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
137105 +size_t HUF_decompress4X2_usingDTable(
137106 +          void* dst,  size_t dstSize,
137107 +    const void* cSrc, size_t cSrcSize,
137108 +    const HUF_DTable* DTable)
137110 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
137111 +    if (dtd.tableType != 1) return ERROR(GENERIC);
137112 +    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
137115 +static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
137116 +                                   const void* cSrc, size_t cSrcSize,
137117 +                                   void* workSpace, size_t wkspSize, int bmi2)
137119 +    const BYTE* ip = (const BYTE*) cSrc;
137121 +    size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
137122 +                                         workSpace, wkspSize);
137123 +    if (HUF_isError(hSize)) return hSize;
137124 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
137125 +    ip += hSize; cSrcSize -= hSize;
137127 +    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
137130 +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
137131 +                                   const void* cSrc, size_t cSrcSize,
137132 +                                   void* workSpace, size_t wkspSize)
137134 +    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
137138 +#endif /* HUF_FORCE_DECOMPRESS_X1 */
137141 +/* ***********************************/
137142 +/* Universal decompression selectors */
137143 +/* ***********************************/
137145 +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
137146 +                                    const void* cSrc, size_t cSrcSize,
137147 +                                    const HUF_DTable* DTable)
137149 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
137150 +#if defined(HUF_FORCE_DECOMPRESS_X1)
137151 +    (void)dtd;
137152 +    assert(dtd.tableType == 0);
137153 +    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
137154 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
137155 +    (void)dtd;
137156 +    assert(dtd.tableType == 1);
137157 +    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
137158 +#else
137159 +    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
137160 +                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
137161 +#endif
137164 +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
137165 +                                    const void* cSrc, size_t cSrcSize,
137166 +                                    const HUF_DTable* DTable)
137168 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
137169 +#if defined(HUF_FORCE_DECOMPRESS_X1)
137170 +    (void)dtd;
137171 +    assert(dtd.tableType == 0);
137172 +    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
137173 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
137174 +    (void)dtd;
137175 +    assert(dtd.tableType == 1);
137176 +    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
137177 +#else
137178 +    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
137179 +                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
137180 +#endif
137184 +#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
137185 +typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
137186 +static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
137188 +    /* single, double, quad */
137189 +    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
137190 +    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
137191 +    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
137192 +    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
137193 +    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
137194 +    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
137195 +    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
137196 +    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
137197 +    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
137198 +    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
137199 +    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
137200 +    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
137201 +    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
137202 +    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
137203 +    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
137204 +    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
137206 +#endif
137208 +/** HUF_selectDecoder() :
137209 + *  Tells which decoder is likely to decode faster,
137210 + *  based on a set of pre-computed metrics.
137211 + * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2.
137212 + *  Assumption : 0 < dstSize <= 128 KB */
137213 +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
137215 +    assert(dstSize > 0);
137216 +    assert(dstSize <= 128*1024);
137217 +#if defined(HUF_FORCE_DECOMPRESS_X1)
137218 +    (void)dstSize;
137219 +    (void)cSrcSize;
137220 +    return 0;
137221 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
137222 +    (void)dstSize;
137223 +    (void)cSrcSize;
137224 +    return 1;
137225 +#else
137226 +    /* decoder timing evaluation */
137227 +    {   U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
137228 +        U32 const D256 = (U32)(dstSize >> 8);
137229 +        U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
137230 +        U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
137231 +        DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, to reduce cache eviction */
137232 +        return DTime1 < DTime0;
137233 +    }
137234 +#endif
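/* [Editor's worked example, illustrative] dstSize = 100 KB, cSrcSize = 50 KB:
 *   Q      = 51200 * 16 / 102400     = 8        (ratio bucket "50-56%")
 *   D256   = 102400 >> 8             = 400
 *   DTime0 = 926  + 128 * 400        = 52126    (single-symbol X1 estimate)
 *   DTime1 = 1613 +  75 * 400        = 31613
 *   DTime1 += 31613 >> 3             -> 35564   (cache-pressure penalty)
 * 35564 < 52126, so the double-symbol decoder (X2) is selected.
 */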
137238 +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
137239 +                                     size_t dstSize, const void* cSrc,
137240 +                                     size_t cSrcSize, void* workSpace,
137241 +                                     size_t wkspSize)
137243 +    /* validation checks */
137244 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
137245 +    if (cSrcSize == 0) return ERROR(corruption_detected);
137247 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
137248 +#if defined(HUF_FORCE_DECOMPRESS_X1)
137249 +        (void)algoNb;
137250 +        assert(algoNb == 0);
137251 +        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
137252 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
137253 +        (void)algoNb;
137254 +        assert(algoNb == 1);
137255 +        return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
137256 +#else
137257 +        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
137258 +                            cSrcSize, workSpace, wkspSize):
137259 +                        HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
137260 +#endif
137261 +    }
137264 +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
137265 +                                  const void* cSrc, size_t cSrcSize,
137266 +                                  void* workSpace, size_t wkspSize)
137268 +    /* validation checks */
137269 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
137270 +    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
137271 +    if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
137272 +    if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
137274 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
137275 +#if defined(HUF_FORCE_DECOMPRESS_X1)
137276 +        (void)algoNb;
137277 +        assert(algoNb == 0);
137278 +        return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
137279 +                                cSrcSize, workSpace, wkspSize);
137280 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
137281 +        (void)algoNb;
137282 +        assert(algoNb == 1);
137283 +        return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
137284 +                                cSrcSize, workSpace, wkspSize);
137285 +#else
137286 +        return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
137287 +                                cSrcSize, workSpace, wkspSize):
137288 +                        HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
137289 +                                cSrcSize, workSpace, wkspSize);
137290 +#endif
137291 +    }
137295 +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
137297 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
137298 +#if defined(HUF_FORCE_DECOMPRESS_X1)
137299 +    (void)dtd;
137300 +    assert(dtd.tableType == 0);
137301 +    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
137302 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
137303 +    (void)dtd;
137304 +    assert(dtd.tableType == 1);
137305 +    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
137306 +#else
137307 +    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
137308 +                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
137309 +#endif
137312 +#ifndef HUF_FORCE_DECOMPRESS_X2
137313 +size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
137315 +    const BYTE* ip = (const BYTE*) cSrc;
137317 +    size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
137318 +    if (HUF_isError(hSize)) return hSize;
137319 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
137320 +    ip += hSize; cSrcSize -= hSize;
137322 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
137324 +#endif
137326 +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
137328 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
137329 +#if defined(HUF_FORCE_DECOMPRESS_X1)
137330 +    (void)dtd;
137331 +    assert(dtd.tableType == 0);
137332 +    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
137333 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
137334 +    (void)dtd;
137335 +    assert(dtd.tableType == 1);
137336 +    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
137337 +#else
137338 +    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
137339 +                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
137340 +#endif
137343 +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
137345 +    /* validation checks */
137346 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
137347 +    if (cSrcSize == 0) return ERROR(corruption_detected);
137349 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
137350 +#if defined(HUF_FORCE_DECOMPRESS_X1)
137351 +        (void)algoNb;
137352 +        assert(algoNb == 0);
137353 +        return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
137354 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
137355 +        (void)algoNb;
137356 +        assert(algoNb == 1);
137357 +        return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
137358 +#else
137359 +        return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
137360 +                        HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
137361 +#endif
137362 +    }
137364 diff --git a/lib/zstd/decompress/zstd_ddict.c b/lib/zstd/decompress/zstd_ddict.c
137365 new file mode 100644
137366 index 000000000000..dbbc7919de53
137367 --- /dev/null
137368 +++ b/lib/zstd/decompress/zstd_ddict.c
137369 @@ -0,0 +1,241 @@
137371 + * Copyright (c) Yann Collet, Facebook, Inc.
137372 + * All rights reserved.
137374 + * This source code is licensed under both the BSD-style license (found in the
137375 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
137376 + * in the COPYING file in the root directory of this source tree).
137377 + * You may select, at your option, one of the above-listed licenses.
137378 + */
137380 +/* zstd_ddict.c :
137381 + * concentrates all logic that needs to know the internals of ZSTD_DDict object */
137383 +/*-*******************************************************
137384 +*  Dependencies
137385 +*********************************************************/
137386 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
137387 +#include "../common/cpu.h"         /* bmi2 */
137388 +#include "../common/mem.h"         /* low level memory routines */
137389 +#define FSE_STATIC_LINKING_ONLY
137390 +#include "../common/fse.h"
137391 +#define HUF_STATIC_LINKING_ONLY
137392 +#include "../common/huf.h"
137393 +#include "zstd_decompress_internal.h"
137394 +#include "zstd_ddict.h"
137399 +/*-*******************************************************
137400 +*  Types
137401 +*********************************************************/
137402 +struct ZSTD_DDict_s {
137403 +    void* dictBuffer;
137404 +    const void* dictContent;
137405 +    size_t dictSize;
137406 +    ZSTD_entropyDTables_t entropy;
137407 +    U32 dictID;
137408 +    U32 entropyPresent;
137409 +    ZSTD_customMem cMem;
137410 +};  /* typedef'd to ZSTD_DDict within "zstd.h" */
137412 +const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
137414 +    assert(ddict != NULL);
137415 +    return ddict->dictContent;
137418 +size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
137420 +    assert(ddict != NULL);
137421 +    return ddict->dictSize;
137424 +void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
137426 +    DEBUGLOG(4, "ZSTD_copyDDictParameters");
137427 +    assert(dctx != NULL);
137428 +    assert(ddict != NULL);
137429 +    dctx->dictID = ddict->dictID;
137430 +    dctx->prefixStart = ddict->dictContent;
137431 +    dctx->virtualStart = ddict->dictContent;
137432 +    dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
137433 +    dctx->previousDstEnd = dctx->dictEnd;
137434 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
137435 +    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
137436 +    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
137437 +#endif
137438 +    if (ddict->entropyPresent) {
137439 +        dctx->litEntropy = 1;
137440 +        dctx->fseEntropy = 1;
137441 +        dctx->LLTptr = ddict->entropy.LLTable;
137442 +        dctx->MLTptr = ddict->entropy.MLTable;
137443 +        dctx->OFTptr = ddict->entropy.OFTable;
137444 +        dctx->HUFptr = ddict->entropy.hufTable;
137445 +        dctx->entropy.rep[0] = ddict->entropy.rep[0];
137446 +        dctx->entropy.rep[1] = ddict->entropy.rep[1];
137447 +        dctx->entropy.rep[2] = ddict->entropy.rep[2];
137448 +    } else {
137449 +        dctx->litEntropy = 0;
137450 +        dctx->fseEntropy = 0;
137451 +    }
137455 +static size_t
137456 +ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
137457 +                           ZSTD_dictContentType_e dictContentType)
137459 +    ddict->dictID = 0;
137460 +    ddict->entropyPresent = 0;
137461 +    if (dictContentType == ZSTD_dct_rawContent) return 0;
137463 +    if (ddict->dictSize < 8) {
137464 +        if (dictContentType == ZSTD_dct_fullDict)
137465 +            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
137466 +        return 0;   /* pure content mode */
137467 +    }
137468 +    {   U32 const magic = MEM_readLE32(ddict->dictContent);
137469 +        if (magic != ZSTD_MAGIC_DICTIONARY) {
137470 +            if (dictContentType == ZSTD_dct_fullDict)
137471 +                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
137472 +            return 0;   /* pure content mode */
137473 +        }
137474 +    }
137475 +    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
137477 +    /* load entropy tables */
137478 +    RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
137479 +            &ddict->entropy, ddict->dictContent, ddict->dictSize)),
137480 +        dictionary_corrupted, "");
137481 +    ddict->entropyPresent = 1;
137482 +    return 0;
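/* [Editor's summary, illustrative] How dictContentType gates the loader above:
 *   ZSTD_dct_rawContent                     -> never parsed, content-only dict
 *   ZSTD_dct_auto     + <8 bytes/bad magic  -> falls back to content-only
 *   ZSTD_dct_fullDict + <8 bytes/bad magic  -> ERROR(dictionary_corrupted)
 *   magic == ZSTD_MAGIC_DICTIONARY          -> dictID and entropy tables loaded
 */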
137486 +static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
137487 +                                      const void* dict, size_t dictSize,
137488 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
137489 +                                      ZSTD_dictContentType_e dictContentType)
137491 +    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
137492 +        ddict->dictBuffer = NULL;
137493 +        ddict->dictContent = dict;
137494 +        if (!dict) dictSize = 0;
137495 +    } else {
137496 +        void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem);
137497 +        ddict->dictBuffer = internalBuffer;
137498 +        ddict->dictContent = internalBuffer;
137499 +        if (!internalBuffer) return ERROR(memory_allocation);
137500 +        ZSTD_memcpy(internalBuffer, dict, dictSize);
137501 +    }
137502 +    ddict->dictSize = dictSize;
137503 +    ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
137505 +    /* parse dictionary content */
137506 +    FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");
137508 +    return 0;
137511 +ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
137512 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
137513 +                                      ZSTD_dictContentType_e dictContentType,
137514 +                                      ZSTD_customMem customMem)
137516 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
137518 +    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem);
137519 +        if (ddict == NULL) return NULL;
137520 +        ddict->cMem = customMem;
137521 +        {   size_t const initResult = ZSTD_initDDict_internal(ddict,
137522 +                                            dict, dictSize,
137523 +                                            dictLoadMethod, dictContentType);
137524 +            if (ZSTD_isError(initResult)) {
137525 +                ZSTD_freeDDict(ddict);
137526 +                return NULL;
137527 +        }   }
137528 +        return ddict;
137529 +    }
137532 +/*! ZSTD_createDDict() :
137533 +*   Create a digested dictionary, to start decompression without startup delay.
137534 +*   `dict` content is copied inside DDict.
137535 +*   Consequently, `dict` can be released after `ZSTD_DDict` creation */
137536 +ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
137538 +    ZSTD_customMem const allocator = { NULL, NULL, NULL };
137539 +    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
137542 +/*! ZSTD_createDDict_byReference() :
137543 + *  Create a digested dictionary, to start decompression without startup delay.
137544 + *  Dictionary content is simply referenced, and will be accessed during decompression.
137545 + *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
137546 +ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
137548 +    ZSTD_customMem const allocator = { NULL, NULL, NULL };
137549 +    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
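/* [Editor's usage sketch, not part of the patch] byRef skips the internal copy
 * but ties the DDict's lifetime to the caller's buffer; byCopy does not.
 * Assumes the public prototypes from <linux/zstd.h>; the demo function name is
 * hypothetical.
 */
#include <linux/zstd.h>

static void ddict_lifetime_demo(const void* dictBuf, size_t dictSize)
{
    ZSTD_DDict* const byCopy = ZSTD_createDDict(dictBuf, dictSize);
    ZSTD_DDict* const byRef  = ZSTD_createDDict_byReference(dictBuf, dictSize);
    ZSTD_freeDDict(byCopy);   /* dictBuf may be released independently of byCopy */
    ZSTD_freeDDict(byRef);    /* byRef must be freed before dictBuf goes away */
}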
137553 +const ZSTD_DDict* ZSTD_initStaticDDict(
137554 +                                void* sBuffer, size_t sBufferSize,
137555 +                                const void* dict, size_t dictSize,
137556 +                                ZSTD_dictLoadMethod_e dictLoadMethod,
137557 +                                ZSTD_dictContentType_e dictContentType)
137559 +    size_t const neededSpace = sizeof(ZSTD_DDict)
137560 +                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
137561 +    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
137562 +    assert(sBuffer != NULL);
137563 +    assert(dict != NULL);
137564 +    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */
137565 +    if (sBufferSize < neededSpace) return NULL;
137566 +    if (dictLoadMethod == ZSTD_dlm_byCopy) {
137567 +        ZSTD_memcpy(ddict+1, dict, dictSize);  /* local copy */
137568 +        dict = ddict+1;
137569 +    }
137570 +    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
137571 +                                              dict, dictSize,
137572 +                                              ZSTD_dlm_byRef, dictContentType) ))
137573 +        return NULL;
137574 +    return ddict;
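/* [Editor's usage sketch, not part of the patch] Placing a DDict into a
 * caller-owned workspace; the workspace must be 8-byte aligned and, for
 * byCopy, large enough for the struct plus the dictionary bytes. Assumes the
 * public prototypes from <linux/zstd.h>; the wrapper name is hypothetical.
 */
static const ZSTD_DDict* make_static_ddict(void* workspace, size_t wsSize,
                                           const void* dict, size_t dictSize)
{
    if (wsSize < ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy)) return NULL;
    return ZSTD_initStaticDDict(workspace, wsSize, dict, dictSize,
                                ZSTD_dlm_byCopy, ZSTD_dct_auto);
}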
137578 +size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
137580 +    if (ddict==NULL) return 0;   /* support free on NULL */
137581 +    {   ZSTD_customMem const cMem = ddict->cMem;
137582 +        ZSTD_customFree(ddict->dictBuffer, cMem);
137583 +        ZSTD_customFree(ddict, cMem);
137584 +        return 0;
137585 +    }
137588 +/*! ZSTD_estimateDDictSize() :
137589 + *  Estimate amount of memory that will be needed to create a dictionary for decompression.
137590 + *  Note : dictionaries created by reference (ZSTD_dlm_byRef) are smaller
137591 +size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
137593 +    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
137596 +size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
137598 +    if (ddict==NULL) return 0;   /* support sizeof on NULL */
137599 +    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
137602 +/*! ZSTD_getDictID_fromDDict() :
137603 + *  Provides the dictID of the dictionary loaded into `ddict`.
137604 + *  If @return == 0, the dictionary is empty, or not conformant to the Zstandard specification.
137605 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
137606 +unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
137608 +    if (ddict==NULL) return 0;
137609 +    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
137611 diff --git a/lib/zstd/decompress/zstd_ddict.h b/lib/zstd/decompress/zstd_ddict.h
137612 new file mode 100644
137613 index 000000000000..8c1a79d666f8
137614 --- /dev/null
137615 +++ b/lib/zstd/decompress/zstd_ddict.h
137616 @@ -0,0 +1,44 @@
137618 + * Copyright (c) Yann Collet, Facebook, Inc.
137619 + * All rights reserved.
137621 + * This source code is licensed under both the BSD-style license (found in the
137622 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
137623 + * in the COPYING file in the root directory of this source tree).
137624 + * You may select, at your option, one of the above-listed licenses.
137625 + */
137628 +#ifndef ZSTD_DDICT_H
137629 +#define ZSTD_DDICT_H
137631 +/*-*******************************************************
137632 + *  Dependencies
137633 + *********************************************************/
137634 +#include "../common/zstd_deps.h"   /* size_t */
137635 +#include <linux/zstd.h>     /* ZSTD_DDict, and several public functions */
137638 +/*-*******************************************************
137639 + *  Interface
137640 + *********************************************************/
137642 +/* note: several prototypes are already published in `zstd.h` :
137643 + * ZSTD_createDDict()
137644 + * ZSTD_createDDict_byReference()
137645 + * ZSTD_createDDict_advanced()
137646 + * ZSTD_freeDDict()
137647 + * ZSTD_initStaticDDict()
137648 + * ZSTD_sizeof_DDict()
137649 + * ZSTD_estimateDDictSize()
137650 + * ZSTD_getDictID_fromDict()
137651 + */
137653 +const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
137654 +size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);
137656 +void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
137660 +#endif /* ZSTD_DDICT_H */
137661 diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
137662 new file mode 100644
137663 index 000000000000..16b4ea795a7e
137664 --- /dev/null
137665 +++ b/lib/zstd/decompress/zstd_decompress.c
137666 @@ -0,0 +1,2075 @@
137668 + * Copyright (c) Yann Collet, Facebook, Inc.
137669 + * All rights reserved.
137671 + * This source code is licensed under both the BSD-style license (found in the
137672 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
137673 + * in the COPYING file in the root directory of this source tree).
137674 + * You may select, at your option, one of the above-listed licenses.
137675 + */
137678 +/* ***************************************************************
137679 +*  Tuning parameters
137680 +*****************************************************************/
137682 + * HEAPMODE :
137683 + * Selects how the default decompression function ZSTD_decompress() allocates its context:
137684 + * on the stack (0), or on the heap (1, default; requires malloc()).
137685 + * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
137686 + */
137687 +#ifndef ZSTD_HEAPMODE
137688 +#  define ZSTD_HEAPMODE 1
137689 +#endif
137692 +*  LEGACY_SUPPORT :
137693 +*  if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
137697 + *  MAXWINDOWSIZE_DEFAULT :
137698 + *  maximum window size accepted by DStream __by default__.
137699 + *  Frames requiring more memory will be rejected.
137700 + *  It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
137701 + */
137702 +#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
137703 +#  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
137704 +#endif
137707 + *  NO_FORWARD_PROGRESS_MAX :
137708 + *  maximum allowed number of calls to ZSTD_decompressStream()
137709 + *  without any forward progress
137710 + *  (defined as: no byte read from input, and no byte flushed to output)
137711 + *  before triggering an error.
137712 + */
137713 +#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
137714 +#  define ZSTD_NO_FORWARD_PROGRESS_MAX 16
137715 +#endif
137718 +/*-*******************************************************
137719 +*  Dependencies
137720 +*********************************************************/
137721 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
137722 +#include "../common/cpu.h"         /* bmi2 */
137723 +#include "../common/mem.h"         /* low level memory routines */
137724 +#define FSE_STATIC_LINKING_ONLY
137725 +#include "../common/fse.h"
137726 +#define HUF_STATIC_LINKING_ONLY
137727 +#include "../common/huf.h"
137728 +#include <linux/xxhash.h> /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */
137729 +#include "../common/zstd_internal.h"  /* blockProperties_t */
137730 +#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
137731 +#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
137732 +#include "zstd_decompress_block.h"   /* ZSTD_decompressBlock_internal */
137737 +/*************************************
137738 + * Multiple DDicts Hashset internals *
137739 + *************************************/
137741 +#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
137742 +#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3   /* These two constants encode a SIZE_MULT/COUNT_MULT == 3/4 load factor without using a float.
137743 +                                                     * Currently, that means a 0.75 load factor:
137744 +                                                     * once (count * COUNT_MULT) / (size * SIZE_MULT) != 0, i.e. count >= 0.75 * size,
137745 +                                                     * the ddict hash set has exceeded its load factor and must grow.
137746 +                                                     */
137748 +#define DDICT_HASHSET_TABLE_BASE_SIZE 64
137749 +#define DDICT_HASHSET_RESIZE_FACTOR 2
137751 +/* Hash function to determine starting position of dict insertion within the table
137752 + * Returns an index in [0, hashSet->ddictPtrTableSize)
137753 + */
137754 +static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
137755 +    const U64 hash = xxh64(&dictID, sizeof(U32), 0);
137756 +    /* DDict ptr table size is a multiple of 2, use size - 1 as mask to get index within [0, hashSet->ddictPtrTableSize) */
137757 +    return hash & (hashSet->ddictPtrTableSize - 1);
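/* [Editor's sketch, not part of the patch] The two MAX_LOAD_FACTOR multipliers
 * encode a 3/4 load factor in pure integer arithmetic, and the power-of-two
 * table size turns modulo into the mask used above. Helper names are
 * hypothetical; the expand check mirrors the parenthesized form used below.
 */
static int ddict_set_should_expand(size_t count, size_t tableSize)
{
    /* count/tableSize >= 3/4  <=>  count*4 >= tableSize*3 */
    return (count * 4) / (tableSize * 3) != 0;
}

static size_t ddict_set_slot(unsigned long long hash, size_t tableSize)
{
    return (size_t)(hash & (tableSize - 1));   /* valid only while tableSize is a power of two */
}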
137760 +/* Adds a DDict to the hash set without resizing it.
137761 + * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
137762 + * Returns 0 if successful, or a zstd error code if something went wrong.
137763 + */
137764 +static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
137765 +    const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
137766 +    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
137767 +    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
137768 +    RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
137769 +    DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
137770 +    while (hashSet->ddictPtrTable[idx] != NULL) {
137771 +        /* Replace existing ddict if inserting ddict with same dictID */
137772 +        if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
137773 +            DEBUGLOG(4, "DictID already exists, replacing rather than adding");
137774 +            hashSet->ddictPtrTable[idx] = ddict;
137775 +            return 0;
137776 +        }
137777 +        idx = (idx + 1) & idxRangeMask;   /* advance then wrap, so the probe never reads past the table */
137779 +    }
137780 +    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
137781 +    hashSet->ddictPtrTable[idx] = ddict;
137782 +    hashSet->ddictPtrCount++;
137783 +    return 0;
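/* [Editor's worked example, illustrative] With ddictPtrTableSize = 8 the mask
 * is 7: a dictID hashing to slot 6 while slots 6 and 7 are occupied by other
 * dictIDs probes 6, 7, wraps to 0 and inserts there; a matching dictID found
 * along the way is replaced in place instead.
 */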
137786 +/* Expands the hash table by a factor of DDICT_HASHSET_RESIZE_FACTOR:
137787 + * allocates a new table, rehashes all entries into it, then frees the old table.
137788 + * Returns 0 on success, otherwise a zstd error code.
137789 + */
137790 +static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
137791 +    size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
137792 +    const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
137793 +    const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
137794 +    size_t oldTableSize = hashSet->ddictPtrTableSize;
137795 +    size_t i;
137797 +    DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
137798 +    RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
137799 +    hashSet->ddictPtrTable = newTable;
137800 +    hashSet->ddictPtrTableSize = newTableSize;
137801 +    hashSet->ddictPtrCount = 0;
137802 +    for (i = 0; i < oldTableSize; ++i) {
137803 +        if (oldTable[i] != NULL) {
137804 +            FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
137805 +        }
137806 +    }
137807 +    ZSTD_customFree((void*)oldTable, customMem);
137808 +    DEBUGLOG(4, "Finished re-hash");
137809 +    return 0;
137812 +/* Fetches a DDict with the given dictID
137813 + * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
137814 + */
137815 +static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
137816 +    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
137817 +    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
137818 +    DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
137819 +    for (;;) {
137820 +        size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
137821 +        if (currDictID == dictID || currDictID == 0) {
137822 +            /* currDictID == 0 implies a NULL ddict entry */
137823 +            break;
137824 +        } else {
137825 +            idx = (idx + 1) & idxRangeMask;    /* wraps to the start of the table at the end */
137827 +        }
137828 +    }
137829 +    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
137830 +    return hashSet->ddictPtrTable[idx];
137833 +/* Allocates space for and returns a ddict hash set
137834 + * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
137835 + * Returns NULL if allocation failed.
137836 + */
137837 +static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
137838 +    ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
137839 +    DEBUGLOG(4, "Allocating new hash set");
+    if (!ret) return NULL;   /* must check before dereferencing */
+    ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
+    ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
+    ret->ddictPtrCount = 0;
+    if (!ret->ddictPtrTable) {
+        ZSTD_customFree(ret, customMem);   /* don't leak the set itself when the table allocation fails */
+        return NULL;
+    }
137846 +    return ret;
137849 +/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
137850 + * Note: The ZSTD_DDict* within the table are NOT freed.
137851 + */
137852 +static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
137853 +    DEBUGLOG(4, "Freeing ddict hash set");
137854 +    if (hashSet && hashSet->ddictPtrTable) {
137855 +        ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
137856 +    }
137857 +    if (hashSet) {
137858 +        ZSTD_customFree(hashSet, customMem);
137859 +    }
137862 +/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
137863 + * Returns 0 on success, or a ZSTD error.
137864 + */
137865 +static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
137866 +    DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
+    if ((hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT) / (hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT) != 0) {   /* parenthesized so the check triggers at the documented 3/4 load, not 1/4 */
137868 +        FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
137869 +    }
137870 +    FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
137871 +    return 0;
137874 +/*-*************************************************************
137875 +*   Context management
137876 +***************************************************************/
137877 +size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
137879 +    if (dctx==NULL) return 0;   /* support sizeof NULL */
137880 +    return sizeof(*dctx)
137881 +           + ZSTD_sizeof_DDict(dctx->ddictLocal)
137882 +           + dctx->inBuffSize + dctx->outBuffSize;
137885 +size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
137888 +static size_t ZSTD_startingInputLength(ZSTD_format_e format)
137890 +    size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
137891 +    /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
137892 +    assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
137893 +    return startingInputLength;
137896 +static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
137898 +    assert(dctx->streamStage == zdss_init);
137899 +    dctx->format = ZSTD_f_zstd1;
137900 +    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
137901 +    dctx->outBufferMode = ZSTD_bm_buffered;
137902 +    dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
137903 +    dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
137906 +static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
137908 +    dctx->staticSize  = 0;
137909 +    dctx->ddict       = NULL;
137910 +    dctx->ddictLocal  = NULL;
137911 +    dctx->dictEnd     = NULL;
137912 +    dctx->ddictIsCold = 0;
137913 +    dctx->dictUses = ZSTD_dont_use;
137914 +    dctx->inBuff      = NULL;
137915 +    dctx->inBuffSize  = 0;
137916 +    dctx->outBuffSize = 0;
137917 +    dctx->streamStage = zdss_init;
137918 +    dctx->legacyContext = NULL;
137919 +    dctx->previousLegacyVersion = 0;
137920 +    dctx->noForwardProgress = 0;
137921 +    dctx->oversizedDuration = 0;
137922 +    dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
137923 +    dctx->ddictSet = NULL;
137924 +    ZSTD_DCtx_resetParameters(dctx);
137925 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
137926 +    dctx->dictContentEndForFuzzing = NULL;
137927 +#endif
137930 +ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
137932 +    ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
137934 +    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
137935 +    if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL;  /* minimum size */
137937 +    ZSTD_initDCtx_internal(dctx);
137938 +    dctx->staticSize = workspaceSize;
137939 +    dctx->inBuff = (char*)(dctx+1);
137940 +    return dctx;
137943 +ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
137945 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
137947 +    {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
137948 +        if (!dctx) return NULL;
137949 +        dctx->customMem = customMem;
137950 +        ZSTD_initDCtx_internal(dctx);
137951 +        return dctx;
137952 +    }
137955 +ZSTD_DCtx* ZSTD_createDCtx(void)
137957 +    DEBUGLOG(3, "ZSTD_createDCtx");
137958 +    return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
137961 +static void ZSTD_clearDict(ZSTD_DCtx* dctx)
137963 +    ZSTD_freeDDict(dctx->ddictLocal);
137964 +    dctx->ddictLocal = NULL;
137965 +    dctx->ddict = NULL;
137966 +    dctx->dictUses = ZSTD_dont_use;
137969 +size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
137971 +    if (dctx==NULL) return 0;   /* support free on NULL */
137972 +    RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
137973 +    {   ZSTD_customMem const cMem = dctx->customMem;
137974 +        ZSTD_clearDict(dctx);
137975 +        ZSTD_customFree(dctx->inBuff, cMem);
137976 +        dctx->inBuff = NULL;
137977 +        if (dctx->ddictSet) {
137978 +            ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
137979 +            dctx->ddictSet = NULL;
137980 +        }
137981 +        ZSTD_customFree(dctx, cMem);
137982 +        return 0;
137983 +    }
137986 +/* no longer useful */
137987 +void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
137989 +    size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
137990 +    ZSTD_memcpy(dstDCtx, srcDCtx, toCopy);  /* no need to copy workspace */
137993 +/* Given a dctx with digested frame params, re-selects the correct ZSTD_DDict based on
137994 + * the dict ID requested by the frame. If a reference to the matching ZSTD_DDict exists,
137995 + * it is installed as the dictionary used to decompress the frame.
137997 + * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
137999 + * ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
138000 + */
138001 +static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
138002 +    assert(dctx->refMultipleDDicts && dctx->ddictSet);
138003 +    DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
138004 +    if (dctx->ddict) {
138005 +        const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
138006 +        if (frameDDict) {
138007 +            DEBUGLOG(4, "DDict found!");
138008 +            ZSTD_clearDict(dctx);
138009 +            dctx->dictID = dctx->fParams.dictID;
138010 +            dctx->ddict = frameDDict;
138011 +            dctx->dictUses = ZSTD_use_indefinitely;
138012 +        }
138013 +    }
138017 +/*-*************************************************************
138018 + *   Frame header decoding
138019 + ***************************************************************/
138021 +/*! ZSTD_isFrame() :
138022 + *  Tells if the content of `buffer` starts with a valid Frame Identifier.
138023 + *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
138024 + *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
138025 + *  Note 3 : Skippable Frame Identifiers are considered valid. */
138026 +unsigned ZSTD_isFrame(const void* buffer, size_t size)
138028 +    if (size < ZSTD_FRAMEIDSIZE) return 0;
138029 +    {   U32 const magic = MEM_readLE32(buffer);
138030 +        if (magic == ZSTD_MAGICNUMBER) return 1;
138031 +        if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
138032 +    }
138033 +    return 0;
138036 +/** ZSTD_frameHeaderSize_internal() :
138037 + *  srcSize must be large enough to reach header size fields.
138038 + *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
138039 + * @return : size of the Frame Header
138040 + *           or an error code, which can be tested with ZSTD_isError() */
138041 +static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
138043 +    size_t const minInputSize = ZSTD_startingInputLength(format);
138044 +    RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, "");
138046 +    {   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
138047 +        U32 const dictID= fhd & 3;
138048 +        U32 const singleSegment = (fhd >> 5) & 1;
138049 +        U32 const fcsId = fhd >> 6;
138050 +        return minInputSize + !singleSegment
138051 +             + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
138052 +             + (singleSegment && !fcsId);
138053 +    }
138056 +/** ZSTD_frameHeaderSize() :
138057 + *  srcSize must be >= ZSTD_frameHeaderSize_prefix.
138058 + * @return : size of the Frame Header,
138059 + *           or an error code (if srcSize is too small) */
138060 +size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
138062 +    return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
138066 +/** ZSTD_getFrameHeader_advanced() :
138067 + *  decode Frame Header, or require larger `srcSize`.
138068 + *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
138069 + * @return : 0, `zfhPtr` is correctly filled,
138070 + *          >0, `srcSize` is too small; returned value is the minimum `srcSize` amount wanted,
138071 + *           or an error code, which can be tested using ZSTD_isError() */
138072 +size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
138074 +    const BYTE* ip = (const BYTE*)src;
138075 +    size_t const minInputSize = ZSTD_startingInputLength(format);
138077 +    ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers cannot tell that zfhPtr will only be read if the return value is zero, since those are two different signals */
138078 +    if (srcSize < minInputSize) return minInputSize;
138079 +    RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
138081 +    if ( (format != ZSTD_f_zstd1_magicless)
138082 +      && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
138083 +        if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
138084 +            /* skippable frame */
138085 +            if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
138086 +                return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
138087 +            ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
138088 +            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
138089 +            zfhPtr->frameType = ZSTD_skippableFrame;
138090 +            return 0;
138091 +        }
138092 +        RETURN_ERROR(prefix_unknown, "");
138093 +    }
138095 +    /* ensure there is enough `srcSize` to fully read/decode frame header */
138096 +    {   size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
138097 +        if (srcSize < fhsize) return fhsize;
138098 +        zfhPtr->headerSize = (U32)fhsize;
138099 +    }
138101 +    {   BYTE const fhdByte = ip[minInputSize-1];
138102 +        size_t pos = minInputSize;
138103 +        U32 const dictIDSizeCode = fhdByte&3;
138104 +        U32 const checksumFlag = (fhdByte>>2)&1;
138105 +        U32 const singleSegment = (fhdByte>>5)&1;
138106 +        U32 const fcsID = fhdByte>>6;
138107 +        U64 windowSize = 0;
138108 +        U32 dictID = 0;
138109 +        U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
138110 +        RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
138111 +                        "reserved bits, must be zero");
138113 +        if (!singleSegment) {
138114 +            BYTE const wlByte = ip[pos++];
138115 +            U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
138116 +            RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, "");
138117 +            windowSize = (1ULL << windowLog);
138118 +            windowSize += (windowSize >> 3) * (wlByte&7);
138119 +        }
138120 +        switch(dictIDSizeCode)
138121 +        {
138122 +            default: assert(0);  /* impossible */
138123 +            case 0 : break;
138124 +            case 1 : dictID = ip[pos]; pos++; break;
138125 +            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
138126 +            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
138127 +        }
138128 +        switch(fcsID)
138129 +        {
138130 +            default: assert(0);  /* impossible */
138131 +            case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
138132 +            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
138133 +            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
138134 +            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
138135 +        }
138136 +        if (singleSegment) windowSize = frameContentSize;
138138 +        zfhPtr->frameType = ZSTD_frame;
138139 +        zfhPtr->frameContentSize = frameContentSize;
138140 +        zfhPtr->windowSize = windowSize;
138141 +        zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
138142 +        zfhPtr->dictID = dictID;
138143 +        zfhPtr->checksumFlag = checksumFlag;
138144 +    }
138145 +    return 0;
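/* [Editor's worked example, illustrative] Decoding the window descriptor byte
 * above for wlByte = 0x21: exponent 0x21>>3 = 4 gives windowLog = 4 +
 * ZSTD_WINDOWLOG_ABSOLUTEMIN (10) = 14, so the base is 1<<14 = 16384 bytes;
 * mantissa 0x21&7 = 1 adds (16384>>3)*1 = 2048, for a window of 18432 bytes.
 */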
138148 +/** ZSTD_getFrameHeader() :
138149 + *  decode Frame Header, or require larger `srcSize`.
138150 + *  note : this function does not consume input, it only reads it.
138151 + * @return : 0, `zfhPtr` is correctly filled,
138152 + *          >0, `srcSize` is too small; returned value is the minimum `srcSize` amount wanted,
138153 + *           or an error code, which can be tested using ZSTD_isError() */
138154 +size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
138156 +    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
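/* [Editor's usage sketch, not part of the patch] The tri-state return
 * convention above, assuming the public prototypes from <linux/zstd.h>;
 * the function and buffer names are hypothetical.
 */
static int try_read_frame_header(const void* buf, size_t have)
{
    ZSTD_frameHeader zfh;
    size_t const r = ZSTD_getFrameHeader(&zfh, buf, have);
    if (ZSTD_isError(r)) return -1;      /* corrupted or unsupported header */
    if (r > 0) return (int)r;            /* incomplete: need at least r bytes of input */
    /* success: zfh.frameType, zfh.frameContentSize, zfh.windowSize are filled */
    return 0;
}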
138160 +/** ZSTD_getFrameContentSize() :
138161 + *  compatible with legacy mode
138162 + * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
138163 + *         - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
138164 + *         - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
138165 +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
138167 +    {   ZSTD_frameHeader zfh;
138168 +        if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
138169 +            return ZSTD_CONTENTSIZE_ERROR;
138170 +        if (zfh.frameType == ZSTD_skippableFrame) {
138171 +            return 0;
138172 +        } else {
138173 +            return zfh.frameContentSize;
138174 +    }   }
138177 +static size_t readSkippableFrameSize(void const* src, size_t srcSize)
138179 +    size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
138180 +    U32 sizeU32;
138182 +    RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");
138184 +    sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
138185 +    RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
138186 +                    frameParameter_unsupported, "");
138187 +    {
138188 +        size_t const skippableSize = skippableHeaderSize + sizeU32;
138189 +        RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
138190 +        return skippableSize;
138191 +    }
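/* Hedged helper sketch mirroring the arithmetic above, for callers that skip
 * such frames by hand; `hdr` is assumed to expose ZSTD_SKIPPABLEHEADERSIZE
 * (8) readable bytes : a 4-byte skippable magic followed by a little-endian
 * 32-bit payload size. Overflow checking is elided here. */
static size_t example_skippableTotalSize(const unsigned char* hdr)
{
    size_t const payload = (size_t)hdr[4]
                         | ((size_t)hdr[5] << 8)
                         | ((size_t)hdr[6] << 16)
                         | ((size_t)hdr[7] << 24);
    return 8 + payload;   /* header + skipped user payload */
}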
138194 +/** ZSTD_findDecompressedSize() :
138195 + *  compatible with legacy mode
138196 + *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
138197 + *      skippable frames
138198 + *  @return : decompressed size of the frames contained */
138199 +unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
138201 +    unsigned long long totalDstSize = 0;
138203 +    while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
138204 +        U32 const magicNumber = MEM_readLE32(src);
138206 +        if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
138207 +            size_t const skippableSize = readSkippableFrameSize(src, srcSize);
138208 +            if (ZSTD_isError(skippableSize)) {
138209 +                return ZSTD_CONTENTSIZE_ERROR;
138210 +            }
138211 +            assert(skippableSize <= srcSize);
138213 +            src = (const BYTE *)src + skippableSize;
138214 +            srcSize -= skippableSize;
138215 +            continue;
138216 +        }
138218 +        {   unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
138219 +            if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
138221 +            /* check for overflow */
138222 +            if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
138223 +            totalDstSize += ret;
138224 +        }
138225 +        {   size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
138226 +            if (ZSTD_isError(frameSrcSize)) {
138227 +                return ZSTD_CONTENTSIZE_ERROR;
138228 +            }
138230 +            src = (const BYTE *)src + frameSrcSize;
138231 +            srcSize -= frameSrcSize;
138232 +        }
138233 +    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
138235 +    if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
138237 +    return totalDstSize;
138240 +/** ZSTD_getDecompressedSize() :
138241 + *  compatible with legacy mode
138242 + * @return : decompressed size if known, 0 otherwise
138243 +             note : 0 can mean any of the following :
138244 +                   - frame content is empty
138245 +                   - decompressed size field is not present in frame header
138246 +                   - frame header unknown / not supported
138247 +                   - frame header not complete (`srcSize` too small) */
138248 +unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
138250 +    unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
138251 +    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
138252 +    return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
138256 +/** ZSTD_decodeFrameHeader() :
138257 + * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
138258 + * If multiple DDict references are enabled, this will also choose the correct DDict to use.
138259 + * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
138260 +static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
138262 +    size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
138263 +    if (ZSTD_isError(result)) return result;    /* invalid header */
138264 +    RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
138266 +    /* Reference DDict requested by frame if dctx references multiple ddicts */
138267 +    if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) {
138268 +        ZSTD_DCtx_selectFrameDDict(dctx);
138269 +    }
138271 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
138272 +    /* Skip the dictID check in fuzzing mode, because it makes the search
138273 +     * harder.
138274 +     */
138275 +    RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
138276 +                    dictionary_wrong, "");
138277 +#endif
138278 +    dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0;
138279 +    if (dctx->validateChecksum) xxh64_reset(&dctx->xxhState, 0);
138280 +    dctx->processedCSize += headerSize;
138281 +    return 0;
138284 +static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
138286 +    ZSTD_frameSizeInfo frameSizeInfo;
138287 +    frameSizeInfo.compressedSize = ret;
138288 +    frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
138289 +    return frameSizeInfo;
138292 +static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
138294 +    ZSTD_frameSizeInfo frameSizeInfo;
138295 +    ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
138298 +    if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
138299 +        && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
138300 +        frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
138301 +        assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
138302 +               frameSizeInfo.compressedSize <= srcSize);
138303 +        return frameSizeInfo;
138304 +    } else {
138305 +        const BYTE* ip = (const BYTE*)src;
138306 +        const BYTE* const ipstart = ip;
138307 +        size_t remainingSize = srcSize;
138308 +        size_t nbBlocks = 0;
138309 +        ZSTD_frameHeader zfh;
138311 +        /* Extract Frame Header */
138312 +        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
138313 +            if (ZSTD_isError(ret))
138314 +                return ZSTD_errorFrameSizeInfo(ret);
138315 +            if (ret > 0)
138316 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
138317 +        }
138319 +        ip += zfh.headerSize;
138320 +        remainingSize -= zfh.headerSize;
138322 +        /* Iterate over each block */
138323 +        while (1) {
138324 +            blockProperties_t blockProperties;
138325 +            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
138326 +            if (ZSTD_isError(cBlockSize))
138327 +                return ZSTD_errorFrameSizeInfo(cBlockSize);
138329 +            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
138330 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
138332 +            ip += ZSTD_blockHeaderSize + cBlockSize;
138333 +            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
138334 +            nbBlocks++;
138336 +            if (blockProperties.lastBlock) break;
138337 +        }
138339 +        /* Final frame content checksum */
138340 +        if (zfh.checksumFlag) {
138341 +            if (remainingSize < 4)
138342 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
138343 +            ip += 4;
138344 +        }
138346 +        frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
138347 +        frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
138348 +                                        ? zfh.frameContentSize
138349 +                                        : nbBlocks * zfh.blockSizeMax;
138350 +        return frameSizeInfo;
138351 +    }
138354 +/** ZSTD_findFrameCompressedSize() :
138355 + *  compatible with legacy mode
138356 + *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
138357 + *  `srcSize` must be at least as large as the frame contained
138358 + *  @return : the compressed size of the frame starting at `src` */
138359 +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
138361 +    ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
138362 +    return frameSizeInfo.compressedSize;
138365 +/** ZSTD_decompressBound() :
138366 + *  compatible with legacy mode
138367 + *  `src` must point to the start of a ZSTD frame or a skippable frame
138368 + *  `srcSize` must be at least as large as the frame contained
138369 + *  @return : the maximum decompressed size of the compressed source
138370 + */
138371 +unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
138373 +    unsigned long long bound = 0;
138374 +    /* Iterate over each frame */
138375 +    while (srcSize > 0) {
138376 +        ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
138377 +        size_t const compressedSize = frameSizeInfo.compressedSize;
138378 +        unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
138379 +        if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
138380 +            return ZSTD_CONTENTSIZE_ERROR;
138381 +        assert(srcSize >= compressedSize);
138382 +        src = (const BYTE*)src + compressedSize;
138383 +        srcSize -= compressedSize;
138384 +        bound += decompressedBound;
138385 +    }
138386 +    return bound;
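/* Allocation sketch showing how the bound is typically consumed; `my_alloc`
 * is a hypothetical, environment-specific allocator, and truncation of the
 * 64-bit bound on 32-bit targets is ignored in this sketch. A buffer of
 * `bound` bytes is guaranteed large enough for the one-shot decompressors. */
static void* example_allocForDecompression(const void* src, size_t srcSize,
                                           size_t* dstCapacity)
{
    unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
    if (bound == ZSTD_CONTENTSIZE_ERROR) return NULL;   /* corrupt input */
    *dstCapacity = (size_t)bound;
    return my_alloc(*dstCapacity);   /* hypothetical allocator */
}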
138390 +/*-*************************************************************
138391 + *   Frame decoding
138392 + ***************************************************************/
138394 +/** ZSTD_insertBlock() :
138395 + *  insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
138396 +size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
138398 +    DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
138399 +    ZSTD_checkContinuity(dctx, blockStart, blockSize);
138400 +    dctx->previousDstEnd = (const char*)blockStart + blockSize;
138401 +    return blockSize;
138405 +static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
138406 +                          const void* src, size_t srcSize)
138408 +    DEBUGLOG(5, "ZSTD_copyRawBlock");
138409 +    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
138410 +    if (dst == NULL) {
138411 +        if (srcSize == 0) return 0;
138412 +        RETURN_ERROR(dstBuffer_null, "");
138413 +    }
138414 +    ZSTD_memcpy(dst, src, srcSize);
138415 +    return srcSize;
138418 +static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
138419 +                               BYTE b,
138420 +                               size_t regenSize)
138422 +    RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
138423 +    if (dst == NULL) {
138424 +        if (regenSize == 0) return 0;
138425 +        RETURN_ERROR(dstBuffer_null, "");
138426 +    }
138427 +    ZSTD_memset(dst, b, regenSize);
138428 +    return regenSize;
138431 +static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
138433 +    (void)dctx;
138434 +    (void)uncompressedSize;
138435 +    (void)compressedSize;
138436 +    (void)streaming;
138440 +/*! ZSTD_decompressFrame() :
138441 + * @dctx must be properly initialized
138442 + *  will update *srcPtr and *srcSizePtr,
138443 + *  to make *srcPtr progress by one frame. */
138444 +static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
138445 +                                   void* dst, size_t dstCapacity,
138446 +                             const void** srcPtr, size_t *srcSizePtr)
138448 +    const BYTE* const istart = (const BYTE*)(*srcPtr);
138449 +    const BYTE* ip = istart;
138450 +    BYTE* const ostart = (BYTE*)dst;
138451 +    BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart;
138452 +    BYTE* op = ostart;
138453 +    size_t remainingSrcSize = *srcSizePtr;
138455 +    DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
138457 +    /* check */
138458 +    RETURN_ERROR_IF(
138459 +        remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
138460 +        srcSize_wrong, "");
138462 +    /* Frame Header */
138463 +    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
138464 +                ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
138465 +        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
138466 +        RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
138467 +                        srcSize_wrong, "");
138468 +        FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , "");
138469 +        ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
138470 +    }
138472 +    /* Loop on each block */
138473 +    while (1) {
138474 +        size_t decodedSize;
138475 +        blockProperties_t blockProperties;
138476 +        size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
138477 +        if (ZSTD_isError(cBlockSize)) return cBlockSize;
138479 +        ip += ZSTD_blockHeaderSize;
138480 +        remainingSrcSize -= ZSTD_blockHeaderSize;
138481 +        RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
138483 +        switch(blockProperties.blockType)
138484 +        {
138485 +        case bt_compressed:
138486 +            decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1);
138487 +            break;
138488 +        case bt_raw :
138489 +            decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
138490 +            break;
138491 +        case bt_rle :
138492 +            decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize);
138493 +            break;
138494 +        case bt_reserved :
138495 +        default:
138496 +            RETURN_ERROR(corruption_detected, "invalid block type");
138497 +        }
138499 +        if (ZSTD_isError(decodedSize)) return decodedSize;
138500 +        if (dctx->validateChecksum)
138501 +            xxh64_update(&dctx->xxhState, op, decodedSize);
138502 +        if (decodedSize != 0)
138503 +            op += decodedSize;
138504 +        assert(ip != NULL);
138505 +        ip += cBlockSize;
138506 +        remainingSrcSize -= cBlockSize;
138507 +        if (blockProperties.lastBlock) break;
138508 +    }
138510 +    if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
138511 +        RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
138512 +                        corruption_detected, "");
138513 +    }
138514 +    if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
138515 +        RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
138516 +        if (!dctx->forceIgnoreChecksum) {
138517 +            U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
138518 +            U32 checkRead;
138519 +            checkRead = MEM_readLE32(ip);
138520 +            RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
138521 +        }
138522 +        ip += 4;
138523 +        remainingSrcSize -= 4;
138524 +    }
138525 +    ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
138526 +    /* Allow caller to get size read */
138527 +    *srcPtr = ip;
138528 +    *srcSizePtr = remainingSrcSize;
138529 +    return (size_t)(op-ostart);
138532 +static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
138533 +                                        void* dst, size_t dstCapacity,
138534 +                                  const void* src, size_t srcSize,
138535 +                                  const void* dict, size_t dictSize,
138536 +                                  const ZSTD_DDict* ddict)
138538 +    void* const dststart = dst;
138539 +    int moreThan1Frame = 0;
138541 +    DEBUGLOG(5, "ZSTD_decompressMultiFrame");
138542 +    assert(dict==NULL || ddict==NULL);  /* either dict or ddict set, not both */
138544 +    if (ddict) {
138545 +        dict = ZSTD_DDict_dictContent(ddict);
138546 +        dictSize = ZSTD_DDict_dictSize(ddict);
138547 +    }
138549 +    while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
138552 +        {   U32 const magicNumber = MEM_readLE32(src);
138553 +            DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
138554 +                        (unsigned)magicNumber, ZSTD_MAGICNUMBER);
138555 +            if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
138556 +                size_t const skippableSize = readSkippableFrameSize(src, srcSize);
138557 +                FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
138558 +                assert(skippableSize <= srcSize);
138560 +                src = (const BYTE *)src + skippableSize;
138561 +                srcSize -= skippableSize;
138562 +                continue;
138563 +        }   }
138565 +        if (ddict) {
138566 +            /* we were called from ZSTD_decompress_usingDDict */
138567 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), "");
138568 +        } else {
138569 +            /* this will initialize correctly with no dict if dict == NULL, so
138570 +             * use this in all cases but ddict */
138571 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), "");
138572 +        }
138573 +        ZSTD_checkContinuity(dctx, dst, dstCapacity);
138575 +        {   const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
138576 +                                                    &src, &srcSize);
138577 +            RETURN_ERROR_IF(
138578 +                (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
138579 +             && (moreThan1Frame==1),
138580 +                srcSize_wrong,
138581 +                "At least one frame successfully completed, "
138582 +                "but following bytes are garbage: "
138583 +                "it's more likely to be a srcSize error, "
138584 +                "specifying more input bytes than size of frame(s). "
138585 +                "Note: one could be unlucky, it might be a corruption error instead, "
138586 +                "happening right at the place where we expect zstd magic bytes. "
138587 +                "But this is _much_ less likely than a srcSize field error.");
138588 +            if (ZSTD_isError(res)) return res;
138589 +            assert(res <= dstCapacity);
138590 +            if (res != 0)
138591 +                dst = (BYTE*)dst + res;
138592 +            dstCapacity -= res;
138593 +        }
138594 +        moreThan1Frame = 1;
138595 +    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
138597 +    RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
138599 +    return (size_t)((BYTE*)dst - (BYTE*)dststart);
138602 +size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
138603 +                                 void* dst, size_t dstCapacity,
138604 +                           const void* src, size_t srcSize,
138605 +                           const void* dict, size_t dictSize)
138607 +    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
138611 +static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
138613 +    switch (dctx->dictUses) {
138614 +    default:
138615 +        assert(0 /* Impossible */);
138616 +        /* fall-through */
138617 +    case ZSTD_dont_use:
138618 +        ZSTD_clearDict(dctx);
138619 +        return NULL;
138620 +    case ZSTD_use_indefinitely:
138621 +        return dctx->ddict;
138622 +    case ZSTD_use_once:
138623 +        dctx->dictUses = ZSTD_dont_use;
138624 +        return dctx->ddict;
138625 +    }
138628 +size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
138630 +    return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
138634 +size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
138636 +#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
138637 +    size_t regenSize;
138638 +    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
138639 +    RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
138640 +    regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
138641 +    ZSTD_freeDCtx(dctx);
138642 +    return regenSize;
138643 +#else   /* stack mode */
138644 +    ZSTD_DCtx dctx;
138645 +    ZSTD_initDCtx_internal(&dctx);
138646 +    return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
138647 +#endif
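/* One-shot usage sketch tying the pieces above together; `dst` is assumed to
 * have been sized from ZSTD_getFrameContentSize() or ZSTD_decompressBound(). */
static size_t example_oneShot(void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize)
{
    size_t const dSize = ZSTD_decompress(dst, dstCapacity, src, srcSize);
    if (ZSTD_isError(dSize)) {
        /* e.g. dstSize_tooSmall, srcSize_wrong, corruption_detected */
        return 0;
    }
    return dSize;   /* number of regenerated bytes in dst */
}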
138651 +/*-**************************************
138652 +*   Advanced Streaming Decompression API
138653 +*   Bufferless and synchronous
138654 +****************************************/
138655 +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
138658 + * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed,
138659 + * we allow taking a partial block as the input. Currently only raw uncompressed blocks can
138660 + * be streamed.
138662 + * For blocks that can be streamed, this allows us to reduce the latency until we produce
138663 + * output, and avoid copying the input.
138665 + * @param inputSize - The total amount of input that the caller currently has.
138666 + */
138667 +static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) {
138668 +    if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock))
138669 +        return dctx->expected;
138670 +    if (dctx->bType != bt_raw)
138671 +        return dctx->expected;
138672 +    return MIN(MAX(inputSize, 1), dctx->expected);
138675 +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
138676 +    switch(dctx->stage)
138677 +    {
138678 +    default:   /* should not happen */
138679 +        assert(0);
138680 +    case ZSTDds_getFrameHeaderSize:
138681 +    case ZSTDds_decodeFrameHeader:
138682 +        return ZSTDnit_frameHeader;
138683 +    case ZSTDds_decodeBlockHeader:
138684 +        return ZSTDnit_blockHeader;
138685 +    case ZSTDds_decompressBlock:
138686 +        return ZSTDnit_block;
138687 +    case ZSTDds_decompressLastBlock:
138688 +        return ZSTDnit_lastBlock;
138689 +    case ZSTDds_checkChecksum:
138690 +        return ZSTDnit_checksum;
138691 +    case ZSTDds_decodeSkippableHeader:
138692 +    case ZSTDds_skipFrame:
138693 +        return ZSTDnit_skippableFrame;
138694 +    }
138697 +static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
138699 +/** ZSTD_decompressContinue() :
138700 + *  srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
138701 + *  @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
138702 + *            or an error code, which can be tested using ZSTD_isError() */
138703 +size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
138705 +    DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
138706 +    /* Sanity check */
138707 +    RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed");
138708 +    ZSTD_checkContinuity(dctx, dst, dstCapacity);
138710 +    dctx->processedCSize += srcSize;
138712 +    switch (dctx->stage)
138713 +    {
138714 +    case ZSTDds_getFrameHeaderSize :
138715 +        assert(src != NULL);
138716 +        if (dctx->format == ZSTD_f_zstd1) {  /* allows header */
138717 +            assert(srcSize >= ZSTD_FRAMEIDSIZE);  /* to read skippable magic number */
138718 +            if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */
138719 +                ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
138720 +                dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize;  /* remaining to load to get full skippable frame header */
138721 +                dctx->stage = ZSTDds_decodeSkippableHeader;
138722 +                return 0;
138723 +        }   }
138724 +        dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
138725 +        if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
138726 +        ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
138727 +        dctx->expected = dctx->headerSize - srcSize;
138728 +        dctx->stage = ZSTDds_decodeFrameHeader;
138729 +        return 0;
138731 +    case ZSTDds_decodeFrameHeader:
138732 +        assert(src != NULL);
138733 +        ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
138734 +        FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
138735 +        dctx->expected = ZSTD_blockHeaderSize;
138736 +        dctx->stage = ZSTDds_decodeBlockHeader;
138737 +        return 0;
138739 +    case ZSTDds_decodeBlockHeader:
138740 +        {   blockProperties_t bp;
138741 +            size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
138742 +            if (ZSTD_isError(cBlockSize)) return cBlockSize;
138743 +            RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum");
138744 +            dctx->expected = cBlockSize;
138745 +            dctx->bType = bp.blockType;
138746 +            dctx->rleSize = bp.origSize;
138747 +            if (cBlockSize) {
138748 +                dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
138749 +                return 0;
138750 +            }
138751 +            /* empty block */
138752 +            if (bp.lastBlock) {
138753 +                if (dctx->fParams.checksumFlag) {
138754 +                    dctx->expected = 4;
138755 +                    dctx->stage = ZSTDds_checkChecksum;
138756 +                } else {
138757 +                    dctx->expected = 0; /* end of frame */
138758 +                    dctx->stage = ZSTDds_getFrameHeaderSize;
138759 +                }
138760 +            } else {
138761 +                dctx->expected = ZSTD_blockHeaderSize;  /* jump to next header */
138762 +                dctx->stage = ZSTDds_decodeBlockHeader;
138763 +            }
138764 +            return 0;
138765 +        }
138767 +    case ZSTDds_decompressLastBlock:
138768 +    case ZSTDds_decompressBlock:
138769 +        DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
138770 +        {   size_t rSize;
138771 +            switch(dctx->bType)
138772 +            {
138773 +            case bt_compressed:
138774 +                DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
138775 +                rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
138776 +                dctx->expected = 0;  /* Streaming not supported */
138777 +                break;
138778 +            case bt_raw :
138779 +                assert(srcSize <= dctx->expected);
138780 +                rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
138781 +                FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed");
138782 +                assert(rSize == srcSize);
138783 +                dctx->expected -= rSize;
138784 +                break;
138785 +            case bt_rle :
138786 +                rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
138787 +                dctx->expected = 0;  /* Streaming not supported */
138788 +                break;
138789 +            case bt_reserved :   /* should never happen */
138790 +            default:
138791 +                RETURN_ERROR(corruption_detected, "invalid block type");
138792 +            }
138793 +            FORWARD_IF_ERROR(rSize, "");
138794 +            RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
138795 +            DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
138796 +            dctx->decodedSize += rSize;
138797 +            if (dctx->validateChecksum) xxh64_update(&dctx->xxhState, dst, rSize);
138798 +            dctx->previousDstEnd = (char*)dst + rSize;
138800 +            /* Stay on the same stage until we are finished streaming the block. */
138801 +            if (dctx->expected > 0) {
138802 +                return rSize;
138803 +            }
138805 +            if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
138806 +                DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
138807 +                RETURN_ERROR_IF(
138808 +                    dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
138809 +                 && dctx->decodedSize != dctx->fParams.frameContentSize,
138810 +                    corruption_detected, "");
138811 +                if (dctx->fParams.checksumFlag) {  /* another round for frame checksum */
138812 +                    dctx->expected = 4;
138813 +                    dctx->stage = ZSTDds_checkChecksum;
138814 +                } else {
138815 +                    ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
138816 +                    dctx->expected = 0;   /* ends here */
138817 +                    dctx->stage = ZSTDds_getFrameHeaderSize;
138818 +                }
138819 +            } else {
138820 +                dctx->stage = ZSTDds_decodeBlockHeader;
138821 +                dctx->expected = ZSTD_blockHeaderSize;
138822 +            }
138823 +            return rSize;
138824 +        }
138826 +    case ZSTDds_checkChecksum:
138827 +        assert(srcSize == 4);  /* guaranteed by dctx->expected */
138828 +        {
138829 +            if (dctx->validateChecksum) {
138830 +                U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
138831 +                U32 const check32 = MEM_readLE32(src);
138832 +                DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
138833 +                RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
138834 +            }
138835 +            ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
138836 +            dctx->expected = 0;
138837 +            dctx->stage = ZSTDds_getFrameHeaderSize;
138838 +            return 0;
138839 +        }
138841 +    case ZSTDds_decodeSkippableHeader:
138842 +        assert(src != NULL);
138843 +        assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
138844 +        ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize);   /* complete skippable header */
138845 +        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE);   /* note : dctx->expected can grow seriously large, beyond local buffer size */
138846 +        dctx->stage = ZSTDds_skipFrame;
138847 +        return 0;
138849 +    case ZSTDds_skipFrame:
138850 +        dctx->expected = 0;
138851 +        dctx->stage = ZSTDds_getFrameHeaderSize;
138852 +        return 0;
138854 +    default:
138855 +        assert(0);   /* impossible */
138856 +        RETURN_ERROR(GENERIC, "impossible to reach");   /* some compilers require default to do something */
138857 +    }
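/* Bufferless loop sketch driven by the state machine above (assumes `dctx`
 * was primed with ZSTD_decompressBegin() and that input/output are flat,
 * fully resident buffers; error handling is abbreviated). */
static size_t example_bufferless(ZSTD_DCtx* dctx,
                                 char* op, char* const oend,
                                 const char* ip)
{
    char* const ostart = op;
    size_t toRead = ZSTD_nextSrcSizeToDecompress(dctx);
    while (toRead != 0) {
        size_t const produced =
            ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), ip, toRead);
        if (ZSTD_isError(produced)) return produced;
        ip += toRead;       /* exactly toRead bytes were consumed */
        op += produced;     /* 0 for headers, >0 for block payloads */
        toRead = ZSTD_nextSrcSizeToDecompress(dctx);
    }
    return (size_t)(op - ostart);
}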
138861 +static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
138863 +    dctx->dictEnd = dctx->previousDstEnd;
138864 +    dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
138865 +    dctx->prefixStart = dict;
138866 +    dctx->previousDstEnd = (const char*)dict + dictSize;
138867 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
138868 +    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
138869 +    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
138870 +#endif
138871 +    return 0;
138874 +/*! ZSTD_loadDEntropy() :
138875 + *  dict : must point at beginning of a valid zstd dictionary.
138876 + * @return : size of entropy tables read */
138877 +size_t
138878 +ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
138879 +                  const void* const dict, size_t const dictSize)
138881 +    const BYTE* dictPtr = (const BYTE*)dict;
138882 +    const BYTE* const dictEnd = dictPtr + dictSize;
138884 +    RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small");
138885 +    assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
138886 +    dictPtr += 8;   /* skip header = magic + dictID */
138888 +    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
138889 +    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
138890 +    ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
138891 +    {   void* const workspace = &entropy->LLTable;   /* use fse tables as temporary workspace; implies fse tables are grouped together */
138892 +        size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
138893 +#ifdef HUF_FORCE_DECOMPRESS_X1
138894 +        /* in minimal huffman, we always use X1 variants */
138895 +        size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
138896 +                                                dictPtr, dictEnd - dictPtr,
138897 +                                                workspace, workspaceSize);
138898 +#else
138899 +        size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
138900 +                                                dictPtr, (size_t)(dictEnd - dictPtr),
138901 +                                                workspace, workspaceSize);
138902 +#endif
138903 +        RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
138904 +        dictPtr += hSize;
138905 +    }
138907 +    {   short offcodeNCount[MaxOff+1];
138908 +        unsigned offcodeMaxValue = MaxOff, offcodeLog;
138909 +        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
138910 +        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
138911 +        RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
138912 +        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
138913 +        ZSTD_buildFSETable( entropy->OFTable,
138914 +                            offcodeNCount, offcodeMaxValue,
138915 +                            OF_base, OF_bits,
138916 +                            offcodeLog,
138917 +                            entropy->workspace, sizeof(entropy->workspace),
138918 +                            /* bmi2 */0);
138919 +        dictPtr += offcodeHeaderSize;
138920 +    }
138922 +    {   short matchlengthNCount[MaxML+1];
138923 +        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
138924 +        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
138925 +        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
138926 +        RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
138927 +        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
138928 +        ZSTD_buildFSETable( entropy->MLTable,
138929 +                            matchlengthNCount, matchlengthMaxValue,
138930 +                            ML_base, ML_bits,
138931 +                            matchlengthLog,
138932 +                            entropy->workspace, sizeof(entropy->workspace),
138933 +                            /* bmi2 */ 0);
138934 +        dictPtr += matchlengthHeaderSize;
138935 +    }
138937 +    {   short litlengthNCount[MaxLL+1];
138938 +        unsigned litlengthMaxValue = MaxLL, litlengthLog;
138939 +        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
138940 +        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
138941 +        RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, "");
138942 +        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
138943 +        ZSTD_buildFSETable( entropy->LLTable,
138944 +                            litlengthNCount, litlengthMaxValue,
138945 +                            LL_base, LL_bits,
138946 +                            litlengthLog,
138947 +                            entropy->workspace, sizeof(entropy->workspace),
138948 +                            /* bmi2 */ 0);
138949 +        dictPtr += litlengthHeaderSize;
138950 +    }
138952 +    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
138953 +    {   int i;
138954 +        size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
138955 +        for (i=0; i<3; i++) {
138956 +            U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
138957 +            RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
138958 +                            dictionary_corrupted, "");
138959 +            entropy->rep[i] = rep;
138960 +    }   }
138962 +    return (size_t)(dictPtr - (const BYTE*)dict);
138965 +static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
138967 +    if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
138968 +    {   U32 const magic = MEM_readLE32(dict);
138969 +        if (magic != ZSTD_MAGIC_DICTIONARY) {
138970 +            return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
138971 +    }   }
138972 +    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
138974 +    /* load entropy tables */
138975 +    {   size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
138976 +        RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, "");
138977 +        dict = (const char*)dict + eSize;
138978 +        dictSize -= eSize;
138979 +    }
138980 +    dctx->litEntropy = dctx->fseEntropy = 1;
138982 +    /* reference dictionary content */
138983 +    return ZSTD_refDictContent(dctx, dict, dictSize);
138986 +size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
138988 +    assert(dctx != NULL);
138989 +    dctx->expected = ZSTD_startingInputLength(dctx->format);  /* dctx->format must be properly set */
138990 +    dctx->stage = ZSTDds_getFrameHeaderSize;
138991 +    dctx->processedCSize = 0;
138992 +    dctx->decodedSize = 0;
138993 +    dctx->previousDstEnd = NULL;
138994 +    dctx->prefixStart = NULL;
138995 +    dctx->virtualStart = NULL;
138996 +    dctx->dictEnd = NULL;
138997 +    dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
138998 +    dctx->litEntropy = dctx->fseEntropy = 0;
138999 +    dctx->dictID = 0;
139000 +    dctx->bType = bt_reserved;
139001 +    ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
139002 +    ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue));  /* initial repcodes */
139003 +    dctx->LLTptr = dctx->entropy.LLTable;
139004 +    dctx->MLTptr = dctx->entropy.MLTable;
139005 +    dctx->OFTptr = dctx->entropy.OFTable;
139006 +    dctx->HUFptr = dctx->entropy.hufTable;
139007 +    return 0;
139010 +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
139012 +    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
139013 +    if (dict && dictSize)
139014 +        RETURN_ERROR_IF(
139015 +            ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
139016 +            dictionary_corrupted, "");
139017 +    return 0;
139021 +/* ======   ZSTD_DDict   ====== */
139023 +size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
139025 +    DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
139026 +    assert(dctx != NULL);
139027 +    if (ddict) {
139028 +        const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
139029 +        size_t const dictSize = ZSTD_DDict_dictSize(ddict);
139030 +        const void* const dictEnd = dictStart + dictSize;
139031 +        dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
139032 +        DEBUGLOG(4, "DDict is %s",
139033 +                    dctx->ddictIsCold ? "~cold~" : "hot!");
139034 +    }
139035 +    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
139036 +    if (ddict) {   /* NULL ddict is equivalent to no dictionary */
139037 +        ZSTD_copyDDictParameters(dctx, ddict);
139038 +    }
139039 +    return 0;
139042 +/*! ZSTD_getDictID_fromDict() :
139043 + *  Provides the dictID stored within dictionary.
139044 + *  if @return == 0, the dictionary is not conformant with Zstandard specification.
139045 + *  It can still be loaded, but as a content-only dictionary. */
139046 +unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
139048 +    if (dictSize < 8) return 0;
139049 +    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
139050 +    return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
139053 +/*! ZSTD_getDictID_fromFrame() :
139054 + *  Provides the dictID required to decompress the frame stored within `src`.
139055 + *  If @return == 0, the dictID could not be decoded.
139056 + *  This could be for one of the following reasons :
139057 + *  - The frame does not require a dictionary (most common case).
139058 + *  - The frame was built with dictID intentionally removed.
139059 + *    The needed dictionary is then hidden information.
139060 + *    Note : this use case also happens when using a non-conformant dictionary.
139061 + *  - `srcSize` is too small, and as a result, frame header could not be decoded.
139062 + *    Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
139063 + *  - This is not a Zstandard frame.
139064 + *  When identifying the exact failure cause, it's possible to use
139065 + *  ZSTD_getFrameHeader(), which will provide a more precise error code. */
139066 +unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
139068 +    ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
139069 +    size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
139070 +    if (ZSTD_isError(hError)) return 0;
139071 +    return zfp.dictID;
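/* Sketch of picking a dictionary by ID using the two getters above; the
 * `dicts`/`dictSizes` arrays are caller-managed assumptions. A return of 0
 * from either getter means "no usable ID", per the notes above. */
static int example_selectDict(const void* src, size_t srcSize,
                              const void* const* dicts, const size_t* dictSizes,
                              size_t nbDicts)
{
    unsigned const wanted = ZSTD_getDictID_fromFrame(src, srcSize);
    size_t i;
    if (wanted == 0) return -1;   /* no ID : try content-only, or no dict */
    for (i = 0; i < nbDicts; i++) {
        if (ZSTD_getDictID_fromDict(dicts[i], dictSizes[i]) == wanted)
            return (int)i;
    }
    return -1;
}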
139075 +/*! ZSTD_decompress_usingDDict() :
139076 +*   Decompression using a pre-digested Dictionary
139077 +*   Use dictionary without significant overhead. */
139078 +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
139079 +                                  void* dst, size_t dstCapacity,
139080 +                            const void* src, size_t srcSize,
139081 +                            const ZSTD_DDict* ddict)
139083 +    /* pass content and size in case legacy frames are encountered */
139084 +    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
139085 +                                     NULL, 0,
139086 +                                     ddict);
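/* Reuse sketch : digest the dictionary once, then amortize it over many
 * frames. ZSTD_createDDict()/ZSTD_freeDDict() live in the DDict module that
 * this file only references, so their availability is assumed here. */
static void example_ddictReuse(ZSTD_DCtx* dctx,
                               const void* dictBuf, size_t dictSize,
                               void* dst, size_t dstCapacity,
                               const void* const* srcs, const size_t* srcSizes,
                               size_t nbFrames)
{
    ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
    size_t i;
    if (ddict == NULL) return;
    for (i = 0; i < nbFrames; i++) {
        size_t const r = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
                                                    srcs[i], srcSizes[i], ddict);
        if (ZSTD_isError(r)) break;
    }
    ZSTD_freeDDict(ddict);
}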
139090 +/*=====================================
139091 +*   Streaming decompression
139092 +*====================================*/
139094 +ZSTD_DStream* ZSTD_createDStream(void)
139096 +    DEBUGLOG(3, "ZSTD_createDStream");
139097 +    return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
139100 +ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
139102 +    return ZSTD_initStaticDCtx(workspace, workspaceSize);
139105 +ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
139107 +    return ZSTD_createDCtx_advanced(customMem);
139110 +size_t ZSTD_freeDStream(ZSTD_DStream* zds)
139112 +    return ZSTD_freeDCtx(zds);
139116 +/* ***  Initialization  *** */
139118 +size_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
139119 +size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
139121 +size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
139122 +                                   const void* dict, size_t dictSize,
139123 +                                         ZSTD_dictLoadMethod_e dictLoadMethod,
139124 +                                         ZSTD_dictContentType_e dictContentType)
139126 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
139127 +    ZSTD_clearDict(dctx);
139128 +    if (dict && dictSize != 0) {
139129 +        dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
139130 +        RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
139131 +        dctx->ddict = dctx->ddictLocal;
139132 +        dctx->dictUses = ZSTD_use_indefinitely;
139133 +    }
139134 +    return 0;
139137 +size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
139139 +    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
139142 +size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
139144 +    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
139147 +size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
139149 +    FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), "");
139150 +    dctx->dictUses = ZSTD_use_once;
139151 +    return 0;
139154 +size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
139156 +    return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
139160 +/* ZSTD_initDStream_usingDict() :
139161 + * return : expected size, aka ZSTD_startingInputLength().
139162 + * this function cannot fail */
139163 +size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
139165 +    DEBUGLOG(4, "ZSTD_initDStream_usingDict");
139166 +    FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , "");
139167 +    FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , "");
139168 +    return ZSTD_startingInputLength(zds->format);
139171 +/* note : this variant can't fail */
139172 +size_t ZSTD_initDStream(ZSTD_DStream* zds)
139174 +    DEBUGLOG(4, "ZSTD_initDStream");
139175 +    return ZSTD_initDStream_usingDDict(zds, NULL);
139178 +/* ZSTD_initDStream_usingDDict() :
139179 + * ddict will just be referenced, and must outlive the decompression session
139180 + * this function cannot fail */
139181 +size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
139183 +    FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
139184 +    FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
139185 +    return ZSTD_startingInputLength(dctx->format);
139188 +/* ZSTD_resetDStream() :
139189 + * return : expected size, aka ZSTD_startingInputLength().
139190 + * this function cannot fail */
139191 +size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
139193 +    FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
139194 +    return ZSTD_startingInputLength(dctx->format);
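/* Streaming loop sketch for the DStream API above; `readFn`/`writeFn` are
 * hypothetical caller-supplied I/O callbacks, and the buffers are assumed to
 * be sized with ZSTD_DStreamInSize()/ZSTD_DStreamOutSize(). */
static void example_streaming(ZSTD_DStream* zds,
                              void* inBuf, size_t inCap,
                              void* outBuf, size_t outCap,
                              size_t (*readFn)(void*, size_t),
                              void (*writeFn)(const void*, size_t))
{
    ZSTD_initDStream(zds);   /* cannot fail, per the note above */
    for (;;) {
        ZSTD_inBuffer in = { inBuf, readFn(inBuf, inCap), 0 };
        if (in.size == 0) return;              /* input exhausted */
        while (in.pos < in.size) {
            ZSTD_outBuffer out = { outBuf, outCap, 0 };
            size_t const hint = ZSTD_decompressStream(zds, &out, &in);
            if (ZSTD_isError(hint)) return;    /* corruption, window too large, ... */
            writeFn(outBuf, out.pos);          /* flush whatever was produced */
        }
    }
}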
139198 +size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
139200 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
139201 +    ZSTD_clearDict(dctx);
139202 +    if (ddict) {
139203 +        dctx->ddict = ddict;
139204 +        dctx->dictUses = ZSTD_use_indefinitely;
139205 +        if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
139206 +            if (dctx->ddictSet == NULL) {
139207 +                dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
139208 +                if (!dctx->ddictSet) {
139209 +                    RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
139210 +                }
139211 +            }
139212 +            assert(!dctx->staticSize);  /* Impossible: ddictSet cannot have been allocated if static dctx */
139213 +            FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
139214 +        }
139215 +    }
139216 +    return 0;
139219 +/* ZSTD_DCtx_setMaxWindowSize() :
139220 + * note : no direct equivalence in ZSTD_DCtx_setParameter,
139221 + * since this version sets windowSize, and the other sets windowLog */
139222 +size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
139224 +    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
139225 +    size_t const min = (size_t)1 << bounds.lowerBound;
139226 +    size_t const max = (size_t)1 << bounds.upperBound;
139227 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
139228 +    RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, "");
139229 +    RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, "");
139230 +    dctx->maxWindowSize = maxWindowSize;
139231 +    return 0;
139234 +size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
139236 +    return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format);
139239 +ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
139241 +    ZSTD_bounds bounds = { 0, 0, 0 };
139242 +    switch(dParam) {
139243 +        case ZSTD_d_windowLogMax:
139244 +            bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
139245 +            bounds.upperBound = ZSTD_WINDOWLOG_MAX;
139246 +            return bounds;
139247 +        case ZSTD_d_format:
139248 +            bounds.lowerBound = (int)ZSTD_f_zstd1;
139249 +            bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
139250 +            ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
139251 +            return bounds;
139252 +        case ZSTD_d_stableOutBuffer:
139253 +            bounds.lowerBound = (int)ZSTD_bm_buffered;
139254 +            bounds.upperBound = (int)ZSTD_bm_stable;
139255 +            return bounds;
139256 +        case ZSTD_d_forceIgnoreChecksum:
139257 +            bounds.lowerBound = (int)ZSTD_d_validateChecksum;
139258 +            bounds.upperBound = (int)ZSTD_d_ignoreChecksum;
139259 +            return bounds;
139260 +        case ZSTD_d_refMultipleDDicts:
139261 +            bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
139262 +            bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
139263 +            return bounds;
139264 +        default:;
139265 +    }
139266 +    bounds.error = ERROR(parameter_unsupported);
139267 +    return bounds;
139270 +/* ZSTD_dParam_withinBounds:
139271 + * @return 1 if value is within dParam bounds,
139272 + * 0 otherwise */
139273 +static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
139275 +    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
139276 +    if (ZSTD_isError(bounds.error)) return 0;
139277 +    if (value < bounds.lowerBound) return 0;
139278 +    if (value > bounds.upperBound) return 0;
139279 +    return 1;
139282 +#define CHECK_DBOUNDS(p,v) {                \
139283 +    RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \
139286 +size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value)
139288 +    switch (param) {
139289 +        case ZSTD_d_windowLogMax:
139290 +            *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize);
139291 +            return 0;
139292 +        case ZSTD_d_format:
139293 +            *value = (int)dctx->format;
139294 +            return 0;
139295 +        case ZSTD_d_stableOutBuffer:
139296 +            *value = (int)dctx->outBufferMode;
139297 +            return 0;
139298 +        case ZSTD_d_forceIgnoreChecksum:
139299 +            *value = (int)dctx->forceIgnoreChecksum;
139300 +            return 0;
139301 +        case ZSTD_d_refMultipleDDicts:
139302 +            *value = (int)dctx->refMultipleDDicts;
139303 +            return 0;
139304 +        default:;
139305 +    }
139306 +    RETURN_ERROR(parameter_unsupported, "");
139309 +size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
139311 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
139312 +    switch(dParam) {
139313 +        case ZSTD_d_windowLogMax:
139314 +            if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
139315 +            CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
139316 +            dctx->maxWindowSize = ((size_t)1) << value;
139317 +            return 0;
139318 +        case ZSTD_d_format:
139319 +            CHECK_DBOUNDS(ZSTD_d_format, value);
139320 +            dctx->format = (ZSTD_format_e)value;
139321 +            return 0;
139322 +        case ZSTD_d_stableOutBuffer:
139323 +            CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value);
139324 +            dctx->outBufferMode = (ZSTD_bufferMode_e)value;
139325 +            return 0;
139326 +        case ZSTD_d_forceIgnoreChecksum:
139327 +            CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value);
139328 +            dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value;
139329 +            return 0;
139330 +        case ZSTD_d_refMultipleDDicts:
139331 +            CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value);
139332 +            if (dctx->staticSize != 0) {
139333 +                RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
139334 +            }
139335 +            dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
139336 +            return 0;
139337 +        default:;
139338 +    }
139339 +    RETURN_ERROR(parameter_unsupported, "");
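/* Parameter-setting sketch (values are illustrative, not recommendations).
 * Parameters are only accepted in zdss_init, i.e. before the first
 * ZSTD_decompressStream() call or right after a reset. */
static void example_setParams(ZSTD_DCtx* dctx)
{
    ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters);
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);   /* cap window at 128 MB */
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_forceIgnoreChecksum,
                           (int)ZSTD_d_ignoreChecksum);
}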
139342 +size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
139344 +    if ( (reset == ZSTD_reset_session_only)
139345 +      || (reset == ZSTD_reset_session_and_parameters) ) {
139346 +        dctx->streamStage = zdss_init;
139347 +        dctx->noForwardProgress = 0;
139348 +    }
139349 +    if ( (reset == ZSTD_reset_parameters)
139350 +      || (reset == ZSTD_reset_session_and_parameters) ) {
139351 +        RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
139352 +        ZSTD_clearDict(dctx);
139353 +        ZSTD_DCtx_resetParameters(dctx);
139354 +    }
139355 +    return 0;
139359 +size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
139361 +    return ZSTD_sizeof_DCtx(dctx);
139364 +size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
139366 +    size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
139367 +    unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
139368 +    unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
139369 +    size_t const minRBSize = (size_t) neededSize;
139370 +    RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
139371 +                    frameParameter_windowTooLarge, "");
139372 +    return minRBSize;
139375 +size_t ZSTD_estimateDStreamSize(size_t windowSize)
139377 +    size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
139378 +    size_t const inBuffSize = blockSize;  /* no block can be larger */
139379 +    size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
139380 +    return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
139383 +size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
139385 +    U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;   /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
139386 +    ZSTD_frameHeader zfh;
139387 +    size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
139388 +    if (ZSTD_isError(err)) return err;
139389 +    RETURN_ERROR_IF(err>0, srcSize_wrong, "");
139390 +    RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
139391 +                    frameParameter_windowTooLarge, "");
139392 +    return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
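/* Static-allocation sketch combining the estimator above with the placement
 * constructor defined earlier in this file; `workspace` is a caller-owned
 * assumption and must be suitably aligned. */
static ZSTD_DStream* example_staticDStream(const void* src, size_t srcSize,
                                           void* workspace, size_t workspaceSize)
{
    size_t const need = ZSTD_estimateDStreamSize_fromFrame(src, srcSize);
    if (ZSTD_isError(need) || need > workspaceSize) return NULL;
    /* returns NULL if the workspace is too small or misaligned */
    return ZSTD_initStaticDStream(workspace, workspaceSize);
}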
139396 +/* *****   Decompression   ***** */
139398 +static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
139400 +    return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR;
139403 +static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
139405 +    if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize))
139406 +        zds->oversizedDuration++;
139407 +    else
139408 +        zds->oversizedDuration = 0;
139411 +static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds)
139413 +    return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION;
139416 +/* Checks that the output buffer hasn't changed if ZSTD_bm_stable is used. */
139417 +static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output)
139419 +    ZSTD_outBuffer const expect = zds->expectedOutBuffer;
139420 +    /* No requirement when ZSTD_bm_stable is not enabled. */
139421 +    if (zds->outBufferMode != ZSTD_bm_stable)
139422 +        return 0;
139423 +    /* Any buffer is allowed in zdss_init; it must then remain the same for
139424 +     * every other call until the context is reset.
139425 +     */
139426 +    if (zds->streamStage == zdss_init)
139427 +        return 0;
139428 +    /* The buffer must match our expectation exactly. */
139429 +    if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size)
139430 +        return 0;
139431 +    RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!");
139434 +/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream()
139435 + * and updates the stage and the output buffer state. This call is extracted so it can be
139436 + * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode.
139437 + * NOTE: You must break after calling this function since the streamStage is modified.
139438 + */
139439 +static size_t ZSTD_decompressContinueStream(
139440 +            ZSTD_DStream* zds, char** op, char* oend,
139441 +            void const* src, size_t srcSize) {
139442 +    int const isSkipFrame = ZSTD_isSkipFrame(zds);
139443 +    if (zds->outBufferMode == ZSTD_bm_buffered) {
139444 +        size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart;
139445 +        size_t const decodedSize = ZSTD_decompressContinue(zds,
139446 +                zds->outBuff + zds->outStart, dstSize, src, srcSize);
139447 +        FORWARD_IF_ERROR(decodedSize, "");
139448 +        if (!decodedSize && !isSkipFrame) {
139449 +            zds->streamStage = zdss_read;
139450 +        } else {
139451 +            zds->outEnd = zds->outStart + decodedSize;
139452 +            zds->streamStage = zdss_flush;
139453 +        }
139454 +    } else {
139455 +        /* Write directly into the output buffer */
139456 +        size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op);
139457 +        size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize);
139458 +        FORWARD_IF_ERROR(decodedSize, "");
139459 +        *op += decodedSize;
139460 +        /* Flushing is not needed. */
139461 +        zds->streamStage = zdss_read;
139462 +        assert(*op <= oend);
139463 +        assert(zds->outBufferMode == ZSTD_bm_stable);
139464 +    }
139465 +    return 0;
139468 +size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
139470 +    const char* const src = (const char*)input->src;
139471 +    const char* const istart = input->pos != 0 ? src + input->pos : src;
139472 +    const char* const iend = input->size != 0 ? src + input->size : src;
139473 +    const char* ip = istart;
139474 +    char* const dst = (char*)output->dst;
139475 +    char* const ostart = output->pos != 0 ? dst + output->pos : dst;
139476 +    char* const oend = output->size != 0 ? dst + output->size : dst;
139477 +    char* op = ostart;
139478 +    U32 someMoreWork = 1;
139480 +    DEBUGLOG(5, "ZSTD_decompressStream");
139481 +    RETURN_ERROR_IF(
139482 +        input->pos > input->size,
139483 +        srcSize_wrong,
139484 +        "forbidden. in: pos: %u   vs size: %u",
139485 +        (U32)input->pos, (U32)input->size);
139486 +    RETURN_ERROR_IF(
139487 +        output->pos > output->size,
139488 +        dstSize_tooSmall,
139489 +        "forbidden. out: pos: %u   vs size: %u",
139490 +        (U32)output->pos, (U32)output->size);
139491 +    DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
139492 +    FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), "");
139494 +    while (someMoreWork) {
139495 +        switch(zds->streamStage)
139496 +        {
139497 +        case zdss_init :
139498 +            DEBUGLOG(5, "stage zdss_init => transparent reset ");
139499 +            zds->streamStage = zdss_loadHeader;
139500 +            zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
139501 +            zds->legacyVersion = 0;
139502 +            zds->hostageByte = 0;
139503 +            zds->expectedOutBuffer = *output;
139504 +            /* fall-through */
139506 +        case zdss_loadHeader :
139507 +            DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
139508 +            {   size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
139509 +                if (zds->refMultipleDDicts && zds->ddictSet) {
139510 +                    ZSTD_DCtx_selectFrameDDict(zds);
139511 +                }
139512 +                DEBUGLOG(5, "header size : %u", (U32)hSize);
139513 +                if (ZSTD_isError(hSize)) {
139514 +                    return hSize;   /* error */
139515 +                }
139516 +                if (hSize != 0) {   /* need more input */
139517 +                    size_t const toLoad = hSize - zds->lhSize;   /* if hSize!=0, hSize > zds->lhSize */
139518 +                    size_t const remainingInput = (size_t)(iend-ip);
139519 +                    assert(iend >= ip);
139520 +                    if (toLoad > remainingInput) {   /* not enough input to load full header */
139521 +                        if (remainingInput > 0) {
139522 +                            ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
139523 +                            zds->lhSize += remainingInput;
139524 +                        }
139525 +                        input->pos = input->size;
139526 +                        return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
139527 +                    }
139528 +                    assert(ip != NULL);
139529 +                    ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
139530 +                    break;
139531 +            }   }
139533 +            /* check for single-pass mode opportunity */
139534 +            if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
139535 +                && zds->fParams.frameType != ZSTD_skippableFrame
139536 +                && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
139537 +                size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
139538 +                if (cSize <= (size_t)(iend-istart)) {
139539 +                    /* shortcut : using single-pass mode */
139540 +                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
139541 +                    if (ZSTD_isError(decompressedSize)) return decompressedSize;
139542 +                    DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()");
139543 +                    ip = istart + cSize;
139544 +                    op += decompressedSize;
139545 +                    zds->expected = 0;
139546 +                    zds->streamStage = zdss_init;
139547 +                    someMoreWork = 0;
139548 +                    break;
139549 +            }   }
139551 +            /* Check that the output buffer is large enough for ZSTD_bm_stable. */
139552 +            if (zds->outBufferMode == ZSTD_bm_stable
139553 +                && zds->fParams.frameType != ZSTD_skippableFrame
139554 +                && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
139555 +                && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) {
139556 +                RETURN_ERROR(dstSize_tooSmall, "ZSTD_bm_stable passed but ZSTD_outBuffer is too small");
139557 +            }
139559 +            /* Consume header (see ZSTDds_decodeFrameHeader) */
139560 +            DEBUGLOG(4, "Consume header");
139561 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
139563 +            if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
139564 +                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
139565 +                zds->stage = ZSTDds_skipFrame;
139566 +            } else {
139567 +                FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), "");
139568 +                zds->expected = ZSTD_blockHeaderSize;
139569 +                zds->stage = ZSTDds_decodeBlockHeader;
139570 +            }
139572 +            /* control buffer memory usage */
139573 +            DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
139574 +                        (U32)(zds->fParams.windowSize >>10),
139575 +                        (U32)(zds->maxWindowSize >> 10) );
139576 +            zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
139577 +            RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
139578 +                            frameParameter_windowTooLarge, "");
139580 +            /* Adapt buffer sizes to frame header instructions */
139581 +            {   size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
139582 +                size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
139583 +                        ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
139584 +                        : 0;
139586 +                ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
139588 +                {   int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize);
139589 +                    int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds);
139591 +                    if (tooSmall || tooLarge) {
139592 +                        size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
139593 +                        DEBUGLOG(4, "inBuff  : from %u to %u",
139594 +                                    (U32)zds->inBuffSize, (U32)neededInBuffSize);
139595 +                        DEBUGLOG(4, "outBuff : from %u to %u",
139596 +                                    (U32)zds->outBuffSize, (U32)neededOutBuffSize);
139597 +                        if (zds->staticSize) {  /* static DCtx */
139598 +                            DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
139599 +                            assert(zds->staticSize >= sizeof(ZSTD_DCtx));  /* controlled at init */
139600 +                            RETURN_ERROR_IF(
139601 +                                bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
139602 +                                memory_allocation, "");
139603 +                        } else {
139604 +                            ZSTD_customFree(zds->inBuff, zds->customMem);
139605 +                            zds->inBuffSize = 0;
139606 +                            zds->outBuffSize = 0;
139607 +                            zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem);
139608 +                            RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, "");
139609 +                        }
139610 +                        zds->inBuffSize = neededInBuffSize;
139611 +                        zds->outBuff = zds->inBuff + zds->inBuffSize;
139612 +                        zds->outBuffSize = neededOutBuffSize;
139613 +            }   }   }
139614 +            zds->streamStage = zdss_read;
139615 +            /* fall-through */
139617 +        case zdss_read:
139618 +            DEBUGLOG(5, "stage zdss_read");
139619 +            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));
139620 +                DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
139621 +                if (neededInSize==0) {  /* end of frame */
139622 +                    zds->streamStage = zdss_init;
139623 +                    someMoreWork = 0;
139624 +                    break;
139625 +                }
139626 +                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
139627 +                    FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
139628 +                    ip += neededInSize;
139629 +                    /* Function modifies the stage so we must break */
139630 +                    break;
139631 +            }   }
139632 +            if (ip==iend) { someMoreWork = 0; break; }   /* no more input */
139633 +            zds->streamStage = zdss_load;
139634 +            /* fall-through */
139636 +        case zdss_load:
139637 +            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
139638 +                size_t const toLoad = neededInSize - zds->inPos;
139639 +                int const isSkipFrame = ZSTD_isSkipFrame(zds);
139640 +                size_t loadedSize;
139641 +                /* At this point we shouldn't be decompressing a block that we can stream. */
139642 +                assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
139643 +                if (isSkipFrame) {
139644 +                    loadedSize = MIN(toLoad, (size_t)(iend-ip));
139645 +                } else {
139646 +                    RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
139647 +                                    corruption_detected,
139648 +                                    "should never happen");
139649 +                    loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
139650 +                }
139651 +                ip += loadedSize;
139652 +                zds->inPos += loadedSize;
139653 +                if (loadedSize < toLoad) { someMoreWork = 0; break; }   /* not enough input, wait for more */
139655 +                /* decode loaded input */
139656 +                zds->inPos = 0;   /* input is consumed */
139657 +                FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), "");
139658 +                /* Function modifies the stage so we must break */
139659 +                break;
139660 +            }
139661 +        case zdss_flush:
139662 +            {   size_t const toFlushSize = zds->outEnd - zds->outStart;
139663 +                size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
139664 +                op += flushedSize;
139665 +                zds->outStart += flushedSize;
139666 +                if (flushedSize == toFlushSize) {  /* flush completed */
139667 +                    zds->streamStage = zdss_read;
139668 +                    if ( (zds->outBuffSize < zds->fParams.frameContentSize)
139669 +                      && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
139670 +                        DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
139671 +                                (int)(zds->outBuffSize - zds->outStart),
139672 +                                (U32)zds->fParams.blockSizeMax);
139673 +                        zds->outStart = zds->outEnd = 0;
139674 +                    }
139675 +                    break;
139676 +            }   }
139677 +            /* cannot complete flush */
139678 +            someMoreWork = 0;
139679 +            break;
139681 +        default:
139682 +            assert(0);    /* impossible */
139683 +            RETURN_ERROR(GENERIC, "impossible to reach");   /* some compilers require the default case to do something */
139684 +    }   }
139686 +    /* result */
139687 +    input->pos = (size_t)(ip - (const char*)(input->src));
139688 +    output->pos = (size_t)(op - (char*)(output->dst));
139690 +    /* Update the expected output buffer for ZSTD_bm_stable. */
139691 +    zds->expectedOutBuffer = *output;
139693 +    if ((ip==istart) && (op==ostart)) {  /* no forward progress */
139694 +        zds->noForwardProgress ++;
139695 +        if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
139696 +            RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
139697 +            RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
139698 +            assert(0);
139699 +        }
139700 +    } else {
139701 +        zds->noForwardProgress = 0;
139702 +    }
139703 +    {   size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
139704 +        if (!nextSrcSizeHint) {   /* frame fully decoded */
139705 +            if (zds->outEnd == zds->outStart) {  /* output fully flushed */
139706 +                if (zds->hostageByte) {
139707 +                    if (input->pos >= input->size) {
139708 +                        /* can't release hostage (not present) */
139709 +                        zds->streamStage = zdss_read;
139710 +                        return 1;
139711 +                    }
139712 +                    input->pos++;  /* release hostage */
139713 +                }   /* zds->hostageByte */
139714 +                return 0;
139715 +            }  /* zds->outEnd == zds->outStart */
139716 +            if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
139717 +                input->pos--;   /* note : pos > 0; otherwise it would be impossible to finish reading the last block */
139718 +                zds->hostageByte=1;
139719 +            }
139720 +            return 1;
139721 +        }  /* nextSrcSizeHint==0 */
139722 +        nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block);   /* preload header of next block */
139723 +        assert(zds->inPos <= nextSrcSizeHint);
139724 +        nextSrcSizeHint -= zds->inPos;   /* part already loaded */
139725 +        return nextSrcSizeHint;
139726 +    }
139729 +size_t ZSTD_decompressStream_simpleArgs (
139730 +                            ZSTD_DCtx* dctx,
139731 +                            void* dst, size_t dstCapacity, size_t* dstPos,
139732 +                      const void* src, size_t srcSize, size_t* srcPos)
139734 +    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
139735 +    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
139736 +    /* ZSTD_decompressStream() will check the validity of dstPos and srcPos */
139737 +    size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
139738 +    *dstPos = output.pos;
139739 +    *srcPos = input.pos;
139740 +    return cErr;
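
For reference, this is how the streaming entry point above is typically driven; a minimal sketch against the userspace libzstd API (ZSTD_createDStream() etc.), which this kernel port mirrors. A return value of 0 means a frame was fully decoded and flushed; a positive value is a hint of how much more input is needed:

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    int main(void)
    {
        ZSTD_DStream* const ds = ZSTD_createDStream();
        size_t const inCap  = ZSTD_DStreamInSize();
        size_t const outCap = ZSTD_DStreamOutSize();
        char* const inBuf  = malloc(inCap);
        char* const outBuf = malloc(outCap);
        size_t readSz;
        if (!ds || !inBuf || !outBuf) return 1;
        while ((readSz = fread(inBuf, 1, inCap, stdin)) > 0) {
            ZSTD_inBuffer input = { inBuf, readSz, 0 };
            while (input.pos < input.size) {   /* consume all buffered input */
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                size_t const ret = ZSTD_decompressStream(ds, &output, &input);
                if (ZSTD_isError(ret)) {
                    fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(ret));
                    return 1;
                }
                fwrite(outBuf, 1, output.pos, stdout);
            }
        }
        ZSTD_freeDStream(ds);
        free(inBuf); free(outBuf);
        return 0;
    }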
139742 diff --git a/lib/zstd/decompress/zstd_decompress_block.c b/lib/zstd/decompress/zstd_decompress_block.c
139743 new file mode 100644
139744 index 000000000000..cd6eba55a21c
139745 --- /dev/null
139746 +++ b/lib/zstd/decompress/zstd_decompress_block.c
139747 @@ -0,0 +1,1540 @@
139749 + * Copyright (c) Yann Collet, Facebook, Inc.
139750 + * All rights reserved.
139752 + * This source code is licensed under both the BSD-style license (found in the
139753 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
139754 + * in the COPYING file in the root directory of this source tree).
139755 + * You may select, at your option, one of the above-listed licenses.
139756 + */
139758 +/* zstd_decompress_block :
139759 + * this module takes care of decompressing _compressed_ blocks */
139761 +/*-*******************************************************
139762 +*  Dependencies
139763 +*********************************************************/
139764 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
139765 +#include "../common/compiler.h"    /* prefetch */
139766 +#include "../common/cpu.h"         /* bmi2 */
139767 +#include "../common/mem.h"         /* low level memory routines */
139768 +#define FSE_STATIC_LINKING_ONLY
139769 +#include "../common/fse.h"
139770 +#define HUF_STATIC_LINKING_ONLY
139771 +#include "../common/huf.h"
139772 +#include "../common/zstd_internal.h"
139773 +#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
139774 +#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
139775 +#include "zstd_decompress_block.h"
139777 +/*_*******************************************************
139778 +*  Macros
139779 +**********************************************************/
139781 +/* These two optional macros force the use of one or the other of the two
139782 + * ZSTD_decompressSequences implementations. They cannot both be defined
139783 + * at the same time.
139784 + */
139785 +#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
139786 +    defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
139787 +#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
139788 +#endif
139791 +/*_*******************************************************
139792 +*  Memory operations
139793 +**********************************************************/
139794 +static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); }
139797 +/*-*************************************************************
139798 + *   Block decoding
139799 + ***************************************************************/
139801 +/*! ZSTD_getcBlockSize() :
139802 + *  Provides the size of compressed block from block header `src` */
139803 +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
139804 +                          blockProperties_t* bpPtr)
139806 +    RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, "");
139808 +    {   U32 const cBlockHeader = MEM_readLE24(src);
139809 +        U32 const cSize = cBlockHeader >> 3;
139810 +        bpPtr->lastBlock = cBlockHeader & 1;
139811 +        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
139812 +        bpPtr->origSize = cSize;   /* only useful for RLE */
139813 +        if (bpPtr->blockType == bt_rle) return 1;
139814 +        RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, "");
139815 +        return cSize;
139816 +    }
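
To make the 24-bit block header layout concrete, a worked example with invented header bytes { 0x21, 0x00, 0x00 }:

    unsigned const cBlockHeader = 0x000021;                /* MEM_readLE24 of the example bytes */
    unsigned const lastBlock    = cBlockHeader & 1;        /* 1 : last block of the frame */
    unsigned const blockType    = (cBlockHeader >> 1) & 3; /* 0 : bt_raw */
    unsigned const cSize        = cBlockHeader >> 3;       /* 4 : four bytes of block content */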
139820 +/* Hidden declaration for fullbench */
139821 +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
139822 +                          const void* src, size_t srcSize);
139823 +/*! ZSTD_decodeLiteralsBlock() :
139824 + * @return : nb of bytes read from src (< srcSize)
139825 + *  note : symbol not declared but exposed for fullbench */
139826 +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
139827 +                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
139829 +    DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
139830 +    RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
139832 +    {   const BYTE* const istart = (const BYTE*) src;
139833 +        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
139835 +        switch(litEncType)
139836 +        {
139837 +        case set_repeat:
139838 +            DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
139839 +            RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
139840 +            /* fall-through */
139842 +        case set_compressed:
139843 +            RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
139844 +            {   size_t lhSize, litSize, litCSize;
139845 +                U32 singleStream=0;
139846 +                U32 const lhlCode = (istart[0] >> 2) & 3;
139847 +                U32 const lhc = MEM_readLE32(istart);
139848 +                size_t hufSuccess;
139849 +                switch(lhlCode)
139850 +                {
139851 +                case 0: case 1: default:   /* note : default is impossible, since lhlCode is in [0..3] */
139852 +                    /* 2 - 2 - 10 - 10 */
139853 +                    singleStream = !lhlCode;
139854 +                    lhSize = 3;
139855 +                    litSize  = (lhc >> 4) & 0x3FF;
139856 +                    litCSize = (lhc >> 14) & 0x3FF;
139857 +                    break;
139858 +                case 2:
139859 +                    /* 2 - 2 - 14 - 14 */
139860 +                    lhSize = 4;
139861 +                    litSize  = (lhc >> 4) & 0x3FFF;
139862 +                    litCSize = lhc >> 18;
139863 +                    break;
139864 +                case 3:
139865 +                    /* 2 - 2 - 18 - 18 */
139866 +                    lhSize = 5;
139867 +                    litSize  = (lhc >> 4) & 0x3FFFF;
139868 +                    litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
139869 +                    break;
139870 +                }
139871 +                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
139872 +                RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
139874 +                /* prefetch huffman table if cold */
139875 +                if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
139876 +                    PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
139877 +                }
139879 +                if (litEncType==set_repeat) {
139880 +                    if (singleStream) {
139881 +                        hufSuccess = HUF_decompress1X_usingDTable_bmi2(
139882 +                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
139883 +                            dctx->HUFptr, dctx->bmi2);
139884 +                    } else {
139885 +                        hufSuccess = HUF_decompress4X_usingDTable_bmi2(
139886 +                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
139887 +                            dctx->HUFptr, dctx->bmi2);
139888 +                    }
139889 +                } else {
139890 +                    if (singleStream) {
139891 +#if defined(HUF_FORCE_DECOMPRESS_X2)
139892 +                        hufSuccess = HUF_decompress1X_DCtx_wksp(
139893 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
139894 +                            istart+lhSize, litCSize, dctx->workspace,
139895 +                            sizeof(dctx->workspace));
139896 +#else
139897 +                        hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
139898 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
139899 +                            istart+lhSize, litCSize, dctx->workspace,
139900 +                            sizeof(dctx->workspace), dctx->bmi2);
139901 +#endif
139902 +                    } else {
139903 +                        hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
139904 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
139905 +                            istart+lhSize, litCSize, dctx->workspace,
139906 +                            sizeof(dctx->workspace), dctx->bmi2);
139907 +                    }
139908 +                }
139910 +                RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
139912 +                dctx->litPtr = dctx->litBuffer;
139913 +                dctx->litSize = litSize;
139914 +                dctx->litEntropy = 1;
139915 +                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
139916 +                ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
139917 +                return litCSize + lhSize;
139918 +            }
139920 +        case set_basic:
139921 +            {   size_t litSize, lhSize;
139922 +                U32 const lhlCode = ((istart[0]) >> 2) & 3;
139923 +                switch(lhlCode)
139924 +                {
139925 +                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
139926 +                    lhSize = 1;
139927 +                    litSize = istart[0] >> 3;
139928 +                    break;
139929 +                case 1:
139930 +                    lhSize = 2;
139931 +                    litSize = MEM_readLE16(istart) >> 4;
139932 +                    break;
139933 +                case 3:
139934 +                    lhSize = 3;
139935 +                    litSize = MEM_readLE24(istart) >> 4;
139936 +                    break;
139937 +                }
139939 +                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
139940 +                    RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
139941 +                    ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize);
139942 +                    dctx->litPtr = dctx->litBuffer;
139943 +                    dctx->litSize = litSize;
139944 +                    ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
139945 +                    return lhSize+litSize;
139946 +                }
139947 +                /* direct reference into compressed stream */
139948 +                dctx->litPtr = istart+lhSize;
139949 +                dctx->litSize = litSize;
139950 +                return lhSize+litSize;
139951 +            }
139953 +        case set_rle:
139954 +            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
139955 +                size_t litSize, lhSize;
139956 +                switch(lhlCode)
139957 +                {
139958 +                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
139959 +                    lhSize = 1;
139960 +                    litSize = istart[0] >> 3;
139961 +                    break;
139962 +                case 1:
139963 +                    lhSize = 2;
139964 +                    litSize = MEM_readLE16(istart) >> 4;
139965 +                    break;
139966 +                case 3:
139967 +                    lhSize = 3;
139968 +                    litSize = MEM_readLE24(istart) >> 4;
139969 +                    RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
139970 +                    break;
139971 +                }
139972 +                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
139973 +                ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
139974 +                dctx->litPtr = dctx->litBuffer;
139975 +                dctx->litSize = litSize;
139976 +                return lhSize+1;
139977 +            }
139978 +        default:
139979 +            RETURN_ERROR(corruption_detected, "impossible");
139980 +        }
139981 +    }
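
A worked example of the literals size-format decoding above, using the lhlCode 0 layout (2-2-10-10 bits) with an invented header value:

    /* First 4 header bytes, read little-endian: lhc = 0x000F0642
     *   bits 0-1 = 2 -> set_compressed
     *   bits 2-3 = 0 -> lhlCode 0 : single stream, lhSize = 3 */
    unsigned const lhc = 0x000F0642;                 /* example value */
    unsigned const litSize  = (lhc >> 4)  & 0x3FF;   /* 100 : regenerated size */
    unsigned const litCSize = (lhc >> 14) & 0x3FF;   /*  60 : compressed size  */
    /* the compressed literals therefore start at istart+3 and span 60 bytes */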
139984 +/* Default FSE distribution tables.
139985 + * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
139986 + * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
139987 + * They were generated programmatically with the following method :
139988 + * - start from the default distributions, present in /lib/common/zstd_internal.h
139989 + * - generate the tables normally, using ZSTD_buildFSETable()
139990 + * - print out the content of the tables
139991 + * - prettify the output, reported below, and test with the fuzzer to ensure correctness */
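
A hypothetical sketch of that regeneration harness (not part of the patch; it assumes the upstream identifiers LL_defaultNorm, LL_base, LL_bits, MaxLL and LL_DEFAULTNORMLOG from zstd_internal.h, plus <stdio.h>):

    ZSTD_seqSymbol dt[(1 << LL_DEFAULTNORMLOG) + 1];
    U32 wksp[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE / sizeof(U32)];
    size_t u;
    ZSTD_buildFSETable(dt, LL_defaultNorm, MaxLL, LL_base, LL_bits,
                       LL_DEFAULTNORMLOG, wksp, sizeof(wksp), /* bmi2 */ 0);
    for (u = 1; u < (1u << LL_DEFAULTNORMLOG) + 1; u++)   /* dt[0] is the header */
        printf("{ %u, %u, %u, %u },\n", dt[u].nextState,
               dt[u].nbAdditionalBits, dt[u].nbBits, dt[u].baseValue);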
139993 +/* Default FSE distribution table for Literal Lengths */
139994 +static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
139995 +     {  1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
139996 +     /* nextState, nbAddBits, nbBits, baseVal */
139997 +     {  0,  0,  4,    0},  { 16,  0,  4,    0},
139998 +     { 32,  0,  5,    1},  {  0,  0,  5,    3},
139999 +     {  0,  0,  5,    4},  {  0,  0,  5,    6},
140000 +     {  0,  0,  5,    7},  {  0,  0,  5,    9},
140001 +     {  0,  0,  5,   10},  {  0,  0,  5,   12},
140002 +     {  0,  0,  6,   14},  {  0,  1,  5,   16},
140003 +     {  0,  1,  5,   20},  {  0,  1,  5,   22},
140004 +     {  0,  2,  5,   28},  {  0,  3,  5,   32},
140005 +     {  0,  4,  5,   48},  { 32,  6,  5,   64},
140006 +     {  0,  7,  5,  128},  {  0,  8,  6,  256},
140007 +     {  0, 10,  6, 1024},  {  0, 12,  6, 4096},
140008 +     { 32,  0,  4,    0},  {  0,  0,  4,    1},
140009 +     {  0,  0,  5,    2},  { 32,  0,  5,    4},
140010 +     {  0,  0,  5,    5},  { 32,  0,  5,    7},
140011 +     {  0,  0,  5,    8},  { 32,  0,  5,   10},
140012 +     {  0,  0,  5,   11},  {  0,  0,  6,   13},
140013 +     { 32,  1,  5,   16},  {  0,  1,  5,   18},
140014 +     { 32,  1,  5,   22},  {  0,  2,  5,   24},
140015 +     { 32,  3,  5,   32},  {  0,  3,  5,   40},
140016 +     {  0,  6,  4,   64},  { 16,  6,  4,   64},
140017 +     { 32,  7,  5,  128},  {  0,  9,  6,  512},
140018 +     {  0, 11,  6, 2048},  { 48,  0,  4,    0},
140019 +     { 16,  0,  4,    1},  { 32,  0,  5,    2},
140020 +     { 32,  0,  5,    3},  { 32,  0,  5,    5},
140021 +     { 32,  0,  5,    6},  { 32,  0,  5,    8},
140022 +     { 32,  0,  5,    9},  { 32,  0,  5,   11},
140023 +     { 32,  0,  5,   12},  {  0,  0,  6,   15},
140024 +     { 32,  1,  5,   18},  { 32,  1,  5,   20},
140025 +     { 32,  2,  5,   24},  { 32,  2,  5,   28},
140026 +     { 32,  3,  5,   40},  { 32,  4,  5,   48},
140027 +     {  0, 16,  6,65536},  {  0, 15,  6,32768},
140028 +     {  0, 14,  6,16384},  {  0, 13,  6, 8192},
140029 +};   /* LL_defaultDTable */
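
Reading one entry of the table above as an example:

    /* LL_defaultDTable[1] = { 0, 0, 4, 0 } decodes as:
     *   baseVal   = 0 -> literal length 0
     *   nbAddBits = 0 -> no extra bits to add to baseVal
     *   nbBits    = 4 -> read 4 bits b from the stream
     *   nextState = 0 -> next FSE state is 0 + b
     * { 0, 16, 6, 65536 } similarly covers lengths 65536..131071. */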
140031 +/* Default FSE distribution table for Offset Codes */
140032 +static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
140033 +    {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
140034 +    /* nextState, nbAddBits, nbBits, baseVal */
140035 +    {  0,  0,  5,    0},     {  0,  6,  4,   61},
140036 +    {  0,  9,  5,  509},     {  0, 15,  5,32765},
140037 +    {  0, 21,  5,2097149},   {  0,  3,  5,    5},
140038 +    {  0,  7,  4,  125},     {  0, 12,  5, 4093},
140039 +    {  0, 18,  5,262141},    {  0, 23,  5,8388605},
140040 +    {  0,  5,  5,   29},     {  0,  8,  4,  253},
140041 +    {  0, 14,  5,16381},     {  0, 20,  5,1048573},
140042 +    {  0,  2,  5,    1},     { 16,  7,  4,  125},
140043 +    {  0, 11,  5, 2045},     {  0, 17,  5,131069},
140044 +    {  0, 22,  5,4194301},   {  0,  4,  5,   13},
140045 +    { 16,  8,  4,  253},     {  0, 13,  5, 8189},
140046 +    {  0, 19,  5,524285},    {  0,  1,  5,    1},
140047 +    { 16,  6,  4,   61},     {  0, 10,  5, 1021},
140048 +    {  0, 16,  5,65533},     {  0, 28,  5,268435453},
140049 +    {  0, 27,  5,134217725}, {  0, 26,  5,67108861},
140050 +    {  0, 25,  5,33554429},  {  0, 24,  5,16777213},
140051 +};   /* OF_defaultDTable */
140054 +/* Default FSE distribution table for Match Lengths */
140055 +static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
140056 +    {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
140057 +    /* nextState, nbAddBits, nbBits, baseVal */
140058 +    {  0,  0,  6,    3},  {  0,  0,  4,    4},
140059 +    { 32,  0,  5,    5},  {  0,  0,  5,    6},
140060 +    {  0,  0,  5,    8},  {  0,  0,  5,    9},
140061 +    {  0,  0,  5,   11},  {  0,  0,  6,   13},
140062 +    {  0,  0,  6,   16},  {  0,  0,  6,   19},
140063 +    {  0,  0,  6,   22},  {  0,  0,  6,   25},
140064 +    {  0,  0,  6,   28},  {  0,  0,  6,   31},
140065 +    {  0,  0,  6,   34},  {  0,  1,  6,   37},
140066 +    {  0,  1,  6,   41},  {  0,  2,  6,   47},
140067 +    {  0,  3,  6,   59},  {  0,  4,  6,   83},
140068 +    {  0,  7,  6,  131},  {  0,  9,  6,  515},
140069 +    { 16,  0,  4,    4},  {  0,  0,  4,    5},
140070 +    { 32,  0,  5,    6},  {  0,  0,  5,    7},
140071 +    { 32,  0,  5,    9},  {  0,  0,  5,   10},
140072 +    {  0,  0,  6,   12},  {  0,  0,  6,   15},
140073 +    {  0,  0,  6,   18},  {  0,  0,  6,   21},
140074 +    {  0,  0,  6,   24},  {  0,  0,  6,   27},
140075 +    {  0,  0,  6,   30},  {  0,  0,  6,   33},
140076 +    {  0,  1,  6,   35},  {  0,  1,  6,   39},
140077 +    {  0,  2,  6,   43},  {  0,  3,  6,   51},
140078 +    {  0,  4,  6,   67},  {  0,  5,  6,   99},
140079 +    {  0,  8,  6,  259},  { 32,  0,  4,    4},
140080 +    { 48,  0,  4,    4},  { 16,  0,  4,    5},
140081 +    { 32,  0,  5,    7},  { 32,  0,  5,    8},
140082 +    { 32,  0,  5,   10},  { 32,  0,  5,   11},
140083 +    {  0,  0,  6,   14},  {  0,  0,  6,   17},
140084 +    {  0,  0,  6,   20},  {  0,  0,  6,   23},
140085 +    {  0,  0,  6,   26},  {  0,  0,  6,   29},
140086 +    {  0,  0,  6,   32},  {  0, 16,  6,65539},
140087 +    {  0, 15,  6,32771},  {  0, 14,  6,16387},
140088 +    {  0, 13,  6, 8195},  {  0, 12,  6, 4099},
140089 +    {  0, 11,  6, 2051},  {  0, 10,  6, 1027},
140090 +};   /* ML_defaultDTable */
140093 +static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
140095 +    void* ptr = dt;
140096 +    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
140097 +    ZSTD_seqSymbol* const cell = dt + 1;
140099 +    DTableH->tableLog = 0;
140100 +    DTableH->fastMode = 0;
140102 +    cell->nbBits = 0;
140103 +    cell->nextState = 0;
140104 +    assert(nbAddBits < 255);
140105 +    cell->nbAdditionalBits = (BYTE)nbAddBits;
140106 +    cell->baseValue = baseValue;
140110 +/* ZSTD_buildFSETable() :
140111 + * generate FSE decoding table for one symbol (ll, ml or off)
140112 + * cannot fail if the input is valid :
140113 + * all inputs are presumed validated at this stage */
140114 +FORCE_INLINE_TEMPLATE
140115 +void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
140116 +            const short* normalizedCounter, unsigned maxSymbolValue,
140117 +            const U32* baseValue, const U32* nbAdditionalBits,
140118 +            unsigned tableLog, void* wksp, size_t wkspSize)
140120 +    ZSTD_seqSymbol* const tableDecode = dt+1;
140121 +    U32 const maxSV1 = maxSymbolValue + 1;
140122 +    U32 const tableSize = 1 << tableLog;
140124 +    U16* symbolNext = (U16*)wksp;
140125 +    BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1);
140126 +    U32 highThreshold = tableSize - 1;
140129 +    /* Sanity Checks */
140130 +    assert(maxSymbolValue <= MaxSeq);
140131 +    assert(tableLog <= MaxFSELog);
140132 +    assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE);
140133 +    (void)wkspSize;
140134 +    /* Init, lay down lowprob symbols */
140135 +    {   ZSTD_seqSymbol_header DTableH;
140136 +        DTableH.tableLog = tableLog;
140137 +        DTableH.fastMode = 1;
140138 +        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
140139 +            U32 s;
140140 +            for (s=0; s<maxSV1; s++) {
140141 +                if (normalizedCounter[s]==-1) {
140142 +                    tableDecode[highThreshold--].baseValue = s;
140143 +                    symbolNext[s] = 1;
140144 +                } else {
140145 +                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
140146 +                    assert(normalizedCounter[s]>=0);
140147 +                    symbolNext[s] = (U16)normalizedCounter[s];
140148 +        }   }   }
140149 +        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
140150 +    }
140152 +    /* Spread symbols */
140153 +    assert(tableSize <= 512);
140154 +    /* Specialized symbol spreading for the case when there are
140155 +     * no low probability (-1 count) symbols. When compressing
140156 +     * small blocks we avoid low probability symbols so that this
140157 +     * case is hit, since header decoding speed matters more.
140158 +     */
140159 +    if (highThreshold == tableSize - 1) {
140160 +        size_t const tableMask = tableSize-1;
140161 +        size_t const step = FSE_TABLESTEP(tableSize);
140162 +        /* First lay down the symbols in order.
140163 +         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
140164 +         * misses since small blocks generally have small table logs, so nearly
140165 +         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
140166 +         * our buffer to handle the over-write.
140167 +         */
140168 +        {
140169 +            U64 const add = 0x0101010101010101ull;
140170 +            size_t pos = 0;
140171 +            U64 sv = 0;
140172 +            U32 s;
140173 +            for (s=0; s<maxSV1; ++s, sv += add) {
140174 +                int i;
140175 +                int const n = normalizedCounter[s];
140176 +                MEM_write64(spread + pos, sv);
140177 +                for (i = 8; i < n; i += 8) {
140178 +                    MEM_write64(spread + pos + i, sv);
140179 +                }
140180 +                pos += n;
140181 +            }
140182 +        }
140183 +        /* Now we spread those positions across the table.
140184 +         * The benefit of doing it in two stages is that we avoid the
140185 +         * variable-size inner loop, which caused lots of branch misses.
140186 +         * Now we can run through all the positions without any branch misses.
140187 +         * We unroll the loop twice, since that is what empirically worked best.
140188 +         */
140189 +        {
140190 +            size_t position = 0;
140191 +            size_t s;
140192 +            size_t const unroll = 2;
140193 +            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
140194 +            for (s = 0; s < (size_t)tableSize; s += unroll) {
140195 +                size_t u;
140196 +                for (u = 0; u < unroll; ++u) {
140197 +                    size_t const uPosition = (position + (u * step)) & tableMask;
140198 +                    tableDecode[uPosition].baseValue = spread[s + u];
140199 +                }
140200 +                position = (position + (unroll * step)) & tableMask;
140201 +            }
140202 +            assert(position == 0);
140203 +        }
140204 +    } else {
140205 +        U32 const tableMask = tableSize-1;
140206 +        U32 const step = FSE_TABLESTEP(tableSize);
140207 +        U32 s, position = 0;
140208 +        for (s=0; s<maxSV1; s++) {
140209 +            int i;
140210 +            int const n = normalizedCounter[s];
140211 +            for (i=0; i<n; i++) {
140212 +                tableDecode[position].baseValue = s;
140213 +                position = (position + step) & tableMask;
140214 +                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
140215 +        }   }
140216 +        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
140217 +    }
140219 +    /* Build Decoding table */
140220 +    {
140221 +        U32 u;
140222 +        for (u=0; u<tableSize; u++) {
140223 +            U32 const symbol = tableDecode[u].baseValue;
140224 +            U32 const nextState = symbolNext[symbol]++;
140225 +            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
140226 +            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
140227 +            assert(nbAdditionalBits[symbol] < 255);
140228 +            tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
140229 +            tableDecode[u].baseValue = baseValue[symbol];
140230 +        }
140231 +    }
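
A worked example of the state construction just above (illustrative numbers, using GCC's __builtin_clz in place of BIT_highbit32): with tableLog = 6 (tableSize = 64), a symbol whose normalized count is 4 occupies four cells, which receive nextState = 4, 5, 6, 7 in turn:

    #include <stdio.h>

    int main(void)
    {
        unsigned n;
        for (n = 4; n < 8; n++) {   /* the symbol's four cells */
            unsigned const nbBits    = 6 - (31 - (unsigned)__builtin_clz(n)); /* 6 - highbit32(n) = 4 */
            unsigned const nextState = (n << nbBits) - 64;                    /* 0, 16, 32, 48 */
            printf("cell %u : read %u bits, base state %u\n", n, nbBits, nextState);
        }
        return 0;
    }

Reading 4 bits from the stream thus lands the decoder in one of four evenly spaced sub-ranges of the table, as FSE requires.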
140234 +/* Avoids the FORCE_INLINE of the _body() function. */
140235 +static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt,
140236 +            const short* normalizedCounter, unsigned maxSymbolValue,
140237 +            const U32* baseValue, const U32* nbAdditionalBits,
140238 +            unsigned tableLog, void* wksp, size_t wkspSize)
140240 +    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
140241 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
140244 +#if DYNAMIC_BMI2
140245 +TARGET_ATTRIBUTE("bmi2") static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt,
140246 +            const short* normalizedCounter, unsigned maxSymbolValue,
140247 +            const U32* baseValue, const U32* nbAdditionalBits,
140248 +            unsigned tableLog, void* wksp, size_t wkspSize)
140250 +    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
140251 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
140253 +#endif
140255 +void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
140256 +            const short* normalizedCounter, unsigned maxSymbolValue,
140257 +            const U32* baseValue, const U32* nbAdditionalBits,
140258 +            unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
140260 +#if DYNAMIC_BMI2
140261 +    if (bmi2) {
140262 +        ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue,
140263 +                baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
140264 +        return;
140265 +    }
140266 +#endif
140267 +    (void)bmi2;
140268 +    ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue,
140269 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
140273 +/*! ZSTD_buildSeqTable() :
140274 + * @return : nb bytes read from src,
140275 + *           or an error code if it fails */
140276 +static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
140277 +                                 symbolEncodingType_e type, unsigned max, U32 maxLog,
140278 +                                 const void* src, size_t srcSize,
140279 +                                 const U32* baseValue, const U32* nbAdditionalBits,
140280 +                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
140281 +                                 int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize,
140282 +                                 int bmi2)
140284 +    switch(type)
140285 +    {
140286 +    case set_rle :
140287 +        RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
140288 +        RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
140289 +        {   U32 const symbol = *(const BYTE*)src;
140290 +            U32 const baseline = baseValue[symbol];
140291 +            U32 const nbBits = nbAdditionalBits[symbol];
140292 +            ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
140293 +        }
140294 +        *DTablePtr = DTableSpace;
140295 +        return 1;
140296 +    case set_basic :
140297 +        *DTablePtr = defaultTable;
140298 +        return 0;
140299 +    case set_repeat:
140300 +        RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
140301 +        /* prefetch FSE table if used */
140302 +        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
140303 +            const void* const pStart = *DTablePtr;
140304 +            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
140305 +            PREFETCH_AREA(pStart, pSize);
140306 +        }
140307 +        return 0;
140308 +    case set_compressed :
140309 +        {   unsigned tableLog;
140310 +            S16 norm[MaxSeq+1];
140311 +            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
140312 +            RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
140313 +            RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
140314 +            ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
140315 +            *DTablePtr = DTableSpace;
140316 +            return headerSize;
140317 +        }
140318 +    default :
140319 +        assert(0);
140320 +        RETURN_ERROR(GENERIC, "impossible");
140321 +    }
140324 +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
140325 +                             const void* src, size_t srcSize)
140327 +    const BYTE* const istart = (const BYTE*)src;
140328 +    const BYTE* const iend = istart + srcSize;
140329 +    const BYTE* ip = istart;
140330 +    int nbSeq;
140331 +    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
140333 +    /* check */
140334 +    RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");
140336 +    /* SeqHead */
140337 +    nbSeq = *ip++;
140338 +    if (!nbSeq) {
140339 +        *nbSeqPtr=0;
140340 +        RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
140341 +        return 1;
140342 +    }
140343 +    if (nbSeq > 0x7F) {
140344 +        if (nbSeq == 0xFF) {
140345 +            RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
140346 +            nbSeq = MEM_readLE16(ip) + LONGNBSEQ;
140347 +            ip+=2;
140348 +        } else {
140349 +            RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
140350 +            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
140351 +        }
140352 +    }
140353 +    *nbSeqPtr = nbSeq;
140355 +    /* FSE table descriptors */
140356 +    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
140357 +    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
140358 +        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
140359 +        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
140360 +        ip++;
140362 +        /* Build DTables */
140363 +        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
140364 +                                                      LLtype, MaxLL, LLFSELog,
140365 +                                                      ip, iend-ip,
140366 +                                                      LL_base, LL_bits,
140367 +                                                      LL_defaultDTable, dctx->fseEntropy,
140368 +                                                      dctx->ddictIsCold, nbSeq,
140369 +                                                      dctx->workspace, sizeof(dctx->workspace),
140370 +                                                      dctx->bmi2);
140371 +            RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
140372 +            ip += llhSize;
140373 +        }
140375 +        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
140376 +                                                      OFtype, MaxOff, OffFSELog,
140377 +                                                      ip, iend-ip,
140378 +                                                      OF_base, OF_bits,
140379 +                                                      OF_defaultDTable, dctx->fseEntropy,
140380 +                                                      dctx->ddictIsCold, nbSeq,
140381 +                                                      dctx->workspace, sizeof(dctx->workspace),
140382 +                                                      dctx->bmi2);
140383 +            RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
140384 +            ip += ofhSize;
140385 +        }
140387 +        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
140388 +                                                      MLtype, MaxML, MLFSELog,
140389 +                                                      ip, iend-ip,
140390 +                                                      ML_base, ML_bits,
140391 +                                                      ML_defaultDTable, dctx->fseEntropy,
140392 +                                                      dctx->ddictIsCold, nbSeq,
140393 +                                                      dctx->workspace, sizeof(dctx->workspace),
140394 +                                                      dctx->bmi2);
140395 +            RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
140396 +            ip += mlhSize;
140397 +        }
140398 +    }
140400 +    return ip-istart;
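
A compact restatement of the variable-length nbSeq header above, with worked byte values (all invented; LONGNBSEQ assumed to be 0x7F00 as defined upstream):

    #include <stddef.h>
    #include <stdio.h>

    #define LONGNBSEQ 0x7F00   /* assumed upstream value */

    /* Minimal restatement of the nbSeq header decoding (illustrative only).
     * Returns the number of header bytes consumed, or -1 on truncated input. */
    static int decodeNbSeq(const unsigned char* ip, size_t size, int* nbSeq)
    {
        int b0;
        if (size < 1) return -1;
        b0 = *ip++;
        if (b0 <= 0x7F) { *nbSeq = b0; return 1; }        /* 1-byte form */
        if (b0 == 0xFF) {                                 /* 3-byte form */
            if (size < 3) return -1;
            *nbSeq = (ip[0] | (ip[1] << 8)) + LONGNBSEQ;  /* little-endian 16 bits */
            return 3;
        }
        if (size < 2) return -1;                          /* 2-byte form */
        *nbSeq = ((b0 - 0x80) << 8) + *ip;
        return 2;
    }

    int main(void)
    {
        const unsigned char h1[] = { 0x40 };
        const unsigned char h2[] = { 0x81, 0x05 };
        const unsigned char h3[] = { 0xFF, 0x00, 0x10 };
        int n;
        decodeNbSeq(h1, sizeof h1, &n); printf("%d\n", n);   /* 64    */
        decodeNbSeq(h2, sizeof h2, &n); printf("%d\n", n);   /* 261   */
        decodeNbSeq(h3, sizeof h3, &n); printf("%d\n", n);   /* 36608 */
        return 0;
    }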
140404 +typedef struct {
140405 +    size_t litLength;
140406 +    size_t matchLength;
140407 +    size_t offset;
140408 +    const BYTE* match;
140409 +} seq_t;
140411 +typedef struct {
140412 +    size_t state;
140413 +    const ZSTD_seqSymbol* table;
140414 +} ZSTD_fseState;
140416 +typedef struct {
140417 +    BIT_DStream_t DStream;
140418 +    ZSTD_fseState stateLL;
140419 +    ZSTD_fseState stateOffb;
140420 +    ZSTD_fseState stateML;
140421 +    size_t prevOffset[ZSTD_REP_NUM];
140422 +    const BYTE* prefixStart;
140423 +    const BYTE* dictEnd;
140424 +    size_t pos;
140425 +} seqState_t;
140427 +/*! ZSTD_overlapCopy8() :
140428 + *  Copies 8 bytes from ip to op and updates op and ip where ip <= op.
140429 + *  If the offset is < 8 then the offset is spread to at least 8 bytes.
140431 + *  Precondition: *ip <= *op
140432 + *  Postcondition: *op - *ip >= 8
140433 + */
140434 +HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
140435 +    assert(*ip <= *op);
140436 +    if (offset < 8) {
140437 +        /* close range match, overlap */
140438 +        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
140439 +        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
140440 +        int const sub2 = dec64table[offset];
140441 +        (*op)[0] = (*ip)[0];
140442 +        (*op)[1] = (*ip)[1];
140443 +        (*op)[2] = (*ip)[2];
140444 +        (*op)[3] = (*ip)[3];
140445 +        *ip += dec32table[offset];
140446 +        ZSTD_copy4(*op+4, *ip);
140447 +        *ip -= sub2;
140448 +    } else {
140449 +        ZSTD_copy8(*op, *ip);
140450 +    }
140451 +    *ip += 8;
140452 +    *op += 8;
140453 +    assert(*op - *ip >= 8);
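
A self-contained trace of the offset < 8 path for offset == 1 (the RLE-like case; buffer contents invented). The four single-byte copies replicate the history byte, and dec32table/dec64table then re-position ip so subsequent 8-byte copies see an effective offset of 8:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        static const unsigned dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
        static const int dec64table[] = { 8, 8, 8, 7, 8, 9, 10, 11 };
        unsigned char buf[32] = {0};
        unsigned char* op;
        const unsigned char* ip;
        size_t const offset = 1;
        buf[8] = 'A';            /* one byte of history */
        op = buf + 9;
        ip = op - offset;
        op[0]=ip[0]; op[1]=ip[1]; op[2]=ip[2]; op[3]=ip[3];  /* byte-wise replication */
        ip += dec32table[offset];                            /* ip == original op */
        memcpy(op + 4, ip, 4);                               /* op[4..7] = 'A' too */
        ip -= dec64table[offset];
        ip += 8; op += 8;
        assert(op - ip == 8);                                /* offset widened to 8 */
        printf("%.9s\n", buf + 8);                           /* AAAAAAAAA */
        return 0;
    }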
140456 +/*! ZSTD_safecopy() :
140457 + *  Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
140458 + *  and write up to 16 bytes past oend_w (op >= oend_w is allowed).
140459 + *  This function is only called in the uncommon case where the sequence is near the end of the block. It
140460 + *  should be fast for a single long sequence, but can be slow for several short sequences.
140462 + *  @param ovtype controls the overlap detection
140463 + *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
140464 + *         - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
140465 + *           The src buffer must be before the dst buffer.
140466 + */
140467 +static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
140468 +    ptrdiff_t const diff = op - ip;
140469 +    BYTE* const oend = op + length;
140471 +    assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
140472 +           (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
140474 +    if (length < 8) {
140475 +        /* Handle short lengths. */
140476 +        while (op < oend) *op++ = *ip++;
140477 +        return;
140478 +    }
140479 +    if (ovtype == ZSTD_overlap_src_before_dst) {
140480 +        /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
140481 +        assert(length >= 8);
140482 +        ZSTD_overlapCopy8(&op, &ip, diff);
140483 +        assert(op - ip >= 8);
140484 +        assert(op <= oend);
140485 +    }
140487 +    if (oend <= oend_w) {
140488 +        /* No risk of overwrite. */
140489 +        ZSTD_wildcopy(op, ip, length, ovtype);
140490 +        return;
140491 +    }
140492 +    if (op <= oend_w) {
140493 +        /* Wildcopy until we get close to the end. */
140494 +        assert(oend > oend_w);
140495 +        ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
140496 +        ip += oend_w - op;
140497 +        op = oend_w;
140498 +    }
140499 +    /* Handle the leftovers. */
140500 +    while (op < oend) *op++ = *ip++;
140503 +/* ZSTD_execSequenceEnd():
140504 + * This version handles cases that are near the end of the output buffer. It requires
140505 + * more careful checks to make sure there is no overflow. By separating out these hard
140506 + * and unlikely cases, we can speed up the common cases.
140508 + * NOTE: This function needs to be fast for a single long sequence, but doesn't need
140509 + * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
140510 + */
140511 +FORCE_NOINLINE
140512 +size_t ZSTD_execSequenceEnd(BYTE* op,
140513 +                            BYTE* const oend, seq_t sequence,
140514 +                            const BYTE** litPtr, const BYTE* const litLimit,
140515 +                            const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
140517 +    BYTE* const oLitEnd = op + sequence.litLength;
140518 +    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
140519 +    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
140520 +    const BYTE* match = oLitEnd - sequence.offset;
140521 +    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
140523 +    /* bounds checks : careful of address space overflow in 32-bit mode */
140524 +    RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
140525 +    RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
140526 +    assert(op < op + sequenceLength);
140527 +    assert(oLitEnd < op + sequenceLength);
140529 +    /* copy literals */
140530 +    ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
140531 +    op = oLitEnd;
140532 +    *litPtr = iLitEnd;
140534 +    /* copy Match */
140535 +    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
140536 +        /* offset beyond prefix */
140537 +        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
140538 +        match = dictEnd - (prefixStart-match);
140539 +        if (match + sequence.matchLength <= dictEnd) {
140540 +            ZSTD_memmove(oLitEnd, match, sequence.matchLength);
140541 +            return sequenceLength;
140542 +        }
140543 +        /* span extDict & currentPrefixSegment */
140544 +        {   size_t const length1 = dictEnd - match;
140545 +            ZSTD_memmove(oLitEnd, match, length1);
140546 +            op = oLitEnd + length1;
140547 +            sequence.matchLength -= length1;
140548 +            match = prefixStart;
140549 +    }   }
140550 +    ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
140551 +    return sequenceLength;
140552 +}
140554 +HINT_INLINE
140555 +size_t ZSTD_execSequence(BYTE* op,
140556 +                         BYTE* const oend, seq_t sequence,
140557 +                         const BYTE** litPtr, const BYTE* const litLimit,
140558 +                         const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
140559 +{
140560 +    BYTE* const oLitEnd = op + sequence.litLength;
140561 +    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
140562 +    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
140563 +    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;   /* risk : address space underflow on oend=NULL */
140564 +    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
140565 +    const BYTE* match = oLitEnd - sequence.offset;
140567 +    assert(op != NULL /* Precondition */);
140568 +    assert(oend_w < oend /* No underflow */);
140569 +    /* Handle edge cases in a slow path:
140570 +     *   - Read beyond end of literals
140571 +     *   - Match end is within WILDCOPY_OVERLENGTH of oend
140572 +     *   - 32-bit mode and the match length overflows
140573 +     */
140574 +    if (UNLIKELY(
140575 +            iLitEnd > litLimit ||
140576 +            oMatchEnd > oend_w ||
140577 +            (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
140578 +        return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
140580 +    /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
140581 +    assert(op <= oLitEnd /* No overflow */);
140582 +    assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
140583 +    assert(oMatchEnd <= oend /* No underflow */);
140584 +    assert(iLitEnd <= litLimit /* Literal length is in bounds */);
140585 +    assert(oLitEnd <= oend_w /* Can wildcopy literals */);
140586 +    assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
140588 +    /* Copy Literals:
140589 +     * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
140590 +     * We likely don't need the full 32-byte wildcopy.
140591 +     */
140592 +    assert(WILDCOPY_OVERLENGTH >= 16);
140593 +    ZSTD_copy16(op, (*litPtr));
140594 +    if (UNLIKELY(sequence.litLength > 16)) {
140595 +        ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
140596 +    }
140597 +    op = oLitEnd;
140598 +    *litPtr = iLitEnd;   /* update for next sequence */
140600 +    /* Copy Match */
140601 +    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
140602 +        /* offset beyond prefix -> go into extDict */
140603 +        RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
140604 +        match = dictEnd + (match - prefixStart);
140605 +        if (match + sequence.matchLength <= dictEnd) {
140606 +            ZSTD_memmove(oLitEnd, match, sequence.matchLength);
140607 +            return sequenceLength;
140608 +        }
140609 +        /* span extDict & currentPrefixSegment */
140610 +        {   size_t const length1 = dictEnd - match;
140611 +            ZSTD_memmove(oLitEnd, match, length1);
140612 +            op = oLitEnd + length1;
140613 +            sequence.matchLength -= length1;
140614 +            match = prefixStart;
140615 +    }   }
140616 +    /* Match within prefix of 1 or more bytes */
140617 +    assert(op <= oMatchEnd);
140618 +    assert(oMatchEnd <= oend_w);
140619 +    assert(match >= prefixStart);
140620 +    assert(sequence.matchLength >= 1);
140622 +    /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
140623 +     * without overlap checking.
140624 +     */
140625 +    if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
140626 +        /* We bet on a full wildcopy for matches, since we expect matches to be
140627 +         * longer than literals (in general). In silesia, ~10% of matches are longer
140628 +         * than 16 bytes.
140629 +         */
140630 +        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
140631 +        return sequenceLength;
140632 +    }
140633 +    assert(sequence.offset < WILDCOPY_VECLEN);
140635 +    /* Copy 8 bytes and spread the offset to be >= 8. */
140636 +    ZSTD_overlapCopy8(&op, &match, sequence.offset);
140638 +    /* If the match length is > 8 bytes, then continue with the wildcopy. */
140639 +    if (sequence.matchLength > 8) {
140640 +        assert(op < oMatchEnd);
140641 +        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
140642 +    }
140643 +    return sequenceLength;
140644 +}
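A sequence is the triple (litLength, matchLength, offset): append litLength literal bytes, then copy matchLength bytes starting offset bytes back in the output. A naive reference for the fast path above (sketch; no extDict handling, no bounds checks):

    #include <stddef.h>

    /* exec_sequence_ref: produce litLength + matchLength output bytes.
     * Hypothetical helper for illustration only. */
    static size_t exec_sequence_ref(unsigned char *op, const unsigned char **litPtr,
                                    size_t litLength, size_t matchLength, size_t offset)
    {
        const unsigned char *match;
        size_t i;
        for (i = 0; i < litLength; i++) op[i] = (*litPtr)[i];  /* literals */
        *litPtr += litLength;
        op += litLength;
        match = op - offset;
        for (i = 0; i < matchLength; i++) op[i] = match[i];    /* correct even when offset < matchLength */
        return litLength + matchLength;
    }

Everything in ZSTD_execSequence() past the bounds checks is about doing these two loops with 16- and 32-byte copies instead of single bytes.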
140646 +static void
140647 +ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
140648 +{
140649 +    const void* ptr = dt;
140650 +    const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
140651 +    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
140652 +    DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
140653 +                (U32)DStatePtr->state, DTableH->tableLog);
140654 +    BIT_reloadDStream(bitD);
140655 +    DStatePtr->table = dt + 1;
140656 +}
140658 +FORCE_INLINE_TEMPLATE void
140659 +ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
140660 +{
140661 +    ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
140662 +    U32 const nbBits = DInfo.nbBits;
140663 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
140664 +    DStatePtr->state = DInfo.nextState + lowBits;
140665 +}
140667 +FORCE_INLINE_TEMPLATE void
140668 +ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo)
140669 +{
140670 +    U32 const nbBits = DInfo.nbBits;
140671 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
140672 +    DStatePtr->state = DInfo.nextState + lowBits;
140673 +}
140675 +/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
140676 + * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
140677 + * bits before reloading. This value is the maximum number of bits we read
140678 + * after reloading when we are decoding long offsets.
140679 + */
140680 +#define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
140681 +    (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
140682 +        ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32  \
140683 +        : 0)
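With the limits used by upstream zstd (ZSTD_WINDOWLOG_MAX_32 = 30, STREAM_ACCUMULATOR_MIN_32 = 25) the macro evaluates to 5, which is exactly the bound the static assert inside ZSTD_decodeSequence() checks before relying on a single extra reload:

    /* Worked example (values taken from upstream zstd; sketch only):
     *   ZSTD_WINDOWLOG_MAX_32 (30) > STREAM_ACCUMULATOR_MIN_32 (25)
     *   => LONG_OFFSETS_MAX_EXTRA_BITS_32 = 30 - 25 = 5
     * i.e. after one mid-offset reload, at most 5 offset bits remain. */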
140685 +typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
140686 +typedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e;
140688 +FORCE_INLINE_TEMPLATE seq_t
140689 +ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch)
140690 +{
140691 +    seq_t seq;
140692 +    ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];
140693 +    ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state];
140694 +    ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];
140695 +    U32 const llBase = llDInfo.baseValue;
140696 +    U32 const mlBase = mlDInfo.baseValue;
140697 +    U32 const ofBase = ofDInfo.baseValue;
140698 +    BYTE const llBits = llDInfo.nbAdditionalBits;
140699 +    BYTE const mlBits = mlDInfo.nbAdditionalBits;
140700 +    BYTE const ofBits = ofDInfo.nbAdditionalBits;
140701 +    BYTE const totalBits = llBits+mlBits+ofBits;
140703 +    /* sequence */
140704 +    {   size_t offset;
140705 +        if (ofBits > 1) {
140706 +            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
140707 +            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
140708 +            assert(ofBits <= MaxOff);
140709 +            if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
140710 +                U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
140711 +                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
140712 +                BIT_reloadDStream(&seqState->DStream);
140713 +                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
140714 +                assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32);   /* to avoid another reload */
140715 +            } else {
140716 +                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
140717 +                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
140718 +            }
140719 +            seqState->prevOffset[2] = seqState->prevOffset[1];
140720 +            seqState->prevOffset[1] = seqState->prevOffset[0];
140721 +            seqState->prevOffset[0] = offset;
140722 +        } else {
140723 +            U32 const ll0 = (llBase == 0);
140724 +            if (LIKELY((ofBits == 0))) {
140725 +                if (LIKELY(!ll0))
140726 +                    offset = seqState->prevOffset[0];
140727 +                else {
140728 +                    offset = seqState->prevOffset[1];
140729 +                    seqState->prevOffset[1] = seqState->prevOffset[0];
140730 +                    seqState->prevOffset[0] = offset;
140731 +                }
140732 +            } else {
140733 +                offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
140734 +                {   size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
140735 +                    temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
140736 +                    if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
140737 +                    seqState->prevOffset[1] = seqState->prevOffset[0];
140738 +                    seqState->prevOffset[0] = offset = temp;
140739 +        }   }   }
140740 +        seq.offset = offset;
140741 +    }
140743 +    seq.matchLength = mlBase;
140744 +    if (mlBits > 0)
140745 +        seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
140747 +    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
140748 +        BIT_reloadDStream(&seqState->DStream);
140749 +    if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
140750 +        BIT_reloadDStream(&seqState->DStream);
140751 +    /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
140752 +    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
140754 +    seq.litLength = llBase;
140755 +    if (llBits > 0)
140756 +        seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
140758 +    if (MEM_32bits())
140759 +        BIT_reloadDStream(&seqState->DStream);
140761 +    DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
140762 +                (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
140764 +    if (prefetch == ZSTD_p_prefetch) {
140765 +        size_t const pos = seqState->pos + seq.litLength;
140766 +        const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
140767 +        seq.match = matchBase + pos - seq.offset;  /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
140768 +                                                    * No consequence though : no memory access will occur, offset is only used for prefetching */
140769 +        seqState->pos = pos + seq.matchLength;
140770 +    }
140772 +    /* ANS state update
140773 +     * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().
140774 +     * clang-9.2.0 does 7% worse with ZSTD_updateFseState().
140775 +     * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the
140776 +     * better option, so it is the default for other compilers. But, if you
140777 +     * measure that it is worse, please put up a pull request.
140778 +     */
140779 +    {
140780 +#if !defined(__clang__)
140781 +        const int kUseUpdateFseState = 1;
140782 +#else
140783 +        const int kUseUpdateFseState = 0;
140784 +#endif
140785 +        if (kUseUpdateFseState) {
140786 +            ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
140787 +            ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
140788 +            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
140789 +            ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
140790 +        } else {
140791 +            ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo);    /* <=  9 bits */
140792 +            ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo);    /* <=  9 bits */
140793 +            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
140794 +            ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo);  /* <=  8 bits */
140795 +        }
140796 +    }
140798 +    return seq;
140799 +}
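The ofBits <= 1 branch above implements zstd's three repeat offsets. After folding in ll0 (literal length == 0 shifts the meaning by one), a selector of 1, 2 or 3 picks prevOffset[1], prevOffset[2], or prevOffset[0] - 1, and the chosen value rotates to the front of the history. A compact sketch of just that update rule (hypothetical helper mirroring the branch, not the kernel code):

    #include <stddef.h>

    /* resolve_repcode: sel is ofBase + ll0 + 1 read bit, i.e. 1, 2 or 3. */
    static size_t resolve_repcode(size_t prev[3], size_t sel)
    {
        size_t offset = (sel == 3) ? prev[0] - 1 : prev[sel];
        if (offset == 0) offset = 1;      /* 0 is invalid: corrupted input, force 1 */
        if (sel != 1) prev[2] = prev[1];  /* sel == 1 only swaps prev[0]/prev[1] */
        prev[1] = prev[0];
        prev[0] = offset;
        return offset;
    }

The ofBits == 0 case is the same idea with no bit read: a zero selector reuses prev[0] untouched, and ll0 turns it into the selector-1 swap.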
140801 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
140802 +MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
140803 +{
140804 +    size_t const windowSize = dctx->fParams.windowSize;
140805 +    /* No dictionary used. */
140806 +    if (dctx->dictContentEndForFuzzing == NULL) return 0;
140807 +    /* Dictionary is our prefix. */
140808 +    if (prefixStart == dctx->dictContentBeginForFuzzing) return 1;
140809 +    /* Dictionary is not our ext-dict. */
140810 +    if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0;
140811 +    /* Dictionary is not within our window size. */
140812 +    if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0;
140813 +    /* Dictionary is active. */
140814 +    return 1;
140815 +}
140817 +MEM_STATIC void ZSTD_assertValidSequence(
140818 +        ZSTD_DCtx const* dctx,
140819 +        BYTE const* op, BYTE const* oend,
140820 +        seq_t const seq,
140821 +        BYTE const* prefixStart, BYTE const* virtualStart)
140822 +{
140823 +#if DEBUGLEVEL >= 1
140824 +    size_t const windowSize = dctx->fParams.windowSize;
140825 +    size_t const sequenceSize = seq.litLength + seq.matchLength;
140826 +    BYTE const* const oLitEnd = op + seq.litLength;
140827 +    DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
140828 +            (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
140829 +    assert(op <= oend);
140830 +    assert((size_t)(oend - op) >= sequenceSize);
140831 +    assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
140832 +    if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
140833 +        size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
140834 +        /* Offset must be within the dictionary. */
140835 +        assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
140836 +        assert(seq.offset <= windowSize + dictSize);
140837 +    } else {
140838 +        /* Offset must be within our window. */
140839 +        assert(seq.offset <= windowSize);
140840 +    }
140841 +#else
140842 +    (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
140843 +#endif
140844 +}
140845 +#endif
140847 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
140848 +FORCE_INLINE_TEMPLATE size_t
140849 +DONT_VECTORIZE
140850 +ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
140851 +                               void* dst, size_t maxDstSize,
140852 +                         const void* seqStart, size_t seqSize, int nbSeq,
140853 +                         const ZSTD_longOffset_e isLongOffset,
140854 +                         const int frame)
140855 +{
140856 +    const BYTE* ip = (const BYTE*)seqStart;
140857 +    const BYTE* const iend = ip + seqSize;
140858 +    BYTE* const ostart = (BYTE*)dst;
140859 +    BYTE* const oend = ostart + maxDstSize;
140860 +    BYTE* op = ostart;
140861 +    const BYTE* litPtr = dctx->litPtr;
140862 +    const BYTE* const litEnd = litPtr + dctx->litSize;
140863 +    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
140864 +    const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
140865 +    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
140866 +    DEBUGLOG(5, "ZSTD_decompressSequences_body");
140867 +    (void)frame;
140869 +    /* Regen sequences */
140870 +    if (nbSeq) {
140871 +        seqState_t seqState;
140872 +        size_t error = 0;
140873 +        dctx->fseEntropy = 1;
140874 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
140875 +        RETURN_ERROR_IF(
140876 +            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
140877 +            corruption_detected, "");
140878 +        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
140879 +        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
140880 +        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
140881 +        assert(dst != NULL);
140883 +        ZSTD_STATIC_ASSERT(
140884 +                BIT_DStream_unfinished < BIT_DStream_completed &&
140885 +                BIT_DStream_endOfBuffer < BIT_DStream_completed &&
140886 +                BIT_DStream_completed < BIT_DStream_overflow);
140888 +#if defined(__x86_64__)
140889 +        /* Align the decompression loop to 32 + 16 bytes.
140890 +         *
140891 +         * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
140892 +         * speed swings based on the alignment of the decompression loop. This
140893 +         * performance swing is caused by parts of the decompression loop falling
140894 +         * out of the DSB. The entire decompression loop should fit in the DSB,
140895 +         * when it can't we get much worse performance. You can measure if you've
140896 +         * hit the good case or the bad case with this perf command for some
140897 +         * compressed file test.zst:
140898 +         *
140899 +         *   perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
140900 +         *             -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
140901 +         *
140902 +         * If you see most cycles served out of the MITE you've hit the bad case.
140903 +         * If you see most cycles served out of the DSB you've hit the good case.
140904 +         * If it is pretty even then you may be in an okay case.
140905 +         *
140906 +         * I've been able to reproduce this issue on the following CPUs:
140907 +         *   - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
140908 +         *               Use Instruments->Counters to get DSB/MITE cycles.
140909 +         *               I never got performance swings, but I was able to
140910 +         *               go from the good case of mostly DSB to half of the
140911 +         *               cycles served from MITE.
140912 +         *   - Coffeelake: Intel i9-9900k
140913 +         *
140914 +         * I haven't been able to reproduce the instability or DSB misses on any
140915 +         * of the following CPUS:
140916 +         *   - Haswell
140917 +         *   - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz
140918 +         *   - Skylake
140919 +         *
140920 +         * If you are seeing performance instability this script can help test.
140921 +         * It tests on 4 commits in zstd where I saw performance change.
140922 +         *
140923 +         *   https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
140924 +         */
140925 +        __asm__(".p2align 5");
140926 +        __asm__("nop");
140927 +        __asm__(".p2align 4");
140928 +#endif
140929 +        for ( ; ; ) {
140930 +            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch);
140931 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
140932 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
140933 +            assert(!ZSTD_isError(oneSeqSize));
140934 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
140935 +#endif
140936 +            DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
140937 +            BIT_reloadDStream(&(seqState.DStream));
140938 +            op += oneSeqSize;
140939 +            /* gcc and clang both don't like early returns in this loop.
140940 +             * Instead break and check for an error at the end of the loop.
140941 +             */
140942 +            if (UNLIKELY(ZSTD_isError(oneSeqSize))) {
140943 +                error = oneSeqSize;
140944 +                break;
140945 +            }
140946 +            if (UNLIKELY(!--nbSeq)) break;
140947 +        }
140949 +        /* check if reached exact end */
140950 +        DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
140951 +        if (ZSTD_isError(error)) return error;
140952 +        RETURN_ERROR_IF(nbSeq, corruption_detected, "");
140953 +        RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
140954 +        /* save reps for next block */
140955 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
140956 +    }
140958 +    /* last literal segment */
140959 +    {   size_t const lastLLSize = litEnd - litPtr;
140960 +        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
140961 +        if (op != NULL) {
140962 +            ZSTD_memcpy(op, litPtr, lastLLSize);
140963 +            op += lastLLSize;
140964 +        }
140965 +    }
140967 +    return op-ostart;
140968 +}
140970 +static size_t
140971 +ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
140972 +                                 void* dst, size_t maxDstSize,
140973 +                           const void* seqStart, size_t seqSize, int nbSeq,
140974 +                           const ZSTD_longOffset_e isLongOffset,
140975 +                           const int frame)
140976 +{
140977 +    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
140978 +}
140979 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
140981 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
140982 +FORCE_INLINE_TEMPLATE size_t
140983 +ZSTD_decompressSequencesLong_body(
140984 +                               ZSTD_DCtx* dctx,
140985 +                               void* dst, size_t maxDstSize,
140986 +                         const void* seqStart, size_t seqSize, int nbSeq,
140987 +                         const ZSTD_longOffset_e isLongOffset,
140988 +                         const int frame)
140989 +{
140990 +    const BYTE* ip = (const BYTE*)seqStart;
140991 +    const BYTE* const iend = ip + seqSize;
140992 +    BYTE* const ostart = (BYTE*)dst;
140993 +    BYTE* const oend = ostart + maxDstSize;
140994 +    BYTE* op = ostart;
140995 +    const BYTE* litPtr = dctx->litPtr;
140996 +    const BYTE* const litEnd = litPtr + dctx->litSize;
140997 +    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
140998 +    const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
140999 +    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
141000 +    (void)frame;
141002 +    /* Regen sequences */
141003 +    if (nbSeq) {
141004 +#define STORED_SEQS 4
141005 +#define STORED_SEQS_MASK (STORED_SEQS-1)
141006 +#define ADVANCED_SEQS 4
141007 +        seq_t sequences[STORED_SEQS];
141008 +        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
141009 +        seqState_t seqState;
141010 +        int seqNb;
141011 +        dctx->fseEntropy = 1;
141012 +        { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
141013 +        seqState.prefixStart = prefixStart;
141014 +        seqState.pos = (size_t)(op-prefixStart);
141015 +        seqState.dictEnd = dictEnd;
141016 +        assert(dst != NULL);
141017 +        assert(iend >= ip);
141018 +        RETURN_ERROR_IF(
141019 +            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
141020 +            corruption_detected, "");
141021 +        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
141022 +        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
141023 +        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
141025 +        /* prepare in advance */
141026 +        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
141027 +            sequences[seqNb] = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
141028 +            PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
141029 +        }
141030 +        RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
141032 +        /* decode and decompress */
141033 +        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
141034 +            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
141035 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
141036 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
141037 +            assert(!ZSTD_isError(oneSeqSize));
141038 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
141039 +#endif
141040 +            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
141041 +            PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
141042 +            sequences[seqNb & STORED_SEQS_MASK] = sequence;
141043 +            op += oneSeqSize;
141044 +        }
141045 +        RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
141047 +        /* finish queue */
141048 +        seqNb -= seqAdvance;
141049 +        for ( ; seqNb<nbSeq ; seqNb++) {
141050 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
141051 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
141052 +            assert(!ZSTD_isError(oneSeqSize));
141053 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
141054 +#endif
141055 +            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
141056 +            op += oneSeqSize;
141057 +        }
141059 +        /* save reps for next block */
141060 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
141061 +    }
141063 +    /* last literal segment */
141064 +    {   size_t const lastLLSize = litEnd - litPtr;
141065 +        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
141066 +        if (op != NULL) {
141067 +            ZSTD_memcpy(op, litPtr, lastLLSize);
141068 +            op += lastLLSize;
141069 +        }
141070 +    }
141072 +    return op-ostart;
141073 +}
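The loop structure above is a 4-deep software pipeline: decode sequence N and prefetch its match, while executing sequence N - ADVANCED_SEQS whose prefetch was issued four iterations earlier. Because STORED_SEQS equals ADVANCED_SEQS, (seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK is the very ring slot about to be overwritten, so the execute must happen before the store. The indexing pattern in isolation (sketch only):

    #define RING_SIZE 4                    /* must stay a power of two */
    #define RING_MASK (RING_SIZE - 1)

    static void pipeline_sketch(int nbSeq)
    {
        int ring[RING_SIZE];
        int n;
        for (n = 0; n < nbSeq; n++) {
            int decoded = n;               /* stand-in for decode + prefetch */
            if (n >= RING_SIZE) {
                int old = ring[(n - RING_SIZE) & RING_MASK];
                (void)old;                 /* stand-in for ZSTD_execSequence() */
            }
            ring[n & RING_MASK] = decoded; /* store only after consuming the slot */
        }
        /* a real pipeline then drains the last RING_SIZE entries, as in
         * the "finish queue" loop above this point. */
    }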
141075 +static size_t
141076 +ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
141077 +                                 void* dst, size_t maxDstSize,
141078 +                           const void* seqStart, size_t seqSize, int nbSeq,
141079 +                           const ZSTD_longOffset_e isLongOffset,
141080 +                           const int frame)
141081 +{
141082 +    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
141083 +}
141084 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
141088 +#if DYNAMIC_BMI2
141090 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
141091 +static TARGET_ATTRIBUTE("bmi2") size_t
141092 +DONT_VECTORIZE
141093 +ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
141094 +                                 void* dst, size_t maxDstSize,
141095 +                           const void* seqStart, size_t seqSize, int nbSeq,
141096 +                           const ZSTD_longOffset_e isLongOffset,
141097 +                           const int frame)
141098 +{
141099 +    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
141100 +}
141101 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
141103 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
141104 +static TARGET_ATTRIBUTE("bmi2") size_t
141105 +ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
141106 +                                 void* dst, size_t maxDstSize,
141107 +                           const void* seqStart, size_t seqSize, int nbSeq,
141108 +                           const ZSTD_longOffset_e isLongOffset,
141109 +                           const int frame)
141110 +{
141111 +    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
141112 +}
141113 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
141115 +#endif /* DYNAMIC_BMI2 */
141117 +typedef size_t (*ZSTD_decompressSequences_t)(
141118 +                            ZSTD_DCtx* dctx,
141119 +                            void* dst, size_t maxDstSize,
141120 +                            const void* seqStart, size_t seqSize, int nbSeq,
141121 +                            const ZSTD_longOffset_e isLongOffset,
141122 +                            const int frame);
141124 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
141125 +static size_t
141126 +ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
141127 +                   const void* seqStart, size_t seqSize, int nbSeq,
141128 +                   const ZSTD_longOffset_e isLongOffset,
141129 +                   const int frame)
141130 +{
141131 +    DEBUGLOG(5, "ZSTD_decompressSequences");
141132 +#if DYNAMIC_BMI2
141133 +    if (dctx->bmi2) {
141134 +        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
141135 +    }
141136 +#endif
141137 +  return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
141138 +}
141139 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
141142 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
141143 +/* ZSTD_decompressSequencesLong() :
141144 + * decompression function triggered when a minimum share of offsets is considered "long",
141145 + * aka out of cache.
141146 + * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
141147 + * This function will try to mitigate main memory latency through the use of prefetching */
141148 +static size_t
141149 +ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
141150 +                             void* dst, size_t maxDstSize,
141151 +                             const void* seqStart, size_t seqSize, int nbSeq,
141152 +                             const ZSTD_longOffset_e isLongOffset,
141153 +                             const int frame)
141154 +{
141155 +    DEBUGLOG(5, "ZSTD_decompressSequencesLong");
141156 +#if DYNAMIC_BMI2
141157 +    if (dctx->bmi2) {
141158 +        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
141159 +    }
141160 +#endif
141161 +  return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
141162 +}
141163 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
141167 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
141168 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
141169 +/* ZSTD_getLongOffsetsShare() :
141170 + * condition : offTable must be valid
141171 + * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
141172 + *           compared to maximum possible of (1<<OffFSELog) */
141173 +static unsigned
141174 +ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
141175 +{
141176 +    const void* ptr = offTable;
141177 +    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
141178 +    const ZSTD_seqSymbol* table = offTable + 1;
141179 +    U32 const max = 1 << tableLog;
141180 +    U32 u, total = 0;
141181 +    DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
141183 +    assert(max <= (1 << OffFSELog));  /* max not too large */
141184 +    for (u=0; u<max; u++) {
141185 +        if (table[u].nbAdditionalBits > 22) total += 1;
141186 +    }
141188 +    assert(tableLog <= OffFSELog);
141189 +    total <<= (OffFSELog - tableLog);  /* scale to OffFSELog */
141191 +    return total;
141192 +}
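The shift at the end normalizes the count to the full 1 << OffFSELog = 256 probability space, so callers can compare tables of different logs against fixed thresholds. That is where the minShare heuristics used further down come from:

    /* Worked example (OffFSELog = 8 in upstream zstd; sketch only):
     *   tableLog = 6, total = 3  ->  3 << (8 - 6) = 12 out of 256
     *   minShare = 7  ->  7/256  ~ 2.73%  (64-bit builds)
     *   minShare = 20 -> 20/256  ~ 7.81%  (32-bit builds) */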
141193 +#endif
141195 +size_t
141196 +ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
141197 +                              void* dst, size_t dstCapacity,
141198 +                        const void* src, size_t srcSize, const int frame)
141199 +{   /* blockType == blockCompressed */
141200 +    const BYTE* ip = (const BYTE*)src;
141201 +    /* isLongOffset must be true if there are long offsets.
141202 +     * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
141203 +     * We don't expect that to be the case in 64-bit mode.
141204 +     * In block mode, window size is not known, so we have to be conservative.
141205 +     * (note: but it could be evaluated from current-lowLimit)
141206 +     */
141207 +    ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
141208 +    DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
141210 +    RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
141212 +    /* Decode literals section */
141213 +    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
141214 +        DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
141215 +        if (ZSTD_isError(litCSize)) return litCSize;
141216 +        ip += litCSize;
141217 +        srcSize -= litCSize;
141218 +    }
141220 +    /* Build Decoding Tables */
141221 +    {
141222 +        /* These macros control at build-time which decompressor implementation
141223 +         * we use. If neither is defined, we do some inspection and dispatch at
141224 +         * runtime.
141225 +         */
141226 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
141227 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
141228 +        int usePrefetchDecoder = dctx->ddictIsCold;
141229 +#endif
141230 +        int nbSeq;
141231 +        size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
141232 +        if (ZSTD_isError(seqHSize)) return seqHSize;
141233 +        ip += seqHSize;
141234 +        srcSize -= seqHSize;
141236 +        RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
141238 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
141239 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
141240 +        if ( !usePrefetchDecoder
141241 +          && (!frame || (dctx->fParams.windowSize > (1<<24)))
141242 +          && (nbSeq>ADVANCED_SEQS) ) {  /* could probably use a larger nbSeq limit */
141243 +            U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
141244 +            U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
141245 +            usePrefetchDecoder = (shareLongOffsets >= minShare);
141246 +        }
141247 +#endif
141249 +        dctx->ddictIsCold = 0;
141251 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
141252 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
141253 +        if (usePrefetchDecoder)
141254 +#endif
141255 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
141256 +            return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
141257 +#endif
141259 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
141260 +        /* else */
141261 +        return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
141262 +#endif
141263 +    }
141264 +}
141267 +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
141268 +{
141269 +    if (dst != dctx->previousDstEnd && dstSize > 0) {   /* not contiguous */
141270 +        dctx->dictEnd = dctx->previousDstEnd;
141271 +        dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
141272 +        dctx->prefixStart = dst;
141273 +        dctx->previousDstEnd = dst;
141274 +    }
141275 +}
141278 +size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
141279 +                            void* dst, size_t dstCapacity,
141280 +                      const void* src, size_t srcSize)
141281 +{
141282 +    size_t dSize;
141283 +    ZSTD_checkContinuity(dctx, dst, dstCapacity);
141284 +    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
141285 +    dctx->previousDstEnd = (char*)dst + dSize;
141286 +    return dSize;
141287 +}
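A hedged usage sketch of the block-level API: each call decompresses one raw compressed block, and ZSTD_checkContinuity() (invoked internally above) lets later blocks back-reference earlier output as long as dst advances contiguously. Context setup and window-size validation are elided; treat the helper below as a shape, not kernel code:

    /* decompress_blocks_sketch: n raw compressed blocks -> one buffer.
     * Assumes dctx was initialized elsewhere and dst stays contiguous. */
    static size_t decompress_blocks_sketch(ZSTD_DCtx *dctx,
                                           unsigned char *dst, size_t dstCapacity,
                                           const void *blocks[], const size_t sizes[],
                                           int n)
    {
        size_t written = 0;
        int i;
        for (i = 0; i < n; i++) {
            size_t const dSize = ZSTD_decompressBlock(dctx, dst + written,
                                                      dstCapacity - written,
                                                      blocks[i], sizes[i]);
            if (ZSTD_isError(dSize)) return dSize;  /* propagate the error code */
            written += dSize;
        }
        return written;
    }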
141288 diff --git a/lib/zstd/decompress/zstd_decompress_block.h b/lib/zstd/decompress/zstd_decompress_block.h
141289 new file mode 100644
141290 index 000000000000..e7f5f6689459
141291 --- /dev/null
141292 +++ b/lib/zstd/decompress/zstd_decompress_block.h
141293 @@ -0,0 +1,62 @@
141294 +/*
141295 + * Copyright (c) Yann Collet, Facebook, Inc.
141296 + * All rights reserved.
141298 + * This source code is licensed under both the BSD-style license (found in the
141299 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
141300 + * in the COPYING file in the root directory of this source tree).
141301 + * You may select, at your option, one of the above-listed licenses.
141302 + */
141305 +#ifndef ZSTD_DEC_BLOCK_H
141306 +#define ZSTD_DEC_BLOCK_H
141308 +/*-*******************************************************
141309 + *  Dependencies
141310 + *********************************************************/
141311 +#include "../common/zstd_deps.h"   /* size_t */
141312 +#include <linux/zstd.h>    /* DCtx, and some public functions */
141313 +#include "../common/zstd_internal.h"  /* blockProperties_t, and some public functions */
141314 +#include "zstd_decompress_internal.h"  /* ZSTD_seqSymbol */
141317 +/* ===   Prototypes   === */
141319 +/* note: prototypes already published within `zstd.h` :
141320 + * ZSTD_decompressBlock()
141321 + */
141323 +/* note: prototypes already published within `zstd_internal.h` :
141324 + * ZSTD_getcBlockSize()
141325 + * ZSTD_decodeSeqHeaders()
141326 + */
141329 +/* ZSTD_decompressBlock_internal() :
141330 + * decompress block, starting at `src`,
141331 + * into destination buffer `dst`.
141332 + * @return : decompressed block size,
141333 + *           or an error code (which can be tested using ZSTD_isError())
141334 + */
141335 +size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
141336 +                               void* dst, size_t dstCapacity,
141337 +                         const void* src, size_t srcSize, const int frame);
141339 +/* ZSTD_buildFSETable() :
141340 + * generate FSE decoding table for one symbol (ll, ml or off)
141341 + * this function must be called with valid parameters only
141342 + * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
141343 + * in which case it cannot fail.
141344 + * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is
141345 + * defined in zstd_decompress_internal.h.
141346 + * Internal use only.
141347 + */
141348 +void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
141349 +             const short* normalizedCounter, unsigned maxSymbolValue,
141350 +             const U32* baseValue, const U32* nbAdditionalBits,
141351 +                   unsigned tableLog, void* wksp, size_t wkspSize,
141352 +                   int bmi2);
141355 +#endif /* ZSTD_DEC_BLOCK_H */
141356 diff --git a/lib/zstd/decompress/zstd_decompress_internal.h b/lib/zstd/decompress/zstd_decompress_internal.h
141357 new file mode 100644
141358 index 000000000000..4b9052f68755
141359 --- /dev/null
141360 +++ b/lib/zstd/decompress/zstd_decompress_internal.h
141361 @@ -0,0 +1,202 @@
141362 +/*
141363 + * Copyright (c) Yann Collet, Facebook, Inc.
141364 + * All rights reserved.
141366 + * This source code is licensed under both the BSD-style license (found in the
141367 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
141368 + * in the COPYING file in the root directory of this source tree).
141369 + * You may select, at your option, one of the above-listed licenses.
141370 + */
141373 +/* zstd_decompress_internal:
141374 + * objects and definitions shared within lib/decompress modules */
141376 + #ifndef ZSTD_DECOMPRESS_INTERNAL_H
141377 + #define ZSTD_DECOMPRESS_INTERNAL_H
141380 +/*-*******************************************************
141381 + *  Dependencies
141382 + *********************************************************/
141383 +#include "../common/mem.h"             /* BYTE, U16, U32 */
141384 +#include "../common/zstd_internal.h"   /* ZSTD_seqSymbol */
141388 +/*-*******************************************************
141389 + *  Constants
141390 + *********************************************************/
141391 +static UNUSED_ATTR const U32 LL_base[MaxLL+1] = {
141392 +                 0,    1,    2,     3,     4,     5,     6,      7,
141393 +                 8,    9,   10,    11,    12,    13,    14,     15,
141394 +                16,   18,   20,    22,    24,    28,    32,     40,
141395 +                48,   64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
141396 +                0x2000, 0x4000, 0x8000, 0x10000 };
141398 +static UNUSED_ATTR const U32 OF_base[MaxOff+1] = {
141399 +                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
141400 +                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
141401 +                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
141402 +                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
141404 +static UNUSED_ATTR const U32 OF_bits[MaxOff+1] = {
141405 +                     0,  1,  2,  3,  4,  5,  6,  7,
141406 +                     8,  9, 10, 11, 12, 13, 14, 15,
141407 +                    16, 17, 18, 19, 20, 21, 22, 23,
141408 +                    24, 25, 26, 27, 28, 29, 30, 31 };
141410 +static UNUSED_ATTR const U32 ML_base[MaxML+1] = {
141411 +                     3,  4,  5,    6,     7,     8,     9,    10,
141412 +                    11, 12, 13,   14,    15,    16,    17,    18,
141413 +                    19, 20, 21,   22,    23,    24,    25,    26,
141414 +                    27, 28, 29,   30,    31,    32,    33,    34,
141415 +                    35, 37, 39,   41,    43,    47,    51,    59,
141416 +                    67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
141417 +                    0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
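Each table entry decodes as baseValue plus nbAdditionalBits raw bits from the stream, so a base array and its bits array together span every representable value. A worked sketch of the reconstruction:

    /* decode_from_base: value = baseValue + next nbBits raw stream bits.
     * Example: OF_base[8] = 0xFD with OF_bits[8] = 8 covers 253..508. */
    static unsigned decode_from_base(unsigned baseValue, unsigned nbBits, unsigned rawBits)
    {
        unsigned mask = nbBits ? ((1u << nbBits) - 1) : 0;
        return baseValue + (rawBits & mask);
    }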
141420 +/*-*******************************************************
141421 + *  Decompression types
141422 + *********************************************************/
141423 + typedef struct {
141424 +     U32 fastMode;
141425 +     U32 tableLog;
141426 + } ZSTD_seqSymbol_header;
141428 + typedef struct {
141429 +     U16  nextState;
141430 +     BYTE nbAdditionalBits;
141431 +     BYTE nbBits;
141432 +     U32  baseValue;
141433 + } ZSTD_seqSymbol;
141435 + #define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
141437 +#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
141438 +#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
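With the standard logs from upstream zstd (LLFSELog = 9, OffFSELog = 8, MLFSELog = 9) and the 8-byte ZSTD_seqSymbol defined above, the reserved tables work out as follows (worked check; log values assumed from upstream):

    /* SEQSYMBOL_TABLE_SIZE(log) = 1 header entry + (1 << log) entries:
     *   LLTable: 1 + (1 << 9) = 513 entries * 8 B = 4104 bytes
     *   OFTable: 1 + (1 << 8) = 257 entries * 8 B = 2056 bytes
     *   MLTable: 1 + (1 << 9) = 513 entries * 8 B = 4104 bytes */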
141440 +typedef struct {
141441 +    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];    /* Note : Space reserved for FSE Tables */
141442 +    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];   /* is also used as temporary workspace while building hufTable during DDict creation */
141443 +    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];    /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
141444 +    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
141445 +    U32 rep[ZSTD_REP_NUM];
141446 +    U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
141447 +} ZSTD_entropyDTables_t;
141449 +typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
141450 +               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
141451 +               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
141452 +               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
141454 +typedef enum { zdss_init=0, zdss_loadHeader,
141455 +               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
141457 +typedef enum {
141458 +    ZSTD_use_indefinitely = -1,  /* Use the dictionary indefinitely */
141459 +    ZSTD_dont_use = 0,           /* Do not use the dictionary (if one exists free it) */
141460 +    ZSTD_use_once = 1            /* Use the dictionary once and set to ZSTD_dont_use */
141461 +} ZSTD_dictUses_e;
141463 +/* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */
141464 +typedef struct {
141465 +    const ZSTD_DDict** ddictPtrTable;
141466 +    size_t ddictPtrTableSize;
141467 +    size_t ddictPtrCount;
141468 +} ZSTD_DDictHashSet;
141470 +struct ZSTD_DCtx_s
141471 +{
141472 +    const ZSTD_seqSymbol* LLTptr;
141473 +    const ZSTD_seqSymbol* MLTptr;
141474 +    const ZSTD_seqSymbol* OFTptr;
141475 +    const HUF_DTable* HUFptr;
141476 +    ZSTD_entropyDTables_t entropy;
141477 +    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];   /* space needed when building huffman tables */
141478 +    const void* previousDstEnd;   /* detect continuity */
141479 +    const void* prefixStart;      /* start of current segment */
141480 +    const void* virtualStart;     /* virtual start of previous segment if it was just before current one */
141481 +    const void* dictEnd;          /* end of previous segment */
141482 +    size_t expected;
141483 +    ZSTD_frameHeader fParams;
141484 +    U64 processedCSize;
141485 +    U64 decodedSize;
141486 +    blockType_e bType;            /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
141487 +    ZSTD_dStage stage;
141488 +    U32 litEntropy;
141489 +    U32 fseEntropy;
141490 +    struct xxh64_state xxhState;
141491 +    size_t headerSize;
141492 +    ZSTD_format_e format;
141493 +    ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum;   /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */
141494 +    U32 validateChecksum;         /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */
141495 +    const BYTE* litPtr;
141496 +    ZSTD_customMem customMem;
141497 +    size_t litSize;
141498 +    size_t rleSize;
141499 +    size_t staticSize;
141500 +    int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
141502 +    /* dictionary */
141503 +    ZSTD_DDict* ddictLocal;
141504 +    const ZSTD_DDict* ddict;     /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
141505 +    U32 dictID;
141506 +    int ddictIsCold;             /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
141507 +    ZSTD_dictUses_e dictUses;
141508 +    ZSTD_DDictHashSet* ddictSet;                    /* Hash set for multiple ddicts */
141509 +    ZSTD_refMultipleDDicts_e refMultipleDDicts;     /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
141511 +    /* streaming */
141512 +    ZSTD_dStreamStage streamStage;
141513 +    char*  inBuff;
141514 +    size_t inBuffSize;
141515 +    size_t inPos;
141516 +    size_t maxWindowSize;
141517 +    char*  outBuff;
141518 +    size_t outBuffSize;
141519 +    size_t outStart;
141520 +    size_t outEnd;
141521 +    size_t lhSize;
141522 +    void* legacyContext;
141523 +    U32 previousLegacyVersion;
141524 +    U32 legacyVersion;
141525 +    U32 hostageByte;
141526 +    int noForwardProgress;
141527 +    ZSTD_bufferMode_e outBufferMode;
141528 +    ZSTD_outBuffer expectedOutBuffer;
141530 +    /* workspace */
141531 +    BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
141532 +    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
141534 +    size_t oversizedDuration;
141536 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
141537 +    void const* dictContentBeginForFuzzing;
141538 +    void const* dictContentEndForFuzzing;
141539 +#endif
141541 +    /* Tracing */
141542 +};  /* typedef'd to ZSTD_DCtx within "zstd.h" */
141545 +/*-*******************************************************
141546 + *  Shared internal functions
141547 + *********************************************************/
141549 +/*! ZSTD_loadDEntropy() :
141550 + *  dict : must point at beginning of a valid zstd dictionary.
141551 + * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */
141552 +size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
141553 +                   const void* const dict, size_t const dictSize);
141555 +/*! ZSTD_checkContinuity() :
141556 + *  check if next `dst` follows previous position, where decompression ended.
141557 + *  If yes, do nothing (continue on current segment).
141558 + *  If not, classify previous segment as "external dictionary", and start a new segment.
141559 + *  This function cannot fail. */
141560 +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize);
141563 +#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
141564 diff --git a/lib/zstd/decompress_sources.h b/lib/zstd/decompress_sources.h
141565 new file mode 100644
141566 index 000000000000..f35bef03eb22
141567 --- /dev/null
141568 +++ b/lib/zstd/decompress_sources.h
141569 @@ -0,0 +1,28 @@
141570 +/* SPDX-License-Identifier: GPL-2.0-only */
141571 +/*
141572 + * Copyright (c) Facebook, Inc.
141573 + * All rights reserved.
141575 + * This source code is licensed under both the BSD-style license (found in the
141576 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
141577 + * in the COPYING file in the root directory of this source tree).
141578 + * You may select, at your option, one of the above-listed licenses.
141579 + */
141581 +/*
141582 + * This file includes every .c file needed for decompression.
141583 + * It is used by lib/decompress_unzstd.c to include the decompression
141584 + * source into the translation-unit, so it can be used for kernel
141585 + * decompression.
141586 + */
141588 +#include "common/debug.c"
141589 +#include "common/entropy_common.c"
141590 +#include "common/error_private.c"
141591 +#include "common/fse_decompress.c"
141592 +#include "common/zstd_common.c"
141593 +#include "decompress/huf_decompress.c"
141594 +#include "decompress/zstd_ddict.c"
141595 +#include "decompress/zstd_decompress.c"
141596 +#include "decompress/zstd_decompress_block.c"
141597 +#include "zstd_decompress_module.c"
141598 diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c
141599 deleted file mode 100644
141600 index 2b0a643c32c4..000000000000
141601 --- a/lib/zstd/entropy_common.c
141602 +++ /dev/null
141603 @@ -1,243 +0,0 @@
141604 -/*
141605 - * Common functions of New Generation Entropy library
141606 - * Copyright (C) 2016, Yann Collet.
141608 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
141610 - * Redistribution and use in source and binary forms, with or without
141611 - * modification, are permitted provided that the following conditions are
141612 - * met:
141614 - *   * Redistributions of source code must retain the above copyright
141615 - * notice, this list of conditions and the following disclaimer.
141616 - *   * Redistributions in binary form must reproduce the above
141617 - * copyright notice, this list of conditions and the following disclaimer
141618 - * in the documentation and/or other materials provided with the
141619 - * distribution.
141621 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
141622 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
141623 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
141624 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
141625 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
141626 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
141627 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
141628 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
141629 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
141630 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
141631 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
141633 - * This program is free software; you can redistribute it and/or modify it under
141634 - * the terms of the GNU General Public License version 2 as published by the
141635 - * Free Software Foundation. This program is dual-licensed; you may select
141636 - * either version 2 of the GNU General Public License ("GPL") or BSD license
141637 - * ("BSD").
141639 - * You can contact the author at :
141640 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
141641 - */
141643 -/* *************************************
141644 -*  Dependencies
141645 -***************************************/
141646 -#include "error_private.h" /* ERR_*, ERROR */
141647 -#include "fse.h"
141648 -#include "huf.h"
141649 -#include "mem.h"
141651 -/*===   Version   ===*/
141652 -unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
141654 -/*===   Error Management   ===*/
141655 -unsigned FSE_isError(size_t code) { return ERR_isError(code); }
141657 -unsigned HUF_isError(size_t code) { return ERR_isError(code); }
141659 -/*-**************************************************************
141660 -*  FSE NCount encoding-decoding
141661 -****************************************************************/
141662 -size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize)
141664 -       const BYTE *const istart = (const BYTE *)headerBuffer;
141665 -       const BYTE *const iend = istart + hbSize;
141666 -       const BYTE *ip = istart;
141667 -       int nbBits;
141668 -       int remaining;
141669 -       int threshold;
141670 -       U32 bitStream;
141671 -       int bitCount;
141672 -       unsigned charnum = 0;
141673 -       int previous0 = 0;
141675 -       if (hbSize < 4)
141676 -               return ERROR(srcSize_wrong);
141677 -       bitStream = ZSTD_readLE32(ip);
141678 -       nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
141679 -       if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX)
141680 -               return ERROR(tableLog_tooLarge);
141681 -       bitStream >>= 4;
141682 -       bitCount = 4;
141683 -       *tableLogPtr = nbBits;
141684 -       remaining = (1 << nbBits) + 1;
141685 -       threshold = 1 << nbBits;
141686 -       nbBits++;
141688 -       while ((remaining > 1) & (charnum <= *maxSVPtr)) {
141689 -               if (previous0) {
141690 -                       unsigned n0 = charnum;
141691 -                       while ((bitStream & 0xFFFF) == 0xFFFF) {
141692 -                               n0 += 24;
141693 -                               if (ip < iend - 5) {
141694 -                                       ip += 2;
141695 -                                       bitStream = ZSTD_readLE32(ip) >> bitCount;
141696 -                               } else {
141697 -                                       bitStream >>= 16;
141698 -                                       bitCount += 16;
141699 -                               }
141700 -                       }
141701 -                       while ((bitStream & 3) == 3) {
141702 -                               n0 += 3;
141703 -                               bitStream >>= 2;
141704 -                               bitCount += 2;
141705 -                       }
141706 -                       n0 += bitStream & 3;
141707 -                       bitCount += 2;
141708 -                       if (n0 > *maxSVPtr)
141709 -                               return ERROR(maxSymbolValue_tooSmall);
141710 -                       while (charnum < n0)
141711 -                               normalizedCounter[charnum++] = 0;
141712 -                       if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
141713 -                               ip += bitCount >> 3;
141714 -                               bitCount &= 7;
141715 -                               bitStream = ZSTD_readLE32(ip) >> bitCount;
141716 -                       } else {
141717 -                               bitStream >>= 2;
141718 -                       }
141719 -               }
141720 -               {
141721 -                       int const max = (2 * threshold - 1) - remaining;
141722 -                       int count;
141724 -                       if ((bitStream & (threshold - 1)) < (U32)max) {
141725 -                               count = bitStream & (threshold - 1);
141726 -                               bitCount += nbBits - 1;
141727 -                       } else {
141728 -                               count = bitStream & (2 * threshold - 1);
141729 -                               if (count >= threshold)
141730 -                                       count -= max;
141731 -                               bitCount += nbBits;
141732 -                       }
141734 -                       count--;                                 /* extra accuracy */
141735 -                       remaining -= count < 0 ? -count : count; /* -1 means +1 */
141736 -                       normalizedCounter[charnum++] = (short)count;
141737 -                       previous0 = !count;
141738 -                       while (remaining < threshold) {
141739 -                               nbBits--;
141740 -                               threshold >>= 1;
141741 -                       }
141743 -                       if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
141744 -                               ip += bitCount >> 3;
141745 -                               bitCount &= 7;
141746 -                       } else {
141747 -                               bitCount -= (int)(8 * (iend - 4 - ip));
141748 -                               ip = iend - 4;
141749 -                       }
141750 -                       bitStream = ZSTD_readLE32(ip) >> (bitCount & 31);
141751 -               }
141752 -       } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
141753 -       if (remaining != 1)
141754 -               return ERROR(corruption_detected);
141755 -       if (bitCount > 32)
141756 -               return ERROR(corruption_detected);
141757 -       *maxSVPtr = charnum - 1;
141759 -       ip += (bitCount + 7) >> 3;
141760 -       return ip - istart;
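/* A worked example of the header parse above (an illustration, not part of
 * the patch): the low 4 bits of the first little-endian 32-bit word store
 * tableLog biased by FSE_MIN_TABLELOG (5). A first nibble of 0x7 therefore
 * gives nbBits = 7 + 5 = 12, threshold = 1 << 12 = 4096, and
 * remaining = 4096 + 1 -- the "+1 for extra accuracy" convention used by
 * the matching encoder in fse_compress.c. */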
141763 -/*! HUF_readStats() :
141764 -       Read compact Huffman tree, saved by HUF_writeCTable().
141765 -       `huffWeight` is destination buffer.
141766 -       `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
141767 -       @return : size read from `src`, or an error code.
141768 -       Note : Needed by HUF_readCTable() and HUF_readDTableX?().
141770 -size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
141772 -       U32 weightTotal;
141773 -       const BYTE *ip = (const BYTE *)src;
141774 -       size_t iSize;
141775 -       size_t oSize;
141777 -       if (!srcSize)
141778 -               return ERROR(srcSize_wrong);
141779 -       iSize = ip[0];
141780 -       /* memset(huffWeight, 0, hwSize);   */ /* is not necessary, even though some analyzers complain ... */
141782 -       if (iSize >= 128) { /* special header */
141783 -               oSize = iSize - 127;
141784 -               iSize = ((oSize + 1) / 2);
141785 -               if (iSize + 1 > srcSize)
141786 -                       return ERROR(srcSize_wrong);
141787 -               if (oSize >= hwSize)
141788 -                       return ERROR(corruption_detected);
141789 -               ip += 1;
141790 -               {
141791 -                       U32 n;
141792 -                       for (n = 0; n < oSize; n += 2) {
141793 -                               huffWeight[n] = ip[n / 2] >> 4;
141794 -                               huffWeight[n + 1] = ip[n / 2] & 15;
141795 -                       }
141796 -               }
141797 -       } else {                                                 /* header compressed with FSE (normal case) */
141798 -               if (iSize + 1 > srcSize)
141799 -                       return ERROR(srcSize_wrong);
141800 -               oSize = FSE_decompress_wksp(huffWeight, hwSize - 1, ip + 1, iSize, 6, workspace, workspaceSize); /* max (hwSize-1) values decoded, as last one is implied */
141801 -               if (FSE_isError(oSize))
141802 -                       return oSize;
141803 -       }
141805 -       /* collect weight stats */
141806 -       memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
141807 -       weightTotal = 0;
141808 -       {
141809 -               U32 n;
141810 -               for (n = 0; n < oSize; n++) {
141811 -                       if (huffWeight[n] >= HUF_TABLELOG_MAX)
141812 -                               return ERROR(corruption_detected);
141813 -                       rankStats[huffWeight[n]]++;
141814 -                       weightTotal += (1 << huffWeight[n]) >> 1;
141815 -               }
141816 -       }
141817 -       if (weightTotal == 0)
141818 -               return ERROR(corruption_detected);
141820 -       /* get last non-null symbol weight (implied, total must be 2^n) */
141821 -       {
141822 -               U32 const tableLog = BIT_highbit32(weightTotal) + 1;
141823 -               if (tableLog > HUF_TABLELOG_MAX)
141824 -                       return ERROR(corruption_detected);
141825 -               *tableLogPtr = tableLog;
141826 -               /* determine last weight */
141827 -               {
141828 -                       U32 const total = 1 << tableLog;
141829 -                       U32 const rest = total - weightTotal;
141830 -                       U32 const verif = 1 << BIT_highbit32(rest);
141831 -                       U32 const lastWeight = BIT_highbit32(rest) + 1;
141832 -                       if (verif != rest)
141833 -                               return ERROR(corruption_detected); /* last value must be a clean power of 2 */
141834 -                       huffWeight[oSize] = (BYTE)lastWeight;
141835 -                       rankStats[lastWeight]++;
141836 -               }
141837 -       }
141839 -       /* check tree construction validity */
141840 -       if ((rankStats[1] < 2) || (rankStats[1] & 1))
141841 -               return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
141843 -       /* results */
141844 -       *nbSymbolsPtr = (U32)(oSize + 1);
141845 -       return iSize + 1;
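/* A worked example of the implied-last-weight check above (illustration
 * only): suppose the decoded weights are {4,4,3,3,2,2,1,1}. Then
 * weightTotal = 8+8+4+4+2+2+1+1 = 30, tableLog = BIT_highbit32(30)+1 = 5,
 * total = 32, and rest = 32-30 = 2, a clean power of 2, so
 * lastWeight = BIT_highbit32(2)+1 = 2 and the final weights sum to 2^5.
 * rankStats[1] ends up even and >= 2, satisfying the tree-validity check. */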
141847 diff --git a/lib/zstd/error_private.h b/lib/zstd/error_private.h
141848 deleted file mode 100644
141849 index 1a60b31f706c..000000000000
141850 --- a/lib/zstd/error_private.h
141851 +++ /dev/null
141852 @@ -1,53 +0,0 @@
141854 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
141855 - * All rights reserved.
141857 - * This source code is licensed under the BSD-style license found in the
141858 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
141859 - * An additional grant of patent rights can be found in the PATENTS file in the
141860 - * same directory.
141862 - * This program is free software; you can redistribute it and/or modify it under
141863 - * the terms of the GNU General Public License version 2 as published by the
141864 - * Free Software Foundation. This program is dual-licensed; you may select
141865 - * either version 2 of the GNU General Public License ("GPL") or BSD license
141866 - * ("BSD").
141867 - */
141869 -/* Note : this module is expected to remain private, do not expose it */
141871 -#ifndef ERROR_H_MODULE
141872 -#define ERROR_H_MODULE
141874 -/* ****************************************
141875 -*  Dependencies
141876 -******************************************/
141877 -#include <linux/types.h> /* size_t */
141878 -#include <linux/zstd.h>  /* enum list */
141880 -/* ****************************************
141881 -*  Compiler-specific
141882 -******************************************/
141883 -#define ERR_STATIC static __attribute__((unused))
141885 -/*-****************************************
141886 -*  Customization (error_public.h)
141887 -******************************************/
141888 -typedef ZSTD_ErrorCode ERR_enum;
141889 -#define PREFIX(name) ZSTD_error_##name
141891 -/*-****************************************
141892 -*  Error codes handling
141893 -******************************************/
141894 -#define ERROR(name) ((size_t)-PREFIX(name))
141896 -ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
141898 -ERR_STATIC ERR_enum ERR_getErrorCode(size_t code)
141900 -       if (!ERR_isError(code))
141901 -               return (ERR_enum)0;
141902 -       return (ERR_enum)(0 - code);
141905 -#endif /* ERROR_H_MODULE */
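/* A minimal usage sketch for the helpers above (not part of the patch):
 * errors travel as "negative" size_t values, so a single unsigned compare
 * both classifies and decodes them. */
static void error_sketch(void)
{
	size_t const ret = ERROR(srcSize_wrong); /* (size_t)-ZSTD_error_srcSize_wrong */
	if (ERR_isError(ret)) {
		ERR_enum const code = ERR_getErrorCode(ret);
		/* code == ZSTD_error_srcSize_wrong; map it to an errno, log it, ... */
		(void)code;
	}
}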
141906 diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h
141907 deleted file mode 100644
141908 index 7460ab04b191..000000000000
141909 --- a/lib/zstd/fse.h
141910 +++ /dev/null
141911 @@ -1,575 +0,0 @@
141913 - * FSE : Finite State Entropy codec
141914 - * Public Prototypes declaration
141915 - * Copyright (C) 2013-2016, Yann Collet.
141917 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
141919 - * Redistribution and use in source and binary forms, with or without
141920 - * modification, are permitted provided that the following conditions are
141921 - * met:
141923 - *   * Redistributions of source code must retain the above copyright
141924 - * notice, this list of conditions and the following disclaimer.
141925 - *   * Redistributions in binary form must reproduce the above
141926 - * copyright notice, this list of conditions and the following disclaimer
141927 - * in the documentation and/or other materials provided with the
141928 - * distribution.
141930 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
141931 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
141932 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
141933 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
141934 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
141935 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
141936 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
141937 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
141938 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
141939 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
141940 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
141942 - * This program is free software; you can redistribute it and/or modify it under
141943 - * the terms of the GNU General Public License version 2 as published by the
141944 - * Free Software Foundation. This program is dual-licensed; you may select
141945 - * either version 2 of the GNU General Public License ("GPL") or BSD license
141946 - * ("BSD").
141948 - * You can contact the author at :
141949 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
141950 - */
141951 -#ifndef FSE_H
141952 -#define FSE_H
141954 -/*-*****************************************
141955 -*  Dependencies
141956 -******************************************/
141957 -#include <linux/types.h> /* size_t, ptrdiff_t */
141959 -/*-*****************************************
141960 -*  FSE_PUBLIC_API : control library symbols visibility
141961 -******************************************/
141962 -#define FSE_PUBLIC_API
141964 -/*------   Version   ------*/
141965 -#define FSE_VERSION_MAJOR 0
141966 -#define FSE_VERSION_MINOR 9
141967 -#define FSE_VERSION_RELEASE 0
141969 -#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
141970 -#define FSE_QUOTE(str) #str
141971 -#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
141972 -#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
141974 -#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR * 100 * 100 + FSE_VERSION_MINOR * 100 + FSE_VERSION_RELEASE)
141975 -FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
141977 -/*-*****************************************
141978 -*  Tool functions
141979 -******************************************/
141980 -FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
141982 -/* Error Management */
141983 -FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
141985 -/*-*****************************************
141986 -*  FSE detailed API
141987 -******************************************/
141989 -FSE_compress() does the following:
141990 -1. count symbol occurrence from source[] into table count[]
141991 -2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
141992 -3. save normalized counters to memory buffer using writeNCount()
141993 -4. build encoding table 'CTable' from normalized counters
141994 -5. encode the data stream using encoding table 'CTable'
141996 -FSE_decompress() does the following:
141997 -1. read normalized counters with readNCount()
141998 -2. build decoding table 'DTable' from normalized counters
141999 -3. decode the data stream using decoding table 'DTable'
142001 -The following API allows targeting specific sub-functions for advanced tasks.
142002 -For example, it's possible to compress several blocks using the same 'CTable',
142003 -or to save and provide the normalized distribution using an external method.
142006 -/* *** COMPRESSION *** */
142007 -/*! FSE_optimalTableLog():
142008 -       dynamically downsize 'tableLog' when conditions are met.
142009 -       It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
142010 -       @return : recommended tableLog (necessarily <= 'maxTableLog') */
142011 -FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
142013 -/*! FSE_normalizeCount():
142014 -       normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
142015 -       'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
142016 -       @return : tableLog,
142017 -                         or an errorCode, which can be tested using FSE_isError() */
142018 -FSE_PUBLIC_API size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue);
142020 -/*! FSE_NCountWriteBound():
142021 -       Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
142022 -       Typically useful for allocation purposes. */
142023 -FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
142025 -/*! FSE_writeNCount():
142026 -       Compactly save 'normalizedCounter' into 'buffer'.
142027 -       @return : size of the compressed table,
142028 -                         or an errorCode, which can be tested using FSE_isError(). */
142029 -FSE_PUBLIC_API size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
142031 -/*! Constructor and Destructor of FSE_CTable.
142032 -       Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
142033 -typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
142035 -/*! FSE_compress_usingCTable():
142036 -       Compress `src` using `ct` into `dst` which must be already allocated.
142037 -       @return : size of compressed data (<= `dstCapacity`),
142038 -                         or 0 if compressed data could not fit into `dst`,
142039 -                         or an errorCode, which can be tested using FSE_isError() */
142040 -FSE_PUBLIC_API size_t FSE_compress_usingCTable(void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct);
142043 -Tutorial :
142044 -----------
142045 -The first step is to count all symbols. FSE_count() does this job very fast.
142046 -Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
142047 -'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
142048 -maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
142049 -FSE_count() will return the number of occurrences of the most frequent symbol.
142050 -This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
142051 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
142053 -The next step is to normalize the frequencies.
142054 -FSE_normalizeCount() will ensure that the sum of frequencies is == 2^'tableLog'.
142055 -It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
142056 -You can use 'tableLog'==0 to mean "use default tableLog value".
142057 -If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
142058 -which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
142060 -The result of FSE_normalizeCount() will be saved into a table,
142061 -called 'normalizedCounter', which is a table of signed short.
142062 -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
142063 -The return value is tableLog if everything proceeded as expected.
142064 -It is 0 if there is a single symbol within the distribution.
142065 -If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
142067 -'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
142068 -'buffer' must be already allocated.
142069 -For guaranteed success, buffer size must be at least FSE_NCountWriteBound().
142070 -The result of the function is the number of bytes written into 'buffer'.
142071 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
142073 -'normalizedCounter' can then be used to create the compression table 'CTable'.
142074 -The space required by 'CTable' must be already allocated, using FSE_createCTable().
142075 -You can then use FSE_buildCTable() to fill 'CTable'.
142076 -If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
142078 -'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
142079 -Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'.
142080 -The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
142081 -If it returns '0', compressed data could not fit into 'dst'.
142082 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
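/* The tutorial above composes into a short pipeline. A minimal sketch,
 * assuming byte symbols (maxSymbolValue <= 255) and ignoring the large
 * stack footprint for clarity. All names used below are declared elsewhere
 * in this header (FSE_CTABLE_SIZE_U32 and FSE_MAX_TABLELOG further down),
 * and every step can return an error testable with FSE_isError(). */
static size_t fse_compress_sketch(void *dst, size_t dstCapacity,
				  const void *src, size_t srcSize)
{
	unsigned count[256];
	short norm[256];
	unsigned maxSymbolValue = 255;
	unsigned tableLog;
	FSE_CTable ct[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, 255)];
	U32 wksp[2048]; /* scratch for FSE_buildCTable_wksp() */
	BYTE *const op = (BYTE *)dst;
	size_t hSize, cSize;

	FSE_count_simple(count, &maxSymbolValue, src, srcSize);            /* 1. histogram */
	tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);        /* 0 = default */
	FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);/* 2. normalize */
	hSize = FSE_writeNCount(op, dstCapacity, norm, maxSymbolValue, tableLog); /* 3. header */
	if (FSE_isError(hSize))
		return hSize;
	FSE_buildCTable_wksp(ct, norm, maxSymbolValue, tableLog, wksp, sizeof(wksp)); /* 4. table */
	cSize = FSE_compress_usingCTable(op + hSize, dstCapacity - hSize, src, srcSize, ct); /* 5. encode */
	if (FSE_isError(cSize))
		return cSize;
	return hSize + cSize;
}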
142085 -/* *** DECOMPRESSION *** */
142087 -/*! FSE_readNCount():
142088 -       Read compactly saved 'normalizedCounter' from 'rBuffer'.
142089 -       @return : size read from 'rBuffer',
142090 -                         or an errorCode, which can be tested using FSE_isError().
142091 -                         maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
142092 -FSE_PUBLIC_API size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize);
142094 -/*! Constructor and Destructor of FSE_DTable.
142095 -       Note that its size depends on 'tableLog' */
142096 -typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
142098 -/*! FSE_buildDTable():
142099 -       Builds 'dt', which must be already allocated, using FSE_createDTable().
142100 -       return : 0, or an errorCode, which can be tested using FSE_isError() */
142101 -FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize);
142103 -/*! FSE_decompress_usingDTable():
142104 -       Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
142105 -       into `dst` which must be already allocated.
142106 -       @return : size of regenerated data (necessarily <= `dstCapacity`),
142107 -                         or an errorCode, which can be tested using FSE_isError() */
142108 -FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt);
142111 -Tutorial :
142112 -----------
142113 -(Note : these functions only decompress FSE-compressed blocks.
142114 - If block is uncompressed, use memcpy() instead
142115 - If block is a single repeated byte, use memset() instead )
142117 -The first step is to obtain the normalized frequencies of symbols.
142118 -This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
142119 -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
142120 -In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
142121 -or size the table to handle worst case situations (typically 256).
142122 -FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
142123 -The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
142124 -Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
142125 -If there is an error, the function will return an error code, which can be tested using FSE_isError().
142127 -The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
142128 -This is performed by the function FSE_buildDTable().
142129 -The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
142130 -If there is an error, the function will return an error code, which can be tested using FSE_isError().
142132 -`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
142133 -`cSrcSize` must be strictly correct, otherwise decompression will fail.
142134 -FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
142135 -If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
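/* The decompression side of the same pipeline, as a minimal sketch under
 * the same assumptions (byte symbols, stack footprint ignored);
 * FSE_DTABLE_SIZE_U32 and FSE_MAX_TABLELOG are defined further down in
 * this header. */
static size_t fse_decompress_sketch(void *dst, size_t dstCapacity,
				    const void *cSrc, size_t cSrcSize)
{
	short norm[256];
	unsigned maxSymbolValue = 255;
	unsigned tableLog;
	FSE_DTable dt[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
	U32 wksp[2048]; /* scratch for FSE_buildDTable_wksp() */
	const BYTE *const ip = (const BYTE *)cSrc;
	size_t hSize;

	hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog, ip, cSrcSize);
	if (FSE_isError(hSize))
		return hSize;
	FSE_buildDTable_wksp(dt, norm, maxSymbolValue, tableLog, wksp, sizeof(wksp));
	/* cSrcSize must be exact, or decompression will fail */
	return FSE_decompress_usingDTable(dst, dstCapacity, ip + hSize, cSrcSize - hSize, dt);
}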
142138 -/* *** Dependency *** */
142139 -#include "bitstream.h"
142141 -/* *****************************************
142142 -*  Static allocation
142143 -*******************************************/
142144 -/* FSE buffer bounds */
142145 -#define FSE_NCOUNTBOUND 512
142146 -#define FSE_BLOCKBOUND(size) (size + (size >> 7))
142147 -#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
142149 -/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using the macros below */
142150 -#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1 << (maxTableLog - 1)) + ((maxSymbolValue + 1) * 2))
142151 -#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1 << maxTableLog))
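/* For illustration: the macros above let both tables live in static (or
 * automatic) storage; the element counts below follow directly from the
 * macro definitions for tableLog == 12 and maxSymbolValue == 255. */
static FSE_CTable ct_static[FSE_CTABLE_SIZE_U32(12, 255)]; /* 1 + 2048 + 512 = 2561 U32 */
static FSE_DTable dt_static[FSE_DTABLE_SIZE_U32(12)];      /* 1 + 4096 = 4097 U32 */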
142153 -/* *****************************************
142154 -*  FSE advanced API
142155 -*******************************************/
142156 -/* FSE_count_wksp() :
142157 - * Same as FSE_count(), but using an externally provided scratch buffer.
142158 - * `workSpace` must be a table of at least `1024` unsigned
142159 - */
142160 -size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace);
142162 -/* FSE_countFast_wksp() :
142163 - * Same as FSE_countFast(), but using an externally provided scratch buffer.
142164 - * `workSpace` must be a table of minimum `1024` unsigned
142165 - */
142166 -size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *workSpace);
142168 -/*! FSE_count_simple
142169 - * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
142170 - * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
142172 -size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize);
142174 -unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
142175 -/**< same as FSE_optimalTableLog(), which uses `minus==2` */
142177 -size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits);
142178 -/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
142180 -size_t FSE_buildCTable_rle(FSE_CTable *ct, unsigned char symbolValue);
142181 -/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
142183 -/* FSE_buildCTable_wksp() :
142184 - * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
142185 - * `wkspSize` must be >= `(1<<tableLog)`.
142186 - */
142187 -size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize);
142189 -size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits);
142190 -/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
142192 -size_t FSE_buildDTable_rle(FSE_DTable *dt, unsigned char symbolValue);
142193 -/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
142195 -size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize);
142196 -/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
142198 -/* *****************************************
142199 -*  FSE symbol compression API
142200 -*******************************************/
142202 -   This API consists of small unitary functions, which highly benefit from being inlined.
142203 -   Hence their bodies are included in the next section.
142205 -typedef struct {
142206 -       ptrdiff_t value;
142207 -       const void *stateTable;
142208 -       const void *symbolTT;
142209 -       unsigned stateLog;
142210 -} FSE_CState_t;
142212 -static void FSE_initCState(FSE_CState_t *CStatePtr, const FSE_CTable *ct);
142214 -static void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *CStatePtr, unsigned symbol);
142216 -static void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *CStatePtr);
142218 -/**<
142219 -These functions are inner components of FSE_compress_usingCTable().
142220 -They allow the creation of custom streams, mixing multiple tables and bit sources.
142222 -A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
142223 -So the first symbol you will encode is the last you will decode, like a LIFO stack.
142225 -You will need a few variables to track your CStream. They are :
142227 -FSE_CTable    ct;         // Provided by FSE_buildCTable()
142228 -BIT_CStream_t bitStream;  // bitStream tracking structure
142229 -FSE_CState_t  state;      // State tracking structure (can have several)
142232 -The first thing to do is to init bitStream and state.
142233 -       size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
142234 -       FSE_initCState(&state, ct);
142236 -Note that BIT_initCStream() can produce an error code, so its result should be tested using FSE_isError().
142237 -You can then encode your input data, byte after byte.
142238 -FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
142239 -Remember decoding will be done in reverse direction.
142240 -       FSE_encodeSymbol(&bitStream, &state, symbol);
142242 -At any time, you can also add any bit sequence.
142243 -Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
142244 -       BIT_addBits(&bitStream, bitField, nbBits);
142246 -The above methods don't commit data to memory; they just store it into a local register, for speed.
142247 -Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
142248 -Writing data to memory is a manual operation, performed by the flushBits function.
142249 -       BIT_flushBits(&bitStream);
142251 -Your last FSE encoding operation shall be to flush your last state value(s).
142252 -       FSE_flushCState(&bitStream, &state);
142254 -Finally, you must close the bitStream.
142255 -The function returns the size of CStream in bytes.
142256 -If data couldn't fit into dstBuffer, it will return 0 (== not compressible)
142257 -If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
142258 -       size_t size = BIT_closeCStream(&bitStream);
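/* The steps above, folded into one loop (a sketch, assuming `ct` was built
 * beforehand): symbols are fed last-to-first because decoding runs in the
 * opposite direction. */
static size_t encode_sketch(void *dst, size_t maxDstSize,
			    const BYTE *src, size_t srcSize, const FSE_CTable *ct)
{
	BIT_CStream_t bitStream;
	FSE_CState_t state;
	size_t n;

	if (FSE_isError(BIT_initCStream(&bitStream, dst, maxDstSize)))
		return 0;
	FSE_initCState(&state, ct);
	for (n = srcSize; n > 0; n--) {
		FSE_encodeSymbol(&bitStream, &state, src[n - 1]); /* <= tableLog bits */
		BIT_flushBits(&bitStream); /* commit the local register to memory */
	}
	FSE_flushCState(&bitStream, &state); /* flush the final state value */
	return BIT_closeCStream(&bitStream); /* 0 means "not compressible" */
}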
142261 -/* *****************************************
142262 -*  FSE symbol decompression API
142263 -*******************************************/
142264 -typedef struct {
142265 -       size_t state;
142266 -       const void *table; /* precise table may vary, depending on U16 */
142267 -} FSE_DState_t;
142269 -static void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt);
142271 -static unsigned char FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
142273 -static unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr);
142275 -/**<
142276 -Let's now decompose FSE_decompress_usingDTable() into its unitary components.
142277 -You will decode FSE-encoded symbols from the bitStream,
142278 -and also any other bitFields you put in, **in reverse order**.
142280 -You will need a few variables to track your bitStream. They are :
142282 -BIT_DStream_t DStream;    // Stream context
142283 -FSE_DState_t  DState;     // State context. Multiple ones are possible
142284 -FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
142286 -The first thing to do is to init the bitStream.
142287 -       errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
142289 -You should then retrieve your initial state(s)
142290 -(in reverse flushing order if you have several) :
142291 -       FSE_initDState(&DState, &DStream, DTablePtr);
142293 -You can then decode your data, symbol after symbol.
142294 -For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
142295 -Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
142296 -       unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
142298 -You can retrieve any bitfield you may have stored into the bitStream (in reverse order)
142299 -Note : maximum allowed nbBits is 25, for 32-bits compatibility
142300 -       size_t bitField = BIT_readBits(&DStream, nbBits);
142302 -All the above operations only read from the local register (whose size depends on size_t).
142303 -Refueling the register from memory is manually performed by the reload method.
142304 -       endSignal = BIT_reloadDStream(&DStream);
142306 -BIT_reloadDStream() result tells if there is still more data to read from DStream.
142307 -BIT_DStream_unfinished : there is still some data left in the DStream.
142308 -BIT_DStream_endOfBuffer : DStream reached the end of the buffer. Its container may no longer be completely filled.
142309 -BIT_DStream_completed : DStream reached its exact end, corresponding in general to decompression completed.
142310 -BIT_DStream_tooFar : DStream went too far. Decompression result is corrupted.
142312 -When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
142313 -to properly detect the exact end of stream.
142314 -After each decoded symbol, check if DStream is fully consumed using this simple test :
142315 -       BIT_reloadDStream(&DStream) >= BIT_DStream_completed
142317 -When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
142318 -Checking if DStream has reached its end is performed by :
142319 -       BIT_endOfDStream(&DStream);
142320 -Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
142321 -       FSE_endOfDState(&DState);
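/* And the matching decode loop, as a sketch: symbols come back in reverse
 * order, and both the bitstream and the state must be checked to confirm a
 * clean end. */
static size_t decode_sketch(BYTE *dst, size_t dstCapacity,
			    const void *cSrc, size_t cSrcSize, const FSE_DTable *dt)
{
	BIT_DStream_t DStream;
	FSE_DState_t DState;
	size_t n = 0;

	if (FSE_isError(BIT_initDStream(&DStream, cSrc, cSrcSize)))
		return 0;
	FSE_initDState(&DState, &DStream, dt);
	while (n < dstCapacity) {
		dst[n++] = FSE_decodeSymbol(&DState, &DStream);
		if (BIT_reloadDStream(&DStream) > BIT_DStream_completed)
			return 0; /* BIT_DStream_tooFar: corrupted input */
		if (BIT_endOfDStream(&DStream) && FSE_endOfDState(&DState))
			return n; /* bitstream and state fully consumed */
	}
	return 0; /* dst too small */
}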
142324 -/* *****************************************
142325 -*  FSE unsafe API
142326 -*******************************************/
142327 -static unsigned char FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
142328 -/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
142330 -/* *****************************************
142331 -*  Implementation of inlined functions
142332 -*******************************************/
142333 -typedef struct {
142334 -       int deltaFindState;
142335 -       U32 deltaNbBits;
142336 -} FSE_symbolCompressionTransform; /* total 8 bytes */
142338 -ZSTD_STATIC void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct)
142340 -       const void *ptr = ct;
142341 -       const U16 *u16ptr = (const U16 *)ptr;
142342 -       const U32 tableLog = ZSTD_read16(ptr);
142343 -       statePtr->value = (ptrdiff_t)1 << tableLog;
142344 -       statePtr->stateTable = u16ptr + 2;
142345 -       statePtr->symbolTT = ((const U32 *)ct + 1 + (tableLog ? (1 << (tableLog - 1)) : 1));
142346 -       statePtr->stateLog = tableLog;
142349 -/*! FSE_initCState2() :
142350 -*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
142351 -*   uses the smallest state value possible, saving the cost of this symbol */
142352 -ZSTD_STATIC void FSE_initCState2(FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol)
142354 -       FSE_initCState(statePtr, ct);
142355 -       {
142356 -               const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
142357 -               const U16 *stateTable = (const U16 *)(statePtr->stateTable);
142358 -               U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1 << 15)) >> 16);
142359 -               statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
142360 -               statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
142361 -       }
142364 -ZSTD_STATIC void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *statePtr, U32 symbol)
142366 -       const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
142367 -       const U16 *const stateTable = (const U16 *)(statePtr->stateTable);
142368 -       U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
142369 -       BIT_addBits(bitC, statePtr->value, nbBitsOut);
142370 -       statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
142373 -ZSTD_STATIC void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *statePtr)
142375 -       BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
142376 -       BIT_flushBits(bitC);
142379 -/* ======    Decompression    ====== */
142381 -typedef struct {
142382 -       U16 tableLog;
142383 -       U16 fastMode;
142384 -} FSE_DTableHeader; /* sizeof U32 */
142386 -typedef struct {
142387 -       unsigned short newState;
142388 -       unsigned char symbol;
142389 -       unsigned char nbBits;
142390 -} FSE_decode_t; /* size == U32 */
142392 -ZSTD_STATIC void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt)
142394 -       const void *ptr = dt;
142395 -       const FSE_DTableHeader *const DTableH = (const FSE_DTableHeader *)ptr;
142396 -       DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
142397 -       BIT_reloadDStream(bitD);
142398 -       DStatePtr->table = dt + 1;
142401 -ZSTD_STATIC BYTE FSE_peekSymbol(const FSE_DState_t *DStatePtr)
142403 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
142404 -       return DInfo.symbol;
142407 -ZSTD_STATIC void FSE_updateState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
142409 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
142410 -       U32 const nbBits = DInfo.nbBits;
142411 -       size_t const lowBits = BIT_readBits(bitD, nbBits);
142412 -       DStatePtr->state = DInfo.newState + lowBits;
142415 -ZSTD_STATIC BYTE FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
142417 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
142418 -       U32 const nbBits = DInfo.nbBits;
142419 -       BYTE const symbol = DInfo.symbol;
142420 -       size_t const lowBits = BIT_readBits(bitD, nbBits);
142422 -       DStatePtr->state = DInfo.newState + lowBits;
142423 -       return symbol;
142426 -/*! FSE_decodeSymbolFast() :
142427 -       unsafe, only works if no symbol has a probability > 50% */
142428 -ZSTD_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
142430 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
142431 -       U32 const nbBits = DInfo.nbBits;
142432 -       BYTE const symbol = DInfo.symbol;
142433 -       size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
142435 -       DStatePtr->state = DInfo.newState + lowBits;
142436 -       return symbol;
142439 -ZSTD_STATIC unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr) { return DStatePtr->state == 0; }
142441 -/* **************************************************************
142442 -*  Tuning parameters
142443 -****************************************************************/
142444 -/*!MEMORY_USAGE :
142445 -*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB; 16 -> 64KB; 20 -> 1MB; etc.)
142446 -*  Increasing memory usage improves compression ratio
142447 -*  Reduced memory usage can improve speed, due to cache effects
142448 -*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
142449 -#ifndef FSE_MAX_MEMORY_USAGE
142450 -#define FSE_MAX_MEMORY_USAGE 14
142451 -#endif
142452 -#ifndef FSE_DEFAULT_MEMORY_USAGE
142453 -#define FSE_DEFAULT_MEMORY_USAGE 13
142454 -#endif
142456 -/*!FSE_MAX_SYMBOL_VALUE :
142457 -*  Maximum symbol value authorized.
142458 -*  Required for proper stack allocation */
142459 -#ifndef FSE_MAX_SYMBOL_VALUE
142460 -#define FSE_MAX_SYMBOL_VALUE 255
142461 -#endif
142463 -/* **************************************************************
142464 -*  template functions type & suffix
142465 -****************************************************************/
142466 -#define FSE_FUNCTION_TYPE BYTE
142467 -#define FSE_FUNCTION_EXTENSION
142468 -#define FSE_DECODE_TYPE FSE_decode_t
142470 -/* ***************************************************************
142471 -*  Constants
142472 -*****************************************************************/
142473 -#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE - 2)
142474 -#define FSE_MAX_TABLESIZE (1U << FSE_MAX_TABLELOG)
142475 -#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE - 1)
142476 -#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE - 2)
142477 -#define FSE_MIN_TABLELOG 5
142479 -#define FSE_TABLELOG_ABSOLUTE_MAX 15
142480 -#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
142481 -#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
142482 -#endif
142484 -#define FSE_TABLESTEP(tableSize) ((tableSize >> 1) + (tableSize >> 3) + 3)
142486 -#endif /* FSE_H */
142487 diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c
142488 deleted file mode 100644
142489 index ef3d1741d532..000000000000
142490 --- a/lib/zstd/fse_compress.c
142491 +++ /dev/null
142492 @@ -1,795 +0,0 @@
142494 - * FSE : Finite State Entropy encoder
142495 - * Copyright (C) 2013-2015, Yann Collet.
142497 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
142499 - * Redistribution and use in source and binary forms, with or without
142500 - * modification, are permitted provided that the following conditions are
142501 - * met:
142503 - *   * Redistributions of source code must retain the above copyright
142504 - * notice, this list of conditions and the following disclaimer.
142505 - *   * Redistributions in binary form must reproduce the above
142506 - * copyright notice, this list of conditions and the following disclaimer
142507 - * in the documentation and/or other materials provided with the
142508 - * distribution.
142510 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
142511 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
142512 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
142513 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
142514 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
142515 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
142516 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
142517 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
142518 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
142519 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
142520 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
142522 - * This program is free software; you can redistribute it and/or modify it under
142523 - * the terms of the GNU General Public License version 2 as published by the
142524 - * Free Software Foundation. This program is dual-licensed; you may select
142525 - * either version 2 of the GNU General Public License ("GPL") or BSD license
142526 - * ("BSD").
142528 - * You can contact the author at :
142529 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
142530 - */
142532 -/* **************************************************************
142533 -*  Compiler specifics
142534 -****************************************************************/
142535 -#define FORCE_INLINE static __always_inline
142537 -/* **************************************************************
142538 -*  Includes
142539 -****************************************************************/
142540 -#include "bitstream.h"
142541 -#include "fse.h"
142542 -#include <linux/compiler.h>
142543 -#include <linux/kernel.h>
142544 -#include <linux/math64.h>
142545 -#include <linux/string.h> /* memcpy, memset */
142547 -/* **************************************************************
142548 -*  Error Management
142549 -****************************************************************/
142550 -#define FSE_STATIC_ASSERT(c)                                   \
142551 -       {                                                      \
142552 -               enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
142553 -       } /* use only *after* variable declarations */
142555 -/* **************************************************************
142556 -*  Templates
142557 -****************************************************************/
142559 -  designed to be included
142560 -  for type-specific functions (template emulation in C)
142561 -  Objective is to write these functions only once, for improved maintenance
142564 -/* safety checks */
142565 -#ifndef FSE_FUNCTION_EXTENSION
142566 -#error "FSE_FUNCTION_EXTENSION must be defined"
142567 -#endif
142568 -#ifndef FSE_FUNCTION_TYPE
142569 -#error "FSE_FUNCTION_TYPE must be defined"
142570 -#endif
142572 -/* Function names */
142573 -#define FSE_CAT(X, Y) X##Y
142574 -#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
142575 -#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
142577 -/* Function templates */
142579 -/* FSE_buildCTable_wksp() :
142580 - * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
142581 - * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
142582 - * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
142583 - */
142584 -size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
142586 -       U32 const tableSize = 1 << tableLog;
142587 -       U32 const tableMask = tableSize - 1;
142588 -       void *const ptr = ct;
142589 -       U16 *const tableU16 = ((U16 *)ptr) + 2;
142590 -       void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableLog ? tableSize >> 1 : 1);
142591 -       FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
142592 -       U32 const step = FSE_TABLESTEP(tableSize);
142593 -       U32 highThreshold = tableSize - 1;
142595 -       U32 *cumul;
142596 -       FSE_FUNCTION_TYPE *tableSymbol;
142597 -       size_t spaceUsed32 = 0;
142599 -       cumul = (U32 *)workspace + spaceUsed32;
142600 -       spaceUsed32 += FSE_MAX_SYMBOL_VALUE + 2;
142601 -       tableSymbol = (FSE_FUNCTION_TYPE *)((U32 *)workspace + spaceUsed32);
142602 -       spaceUsed32 += ALIGN(sizeof(FSE_FUNCTION_TYPE) * ((size_t)1 << tableLog), sizeof(U32)) >> 2;
142604 -       if ((spaceUsed32 << 2) > workspaceSize)
142605 -               return ERROR(tableLog_tooLarge);
142606 -       workspace = (U32 *)workspace + spaceUsed32;
142607 -       workspaceSize -= (spaceUsed32 << 2);
142609 -       /* CTable header */
142610 -       tableU16[-2] = (U16)tableLog;
142611 -       tableU16[-1] = (U16)maxSymbolValue;
142613 -       /* For explanations on how to distribute symbol values over the table :
142614 -       *  http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
142616 -       /* symbol start positions */
142617 -       {
142618 -               U32 u;
142619 -               cumul[0] = 0;
142620 -               for (u = 1; u <= maxSymbolValue + 1; u++) {
142621 -                       if (normalizedCounter[u - 1] == -1) { /* Low proba symbol */
142622 -                               cumul[u] = cumul[u - 1] + 1;
142623 -                               tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u - 1);
142624 -                       } else {
142625 -                               cumul[u] = cumul[u - 1] + normalizedCounter[u - 1];
142626 -                       }
142627 -               }
142628 -               cumul[maxSymbolValue + 1] = tableSize + 1;
142629 -       }
142631 -       /* Spread symbols */
142632 -       {
142633 -               U32 position = 0;
142634 -               U32 symbol;
142635 -               for (symbol = 0; symbol <= maxSymbolValue; symbol++) {
142636 -                       int nbOccurences;
142637 -                       for (nbOccurences = 0; nbOccurences < normalizedCounter[symbol]; nbOccurences++) {
142638 -                               tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
142639 -                               position = (position + step) & tableMask;
142640 -                               while (position > highThreshold)
142641 -                                       position = (position + step) & tableMask; /* Low proba area */
142642 -                       }
142643 -               }
142645 -               if (position != 0)
142646 -                       return ERROR(GENERIC); /* Must have gone through all positions */
142647 -       }
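/* Illustration: FSE_TABLESTEP keeps the walk coprime with the table size,
 * so every slot is visited exactly once. For tableSize == 16 the step is
 * (16>>1) + (16>>3) + 3 == 13, and starting from 0 the positions run
 * 0,13,10,7,4,1,14,11,8,5,2,15,12,9,6,3 before returning to 0, which is
 * why reaching position != 0 at the end signals an error above. */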
142649 -       /* Build table */
142650 -       {
142651 -               U32 u;
142652 -               for (u = 0; u < tableSize; u++) {
142653 -                       FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
142654 -                       tableU16[cumul[s]++] = (U16)(tableSize + u); /* TableU16 : sorted by symbol order; gives next state value */
142655 -               }
142656 -       }
142658 -       /* Build Symbol Transformation Table */
142659 -       {
142660 -               unsigned total = 0;
142661 -               unsigned s;
142662 -               for (s = 0; s <= maxSymbolValue; s++) {
142663 -                       switch (normalizedCounter[s]) {
142664 -                       case 0: break;
142666 -                       case -1:
142667 -                       case 1:
142668 -                               symbolTT[s].deltaNbBits = (tableLog << 16) - (1 << tableLog);
142669 -                               symbolTT[s].deltaFindState = total - 1;
142670 -                               total++;
142671 -                               break;
142672 -                       default: {
142673 -                               U32 const maxBitsOut = tableLog - BIT_highbit32(normalizedCounter[s] - 1);
142674 -                               U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
142675 -                               symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
142676 -                               symbolTT[s].deltaFindState = total - normalizedCounter[s];
142677 -                               total += normalizedCounter[s];
142678 -                       }
142679 -                       }
142680 -               }
142681 -       }
142683 -       return 0;
142686 -/*-**************************************************************
142687 -*  FSE NCount encoding-decoding
142688 -****************************************************************/
142689 -size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
142691 -       size_t const maxHeaderSize = (((maxSymbolValue + 1) * tableLog) >> 3) + 3;
142692 -       return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
142695 -static size_t FSE_writeNCount_generic(void *header, size_t headerBufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
142696 -                                     unsigned writeIsSafe)
142698 -       BYTE *const ostart = (BYTE *)header;
142699 -       BYTE *out = ostart;
142700 -       BYTE *const oend = ostart + headerBufferSize;
142701 -       int nbBits;
142702 -       const int tableSize = 1 << tableLog;
142703 -       int remaining;
142704 -       int threshold;
142705 -       U32 bitStream;
142706 -       int bitCount;
142707 -       unsigned charnum = 0;
142708 -       int previous0 = 0;
142710 -       bitStream = 0;
142711 -       bitCount = 0;
142712 -       /* Table Size */
142713 -       bitStream += (tableLog - FSE_MIN_TABLELOG) << bitCount;
142714 -       bitCount += 4;
142716 -       /* Init */
142717 -       remaining = tableSize + 1; /* +1 for extra accuracy */
142718 -       threshold = tableSize;
142719 -       nbBits = tableLog + 1;
142721 -       while (remaining > 1) { /* stops at 1 */
142722 -               if (previous0) {
142723 -                       unsigned start = charnum;
142724 -                       while (!normalizedCounter[charnum])
142725 -                               charnum++;
142726 -                       while (charnum >= start + 24) {
142727 -                               start += 24;
142728 -                               bitStream += 0xFFFFU << bitCount;
142729 -                               if ((!writeIsSafe) && (out > oend - 2))
142730 -                                       return ERROR(dstSize_tooSmall); /* Buffer overflow */
142731 -                               out[0] = (BYTE)bitStream;
142732 -                               out[1] = (BYTE)(bitStream >> 8);
142733 -                               out += 2;
142734 -                               bitStream >>= 16;
142735 -                       }
142736 -                       while (charnum >= start + 3) {
142737 -                               start += 3;
142738 -                               bitStream += 3 << bitCount;
142739 -                               bitCount += 2;
142740 -                       }
142741 -                       bitStream += (charnum - start) << bitCount;
142742 -                       bitCount += 2;
142743 -                       if (bitCount > 16) {
142744 -                               if ((!writeIsSafe) && (out > oend - 2))
142745 -                                       return ERROR(dstSize_tooSmall); /* Buffer overflow */
142746 -                               out[0] = (BYTE)bitStream;
142747 -                               out[1] = (BYTE)(bitStream >> 8);
142748 -                               out += 2;
142749 -                               bitStream >>= 16;
142750 -                               bitCount -= 16;
142751 -                       }
142752 -               }
142753 -               {
142754 -                       int count = normalizedCounter[charnum++];
142755 -                       int const max = (2 * threshold - 1) - remaining;
142756 -                       remaining -= count < 0 ? -count : count;
142757 -                       count++; /* +1 for extra accuracy */
142758 -                       if (count >= threshold)
142759 -                               count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
142760 -                       bitStream += count << bitCount;
142761 -                       bitCount += nbBits;
142762 -                       bitCount -= (count < max);
142763 -                       previous0 = (count == 1);
142764 -                       if (remaining < 1)
142765 -                               return ERROR(GENERIC);
142766 -                       while (remaining < threshold)
142767 -                               nbBits--, threshold >>= 1;
142768 -               }
142769 -               if (bitCount > 16) {
142770 -                       if ((!writeIsSafe) && (out > oend - 2))
142771 -                               return ERROR(dstSize_tooSmall); /* Buffer overflow */
142772 -                       out[0] = (BYTE)bitStream;
142773 -                       out[1] = (BYTE)(bitStream >> 8);
142774 -                       out += 2;
142775 -                       bitStream >>= 16;
142776 -                       bitCount -= 16;
142777 -               }
142778 -       }
142780 -       /* flush remaining bitStream */
142781 -       if ((!writeIsSafe) && (out > oend - 2))
142782 -               return ERROR(dstSize_tooSmall); /* Buffer overflow */
142783 -       out[0] = (BYTE)bitStream;
142784 -       out[1] = (BYTE)(bitStream >> 8);
142785 -       out += (bitCount + 7) / 8;
142787 -       if (charnum > maxSymbolValue + 1)
142788 -               return ERROR(GENERIC);
142790 -       return (out - ostart);
142793 -size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
142795 -       if (tableLog > FSE_MAX_TABLELOG)
142796 -               return ERROR(tableLog_tooLarge); /* Unsupported */
142797 -       if (tableLog < FSE_MIN_TABLELOG)
142798 -               return ERROR(GENERIC); /* Unsupported */
142800 -       if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
142801 -               return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
142803 -       return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
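For illustration, a minimal caller-side sketch of the NCount writer above; the header buffer size is an assumption (any buffer of at least FSE_NCountWriteBound(maxSymbolValue, tableLog) bytes selects the unchecked fast path):

	/* illustrative fragment: norm[] is assumed filled by FSE_normalizeCount() */
	short norm[256];
	unsigned maxSymbolValue = 255, tableLog = 11;
	BYTE header[512];	/* assumed >= FSE_NCountWriteBound(255, 11) */
	size_t const hSize = FSE_writeNCount(header, sizeof(header),
					     norm, maxSymbolValue, tableLog);
	if (FSE_isError(hSize))
		return hSize;	/* e.g. tableLog_tooLarge, dstSize_tooSmall */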
142806 -/*-**************************************************************
142807 -*  Counting histogram
142808 -****************************************************************/
142809 -/*! FSE_count_simple
142810 -	This function counts byte values within `src`, and stores the histogram into table `count`.
142811 -	It doesn't use any additional memory.
142812 -	But this function is unsafe: it doesn't check that all values within `src` can fit into `count`.
142813 -       For this reason, prefer using a table `count` with 256 elements.
142814 -       @return : count of most numerous element
142816 -size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize)
142818 -       const BYTE *ip = (const BYTE *)src;
142819 -       const BYTE *const end = ip + srcSize;
142820 -       unsigned maxSymbolValue = *maxSymbolValuePtr;
142821 -       unsigned max = 0;
142823 -       memset(count, 0, (maxSymbolValue + 1) * sizeof(*count));
142824 -       if (srcSize == 0) {
142825 -               *maxSymbolValuePtr = 0;
142826 -               return 0;
142827 -       }
142829 -       while (ip < end)
142830 -               count[*ip++]++;
142832 -       while (!count[maxSymbolValue])
142833 -               maxSymbolValue--;
142834 -       *maxSymbolValuePtr = maxSymbolValue;
142836 -       {
142837 -               U32 s;
142838 -               for (s = 0; s <= maxSymbolValue; s++)
142839 -                       if (count[s] > max)
142840 -                               max = count[s];
142841 -       }
142843 -       return (size_t)max;
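For illustration, the minimal way to drive the function above, following its own advice of a 256-entry `count` so every byte value fits:

	unsigned count[256];
	unsigned maxSymbolValue = 255;	/* in/out: shrunk to the largest symbol actually seen */
	size_t const maxCount = FSE_count_simple(count, &maxSymbolValue, src, srcSize);
	if (maxCount == srcSize) {
		/* one byte value fills the whole input: candidate for the RLE path */
	}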
142846 -/* FSE_count_parallel_wksp() :
142847 - * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
142848 - * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)` */
142849 -static size_t FSE_count_parallel_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned checkMax,
142850 -                                     unsigned *const workSpace)
142852 -       const BYTE *ip = (const BYTE *)source;
142853 -       const BYTE *const iend = ip + sourceSize;
142854 -       unsigned maxSymbolValue = *maxSymbolValuePtr;
142855 -       unsigned max = 0;
142856 -       U32 *const Counting1 = workSpace;
142857 -       U32 *const Counting2 = Counting1 + 256;
142858 -       U32 *const Counting3 = Counting2 + 256;
142859 -       U32 *const Counting4 = Counting3 + 256;
142861 -       memset(Counting1, 0, 4 * 256 * sizeof(unsigned));
142863 -       /* safety checks */
142864 -       if (!sourceSize) {
142865 -               memset(count, 0, maxSymbolValue + 1);
142866 -               *maxSymbolValuePtr = 0;
142867 -               return 0;
142868 -       }
142869 -       if (!maxSymbolValue)
142870 -               maxSymbolValue = 255; /* 0 == default */
142872 -       /* by stripes of 16 bytes */
142873 -       {
142874 -               U32 cached = ZSTD_read32(ip);
142875 -               ip += 4;
142876 -               while (ip < iend - 15) {
142877 -                       U32 c = cached;
142878 -                       cached = ZSTD_read32(ip);
142879 -                       ip += 4;
142880 -                       Counting1[(BYTE)c]++;
142881 -                       Counting2[(BYTE)(c >> 8)]++;
142882 -                       Counting3[(BYTE)(c >> 16)]++;
142883 -                       Counting4[c >> 24]++;
142884 -                       c = cached;
142885 -                       cached = ZSTD_read32(ip);
142886 -                       ip += 4;
142887 -                       Counting1[(BYTE)c]++;
142888 -                       Counting2[(BYTE)(c >> 8)]++;
142889 -                       Counting3[(BYTE)(c >> 16)]++;
142890 -                       Counting4[c >> 24]++;
142891 -                       c = cached;
142892 -                       cached = ZSTD_read32(ip);
142893 -                       ip += 4;
142894 -                       Counting1[(BYTE)c]++;
142895 -                       Counting2[(BYTE)(c >> 8)]++;
142896 -                       Counting3[(BYTE)(c >> 16)]++;
142897 -                       Counting4[c >> 24]++;
142898 -                       c = cached;
142899 -                       cached = ZSTD_read32(ip);
142900 -                       ip += 4;
142901 -                       Counting1[(BYTE)c]++;
142902 -                       Counting2[(BYTE)(c >> 8)]++;
142903 -                       Counting3[(BYTE)(c >> 16)]++;
142904 -                       Counting4[c >> 24]++;
142905 -               }
142906 -               ip -= 4;
142907 -       }
142909 -       /* finish last symbols */
142910 -       while (ip < iend)
142911 -               Counting1[*ip++]++;
142913 -       if (checkMax) { /* verify stats will fit into destination table */
142914 -               U32 s;
142915 -               for (s = 255; s > maxSymbolValue; s--) {
142916 -                       Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
142917 -                       if (Counting1[s])
142918 -                               return ERROR(maxSymbolValue_tooSmall);
142919 -               }
142920 -       }
142922 -       {
142923 -               U32 s;
142924 -               for (s = 0; s <= maxSymbolValue; s++) {
142925 -                       count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
142926 -                       if (count[s] > max)
142927 -                               max = count[s];
142928 -               }
142929 -       }
142931 -       while (!count[maxSymbolValue])
142932 -               maxSymbolValue--;
142933 -       *maxSymbolValuePtr = maxSymbolValue;
142934 -       return (size_t)max;
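The four Counting tables above are the classic multi-histogram trick: spreading increments over four independent tables breaks the store-to-load dependency chain a single table suffers on runs of identical bytes. A stripped-down, standalone sketch of the same idea (max_of_histogram is a hypothetical helper, not part of this file):

	static unsigned max_of_histogram(const unsigned char *p, size_t n,
					 unsigned count[256])
	{
		unsigned c1[256] = {0}, c2[256] = {0}, c3[256] = {0}, c4[256] = {0};
		size_t i = 0;
		unsigned s, max = 0;

		for (; i + 4 <= n; i += 4) {	/* one table per lane */
			c1[p[i + 0]]++;
			c2[p[i + 1]]++;
			c3[p[i + 2]]++;
			c4[p[i + 3]]++;
		}
		for (; i < n; i++)		/* tail bytes */
			c1[p[i]]++;
		for (s = 0; s < 256; s++) {	/* merge lanes, track the max */
			count[s] = c1[s] + c2[s] + c3[s] + c4[s];
			if (count[s] > max)
				max = count[s];
		}
		return max;
	}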
142937 -/* FSE_countFast_wksp() :
142938 - * Same as FSE_countFast(), but using an externally provided scratch buffer.
142939 - * `workSpace` size must be a table of >= `1024` unsigned */
142940 -size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
142942 -       if (sourceSize < 1500)
142943 -               return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
142944 -       return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
142947 -/* FSE_count_wksp() :
142948 - * Same as FSE_count(), but using an externally provided scratch buffer.
142949 - * `workSpace` size must be a table of >= `1024` unsigned */
142950 -size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
142952 -       if (*maxSymbolValuePtr < 255)
142953 -               return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
142954 -       *maxSymbolValuePtr = 255;
142955 -       return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
142958 -/*-**************************************************************
142959 -*  FSE Compression Code
142960 -****************************************************************/
142961 -/*! FSE_sizeof_CTable() :
142962 -       FSE_CTable is a variable size structure which contains :
142963 -       `U16 tableLog;`
142964 -       `U16 maxSymbolValue;`
142965 -       `U16 nextStateNumber[1 << tableLog];`                         // This size is variable
142966 -       `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];`  // This size is variable
142967 -Allocation is manual (C standard does not support variable-size structures).
142969 -size_t FSE_sizeof_CTable(unsigned maxSymbolValue, unsigned tableLog)
142971 -       if (tableLog > FSE_MAX_TABLELOG)
142972 -               return ERROR(tableLog_tooLarge);
142973 -       return FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue) * sizeof(U32);
142976 -/* provides the minimum logSize to safely represent a distribution */
142977 -static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
142979 -       U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
142980 -       U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
142981 -       U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
142982 -       return minBits;
142985 -unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
142987 -       U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
142988 -       U32 tableLog = maxTableLog;
142989 -       U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
142990 -       if (tableLog == 0)
142991 -               tableLog = FSE_DEFAULT_TABLELOG;
142992 -       if (maxBitsSrc < tableLog)
142993 -               tableLog = maxBitsSrc; /* Accuracy can be reduced */
142994 -       if (minBits > tableLog)
142995 -               tableLog = minBits; /* Need a minimum to safely represent all symbol values */
142996 -       if (tableLog < FSE_MIN_TABLELOG)
142997 -               tableLog = FSE_MIN_TABLELOG;
142998 -       if (tableLog > FSE_MAX_TABLELOG)
142999 -               tableLog = FSE_MAX_TABLELOG;
143000 -       return tableLog;
143003 -unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
143005 -       return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
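A worked example of the clamping above, assuming the usual constants in this copy of the library (FSE_DEFAULT_TABLELOG == 11, FSE_MIN_TABLELOG == 5, FSE_MAX_TABLELOG == 12):

	/*
	 * srcSize = 60000, maxSymbolValue = 255, maxTableLog = 0, minus = 2:
	 *   maxBitsSrc = highbit32(59999) - 2 = 15 - 2 = 13
	 *   minBits    = min(highbit32(59999) + 1, highbit32(255) + 2)
	 *              = min(16, 9) = 9
	 *   tableLog   = 11 (default); 13 is not < 11, 9 <= 11,
	 *                and 11 lies within [5, 12]  =>  result 11
	 */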
143008 -/* Secondary normalization method.
143009 -   To be used when primary method fails. */
143011 -static size_t FSE_normalizeM2(short *norm, U32 tableLog, const unsigned *count, size_t total, U32 maxSymbolValue)
143013 -       short const NOT_YET_ASSIGNED = -2;
143014 -       U32 s;
143015 -       U32 distributed = 0;
143016 -       U32 ToDistribute;
143018 -       /* Init */
143019 -       U32 const lowThreshold = (U32)(total >> tableLog);
143020 -       U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
143022 -       for (s = 0; s <= maxSymbolValue; s++) {
143023 -               if (count[s] == 0) {
143024 -                       norm[s] = 0;
143025 -                       continue;
143026 -               }
143027 -               if (count[s] <= lowThreshold) {
143028 -                       norm[s] = -1;
143029 -                       distributed++;
143030 -                       total -= count[s];
143031 -                       continue;
143032 -               }
143033 -               if (count[s] <= lowOne) {
143034 -                       norm[s] = 1;
143035 -                       distributed++;
143036 -                       total -= count[s];
143037 -                       continue;
143038 -               }
143040 -               norm[s] = NOT_YET_ASSIGNED;
143041 -       }
143042 -       ToDistribute = (1 << tableLog) - distributed;
143044 -       if ((total / ToDistribute) > lowOne) {
143045 -               /* risk of rounding to zero */
143046 -               lowOne = (U32)((total * 3) / (ToDistribute * 2));
143047 -               for (s = 0; s <= maxSymbolValue; s++) {
143048 -                       if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
143049 -                               norm[s] = 1;
143050 -                               distributed++;
143051 -                               total -= count[s];
143052 -                               continue;
143053 -                       }
143054 -               }
143055 -               ToDistribute = (1 << tableLog) - distributed;
143056 -       }
143058 -       if (distributed == maxSymbolValue + 1) {
143059 -               /* all values are pretty poor;
143060 -                  probably incompressible data (should have already been detected);
143061 -                  find max, then give all remaining points to max */
143062 -               U32 maxV = 0, maxC = 0;
143063 -               for (s = 0; s <= maxSymbolValue; s++)
143064 -                       if (count[s] > maxC)
143065 -                               maxV = s, maxC = count[s];
143066 -               norm[maxV] += (short)ToDistribute;
143067 -               return 0;
143068 -       }
143070 -       if (total == 0) {
143071 -               /* all of the symbols were low enough for the lowOne or lowThreshold */
143072 -               for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1))
143073 -                       if (norm[s] > 0)
143074 -                               ToDistribute--, norm[s]++;
143075 -               return 0;
143076 -       }
143078 -       {
143079 -               U64 const vStepLog = 62 - tableLog;
143080 -               U64 const mid = (1ULL << (vStepLog - 1)) - 1;
143081 -               U64 const rStep = div_u64((((U64)1 << vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
143082 -               U64 tmpTotal = mid;
143083 -               for (s = 0; s <= maxSymbolValue; s++) {
143084 -                       if (norm[s] == NOT_YET_ASSIGNED) {
143085 -                               U64 const end = tmpTotal + (count[s] * rStep);
143086 -                               U32 const sStart = (U32)(tmpTotal >> vStepLog);
143087 -                               U32 const sEnd = (U32)(end >> vStepLog);
143088 -                               U32 const weight = sEnd - sStart;
143089 -                               if (weight < 1)
143090 -                                       return ERROR(GENERIC);
143091 -                               norm[s] = (short)weight;
143092 -                               tmpTotal = end;
143093 -                       }
143094 -               }
143095 -       }
143097 -       return 0;
143100 -size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t total, unsigned maxSymbolValue)
143102 -       /* Sanity checks */
143103 -       if (tableLog == 0)
143104 -               tableLog = FSE_DEFAULT_TABLELOG;
143105 -       if (tableLog < FSE_MIN_TABLELOG)
143106 -               return ERROR(GENERIC); /* Unsupported size */
143107 -       if (tableLog > FSE_MAX_TABLELOG)
143108 -               return ERROR(tableLog_tooLarge); /* Unsupported size */
143109 -       if (tableLog < FSE_minTableLog(total, maxSymbolValue))
143110 -               return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
143112 -       {
143113 -               U32 const rtbTable[] = {0, 473195, 504333, 520860, 550000, 700000, 750000, 830000};
143114 -               U64 const scale = 62 - tableLog;
143115 -		U64 const step = div_u64((U64)1 << 62, (U32)total); /* <== here, one division! */
143116 -               U64 const vStep = 1ULL << (scale - 20);
143117 -               int stillToDistribute = 1 << tableLog;
143118 -               unsigned s;
143119 -               unsigned largest = 0;
143120 -               short largestP = 0;
143121 -               U32 lowThreshold = (U32)(total >> tableLog);
143123 -               for (s = 0; s <= maxSymbolValue; s++) {
143124 -                       if (count[s] == total)
143125 -                               return 0; /* rle special case */
143126 -                       if (count[s] == 0) {
143127 -                               normalizedCounter[s] = 0;
143128 -                               continue;
143129 -                       }
143130 -                       if (count[s] <= lowThreshold) {
143131 -                               normalizedCounter[s] = -1;
143132 -                               stillToDistribute--;
143133 -                       } else {
143134 -                               short proba = (short)((count[s] * step) >> scale);
143135 -                               if (proba < 8) {
143136 -                                       U64 restToBeat = vStep * rtbTable[proba];
143137 -                                       proba += (count[s] * step) - ((U64)proba << scale) > restToBeat;
143138 -                               }
143139 -                               if (proba > largestP)
143140 -                                       largestP = proba, largest = s;
143141 -                               normalizedCounter[s] = proba;
143142 -                               stillToDistribute -= proba;
143143 -                       }
143144 -               }
143145 -               if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
143146 -                       /* corner case, need another normalization method */
143147 -                       size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
143148 -                       if (FSE_isError(errorCode))
143149 -                               return errorCode;
143150 -               } else
143151 -                       normalizedCounter[largest] += (short)stillToDistribute;
143152 -       }
143154 -       return tableLog;
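For illustration, the normalize step in context; `count`, `srcSize` and `maxSymbolValue` are assumed to come from one of the counting functions above:

	short norm[256];
	unsigned const tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
	size_t const nc = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
	if (FSE_isError(nc))
		return nc;
	/* nc == 0 signals the RLE special case (one symbol owns the input);
	 * otherwise norm[] sums to exactly 1 << tableLog, with each -1 entry
	 * standing in for probability 1 ("low probability" marker). */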
143157 -/* fake FSE_CTable, for raw (uncompressed) input */
143158 -size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits)
143160 -       const unsigned tableSize = 1 << nbBits;
143161 -       const unsigned tableMask = tableSize - 1;
143162 -       const unsigned maxSymbolValue = tableMask;
143163 -       void *const ptr = ct;
143164 -       U16 *const tableU16 = ((U16 *)ptr) + 2;
143165 -       void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableSize >> 1); /* assumption : tableLog >= 1 */
143166 -       FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
143167 -       unsigned s;
143169 -       /* Sanity checks */
143170 -       if (nbBits < 1)
143171 -               return ERROR(GENERIC); /* min size */
143173 -       /* header */
143174 -       tableU16[-2] = (U16)nbBits;
143175 -       tableU16[-1] = (U16)maxSymbolValue;
143177 -       /* Build table */
143178 -       for (s = 0; s < tableSize; s++)
143179 -               tableU16[s] = (U16)(tableSize + s);
143181 -       /* Build Symbol Transformation Table */
143182 -       {
143183 -               const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
143184 -               for (s = 0; s <= maxSymbolValue; s++) {
143185 -                       symbolTT[s].deltaNbBits = deltaNbBits;
143186 -                       symbolTT[s].deltaFindState = s - 1;
143187 -               }
143188 -       }
143190 -       return 0;
143193 -/* fake FSE_CTable, for rle input (always same symbol) */
143194 -size_t FSE_buildCTable_rle(FSE_CTable *ct, BYTE symbolValue)
143196 -       void *ptr = ct;
143197 -       U16 *tableU16 = ((U16 *)ptr) + 2;
143198 -       void *FSCTptr = (U32 *)ptr + 2;
143199 -       FSE_symbolCompressionTransform *symbolTT = (FSE_symbolCompressionTransform *)FSCTptr;
143201 -       /* header */
143202 -       tableU16[-2] = (U16)0;
143203 -       tableU16[-1] = (U16)symbolValue;
143205 -       /* Build table */
143206 -       tableU16[0] = 0;
143207 -       tableU16[1] = 0; /* just in case */
143209 -       /* Build Symbol Transformation Table */
143210 -       symbolTT[symbolValue].deltaNbBits = 0;
143211 -       symbolTT[symbolValue].deltaFindState = 0;
143213 -       return 0;
143216 -static size_t FSE_compress_usingCTable_generic(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast)
143218 -       const BYTE *const istart = (const BYTE *)src;
143219 -       const BYTE *const iend = istart + srcSize;
143220 -       const BYTE *ip = iend;
143222 -       BIT_CStream_t bitC;
143223 -       FSE_CState_t CState1, CState2;
143225 -       /* init */
143226 -       if (srcSize <= 2)
143227 -               return 0;
143228 -       {
143229 -               size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
143230 -               if (FSE_isError(initError))
143231 -                       return 0; /* not enough space available to write a bitstream */
143232 -       }
143234 -#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
143236 -       if (srcSize & 1) {
143237 -               FSE_initCState2(&CState1, ct, *--ip);
143238 -               FSE_initCState2(&CState2, ct, *--ip);
143239 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
143240 -               FSE_FLUSHBITS(&bitC);
143241 -       } else {
143242 -               FSE_initCState2(&CState2, ct, *--ip);
143243 -               FSE_initCState2(&CState1, ct, *--ip);
143244 -       }
143246 -       /* join to mod 4 */
143247 -       srcSize -= 2;
143248 -       if ((sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) && (srcSize & 2)) { /* test bit 2 */
143249 -               FSE_encodeSymbol(&bitC, &CState2, *--ip);
143250 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
143251 -               FSE_FLUSHBITS(&bitC);
143252 -       }
143254 -       /* 2 or 4 encoding per loop */
143255 -       while (ip > istart) {
143257 -               FSE_encodeSymbol(&bitC, &CState2, *--ip);
143259 -               if (sizeof(bitC.bitContainer) * 8 < FSE_MAX_TABLELOG * 2 + 7) /* this test must be static */
143260 -                       FSE_FLUSHBITS(&bitC);
143262 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
143264 -               if (sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) { /* this test must be static */
143265 -                       FSE_encodeSymbol(&bitC, &CState2, *--ip);
143266 -                       FSE_encodeSymbol(&bitC, &CState1, *--ip);
143267 -               }
143269 -               FSE_FLUSHBITS(&bitC);
143270 -       }
143272 -       FSE_flushCState(&bitC, &CState2);
143273 -       FSE_flushCState(&bitC, &CState1);
143274 -       return BIT_closeCStream(&bitC);
143277 -size_t FSE_compress_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct)
143279 -       unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
143281 -       if (fast)
143282 -               return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
143283 -       else
143284 -               return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
143287 -size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
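Taken together, the functions in this file formed a pipeline; a sketch stitched from the signatures above (illustration only: the CTable bound and workspace sizes are assumptions, and fse_compress_sketch is a hypothetical name):

	static size_t fse_compress_sketch(void *dst, size_t dstCapacity,
					  const void *src, size_t srcSize,
					  unsigned *wksp /* assumed >= 1024 unsigned */)
	{
		unsigned count[256];
		short norm[256];
		FSE_CTable ct[FSE_CTABLE_SIZE_U32(12, 255)];
		unsigned maxSymbolValue = 255;
		unsigned tableLog;
		BYTE *op = (BYTE *)dst;
		BYTE *const oend = op + dstCapacity;
		size_t hSize, cSize;

		size_t const maxCount = FSE_count_wksp(count, &maxSymbolValue, src, srcSize, wksp);
		if (FSE_isError(maxCount))
			return maxCount;
		if (maxCount == srcSize)
			return 1;	/* RLE: single symbol, see FSE_buildCTable_rle() */

		tableLog = FSE_optimalTableLog(0, srcSize, maxSymbolValue);
		{
			size_t const e = FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue);
			if (FSE_isError(e))
				return e;
		}
		hSize = FSE_writeNCount(op, oend - op, norm, maxSymbolValue, tableLog);
		if (FSE_isError(hSize))
			return hSize;
		op += hSize;
		{
			size_t const e = FSE_buildCTable_wksp(ct, norm, maxSymbolValue, tableLog,
							      wksp, 1024 * sizeof(unsigned));
			if (FSE_isError(e))
				return e;
		}
		cSize = FSE_compress_usingCTable(op, oend - op, src, srcSize, ct);
		if (cSize == 0)
			return 0;	/* not compressible */
		return (size_t)(op - (BYTE *)dst) + cSize;
	}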
143288 diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c
143289 deleted file mode 100644
143290 index 0b353530fb3f..000000000000
143291 --- a/lib/zstd/fse_decompress.c
143292 +++ /dev/null
143293 @@ -1,325 +0,0 @@
143295 - * FSE : Finite State Entropy decoder
143296 - * Copyright (C) 2013-2015, Yann Collet.
143298 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
143300 - * Redistribution and use in source and binary forms, with or without
143301 - * modification, are permitted provided that the following conditions are
143302 - * met:
143304 - *   * Redistributions of source code must retain the above copyright
143305 - * notice, this list of conditions and the following disclaimer.
143306 - *   * Redistributions in binary form must reproduce the above
143307 - * copyright notice, this list of conditions and the following disclaimer
143308 - * in the documentation and/or other materials provided with the
143309 - * distribution.
143311 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
143312 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
143313 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
143314 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
143315 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
143316 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
143317 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
143318 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
143319 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
143320 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
143321 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
143323 - * This program is free software; you can redistribute it and/or modify it under
143324 - * the terms of the GNU General Public License version 2 as published by the
143325 - * Free Software Foundation. This program is dual-licensed; you may select
143326 - * either version 2 of the GNU General Public License ("GPL") or BSD license
143327 - * ("BSD").
143329 - * You can contact the author at :
143330 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
143331 - */
143333 -/* **************************************************************
143334 -*  Compiler specifics
143335 -****************************************************************/
143336 -#define FORCE_INLINE static __always_inline
143338 -/* **************************************************************
143339 -*  Includes
143340 -****************************************************************/
143341 -#include "bitstream.h"
143342 -#include "fse.h"
143343 -#include "zstd_internal.h"
143344 -#include <linux/compiler.h>
143345 -#include <linux/kernel.h>
143346 -#include <linux/string.h> /* memcpy, memset */
143348 -/* **************************************************************
143349 -*  Error Management
143350 -****************************************************************/
143351 -#define FSE_isError ERR_isError
143352 -#define FSE_STATIC_ASSERT(c)                                   \
143353 -       {                                                      \
143354 -               enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
143355 -       } /* use only *after* variable declarations */
143357 -/* **************************************************************
143358 -*  Templates
143359 -****************************************************************/
143361 -  designed to be included
143362 -  for type-specific functions (template emulation in C)
143363 -  The objective is to write these functions only once, for improved maintenance
143366 -/* safety checks */
143367 -#ifndef FSE_FUNCTION_EXTENSION
143368 -#error "FSE_FUNCTION_EXTENSION must be defined"
143369 -#endif
143370 -#ifndef FSE_FUNCTION_TYPE
143371 -#error "FSE_FUNCTION_TYPE must be defined"
143372 -#endif
143374 -/* Function names */
143375 -#define FSE_CAT(X, Y) X##Y
143376 -#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
143377 -#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
143379 -/* Function templates */
143381 -size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
143383 -       void *const tdPtr = dt + 1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
143384 -       FSE_DECODE_TYPE *const tableDecode = (FSE_DECODE_TYPE *)(tdPtr);
143385 -       U16 *symbolNext = (U16 *)workspace;
143387 -       U32 const maxSV1 = maxSymbolValue + 1;
143388 -       U32 const tableSize = 1 << tableLog;
143389 -       U32 highThreshold = tableSize - 1;
143391 -       /* Sanity Checks */
143392 -       if (workspaceSize < sizeof(U16) * (FSE_MAX_SYMBOL_VALUE + 1))
143393 -               return ERROR(tableLog_tooLarge);
143394 -       if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE)
143395 -               return ERROR(maxSymbolValue_tooLarge);
143396 -       if (tableLog > FSE_MAX_TABLELOG)
143397 -               return ERROR(tableLog_tooLarge);
143399 -       /* Init, lay down lowprob symbols */
143400 -       {
143401 -               FSE_DTableHeader DTableH;
143402 -               DTableH.tableLog = (U16)tableLog;
143403 -               DTableH.fastMode = 1;
143404 -               {
143405 -                       S16 const largeLimit = (S16)(1 << (tableLog - 1));
143406 -                       U32 s;
143407 -                       for (s = 0; s < maxSV1; s++) {
143408 -                               if (normalizedCounter[s] == -1) {
143409 -                                       tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
143410 -                                       symbolNext[s] = 1;
143411 -                               } else {
143412 -                                       if (normalizedCounter[s] >= largeLimit)
143413 -                                               DTableH.fastMode = 0;
143414 -                                       symbolNext[s] = normalizedCounter[s];
143415 -                               }
143416 -                       }
143417 -               }
143418 -               memcpy(dt, &DTableH, sizeof(DTableH));
143419 -       }
143421 -       /* Spread symbols */
143422 -       {
143423 -               U32 const tableMask = tableSize - 1;
143424 -               U32 const step = FSE_TABLESTEP(tableSize);
143425 -               U32 s, position = 0;
143426 -               for (s = 0; s < maxSV1; s++) {
143427 -                       int i;
143428 -                       for (i = 0; i < normalizedCounter[s]; i++) {
143429 -                               tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
143430 -                               position = (position + step) & tableMask;
143431 -                               while (position > highThreshold)
143432 -                                       position = (position + step) & tableMask; /* lowprob area */
143433 -                       }
143434 -               }
143435 -               if (position != 0)
143436 -                       return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
143437 -       }
143439 -       /* Build Decoding table */
143440 -       {
143441 -               U32 u;
143442 -               for (u = 0; u < tableSize; u++) {
143443 -                       FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
143444 -                       U16 nextState = symbolNext[symbol]++;
143445 -                       tableDecode[u].nbBits = (BYTE)(tableLog - BIT_highbit32((U32)nextState));
143446 -                       tableDecode[u].newState = (U16)((nextState << tableDecode[u].nbBits) - tableSize);
143447 -               }
143448 -       }
143450 -       return 0;
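One detail worth noting in the spread-symbols loop above, assuming the usual definition of FSE_TABLESTEP:

	/* FSE_TABLESTEP(tableSize) == (tableSize>>1) + (tableSize>>3) + 3.
	 * For the power-of-two table sizes used here, even + even + 3 is odd,
	 * hence coprime with tableSize, so position = (position + step) & tableMask
	 * visits every cell exactly once (e.g. tableSize = 32 gives step = 23).
	 * That is why a non-zero final position can only mean a corrupt
	 * normalizedCounter. */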
143453 -/*-*******************************************************
143454 -*  Decompression (Byte symbols)
143455 -*********************************************************/
143456 -size_t FSE_buildDTable_rle(FSE_DTable *dt, BYTE symbolValue)
143458 -       void *ptr = dt;
143459 -       FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
143460 -       void *dPtr = dt + 1;
143461 -       FSE_decode_t *const cell = (FSE_decode_t *)dPtr;
143463 -       DTableH->tableLog = 0;
143464 -       DTableH->fastMode = 0;
143466 -       cell->newState = 0;
143467 -       cell->symbol = symbolValue;
143468 -       cell->nbBits = 0;
143470 -       return 0;
143473 -size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits)
143475 -       void *ptr = dt;
143476 -       FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
143477 -       void *dPtr = dt + 1;
143478 -       FSE_decode_t *const dinfo = (FSE_decode_t *)dPtr;
143479 -       const unsigned tableSize = 1 << nbBits;
143480 -       const unsigned tableMask = tableSize - 1;
143481 -       const unsigned maxSV1 = tableMask + 1;
143482 -       unsigned s;
143484 -       /* Sanity checks */
143485 -       if (nbBits < 1)
143486 -               return ERROR(GENERIC); /* min size */
143488 -       /* Build Decoding Table */
143489 -       DTableH->tableLog = (U16)nbBits;
143490 -       DTableH->fastMode = 1;
143491 -       for (s = 0; s < maxSV1; s++) {
143492 -               dinfo[s].newState = 0;
143493 -               dinfo[s].symbol = (BYTE)s;
143494 -               dinfo[s].nbBits = (BYTE)nbBits;
143495 -       }
143497 -       return 0;
143500 -FORCE_INLINE size_t FSE_decompress_usingDTable_generic(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt,
143501 -                                                      const unsigned fast)
143503 -       BYTE *const ostart = (BYTE *)dst;
143504 -       BYTE *op = ostart;
143505 -       BYTE *const omax = op + maxDstSize;
143506 -       BYTE *const olimit = omax - 3;
143508 -       BIT_DStream_t bitD;
143509 -       FSE_DState_t state1;
143510 -       FSE_DState_t state2;
143512 -       /* Init */
143513 -       CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
143515 -       FSE_initDState(&state1, &bitD, dt);
143516 -       FSE_initDState(&state2, &bitD, dt);
143518 -#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
143520 -       /* 4 symbols per loop */
143521 -       for (; (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) & (op < olimit); op += 4) {
143522 -               op[0] = FSE_GETSYMBOL(&state1);
143524 -               if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
143525 -                       BIT_reloadDStream(&bitD);
143527 -               op[1] = FSE_GETSYMBOL(&state2);
143529 -               if (FSE_MAX_TABLELOG * 4 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
143530 -               {
143531 -                       if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) {
143532 -                               op += 2;
143533 -                               break;
143534 -                       }
143535 -               }
143537 -               op[2] = FSE_GETSYMBOL(&state1);
143539 -               if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
143540 -                       BIT_reloadDStream(&bitD);
143542 -               op[3] = FSE_GETSYMBOL(&state2);
143543 -       }
143545 -       /* tail */
143546 -       /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
143547 -       while (1) {
143548 -               if (op > (omax - 2))
143549 -                       return ERROR(dstSize_tooSmall);
143550 -               *op++ = FSE_GETSYMBOL(&state1);
143551 -               if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
143552 -                       *op++ = FSE_GETSYMBOL(&state2);
143553 -                       break;
143554 -               }
143556 -               if (op > (omax - 2))
143557 -                       return ERROR(dstSize_tooSmall);
143558 -               *op++ = FSE_GETSYMBOL(&state2);
143559 -               if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
143560 -                       *op++ = FSE_GETSYMBOL(&state1);
143561 -                       break;
143562 -               }
143563 -       }
143565 -       return op - ostart;
143568 -size_t FSE_decompress_usingDTable(void *dst, size_t originalSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt)
143570 -       const void *ptr = dt;
143571 -       const FSE_DTableHeader *DTableH = (const FSE_DTableHeader *)ptr;
143572 -       const U32 fastMode = DTableH->fastMode;
143574 -       /* select fast mode (static) */
143575 -       if (fastMode)
143576 -               return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
143577 -       return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
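The fast/safe split above costs nothing per symbol; it is resolved once at table-build time:

	/* DTableH.fastMode is set by FSE_buildDTable_wksp() above: it starts at 1
	 * and is cleared as soon as any normalizedCounter[s] reaches
	 * 1 << (tableLog - 1).  The wrapper then instantiates one of two fully
	 * specialized decoders (FSE_decodeSymbolFast vs FSE_decodeSymbol, via the
	 * FSE_GETSYMBOL macro), so the hot loop carries no per-symbol branch. */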
143580 -size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize)
143582 -       const BYTE *const istart = (const BYTE *)cSrc;
143583 -       const BYTE *ip = istart;
143584 -       unsigned tableLog;
143585 -       unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
143586 -       size_t NCountLength;
143588 -       FSE_DTable *dt;
143589 -       short *counting;
143590 -       size_t spaceUsed32 = 0;
143592 -       FSE_STATIC_ASSERT(sizeof(FSE_DTable) == sizeof(U32));
143594 -       dt = (FSE_DTable *)((U32 *)workspace + spaceUsed32);
143595 -       spaceUsed32 += FSE_DTABLE_SIZE_U32(maxLog);
143596 -       counting = (short *)((U32 *)workspace + spaceUsed32);
143597 -       spaceUsed32 += ALIGN(sizeof(short) * (FSE_MAX_SYMBOL_VALUE + 1), sizeof(U32)) >> 2;
143599 -       if ((spaceUsed32 << 2) > workspaceSize)
143600 -               return ERROR(tableLog_tooLarge);
143601 -       workspace = (U32 *)workspace + spaceUsed32;
143602 -       workspaceSize -= (spaceUsed32 << 2);
143604 -       /* normal FSE decoding mode */
143605 -       NCountLength = FSE_readNCount(counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
143606 -       if (FSE_isError(NCountLength))
143607 -               return NCountLength;
143608 -       // if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size; supposed to be already checked in NCountLength, only remaining
143609 -       // case : NCountLength==cSrcSize */
143610 -       if (tableLog > maxLog)
143611 -               return ERROR(tableLog_tooLarge);
143612 -       ip += NCountLength;
143613 -       cSrcSize -= NCountLength;
143615 -       CHECK_F(FSE_buildDTable_wksp(dt, counting, maxSymbolValue, tableLog, workspace, workspaceSize));
143617 -       return FSE_decompress_usingDTable(dst, dstCapacity, ip, cSrcSize, dt); /* always return, even if it is an error code */
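For illustration, a caller-side sketch of the one-shot decoder above; the workspace bound is an assumption covering the DTable plus the 256-entry counting array:

	U32 wksp[FSE_DTABLE_SIZE_U32(12) + 128];	/* assumed: DTable for maxLog 12, plus 256 shorts */
	size_t const dSize = FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize,
						 12 /* maxLog */, wksp, sizeof(wksp));
	if (FSE_isError(dSize))
		return dSize;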
143619 diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h
143620 deleted file mode 100644
143621 index 2143da28d952..000000000000
143622 --- a/lib/zstd/huf.h
143623 +++ /dev/null
143624 @@ -1,212 +0,0 @@
143626 - * Huffman coder, part of New Generation Entropy library
143627 - * header file
143628 - * Copyright (C) 2013-2016, Yann Collet.
143630 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
143632 - * Redistribution and use in source and binary forms, with or without
143633 - * modification, are permitted provided that the following conditions are
143634 - * met:
143636 - *   * Redistributions of source code must retain the above copyright
143637 - * notice, this list of conditions and the following disclaimer.
143638 - *   * Redistributions in binary form must reproduce the above
143639 - * copyright notice, this list of conditions and the following disclaimer
143640 - * in the documentation and/or other materials provided with the
143641 - * distribution.
143643 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
143644 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
143645 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
143646 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
143647 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
143648 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
143649 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
143650 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
143651 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
143652 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
143653 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
143655 - * This program is free software; you can redistribute it and/or modify it under
143656 - * the terms of the GNU General Public License version 2 as published by the
143657 - * Free Software Foundation. This program is dual-licensed; you may select
143658 - * either version 2 of the GNU General Public License ("GPL") or BSD license
143659 - * ("BSD").
143661 - * You can contact the author at :
143662 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
143663 - */
143664 -#ifndef HUF_H_298734234
143665 -#define HUF_H_298734234
143667 -/* *** Dependencies *** */
143668 -#include <linux/types.h> /* size_t */
143670 -/* ***   Tool functions *** */
143671 -#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
143672 -size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
143674 -/* Error Management */
143675 -unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
143677 -/* ***   Advanced function   *** */
143679 -/** HUF_compress4X_wksp() :
143680 -*   Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
143681 -size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
143682 -                          size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
143684 -/* *** Dependencies *** */
143685 -#include "mem.h" /* U32 */
143687 -/* *** Constants *** */
143688 -#define HUF_TABLELOG_MAX 12     /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
143689 -#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
143690 -#define HUF_SYMBOLVALUE_MAX 255
143692 -#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
143693 -#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
143694 -#error "HUF_TABLELOG_MAX is too large !"
143695 -#endif
143697 -/* ****************************************
143698 -*  Static allocation
143699 -******************************************/
143700 -/* HUF buffer bounds */
143701 -#define HUF_CTABLEBOUND 129
143702 -#define HUF_BLOCKBOUND(size) (size + (size >> 8) + 8)                   /* only true if incompressible pre-filtered with fast heuristic */
143703 -#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
143705 -/* static allocation of HUF's Compression Table */
143706 -#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
143707 -       U32 name##hb[maxSymbolValue + 1];              \
143708 -       void *name##hv = &(name##hb);                  \
143709 -       HUF_CElt *name = (HUF_CElt *)(name##hv) /* no final ; */
143711 -/* static allocation of HUF's DTable */
143712 -typedef U32 HUF_DTable;
143713 -#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1 << (maxTableLog)))
143714 -#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = {((U32)((maxTableLog)-1) * 0x01000001)}
143715 -#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = {((U32)(maxTableLog)*0x01000001)}
143717 -/* The workspace must have alignment at least 4 and be at least this large */
143718 -#define HUF_COMPRESS_WORKSPACE_SIZE (6 << 10)
143719 -#define HUF_COMPRESS_WORKSPACE_SIZE_U32 (HUF_COMPRESS_WORKSPACE_SIZE / sizeof(U32))
143721 -/* The workspace must have alignment at least 4 and be at least this large */
143722 -#define HUF_DECOMPRESS_WORKSPACE_SIZE (3 << 10)
143723 -#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
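For illustration, the static allocations these bounds are meant for:

	U32 cwksp[HUF_COMPRESS_WORKSPACE_SIZE_U32];	/* 6 KB, 4-byte aligned, for *_wksp compression */
	U32 dwksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];	/* 3 KB, 4-byte aligned, for *_wksp decompression */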
143725 -/* ****************************************
143726 -*  Advanced decompression functions
143727 -******************************************/
143728 -size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); /**< decodes RLE and uncompressed */
143729 -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
143730 -                               size_t workspaceSize);                                                         /**< considers RLE and uncompressed as errors */
143731 -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
143732 -                                  size_t workspaceSize); /**< single-symbol decoder */
143733 -size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
143734 -                                  size_t workspaceSize); /**< double-symbols decoder */
143736 -/* ****************************************
143737 -*  HUF detailed API
143738 -******************************************/
143740 -HUF_compress() does the following:
143741 -1. count symbol occurrences from source[] into table count[] using FSE_count()
143742 -2. (optional) refine tableLog using HUF_optimalTableLog()
143743 -3. build Huffman table from count using HUF_buildCTable()
143744 -4. save Huffman table to memory buffer using HUF_writeCTable_wksp()
143745 -5. encode the data stream using HUF_compress4X_usingCTable()
143747 -The following API allows targeting specific sub-functions for advanced tasks.
143748 -For example, it's possible to compress several blocks using the same 'CTable',
143749 -or to save and regenerate 'CTable' using external methods.
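For illustration, that step-by-step path expressed with the declarations that follow (error handling elided; the header/payload offset handling is simplified):

	unsigned count[HUF_SYMBOLVALUE_MAX + 1];
	unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
	U32 wksp[HUF_COMPRESS_WORKSPACE_SIZE_U32];
	HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
	unsigned huffLog = HUF_TABLELOG_DEFAULT;
	size_t hSize;

	FSE_count_wksp(count, &maxSymbolValue, src, srcSize, wksp);		/* 1 */
	huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);	/* 2 */
	huffLog = (unsigned)HUF_buildCTable_wksp(hufTable, count, maxSymbolValue,
						 huffLog, wksp, sizeof(wksp));	/* 3: returns depth used */
	hSize = HUF_writeCTable_wksp(dst, dstCapacity, hufTable, maxSymbolValue,
				     huffLog, wksp, sizeof(wksp));		/* 4 */
	HUF_compress4X_usingCTable((BYTE *)dst + hSize, dstCapacity - hSize,
				   src, srcSize, hufTable);			/* 5 */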
143751 -/* FSE_count() : find it within "fse.h" */
143752 -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
143753 -typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
143754 -size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize);
143755 -size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
143757 -typedef enum {
143758 -       HUF_repeat_none,  /**< Cannot use the previous table */
143759 -       HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1,
143760 -                            4}X_repeat */
143761 -	HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
143762 -} HUF_repeat;
143763 -/** HUF_compress4X_repeat() :
143764 -*   Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
143765 -*   If it uses hufTable it does not modify hufTable or repeat.
143766 -*   If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
143767 -*   If preferRepeat then the old table will always be used if valid. */
143768 -size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
143769 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
143770 -                            int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
143772 -/** HUF_buildCTable_wksp() :
143773 - *  Same as HUF_buildCTable(), but using an externally allocated scratch buffer.
143774 - *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
143775 - */
143776 -size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize);
143778 -/*! HUF_readStats() :
143779 -       Read compact Huffman tree, saved by HUF_writeCTable().
143780 -       `huffWeight` is destination buffer.
143781 -	@return : size read from `src`, or an error code.
143782 -	Note : Needed by HUF_readCTable() and HUF_readDTableXn(). */
143783 -size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize,
143784 -                         void *workspace, size_t workspaceSize);
143786 -/** HUF_readCTable() :
143787 -*   Loading a CTable saved with HUF_writeCTable() */
143788 -size_t HUF_readCTable_wksp(HUF_CElt *CTable, unsigned maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
143791 -HUF_decompress() does the following:
143792 -1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
143793 -2. build Huffman table from save, using HUF_readDTableXn()
143794 -3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
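For illustration, that selection flow for the X2 branch (the X4 branch is symmetric; it is an assumption here, consistent with this library's other readers, that HUF_readDTableX2_wksp() returns the header size consumed):

	U32 wksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
	HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
	if (HUF_selectDecoder(dstSize, cSrcSize) == 0) {			/* 1 */
		size_t const hSize = HUF_readDTableX2_wksp(DTable, cSrc, cSrcSize,
							   wksp, sizeof(wksp));	/* 2 */
		if (HUF_isError(hSize))
			return hSize;
		return HUF_decompress4X2_usingDTable(dst, dstSize,		/* 3 */
						     (const BYTE *)cSrc + hSize,
						     cSrcSize - hSize, DTable);
	}
	/* else: double-symbols path via HUF_readDTableX4_wksp() +
	 * HUF_decompress4X4_usingDTable() */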
143797 -/** HUF_selectDecoder() :
143798 -*   Tells which decoder is likely to decode faster,
143799 -*   based on a set of pre-determined metrics.
143800 -*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4.
143801 -*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
143802 -U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize);
143804 -size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
143805 -size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
143807 -size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
143808 -size_t HUF_decompress4X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
143809 -size_t HUF_decompress4X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
143811 -/* single stream variants */
143813 -size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
143814 -                          size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
143815 -size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
143816 -/** HUF_compress1X_repeat() :
143817 -*   Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
143818 -*   If it uses hufTable it does not modify hufTable or repeat.
143819 -*   If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
143820 -*   If preferRepeat then the old table will always be used if valid. */
143821 -size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
143822 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
143823 -                            int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
143825 -size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize);
143826 -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
143827 -                                  size_t workspaceSize); /**< single-symbol decoder */
143828 -size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
143829 -                                  size_t workspaceSize); /**< double-symbols decoder */
143831 -size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize,
143832 -                                   const HUF_DTable *DTable); /**< automatic selection of single or double symbol decoder, based on DTable */
143833 -size_t HUF_decompress1X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
143834 -size_t HUF_decompress1X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
143836 -#endif /* HUF_H_298734234 */
143837 diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c
143838 deleted file mode 100644
143839 index fd32838c185f..000000000000
143840 --- a/lib/zstd/huf_compress.c
143841 +++ /dev/null
143842 @@ -1,773 +0,0 @@
143844 - * Huffman encoder, part of New Generation Entropy library
143845 - * Copyright (C) 2013-2016, Yann Collet.
143847 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
143849 - * Redistribution and use in source and binary forms, with or without
143850 - * modification, are permitted provided that the following conditions are
143851 - * met:
143853 - *   * Redistributions of source code must retain the above copyright
143854 - * notice, this list of conditions and the following disclaimer.
143855 - *   * Redistributions in binary form must reproduce the above
143856 - * copyright notice, this list of conditions and the following disclaimer
143857 - * in the documentation and/or other materials provided with the
143858 - * distribution.
143860 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
143861 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
143862 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
143863 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
143864 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
143865 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
143866 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
143867 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
143868 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
143869 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
143870 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
143872 - * This program is free software; you can redistribute it and/or modify it under
143873 - * the terms of the GNU General Public License version 2 as published by the
143874 - * Free Software Foundation. This program is dual-licensed; you may select
143875 - * either version 2 of the GNU General Public License ("GPL") or BSD license
143876 - * ("BSD").
143878 - * You can contact the author at :
143879 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
143880 - */
143882 -/* **************************************************************
143883 -*  Includes
143884 -****************************************************************/
143885 -#include "bitstream.h"
143886 -#include "fse.h" /* header compression */
143887 -#include "huf.h"
143888 -#include <linux/kernel.h>
143889 -#include <linux/string.h> /* memcpy, memset */
143891 -/* **************************************************************
143892 -*  Error Management
143893 -****************************************************************/
143894 -#define HUF_STATIC_ASSERT(c)                                   \
143895 -       {                                                      \
143896 -               enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
143897 -       } /* use only *after* variable declarations */
143898 -#define CHECK_V_F(e, f)     \
143899 -       size_t const e = f; \
143900 -       if (ERR_isError(e)) \
143901 -       return f
143902 -#define CHECK_F(f)                        \
143903 -       {                                 \
143904 -               CHECK_V_F(_var_err__, f); \
143905 -       }
143907 -/* **************************************************************
143908 -*  Utils
143909 -****************************************************************/
143910 -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
143912 -       return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
143915 -/* *******************************************************
143916 -*  HUF : Huffman block compression
143917 -*********************************************************/
143918 -/* HUF_compressWeights() :
143919 - * Same as FSE_compress(), but dedicated to huff0's weights compression.
143920 - * The use case needs much less stack memory.
143921 - * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
143922 - */
143923 -#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
143924 -size_t HUF_compressWeights_wksp(void *dst, size_t dstSize, const void *weightTable, size_t wtSize, void *workspace, size_t workspaceSize)
143926 -       BYTE *const ostart = (BYTE *)dst;
143927 -       BYTE *op = ostart;
143928 -       BYTE *const oend = ostart + dstSize;
143930 -       U32 maxSymbolValue = HUF_TABLELOG_MAX;
143931 -       U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
143933 -       FSE_CTable *CTable;
143934 -       U32 *count;
143935 -       S16 *norm;
143936 -       size_t spaceUsed32 = 0;
143938 -       HUF_STATIC_ASSERT(sizeof(FSE_CTable) == sizeof(U32));
143940 -       CTable = (FSE_CTable *)((U32 *)workspace + spaceUsed32);
143941 -       spaceUsed32 += FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX);
143942 -       count = (U32 *)workspace + spaceUsed32;
143943 -       spaceUsed32 += HUF_TABLELOG_MAX + 1;
143944 -       norm = (S16 *)((U32 *)workspace + spaceUsed32);
143945 -       spaceUsed32 += ALIGN(sizeof(S16) * (HUF_TABLELOG_MAX + 1), sizeof(U32)) >> 2;
143947 -       if ((spaceUsed32 << 2) > workspaceSize)
143948 -               return ERROR(tableLog_tooLarge);
143949 -       workspace = (U32 *)workspace + spaceUsed32;
143950 -       workspaceSize -= (spaceUsed32 << 2);
143952 -       /* init conditions */
143953 -       if (wtSize <= 1)
143954 -               return 0; /* Not compressible */
143956 -       /* Scan input and build symbol stats */
143957 -       {
143958 -               CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize));
143959 -               if (maxCount == wtSize)
143960 -                       return 1; /* only a single symbol in src : rle */
143961 -               if (maxCount == 1)
143962 -                       return 0; /* each symbol appears at most once => not compressible */
143963 -       }
143965 -       tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
143966 -       CHECK_F(FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue));
143968 -       /* Write table description header */
143969 -       {
143970 -               CHECK_V_F(hSize, FSE_writeNCount(op, oend - op, norm, maxSymbolValue, tableLog));
143971 -               op += hSize;
143972 -       }
143974 -       /* Compress */
143975 -       CHECK_F(FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, workspace, workspaceSize));
143976 -       {
143977 -               CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable));
143978 -               if (cSize == 0)
143979 -                       return 0; /* not enough space for compressed data */
143980 -               op += cSize;
143981 -       }
143983 -       return op - ostart;
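
The `_wksp` entry points above never allocate: every temporary (CTable, count, norm) is carved out of a single caller-supplied arena, with a cursor counted in U32 units and a single size check at the end. A minimal sketch of that carving pattern, in plain C99 with hypothetical sizes and names (the real code derives its sizes from FSE/HUF limits):

    #include <stddef.h>
    #include <stdint.h>

    /* Carve two typed sub-buffers out of one caller-provided uint32_t arena,
     * the way HUF_compressWeights_wksp() carves CTable/count/norm above.
     * The sizes (13, etc.) are hypothetical placeholders. */
    static int carve_workspace(void *workspace, size_t workspaceSize,
                               uint32_t **count, int16_t **norm)
    {
            size_t used32 = 0;                        /* cursor, in U32 units */

            *count = (uint32_t *)workspace + used32;
            used32 += 13;                             /* e.g. HUF_TABLELOG_MAX + 1 */
            *norm = (int16_t *)((uint32_t *)workspace + used32);
            used32 += (13 * sizeof(int16_t) + 3) / 4; /* round up to whole U32s */

            if ((used32 << 2) > workspaceSize)        /* used32 << 2 == bytes used */
                    return -1;                        /* arena too small */
            return 0;
    }

The `(spaceUsed32 << 2)` comparisons in the original are this same bytes-versus-U32-units conversion.
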
143986 -struct HUF_CElt_s {
143987 -       U16 val;
143988 -       BYTE nbBits;
143989 -}; /* typedef'd to HUF_CElt within "huf.h" */
143991 -/*! HUF_writeCTable_wksp() :
143992 -       `CTable` : Huffman tree to save, using huf representation.
143993 -       @return : size of saved CTable */
143994 -size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, U32 maxSymbolValue, U32 huffLog, void *workspace, size_t workspaceSize)
143996 -       BYTE *op = (BYTE *)dst;
143997 -       U32 n;
143999 -       BYTE *bitsToWeight;
144000 -       BYTE *huffWeight;
144001 -       size_t spaceUsed32 = 0;
144003 -       bitsToWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
144004 -       spaceUsed32 += ALIGN(HUF_TABLELOG_MAX + 1, sizeof(U32)) >> 2;
144005 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
144006 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX, sizeof(U32)) >> 2;
144008 -       if ((spaceUsed32 << 2) > workspaceSize)
144009 -               return ERROR(tableLog_tooLarge);
144010 -       workspace = (U32 *)workspace + spaceUsed32;
144011 -       workspaceSize -= (spaceUsed32 << 2);
144013 -       /* check conditions */
144014 -       if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
144015 -               return ERROR(maxSymbolValue_tooLarge);
144017 -       /* convert to weight */
144018 -       bitsToWeight[0] = 0;
144019 -       for (n = 1; n < huffLog + 1; n++)
144020 -               bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
144021 -       for (n = 0; n < maxSymbolValue; n++)
144022 -               huffWeight[n] = bitsToWeight[CTable[n].nbBits];
144024 -       /* attempt weights compression by FSE */
144025 -       {
144026 -               CHECK_V_F(hSize, HUF_compressWeights_wksp(op + 1, maxDstSize - 1, huffWeight, maxSymbolValue, workspace, workspaceSize));
144027 -               if ((hSize > 1) & (hSize < maxSymbolValue / 2)) { /* FSE compressed */
144028 -                       op[0] = (BYTE)hSize;
144029 -                       return hSize + 1;
144030 -               }
144031 -       }
144033 -       /* write raw values as 4-bits (max : 15) */
144034 -       if (maxSymbolValue > (256 - 128))
144035 -               return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
144036 -       if (((maxSymbolValue + 1) / 2) + 1 > maxDstSize)
144037 -               return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
144038 -       op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue - 1));
144039 -       huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
144040 -       for (n = 0; n < maxSymbolValue; n += 2)
144041 -               op[(n / 2) + 1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n + 1]);
144042 -       return ((maxSymbolValue + 1) / 2) + 1;
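
When FSE compression of the weights does not pay off, the fallback above stores two 4-bit weights per byte behind a one-byte header of `128 + (maxSymbolValue - 1)`. A self-contained sketch of that packing in plain C99 (hypothetical name; requires every weight <= 15 and n <= 128, which the checks above enforce):

    #include <stddef.h>
    #include <stdint.h>

    /* Pack n weights (each <= 15) two per byte, as in the raw fallback above.
     * dst must hold 1 + (n + 1) / 2 bytes; returns the bytes written. */
    static size_t pack_weights_4bit(uint8_t *dst, const uint8_t *weight, unsigned n)
    {
            unsigned i;

            dst[0] = (uint8_t)(128 + (n - 1));  /* "raw weights" marker + count */
            for (i = 0; i < n; i += 2) {
                    uint8_t lo = (i + 1 < n) ? weight[i + 1] : 0; /* pad odd tail */
                    dst[(i / 2) + 1] = (uint8_t)((weight[i] << 4) + lo);
            }
            return ((n + 1) / 2) + 1;
    }

The original achieves the same odd-tail padding by zeroing `huffWeight[maxSymbolValue]` before its loop.
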
144045 -size_t HUF_readCTable_wksp(HUF_CElt *CTable, U32 maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
144047 -       U32 *rankVal;
144048 -       BYTE *huffWeight;
144049 -       U32 tableLog = 0;
144050 -       U32 nbSymbols = 0;
144051 -       size_t readSize;
144052 -       size_t spaceUsed32 = 0;
144054 -       rankVal = (U32 *)workspace + spaceUsed32;
144055 -       spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
144056 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
144057 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
144059 -       if ((spaceUsed32 << 2) > workspaceSize)
144060 -               return ERROR(tableLog_tooLarge);
144061 -       workspace = (U32 *)workspace + spaceUsed32;
144062 -       workspaceSize -= (spaceUsed32 << 2);
144064 -       /* get symbol weights */
144065 -       readSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
144066 -       if (ERR_isError(readSize))
144067 -               return readSize;
144069 -       /* check result */
144070 -       if (tableLog > HUF_TABLELOG_MAX)
144071 -               return ERROR(tableLog_tooLarge);
144072 -       if (nbSymbols > maxSymbolValue + 1)
144073 -               return ERROR(maxSymbolValue_tooSmall);
144075 -       /* Prepare base value per rank */
144076 -       {
144077 -               U32 n, nextRankStart = 0;
144078 -               for (n = 1; n <= tableLog; n++) {
144079 -                       U32 curr = nextRankStart;
144080 -                       nextRankStart += (rankVal[n] << (n - 1));
144081 -                       rankVal[n] = curr;
144082 -               }
144083 -       }
144085 -       /* fill nbBits */
144086 -       {
144087 -               U32 n;
144088 -               for (n = 0; n < nbSymbols; n++) {
144089 -                       const U32 w = huffWeight[n];
144090 -                       CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
144091 -               }
144092 -       }
144094 -       /* fill val */
144095 -       {
144096 -               U16 nbPerRank[HUF_TABLELOG_MAX + 2] = {0}; /* support w=0=>n=tableLog+1 */
144097 -               U16 valPerRank[HUF_TABLELOG_MAX + 2] = {0};
144098 -               {
144099 -                       U32 n;
144100 -                       for (n = 0; n < nbSymbols; n++)
144101 -                               nbPerRank[CTable[n].nbBits]++;
144102 -               }
144103 -               /* determine starting value per rank */
144104 -               valPerRank[tableLog + 1] = 0; /* for w==0 */
144105 -               {
144106 -                       U16 min = 0;
144107 -                       U32 n;
144108 -                       for (n = tableLog; n > 0; n--) { /* start at n=tablelog <-> w=1 */
144109 -                               valPerRank[n] = min;     /* get starting value within each rank */
144110 -                               min += nbPerRank[n];
144111 -                               min >>= 1;
144112 -                       }
144113 -               }
144114 -               /* assign value within rank, symbol order */
144115 -               {
144116 -                       U32 n;
144117 -                       for (n = 0; n <= maxSymbolValue; n++)
144118 -                               CTable[n].val = valPerRank[CTable[n].nbBits]++;
144119 -               }
144120 -       }
144122 -       return readSize;
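
The two passes at the end of HUF_readCTable_wksp() are canonical Huffman value assignment: count symbols per code length, give the deepest rank the lowest code values, then hand out values in symbol order within each rank. The same logic in isolation (plain C99; MAX_BITS is a hypothetical stand-in for HUF_TABLELOG_MAX, and every nbBits is assumed to lie in [1, MAX_BITS]):

    #include <stdint.h>

    #define MAX_BITS 12

    /* Assign canonical code values from code lengths, mirroring the
     * nbPerRank/valPerRank passes above. */
    static void assign_canonical(const uint8_t *nbBits, uint16_t *val, unsigned nbSymbols)
    {
            uint16_t nbPerRank[MAX_BITS + 1] = {0};
            uint16_t valPerRank[MAX_BITS + 1] = {0};
            uint16_t min = 0;
            unsigned s;
            int n;

            for (s = 0; s < nbSymbols; s++)
                    nbPerRank[nbBits[s]]++;
            for (n = MAX_BITS; n > 0; n--) { /* deepest rank gets the lowest values */
                    valPerRank[n] = min;
                    min += nbPerRank[n];
                    min >>= 1;               /* one bit shorter => half the slots */
            }
            for (s = 0; s < nbSymbols; s++)  /* symbol order within each rank */
                    val[s] = valPerRank[nbBits[s]]++;
    }
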
144125 -typedef struct nodeElt_s {
144126 -       U32 count;
144127 -       U16 parent;
144128 -       BYTE byte;
144129 -       BYTE nbBits;
144130 -} nodeElt;
144132 -static U32 HUF_setMaxHeight(nodeElt *huffNode, U32 lastNonNull, U32 maxNbBits)
144134 -       const U32 largestBits = huffNode[lastNonNull].nbBits;
144135 -       if (largestBits <= maxNbBits)
144136 -               return largestBits; /* early exit : no elt > maxNbBits */
144138 -       /* there are several too-large elements (at least 2) */
144139 -       {
144140 -               int totalCost = 0;
144141 -               const U32 baseCost = 1 << (largestBits - maxNbBits);
144142 -               U32 n = lastNonNull;
144144 -               while (huffNode[n].nbBits > maxNbBits) {
144145 -                       totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
144146 -                       huffNode[n].nbBits = (BYTE)maxNbBits;
144147 -                       n--;
144148 -               } /* n stops at huffNode[n].nbBits <= maxNbBits */
144149 -               while (huffNode[n].nbBits == maxNbBits)
144150 -                       n--; /* n ends at the index of the smallest symbol using < maxNbBits */
144152 -               /* renorm totalCost */
144153 -               totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
144155 -               /* repay normalized cost */
144156 -               {
144157 -                       U32 const noSymbol = 0xF0F0F0F0;
144158 -                       U32 rankLast[HUF_TABLELOG_MAX + 2];
144159 -                       int pos;
144161 -                       /* Get pos of last (smallest) symbol per rank */
144162 -                       memset(rankLast, 0xF0, sizeof(rankLast));
144163 -                       {
144164 -                               U32 currNbBits = maxNbBits;
144165 -                               for (pos = n; pos >= 0; pos--) {
144166 -                                       if (huffNode[pos].nbBits >= currNbBits)
144167 -                                               continue;
144168 -                                       currNbBits = huffNode[pos].nbBits; /* < maxNbBits */
144169 -                                       rankLast[maxNbBits - currNbBits] = pos;
144170 -                               }
144171 -                       }
144173 -                       while (totalCost > 0) {
144174 -                               U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
144175 -                               for (; nBitsToDecrease > 1; nBitsToDecrease--) {
144176 -                                       U32 highPos = rankLast[nBitsToDecrease];
144177 -                                       U32 lowPos = rankLast[nBitsToDecrease - 1];
144178 -                                       if (highPos == noSymbol)
144179 -                                               continue;
144180 -                                       if (lowPos == noSymbol)
144181 -                                               break;
144182 -                                       {
144183 -                                               U32 const highTotal = huffNode[highPos].count;
144184 -                                               U32 const lowTotal = 2 * huffNode[lowPos].count;
144185 -                                               if (highTotal <= lowTotal)
144186 -                                                       break;
144187 -                                       }
144188 -                               }
144189 -                               /* only triggered when no more rank 1 symbols are left => find the closest one (note : there is necessarily at least one !) */
144190 -                               /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
144191 -                               while ((nBitsToDecrease <= HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
144192 -                                       nBitsToDecrease++;
144193 -                               totalCost -= 1 << (nBitsToDecrease - 1);
144194 -                               if (rankLast[nBitsToDecrease - 1] == noSymbol)
144195 -                                       rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
144196 -                               huffNode[rankLast[nBitsToDecrease]].nbBits++;
144197 -                               if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
144198 -                                       rankLast[nBitsToDecrease] = noSymbol;
144199 -                               else {
144200 -                                       rankLast[nBitsToDecrease]--;
144201 -                                       if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits - nBitsToDecrease)
144202 -                                               rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
144203 -                               }
144204 -                       } /* while (totalCost > 0) */
144206 -       while (totalCost < 0) {                /* Sometimes, the cost correction overshoots */
144207 -                               if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0
144208 -                                                                 (using maxNbBits) */
144209 -                                       while (huffNode[n].nbBits == maxNbBits)
144210 -                                               n--;
144211 -                                       huffNode[n + 1].nbBits--;
144212 -                                       rankLast[1] = n + 1;
144213 -                                       totalCost++;
144214 -                                       continue;
144215 -                               }
144216 -                               huffNode[rankLast[1] + 1].nbBits--;
144217 -                               rankLast[1]++;
144218 -                               totalCost++;
144219 -                       }
144220 -               }
144221 -       } /* there are several too-large elements (at least 2) */
144223 -       return maxNbBits;
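
HUF_setMaxHeight() is easiest to read through the Kraft inequality. Clamping over-long codes down to maxNbBits shortens them and pushes the code over budget; after the renormalizing shift, `totalCost` is the excess in units of 2^-maxNbBits, and each repayment step that lengthens a symbol from maxNbBits - k to maxNbBits - k + 1 removes 2^(k-1) of those units, matching `totalCost -= 1 << (nBitsToDecrease - 1)` above. A compact sanity check of the invariant, as a sketch in plain C99 (hypothetical name):

    #include <stdint.h>

    /* Kraft sum scaled by 2^maxNbBits: a length assignment is feasible iff
     * the sum of 2^(maxNbBits - nbBits[s]) over all used symbols does not
     * exceed 2^maxNbBits. HUF_setMaxHeight() first clamps lengths (driving
     * this sum over budget), then lengthens symbols until it holds again. */
    static uint64_t scaled_kraft_sum(const uint8_t *nbBits, unsigned n, unsigned maxNbBits)
    {
            uint64_t sum = 0;
            unsigned s;

            for (s = 0; s < n; s++)
                    if (nbBits[s] != 0) /* nbBits[s] <= maxNbBits after clamping */
                            sum += (uint64_t)1 << (maxNbBits - nbBits[s]);
            return sum; /* feasible iff sum <= ((uint64_t)1 << maxNbBits) */
    }
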
144226 -typedef struct {
144227 -       U32 base;
144228 -       U32 curr;
144229 -} rankPos;
144231 -static void HUF_sort(nodeElt *huffNode, const U32 *count, U32 maxSymbolValue)
144233 -       rankPos rank[32];
144234 -       U32 n;
144236 -       memset(rank, 0, sizeof(rank));
144237 -       for (n = 0; n <= maxSymbolValue; n++) {
144238 -               U32 r = BIT_highbit32(count[n] + 1);
144239 -               rank[r].base++;
144240 -       }
144241 -       for (n = 30; n > 0; n--)
144242 -               rank[n - 1].base += rank[n].base;
144243 -       for (n = 0; n < 32; n++)
144244 -               rank[n].curr = rank[n].base;
144245 -       for (n = 0; n <= maxSymbolValue; n++) {
144246 -               U32 const c = count[n];
144247 -               U32 const r = BIT_highbit32(c + 1) + 1;
144248 -               U32 pos = rank[r].curr++;
144249 -               while ((pos > rank[r].base) && (c > huffNode[pos - 1].count))
144250 -                       huffNode[pos] = huffNode[pos - 1], pos--;
144251 -               huffNode[pos].count = c;
144252 -               huffNode[pos].byte = (BYTE)n;
144253 -       }
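
HUF_sort() is a counting sort on log2 buckets: each symbol lands in a bucket keyed by `BIT_highbit32(count + 1)`, buckets are laid out largest-first, and a short insertion pass orders the counts (at most a factor of two apart) inside each bucket. The same scheme over an index array, as a sketch in plain C99 (hypothetical names; assumes counts < 2^31):

    #include <stdint.h>
    #include <string.h>

    static unsigned highbit32(uint32_t x) /* like BIT_highbit32(); x != 0 */
    {
            unsigned r = 0;
            while (x >>= 1)
                    r++;
            return r;
    }

    /* Sort symbol indices by decreasing count: bucket by highbit32(count+1),
     * lay buckets out largest-first, then insertion-sort within the bucket. */
    static void sort_by_count_desc(uint32_t *idx, const uint32_t *count, unsigned n)
    {
            uint32_t base[32] = {0}, cur[32];
            unsigned s;
            int r;

            for (s = 0; s < n; s++)
                    base[highbit32(count[s] + 1)]++;
            for (r = 30; r > 0; r--)        /* suffix sums: base[r] = number */
                    base[r - 1] += base[r]; /* of symbols with bucket >= r   */
            memcpy(cur, base, sizeof(base));
            for (s = 0; s < n; s++) {
                    unsigned b = highbit32(count[s] + 1) + 1; /* region starts at base[b] */
                    uint32_t pos = cur[b]++;
                    while (pos > base[b] && count[s] > count[idx[pos - 1]]) {
                            idx[pos] = idx[pos - 1]; /* bubble within the bucket */
                            pos--;
                    }
                    idx[pos] = s;
            }
    }
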
144256 -/** HUF_buildCTable_wksp() :
144257 - *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
144258 - *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
144259 - */
144260 -#define STARTNODE (HUF_SYMBOLVALUE_MAX + 1)
144261 -typedef nodeElt huffNodeTable[2 * HUF_SYMBOLVALUE_MAX + 1 + 1];
144262 -size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize)
144264 -       nodeElt *const huffNode0 = (nodeElt *)workSpace;
144265 -       nodeElt *const huffNode = huffNode0 + 1;
144266 -       U32 n, nonNullRank;
144267 -       int lowS, lowN;
144268 -       U16 nodeNb = STARTNODE;
144269 -       U32 nodeRoot;
144271 -       /* safety checks */
144272 -       if (wkspSize < sizeof(huffNodeTable))
144273 -               return ERROR(GENERIC); /* workSpace is not large enough */
144274 -       if (maxNbBits == 0)
144275 -               maxNbBits = HUF_TABLELOG_DEFAULT;
144276 -       if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
144277 -               return ERROR(GENERIC);
144278 -       memset(huffNode0, 0, sizeof(huffNodeTable));
144280 -       /* sort, decreasing order */
144281 -       HUF_sort(huffNode, count, maxSymbolValue);
144283 -       /* init for parents */
144284 -       nonNullRank = maxSymbolValue;
144285 -       while (huffNode[nonNullRank].count == 0)
144286 -               nonNullRank--;
144287 -       lowS = nonNullRank;
144288 -       nodeRoot = nodeNb + lowS - 1;
144289 -       lowN = nodeNb;
144290 -       huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count;
144291 -       huffNode[lowS].parent = huffNode[lowS - 1].parent = nodeNb;
144292 -       nodeNb++;
144293 -       lowS -= 2;
144294 -       for (n = nodeNb; n <= nodeRoot; n++)
144295 -               huffNode[n].count = (U32)(1U << 30);
144296 -       huffNode0[0].count = (U32)(1U << 31); /* fake entry, strong barrier */
144298 -       /* create parents */
144299 -       while (nodeNb <= nodeRoot) {
144300 -               U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
144301 -               U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
144302 -               huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
144303 -               huffNode[n1].parent = huffNode[n2].parent = nodeNb;
144304 -               nodeNb++;
144305 -       }
144307 -       /* distribute weights (unlimited tree height) */
144308 -       huffNode[nodeRoot].nbBits = 0;
144309 -       for (n = nodeRoot - 1; n >= STARTNODE; n--)
144310 -               huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
144311 -       for (n = 0; n <= nonNullRank; n++)
144312 -               huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
144314 -       /* enforce maxTableLog */
144315 -       maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
144317 -       /* fill result into tree (val, nbBits) */
144318 -       {
144319 -               U16 nbPerRank[HUF_TABLELOG_MAX + 1] = {0};
144320 -               U16 valPerRank[HUF_TABLELOG_MAX + 1] = {0};
144321 -               if (maxNbBits > HUF_TABLELOG_MAX)
144322 -                       return ERROR(GENERIC); /* check fit into table */
144323 -               for (n = 0; n <= nonNullRank; n++)
144324 -                       nbPerRank[huffNode[n].nbBits]++;
144325 -               /* determine starting value per rank */
144326 -               {
144327 -                       U16 min = 0;
144328 -                       for (n = maxNbBits; n > 0; n--) {
144329 -                               valPerRank[n] = min; /* get starting value within each rank */
144330 -                               min += nbPerRank[n];
144331 -                               min >>= 1;
144332 -                       }
144333 -               }
144334 -               for (n = 0; n <= maxSymbolValue; n++)
144335 -                       tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
144336 -               for (n = 0; n <= maxSymbolValue; n++)
144337 -                       tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
144338 -       }
144340 -       return maxNbBits;
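
The "create parents" loop above is the classic two-queue Huffman construction: with the leaves sorted, the two smallest live nodes are always at the head of the leaf queue or the head of the FIFO of freshly built internal nodes, so the tree is built in linear time with no heap; the `1U << 30` / `1U << 31` entries are sentinels standing in for bounds checks. A runnable toy version with explicit bounds checks instead of sentinels (plain C99, hypothetical names and example counts):

    #include <stdint.h>
    #include <stdio.h>

    #define NSYM 6

    int main(void)
    {
            /* leaf counts, sorted descending as HUF_sort() guarantees */
            uint32_t count[2 * NSYM] = {45, 16, 13, 12, 9, 5};
            int parent[2 * NSYM] = {0};
            int depth[2 * NSYM] = {0};
            int lowS = NSYM - 1; /* next unconsumed leaf (smallest first)  */
            int lowN = NSYM;     /* head of the FIFO of internal nodes     */
            int nodeNb = NSYM;   /* where the next internal node is stored */
            int root = 2 * NSYM - 2, n;

            while (nodeNb <= root) {
                    int n1 = (lowS >= 0 && (lowN >= nodeNb || count[lowS] < count[lowN])) ? lowS-- : lowN++;
                    int n2 = (lowS >= 0 && (lowN >= nodeNb || count[lowS] < count[lowN])) ? lowS-- : lowN++;
                    count[nodeNb] = count[n1] + count[n2];
                    parent[n1] = parent[n2] = nodeNb;
                    nodeNb++;
            }
            depth[root] = 0;
            for (n = root - 1; n >= 0; n--) /* parents always have higher indices */
                    depth[n] = depth[parent[n]] + 1;
            for (n = 0; n < NSYM; n++)      /* prints lengths 1,3,3,3,4,4 here */
                    printf("symbol %d: count %u -> %d bits\n", n, (unsigned)count[n], depth[n]);
            return 0;
    }
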
144343 -static size_t HUF_estimateCompressedSize(HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
144345 -       size_t nbBits = 0;
144346 -       int s;
144347 -       for (s = 0; s <= (int)maxSymbolValue; ++s) {
144348 -               nbBits += CTable[s].nbBits * count[s];
144349 -       }
144350 -       return nbBits >> 3;
144353 -static int HUF_validateCTable(const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
144355 -       int bad = 0;
144356 -       int s;
144357 -       for (s = 0; s <= (int)maxSymbolValue; ++s) {
144358 -               bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
144359 -       }
144360 -       return !bad;
144363 -static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, U32 symbol, const HUF_CElt *CTable)
144365 -       BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
144368 -size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
144370 -#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
144372 -#define HUF_FLUSHBITS_1(stream)                                            \
144373 -       if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \
144374 -       HUF_FLUSHBITS(stream)
144376 -#define HUF_FLUSHBITS_2(stream)                                            \
144377 -       if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 4 + 7) \
144378 -       HUF_FLUSHBITS(stream)
144380 -size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
144382 -       const BYTE *ip = (const BYTE *)src;
144383 -       BYTE *const ostart = (BYTE *)dst;
144384 -       BYTE *const oend = ostart + dstSize;
144385 -       BYTE *op = ostart;
144386 -       size_t n;
144387 -       BIT_CStream_t bitC;
144389 -       /* init */
144390 -       if (dstSize < 8)
144391 -               return 0; /* not enough space to compress */
144392 -       {
144393 -               size_t const initErr = BIT_initCStream(&bitC, op, oend - op);
144394 -               if (HUF_isError(initErr))
144395 -                       return 0;
144396 -       }
144398 -       n = srcSize & ~3; /* round srcSize down to a multiple of 4 */
144399 -       switch (srcSize & 3) {
144400 -       case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC);
144401 -               fallthrough;
144402 -       case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC);
144403 -               fallthrough;
144404 -       case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC);
144405 -               fallthrough;
144406 -       case 0:
144407 -       default:;
144408 -       }
144410 -       for (; n > 0; n -= 4) { /* note : n&3==0 at this stage */
144411 -               HUF_encodeSymbol(&bitC, ip[n - 1], CTable);
144412 -               HUF_FLUSHBITS_1(&bitC);
144413 -               HUF_encodeSymbol(&bitC, ip[n - 2], CTable);
144414 -               HUF_FLUSHBITS_2(&bitC);
144415 -               HUF_encodeSymbol(&bitC, ip[n - 3], CTable);
144416 -               HUF_FLUSHBITS_1(&bitC);
144417 -               HUF_encodeSymbol(&bitC, ip[n - 4], CTable);
144418 -               HUF_FLUSHBITS(&bitC);
144419 -       }
144421 -       return BIT_closeCStream(&bitC);
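
The encoder above walks the input back-to-front so the matching decoder can read the bitstream front-to-back, and it folds the `srcSize % 4` remainder into a fall-through switch before the unrolled loop. The control-flow skeleton, as a sketch with a hypothetical process() standing in for HUF_encodeSymbol() plus its flush macros:

    #include <stddef.h>

    static void encode_reverse_by_4(const unsigned char *ip, size_t srcSize,
                                    void (*process)(unsigned char))
    {
            size_t n = srcSize & ~(size_t)3;   /* largest multiple of 4 */

            switch (srcSize & 3) {             /* tail symbols, highest index first */
            case 3: process(ip[n + 2]); /* fallthrough */
            case 2: process(ip[n + 1]); /* fallthrough */
            case 1: process(ip[n + 0]); /* fallthrough */
            case 0: default: break;
            }
            for (; n > 0; n -= 4) {            /* n is a multiple of 4 here */
                    process(ip[n - 1]);
                    process(ip[n - 2]);
                    process(ip[n - 3]);
                    process(ip[n - 4]);
            }
    }
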
144424 -size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
144426 -       size_t const segmentSize = (srcSize + 3) / 4; /* first 3 segments */
144427 -       const BYTE *ip = (const BYTE *)src;
144428 -       const BYTE *const iend = ip + srcSize;
144429 -       BYTE *const ostart = (BYTE *)dst;
144430 -       BYTE *const oend = ostart + dstSize;
144431 -       BYTE *op = ostart;
144433 -       if (dstSize < 6 + 1 + 1 + 1 + 8)
144434 -               return 0; /* minimum space to compress successfully */
144435 -       if (srcSize < 12)
144436 -               return 0; /* no saving possible : too small input */
144437 -       op += 6;          /* jumpTable */
144439 -       {
144440 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
144441 -               if (cSize == 0)
144442 -                       return 0;
144443 -               ZSTD_writeLE16(ostart, (U16)cSize);
144444 -               op += cSize;
144445 -       }
144447 -       ip += segmentSize;
144448 -       {
144449 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
144450 -               if (cSize == 0)
144451 -                       return 0;
144452 -               ZSTD_writeLE16(ostart + 2, (U16)cSize);
144453 -               op += cSize;
144454 -       }
144456 -       ip += segmentSize;
144457 -       {
144458 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
144459 -               if (cSize == 0)
144460 -                       return 0;
144461 -               ZSTD_writeLE16(ostart + 4, (U16)cSize);
144462 -               op += cSize;
144463 -       }
144465 -       ip += segmentSize;
144466 -       {
144467 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, iend - ip, CTable));
144468 -               if (cSize == 0)
144469 -                       return 0;
144470 -               op += cSize;
144471 -       }
144473 -       return op - ostart;
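
The 4-stream format above is framed by a 6-byte jump table: the compressed sizes of streams 1-3 as little-endian U16 values, with stream 4's size left implicit (whatever remains of the block). A sketch of both directions in plain C99 (hypothetical names):

    #include <stddef.h>
    #include <stdint.h>

    /* Write the 6-byte jump table: three little-endian U16 stream sizes,
     * as ZSTD_writeLE16() does above. */
    static void write_jump_table(uint8_t *dst, uint16_t s1, uint16_t s2, uint16_t s3)
    {
            dst[0] = (uint8_t)s1; dst[1] = (uint8_t)(s1 >> 8);
            dst[2] = (uint8_t)s2; dst[3] = (uint8_t)(s2 >> 8);
            dst[4] = (uint8_t)s3; dst[5] = (uint8_t)(s3 >> 8);
    }

    /* The fourth stream's size is implicit; the decoder must range-check it
     * (the corruption_detected path in the 4X decoder below). */
    static size_t fourth_stream_size(size_t cSrcSize, uint16_t s1, uint16_t s2, uint16_t s3)
    {
            return cSrcSize - 6 - (size_t)s1 - (size_t)s2 - (size_t)s3;
    }
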
144476 -static size_t HUF_compressCTable_internal(BYTE *const ostart, BYTE *op, BYTE *const oend, const void *src, size_t srcSize, unsigned singleStream,
144477 -                                         const HUF_CElt *CTable)
144479 -       size_t const cSize =
144480 -           singleStream ? HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
144481 -       if (HUF_isError(cSize)) {
144482 -               return cSize;
144483 -       }
144484 -       if (cSize == 0) {
144485 -               return 0;
144486 -       } /* incompressible */
144487 -       op += cSize;
144488 -       /* check compressibility */
144489 -       if ((size_t)(op - ostart) >= srcSize - 1) {
144490 -               return 0;
144491 -       }
144492 -       return op - ostart;
144495 -/* `workSpace` must be a table of at least 1024 unsigned */
144496 -static size_t HUF_compress_internal(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog,
144497 -                                   unsigned singleStream, void *workSpace, size_t wkspSize, HUF_CElt *oldHufTable, HUF_repeat *repeat, int preferRepeat)
144499 -       BYTE *const ostart = (BYTE *)dst;
144500 -       BYTE *const oend = ostart + dstSize;
144501 -       BYTE *op = ostart;
144503 -       U32 *count;
144504 -       size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
144505 -       HUF_CElt *CTable;
144506 -       size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);
144508 -       /* checks & inits */
144509 -       if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize)
144510 -               return ERROR(GENERIC);
144511 -       if (!srcSize)
144512 -               return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
144513 -       if (!dstSize)
144514 -               return 0; /* cannot fit within dst budget */
144515 -       if (srcSize > HUF_BLOCKSIZE_MAX)
144516 -               return ERROR(srcSize_wrong); /* current block size limit */
144517 -       if (huffLog > HUF_TABLELOG_MAX)
144518 -               return ERROR(tableLog_tooLarge);
144519 -       if (!maxSymbolValue)
144520 -               maxSymbolValue = HUF_SYMBOLVALUE_MAX;
144521 -       if (!huffLog)
144522 -               huffLog = HUF_TABLELOG_DEFAULT;
144524 -       count = (U32 *)workSpace;
144525 -       workSpace = (BYTE *)workSpace + countSize;
144526 -       wkspSize -= countSize;
144527 -       CTable = (HUF_CElt *)workSpace;
144528 -       workSpace = (BYTE *)workSpace + CTableSize;
144529 -       wkspSize -= CTableSize;
144531 -       /* Heuristic : If we don't need to check the validity of the old table, use it for small inputs */
144532 -       if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
144533 -               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
144534 -       }
144536 -       /* Scan input and build symbol stats */
144537 -       {
144538 -               CHECK_V_F(largest, FSE_count_wksp(count, &maxSymbolValue, (const BYTE *)src, srcSize, (U32 *)workSpace));
144539 -               if (largest == srcSize) {
144540 -                       *ostart = ((const BYTE *)src)[0];
144541 -                       return 1;
144542 -               } /* single symbol, rle */
144543 -               if (largest <= (srcSize >> 7) + 1)
144544 -                       return 0; /* Fast heuristic : not compressible enough */
144545 -       }
144547 -       /* Check validity of previous table */
144548 -       if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
144549 -               *repeat = HUF_repeat_none;
144550 -       }
144551 -       /* Heuristic : use existing table for small inputs */
144552 -       if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
144553 -               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
144554 -       }
144556 -       /* Build Huffman Tree */
144557 -       huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
144558 -       {
144559 -               CHECK_V_F(maxBits, HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize));
144560 -               huffLog = (U32)maxBits;
144561 -               /* Zero the unused symbols so the table can be checked for validity */
144562 -               memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
144563 -       }
144565 -       /* Write table description header */
144566 -       {
144567 -               CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, CTable, maxSymbolValue, huffLog, workSpace, wkspSize));
144568 -               /* Check if using the previous table will be beneficial */
144569 -               if (repeat && *repeat != HUF_repeat_none) {
144570 -                       size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
144571 -                       size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
144572 -                       if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
144573 -                               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
144574 -                       }
144575 -               }
144576 -               /* Use the new table */
144577 -               if (hSize + 12ul >= srcSize) {
144578 -                       return 0;
144579 -               }
144580 -               op += hSize;
144581 -               if (repeat) {
144582 -                       *repeat = HUF_repeat_none;
144583 -               }
144584 -               if (oldHufTable) {
144585 -                       memcpy(oldHufTable, CTable, CTableSize);
144586 -               } /* Save the new table */
144587 -       }
144588 -       return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
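
The table-reuse decision above reduces to one predicate: keep the previous CTable when the old table's projected payload is no larger than the new header plus the new payload, or when the new header (plus a 12-byte margin, a heuristic in the original) cannot pay for itself against srcSize. As a standalone sketch of that predicate (plain C99, hypothetical name):

    #include <stddef.h>

    /* Mirrors the comparison in the "Write table description header" block:
     * oldSize/newSize come from HUF_estimateCompressedSize(), hSize from
     * HUF_writeCTable_wksp(). */
    static int should_reuse_old_table(size_t oldSize, size_t newSize,
                                      size_t hSize, size_t srcSize)
    {
            return oldSize <= hSize + newSize || hSize + 12 >= srcSize;
    }
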
144591 -size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
144592 -                          size_t wkspSize)
144594 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
144597 -size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
144598 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
144600 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat,
144601 -                                    preferRepeat);
144604 -size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
144605 -                          size_t wkspSize)
144607 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
144610 -size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
144611 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
144613 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat,
144614 -                                    preferRepeat);
144616 diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c
144617 deleted file mode 100644
144618 index 6526482047dc..000000000000
144619 --- a/lib/zstd/huf_decompress.c
144620 +++ /dev/null
144621 @@ -1,960 +0,0 @@
144623 - * Huffman decoder, part of New Generation Entropy library
144624 - * Copyright (C) 2013-2016, Yann Collet.
144626 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
144628 - * Redistribution and use in source and binary forms, with or without
144629 - * modification, are permitted provided that the following conditions are
144630 - * met:
144632 - *   * Redistributions of source code must retain the above copyright
144633 - * notice, this list of conditions and the following disclaimer.
144634 - *   * Redistributions in binary form must reproduce the above
144635 - * copyright notice, this list of conditions and the following disclaimer
144636 - * in the documentation and/or other materials provided with the
144637 - * distribution.
144639 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
144640 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
144641 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
144642 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
144643 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
144644 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
144645 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
144646 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
144647 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
144648 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
144649 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
144651 - * This program is free software; you can redistribute it and/or modify it under
144652 - * the terms of the GNU General Public License version 2 as published by the
144653 - * Free Software Foundation. This program is dual-licensed; you may select
144654 - * either version 2 of the GNU General Public License ("GPL") or BSD license
144655 - * ("BSD").
144657 - * You can contact the author at :
144658 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
144659 - */
144661 -/* **************************************************************
144662 -*  Compiler specifics
144663 -****************************************************************/
144664 -#define FORCE_INLINE static __always_inline
144666 -/* **************************************************************
144667 -*  Dependencies
144668 -****************************************************************/
144669 -#include "bitstream.h" /* BIT_* */
144670 -#include "fse.h"       /* header compression */
144671 -#include "huf.h"
144672 -#include <linux/compiler.h>
144673 -#include <linux/kernel.h>
144674 -#include <linux/string.h> /* memcpy, memset */
144676 -/* **************************************************************
144677 -*  Error Management
144678 -****************************************************************/
144679 -#define HUF_STATIC_ASSERT(c)                                   \
144680 -       {                                                      \
144681 -               enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
144682 -       } /* use only *after* variable declarations */
144684 -/*-***************************/
144685 -/*  generic DTableDesc       */
144686 -/*-***************************/
144688 -typedef struct {
144689 -       BYTE maxTableLog;
144690 -       BYTE tableType;
144691 -       BYTE tableLog;
144692 -       BYTE reserved;
144693 -} DTableDesc;
144695 -static DTableDesc HUF_getDTableDesc(const HUF_DTable *table)
144697 -       DTableDesc dtd;
144698 -       memcpy(&dtd, table, sizeof(dtd));
144699 -       return dtd;
144702 -/*-***************************/
144703 -/*  single-symbol decoding   */
144704 -/*-***************************/
144706 -typedef struct {
144707 -       BYTE byte;
144708 -       BYTE nbBits;
144709 -} HUF_DEltX2; /* single-symbol decoding */
144711 -size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
144713 -       U32 tableLog = 0;
144714 -       U32 nbSymbols = 0;
144715 -       size_t iSize;
144716 -       void *const dtPtr = DTable + 1;
144717 -       HUF_DEltX2 *const dt = (HUF_DEltX2 *)dtPtr;
144719 -       U32 *rankVal;
144720 -       BYTE *huffWeight;
144721 -       size_t spaceUsed32 = 0;
144723 -       rankVal = (U32 *)workspace + spaceUsed32;
144724 -       spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
144725 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
144726 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
144728 -       if ((spaceUsed32 << 2) > workspaceSize)
144729 -               return ERROR(tableLog_tooLarge);
144730 -       workspace = (U32 *)workspace + spaceUsed32;
144731 -       workspaceSize -= (spaceUsed32 << 2);
144733 -       HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
144734 -       /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* not necessary, even though some analyzers complain ... */
144736 -       iSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
144737 -       if (HUF_isError(iSize))
144738 -               return iSize;
144740 -       /* Table header */
144741 -       {
144742 -               DTableDesc dtd = HUF_getDTableDesc(DTable);
144743 -               if (tableLog > (U32)(dtd.maxTableLog + 1))
144744 -                       return ERROR(tableLog_tooLarge); /* DTable too small, the Huffman tree cannot fit */
144745 -               dtd.tableType = 0;
144746 -               dtd.tableLog = (BYTE)tableLog;
144747 -               memcpy(DTable, &dtd, sizeof(dtd));
144748 -       }
144750 -       /* Calculate starting value for each rank */
144751 -       {
144752 -               U32 n, nextRankStart = 0;
144753 -               for (n = 1; n < tableLog + 1; n++) {
144754 -                       U32 const curr = nextRankStart;
144755 -                       nextRankStart += (rankVal[n] << (n - 1));
144756 -                       rankVal[n] = curr;
144757 -               }
144758 -       }
144760 -       /* fill DTable */
144761 -       {
144762 -               U32 n;
144763 -               for (n = 0; n < nbSymbols; n++) {
144764 -                       U32 const w = huffWeight[n];
144765 -                       U32 const length = (1 << w) >> 1;
144766 -                       U32 u;
144767 -                       HUF_DEltX2 D;
144768 -                       D.byte = (BYTE)n;
144769 -                       D.nbBits = (BYTE)(tableLog + 1 - w);
144770 -                       for (u = rankVal[w]; u < rankVal[w] + length; u++)
144771 -                               dt[u] = D;
144772 -                       rankVal[w] += length;
144773 -               }
144774 -       }
144776 -       return iSize;
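
The single-symbol table built above enables the constant-time decode step used by HUF_decodeSymbolX2() below: peek `tableLog` bits, use them as an index, emit the stored byte, and advance the stream by only the entry's nbBits; a weight-w symbol was replicated across 2^(w-1) consecutive cells, so frequent symbols own more cells and consume fewer bits per lookup. A sketch against a 64-bit MSB-first bit buffer (plain C99; `bits`/`consumed` loosely model BIT_DStream_t's container and counter, and the caller is assumed to reload as BIT_reloadDStream() does):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint8_t byte; uint8_t nbBits; } DEltX2;

    /* One X2 decode step; requires *consumed + tableLog <= 64 before the call. */
    static uint8_t decode_one(const DEltX2 *dt, unsigned tableLog,
                              uint64_t bits, unsigned *consumed)
    {
            /* same shape as BIT_lookBitsFast(): peek the next tableLog bits */
            size_t idx = (size_t)((bits << *consumed) >> (64 - tableLog));

            *consumed += dt[idx].nbBits; /* skip only what the code really used */
            return dt[idx].byte;
    }
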
144779 -static BYTE HUF_decodeSymbolX2(BIT_DStream_t *Dstream, const HUF_DEltX2 *dt, const U32 dtLog)
144781 -       size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
144782 -       BYTE const c = dt[val].byte;
144783 -       BIT_skipBits(Dstream, dt[val].nbBits);
144784 -       return c;
144787 -#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
144789 -#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr)         \
144790 -       if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
144791 -       HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
144793 -#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
144794 -       if (ZSTD_64bits())                     \
144795 -       HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
144797 -FORCE_INLINE size_t HUF_decodeStreamX2(BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog)
144799 -       BYTE *const pStart = p;
144801 -       /* up to 4 symbols at a time */
144802 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd - 4)) {
144803 -               HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
144804 -               HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
144805 -               HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
144806 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
144807 -       }
144809 -       /* closer to the end */
144810 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
144811 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
144813 -       /* no more data to retrieve from bitstream, hence no need to reload */
144814 -       while (p < pEnd)
144815 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
144817 -       return pEnd - pStart;
144820 -static size_t HUF_decompress1X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
144822 -       BYTE *op = (BYTE *)dst;
144823 -       BYTE *const oend = op + dstSize;
144824 -       const void *dtPtr = DTable + 1;
144825 -       const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
144826 -       BIT_DStream_t bitD;
144827 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
144828 -       U32 const dtLog = dtd.tableLog;
144830 -       {
144831 -               size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
144832 -               if (HUF_isError(errorCode))
144833 -                       return errorCode;
144834 -       }
144836 -       HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
144838 -       /* check */
144839 -       if (!BIT_endOfDStream(&bitD))
144840 -               return ERROR(corruption_detected);
144842 -       return dstSize;
144845 -size_t HUF_decompress1X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
144847 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
144848 -       if (dtd.tableType != 0)
144849 -               return ERROR(GENERIC);
144850 -       return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
144853 -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
144855 -       const BYTE *ip = (const BYTE *)cSrc;
144857 -       size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
144858 -       if (HUF_isError(hSize))
144859 -               return hSize;
144860 -       if (hSize >= cSrcSize)
144861 -               return ERROR(srcSize_wrong);
144862 -       ip += hSize;
144863 -       cSrcSize -= hSize;
144865 -       return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
144868 -static size_t HUF_decompress4X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
144870 -       /* Check */
144871 -       if (cSrcSize < 10)
144872 -               return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
144874 -       {
144875 -               const BYTE *const istart = (const BYTE *)cSrc;
144876 -               BYTE *const ostart = (BYTE *)dst;
144877 -               BYTE *const oend = ostart + dstSize;
144878 -               const void *const dtPtr = DTable + 1;
144879 -               const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
144881 -               /* Init */
144882 -               BIT_DStream_t bitD1;
144883 -               BIT_DStream_t bitD2;
144884 -               BIT_DStream_t bitD3;
144885 -               BIT_DStream_t bitD4;
144886 -               size_t const length1 = ZSTD_readLE16(istart);
144887 -               size_t const length2 = ZSTD_readLE16(istart + 2);
144888 -               size_t const length3 = ZSTD_readLE16(istart + 4);
144889 -               size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
144890 -               const BYTE *const istart1 = istart + 6; /* jumpTable */
144891 -               const BYTE *const istart2 = istart1 + length1;
144892 -               const BYTE *const istart3 = istart2 + length2;
144893 -               const BYTE *const istart4 = istart3 + length3;
144894 -               const size_t segmentSize = (dstSize + 3) / 4;
144895 -               BYTE *const opStart2 = ostart + segmentSize;
144896 -               BYTE *const opStart3 = opStart2 + segmentSize;
144897 -               BYTE *const opStart4 = opStart3 + segmentSize;
144898 -               BYTE *op1 = ostart;
144899 -               BYTE *op2 = opStart2;
144900 -               BYTE *op3 = opStart3;
144901 -               BYTE *op4 = opStart4;
144902 -               U32 endSignal;
144903 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
144904 -               U32 const dtLog = dtd.tableLog;
144906 -               if (length4 > cSrcSize)
144907 -                       return ERROR(corruption_detected); /* overflow */
144908 -               {
144909 -                       size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
144910 -                       if (HUF_isError(errorCode))
144911 -                               return errorCode;
144912 -               }
144913 -               {
144914 -                       size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
144915 -                       if (HUF_isError(errorCode))
144916 -                               return errorCode;
144917 -               }
144918 -               {
144919 -                       size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
144920 -                       if (HUF_isError(errorCode))
144921 -                               return errorCode;
144922 -               }
144923 -               {
144924 -                       size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
144925 -                       if (HUF_isError(errorCode))
144926 -                               return errorCode;
144927 -               }
144929 -               /* 16-32 symbols per loop (4-8 symbols per stream) */
144930 -               endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
144931 -               for (; (endSignal == BIT_DStream_unfinished) && (op4 < (oend - 7));) {
144932 -                       HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
144933 -                       HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
144934 -                       HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
144935 -                       HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
144936 -                       HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
144937 -                       HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
144938 -                       HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
144939 -                       HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
144940 -                       HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
144941 -                       HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
144942 -                       HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
144943 -                       HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
144944 -                       HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
144945 -                       HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
144946 -                       HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
144947 -                       HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
144948 -                       endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
144949 -               }
144951 -               /* check corruption */
144952 -               if (op1 > opStart2)
144953 -                       return ERROR(corruption_detected);
144954 -               if (op2 > opStart3)
144955 -                       return ERROR(corruption_detected);
144956 -               if (op3 > opStart4)
144957 -                       return ERROR(corruption_detected);
144958 -               /* note : op4 is already verified within the main loop */
144960 -               /* finish bitStreams one by one */
144961 -               HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
144962 -               HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
144963 -               HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
144964 -               HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
144966 -               /* check */
144967 -               endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
144968 -               if (!endSignal)
144969 -                       return ERROR(corruption_detected);
144971 -               /* decoded size */
144972 -               return dstSize;
144973 -       }
144976 -size_t HUF_decompress4X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
144978 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
144979 -       if (dtd.tableType != 0)
144980 -               return ERROR(GENERIC);
144981 -       return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
144984 -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
144986 -       const BYTE *ip = (const BYTE *)cSrc;
144988 -       size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
144989 -       if (HUF_isError(hSize))
144990 -               return hSize;
144991 -       if (hSize >= cSrcSize)
144992 -               return ERROR(srcSize_wrong);
144993 -       ip += hSize;
144994 -       cSrcSize -= hSize;
144996 -       return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
144999 -/* *************************/
145000 -/* double-symbols decoding */
145001 -/* *************************/
145002 -typedef struct {
145003 -       U16 sequence;
145004 -       BYTE nbBits;
145005 -       BYTE length;
145006 -} HUF_DEltX4; /* double-symbols decoding */
145008 -typedef struct {
145009 -       BYTE symbol;
145010 -       BYTE weight;
145011 -} sortedSymbol_t;
145013 -/* HUF_fillDTableX4Level2() :
145014 - * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
145015 -static void HUF_fillDTableX4Level2(HUF_DEltX4 *DTable, U32 sizeLog, const U32 consumed, const U32 *rankValOrigin, const int minWeight,
145016 -                                  const sortedSymbol_t *sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq)
145018 -       HUF_DEltX4 DElt;
145019 -       U32 rankVal[HUF_TABLELOG_MAX + 1];
145021 -       /* get pre-calculated rankVal */
145022 -       memcpy(rankVal, rankValOrigin, sizeof(rankVal));
145024 -       /* fill skipped values */
145025 -       if (minWeight > 1) {
145026 -               U32 i, skipSize = rankVal[minWeight];
145027 -               ZSTD_writeLE16(&(DElt.sequence), baseSeq);
145028 -               DElt.nbBits = (BYTE)(consumed);
145029 -               DElt.length = 1;
145030 -               for (i = 0; i < skipSize; i++)
145031 -                       DTable[i] = DElt;
145032 -       }
145034 -       /* fill DTable */
145035 -       {
145036 -               U32 s;
145037 -               for (s = 0; s < sortedListSize; s++) { /* note : sortedSymbols already skipped */
145038 -                       const U32 symbol = sortedSymbols[s].symbol;
145039 -                       const U32 weight = sortedSymbols[s].weight;
145040 -                       const U32 nbBits = nbBitsBaseline - weight;
145041 -                       const U32 length = 1 << (sizeLog - nbBits);
145042 -                       const U32 start = rankVal[weight];
145043 -                       U32 i = start;
145044 -                       const U32 end = start + length;
145046 -                       ZSTD_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
145047 -                       DElt.nbBits = (BYTE)(nbBits + consumed);
145048 -                       DElt.length = 2;
145049 -                       do {
145050 -                               DTable[i++] = DElt;
145051 -                       } while (i < end); /* since length >= 1 */
145053 -                       rankVal[weight] += length;
145054 -               }
145055 -       }
145058 -typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];
145059 -typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
145061 -static void HUF_fillDTableX4(HUF_DEltX4 *DTable, const U32 targetLog, const sortedSymbol_t *sortedList, const U32 sortedListSize, const U32 *rankStart,
145062 -                            rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline)
145064 -       U32 rankVal[HUF_TABLELOG_MAX + 1];
145065 -       const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
145066 -       const U32 minBits = nbBitsBaseline - maxWeight;
145067 -       U32 s;
145069 -       memcpy(rankVal, rankValOrigin, sizeof(rankVal));
145071 -       /* fill DTable */
145072 -       for (s = 0; s < sortedListSize; s++) {
145073 -               const U16 symbol = sortedList[s].symbol;
145074 -               const U32 weight = sortedList[s].weight;
145075 -               const U32 nbBits = nbBitsBaseline - weight;
145076 -               const U32 start = rankVal[weight];
145077 -               const U32 length = 1 << (targetLog - nbBits);
145079 -               if (targetLog - nbBits >= minBits) { /* enough room for a second symbol */
145080 -                       U32 sortedRank;
145081 -                       int minWeight = nbBits + scaleLog;
145082 -                       if (minWeight < 1)
145083 -                               minWeight = 1;
145084 -                       sortedRank = rankStart[minWeight];
145085 -                       HUF_fillDTableX4Level2(DTable + start, targetLog - nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList + sortedRank,
145086 -                                              sortedListSize - sortedRank, nbBitsBaseline, symbol);
145087 -               } else {
145088 -                       HUF_DEltX4 DElt;
145089 -                       ZSTD_writeLE16(&(DElt.sequence), symbol);
145090 -                       DElt.nbBits = (BYTE)(nbBits);
145091 -                       DElt.length = 1;
145092 -                       {
145093 -                               U32 const end = start + length;
145094 -                               U32 u;
145095 -                               for (u = start; u < end; u++)
145096 -                                       DTable[u] = DElt;
145097 -                       }
145098 -               }
145099 -               rankVal[weight] += length;
145100 -       }
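
The double-symbol ("X4") tables being filled here trade memory for speed: when a first symbol's code is short enough to leave spare bits in the lookup, the entry already carries a likely second symbol, so one table access can emit two bytes. A sketch of the entry layout and the emit step (plain C99, hypothetical names mirroring HUF_DEltX4):

    #include <stdint.h>

    typedef struct {
            uint16_t sequence; /* 1 or 2 decoded bytes, little-endian */
            uint8_t  nbBits;   /* bits consumed by the whole entry    */
            uint8_t  length;   /* how many bytes of sequence are valid */
    } DEltX4;

    /* Copy the 1 or 2 decoded bytes to the output; returns the advance. */
    static unsigned emit(uint8_t *op, const DEltX4 *e)
    {
            op[0] = (uint8_t)(e->sequence & 0xFF);
            if (e->length == 2)
                    op[1] = (uint8_t)(e->sequence >> 8);
            return e->length;
    }
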
145103 -size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
145105 -       U32 tableLog, maxW, sizeOfSort, nbSymbols;
145106 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
145107 -       U32 const maxTableLog = dtd.maxTableLog;
145108 -       size_t iSize;
145109 -       void *dtPtr = DTable + 1; /* force compiler to avoid strict-aliasing */
145110 -       HUF_DEltX4 *const dt = (HUF_DEltX4 *)dtPtr;
145111 -       U32 *rankStart;
145113 -       rankValCol_t *rankVal;
145114 -       U32 *rankStats;
145115 -       U32 *rankStart0;
145116 -       sortedSymbol_t *sortedSymbol;
145117 -       BYTE *weightList;
145118 -       size_t spaceUsed32 = 0;
145120 -       HUF_STATIC_ASSERT((sizeof(rankValCol_t) & 3) == 0);
145122 -       rankVal = (rankValCol_t *)((U32 *)workspace + spaceUsed32);
145123 -       spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
145124 -       rankStats = (U32 *)workspace + spaceUsed32;
145125 -       spaceUsed32 += HUF_TABLELOG_MAX + 1;
145126 -       rankStart0 = (U32 *)workspace + spaceUsed32;
145127 -       spaceUsed32 += HUF_TABLELOG_MAX + 2;
145128 -       sortedSymbol = (sortedSymbol_t *)((U32 *)workspace + spaceUsed32);
145129 -       spaceUsed32 += ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
145130 -       weightList = (BYTE *)((U32 *)workspace + spaceUsed32);
145131 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
145133 -       if ((spaceUsed32 << 2) > workspaceSize)
145134 -               return ERROR(tableLog_tooLarge);
145135 -       workspace = (U32 *)workspace + spaceUsed32;
145136 -       workspaceSize -= (spaceUsed32 << 2);
145138 -       rankStart = rankStart0 + 1;
145139 -       memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
145141 -       HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
145142 -       if (maxTableLog > HUF_TABLELOG_MAX)
145143 -               return ERROR(tableLog_tooLarge);
145144 -       /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
145146 -       iSize = HUF_readStats_wksp(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
145147 -       if (HUF_isError(iSize))
145148 -               return iSize;
145150 -       /* check result */
145151 -       if (tableLog > maxTableLog)
145152 -               return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
145154 -       /* find maxWeight */
145155 -       for (maxW = tableLog; rankStats[maxW] == 0; maxW--) {
145156 -       } /* necessarily finds a solution before 0 */
145158 -       /* Get start index of each weight */
145159 -       {
145160 -               U32 w, nextRankStart = 0;
145161 -               for (w = 1; w < maxW + 1; w++) {
145162 -                       U32 curr = nextRankStart;
145163 -                       nextRankStart += rankStats[w];
145164 -                       rankStart[w] = curr;
145165 -               }
145166 -               rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
145167 -               sizeOfSort = nextRankStart;
145168 -       }
145170 -       /* sort symbols by weight */
145171 -       {
145172 -               U32 s;
145173 -               for (s = 0; s < nbSymbols; s++) {
145174 -                       U32 const w = weightList[s];
145175 -                       U32 const r = rankStart[w]++;
145176 -                       sortedSymbol[r].symbol = (BYTE)s;
145177 -                       sortedSymbol[r].weight = (BYTE)w;
145178 -               }
145179 -               rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
145180 -       }
145182 -       /* Build rankVal */
145183 -       {
145184 -               U32 *const rankVal0 = rankVal[0];
145185 -               {
145186 -                       int const rescale = (maxTableLog - tableLog) - 1; /* tableLog <= maxTableLog */
145187 -                       U32 nextRankVal = 0;
145188 -                       U32 w;
145189 -                       for (w = 1; w < maxW + 1; w++) {
145190 -                               U32 curr = nextRankVal;
145191 -                               nextRankVal += rankStats[w] << (w + rescale);
145192 -                               rankVal0[w] = curr;
145193 -                       }
145194 -               }
145195 -               {
145196 -                       U32 const minBits = tableLog + 1 - maxW;
145197 -                       U32 consumed;
145198 -                       for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
145199 -                               U32 *const rankValPtr = rankVal[consumed];
145200 -                               U32 w;
145201 -                               for (w = 1; w < maxW + 1; w++) {
145202 -                                       rankValPtr[w] = rankVal0[w] >> consumed;
145203 -                               }
145204 -                       }
145205 -               }
145206 -       }
145208 -       HUF_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog + 1);
145210 -       dtd.tableLog = (BYTE)maxTableLog;
145211 -       dtd.tableType = 1;
145212 -       memcpy(DTable, &dtd, sizeof(dtd));
145213 -       return iSize;
145216 -static U32 HUF_decodeSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
145218 -       size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
145219 -       memcpy(op, dt + val, 2);
145220 -       BIT_skipBits(DStream, dt[val].nbBits);
145221 -       return dt[val].length;
145224 -static U32 HUF_decodeLastSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
145226 -       size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
145227 -       memcpy(op, dt + val, 1);
145228 -       if (dt[val].length == 1)
145229 -               BIT_skipBits(DStream, dt[val].nbBits);
145230 -       else {
145231 -               if (DStream->bitsConsumed < (sizeof(DStream->bitContainer) * 8)) {
145232 -                       BIT_skipBits(DStream, dt[val].nbBits);
145233 -                       if (DStream->bitsConsumed > (sizeof(DStream->bitContainer) * 8))
145234 -                               /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
145235 -                               DStream->bitsConsumed = (sizeof(DStream->bitContainer) * 8);
145236 -               }
145237 -       }
145238 -       return 1;
145241 -#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
145243 -#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr)         \
145244 -       if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
145245 -       ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
145247 -#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
145248 -       if (ZSTD_64bits())                     \
145249 -       ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
145251 -FORCE_INLINE size_t HUF_decodeStreamX4(BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX4 *const dt, const U32 dtLog)
145253 -       BYTE *const pStart = p;
145255 -       /* up to 8 symbols at a time */
145256 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd - (sizeof(bitDPtr->bitContainer) - 1))) {
145257 -               HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
145258 -               HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
145259 -               HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
145260 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
145261 -       }
145263 -       /* closer to end : up to 2 symbols at a time */
145264 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd - 2))
145265 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
145267 -       while (p <= pEnd - 2)
145268 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
145270 -       if (p < pEnd)
145271 -               p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
145273 -       return p - pStart;
145276 -static size_t HUF_decompress1X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
145278 -       BIT_DStream_t bitD;
145280 -       /* Init */
145281 -       {
145282 -               size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
145283 -               if (HUF_isError(errorCode))
145284 -                       return errorCode;
145285 -       }
145287 -       /* decode */
145288 -       {
145289 -               BYTE *const ostart = (BYTE *)dst;
145290 -               BYTE *const oend = ostart + dstSize;
145291 -               const void *const dtPtr = DTable + 1; /* force compiler to not use strict-aliasing */
145292 -               const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
145293 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
145294 -               HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
145295 -       }
145297 -       /* check */
145298 -       if (!BIT_endOfDStream(&bitD))
145299 -               return ERROR(corruption_detected);
145301 -       /* decoded size */
145302 -       return dstSize;
145305 -size_t HUF_decompress1X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
145307 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
145308 -       if (dtd.tableType != 1)
145309 -               return ERROR(GENERIC);
145310 -       return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
145313 -size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
145315 -       const BYTE *ip = (const BYTE *)cSrc;
145317 -       size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
145318 -       if (HUF_isError(hSize))
145319 -               return hSize;
145320 -       if (hSize >= cSrcSize)
145321 -               return ERROR(srcSize_wrong);
145322 -       ip += hSize;
145323 -       cSrcSize -= hSize;
145325 -       return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
145328 -static size_t HUF_decompress4X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
145330 -       if (cSrcSize < 10)
145331 -               return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
145333 -       {
145334 -               const BYTE *const istart = (const BYTE *)cSrc;
145335 -               BYTE *const ostart = (BYTE *)dst;
145336 -               BYTE *const oend = ostart + dstSize;
145337 -               const void *const dtPtr = DTable + 1;
145338 -               const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
145340 -               /* Init */
145341 -               BIT_DStream_t bitD1;
145342 -               BIT_DStream_t bitD2;
145343 -               BIT_DStream_t bitD3;
145344 -               BIT_DStream_t bitD4;
145345 -               size_t const length1 = ZSTD_readLE16(istart);
145346 -               size_t const length2 = ZSTD_readLE16(istart + 2);
145347 -               size_t const length3 = ZSTD_readLE16(istart + 4);
145348 -               size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
145349 -               const BYTE *const istart1 = istart + 6; /* jumpTable */
145350 -               const BYTE *const istart2 = istart1 + length1;
145351 -               const BYTE *const istart3 = istart2 + length2;
145352 -               const BYTE *const istart4 = istart3 + length3;
145353 -               size_t const segmentSize = (dstSize + 3) / 4;
145354 -               BYTE *const opStart2 = ostart + segmentSize;
145355 -               BYTE *const opStart3 = opStart2 + segmentSize;
145356 -               BYTE *const opStart4 = opStart3 + segmentSize;
145357 -               BYTE *op1 = ostart;
145358 -               BYTE *op2 = opStart2;
145359 -               BYTE *op3 = opStart3;
145360 -               BYTE *op4 = opStart4;
145361 -               U32 endSignal;
145362 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
145363 -               U32 const dtLog = dtd.tableLog;
145365 -               if (length4 > cSrcSize)
145366 -                       return ERROR(corruption_detected); /* overflow */
145367 -               {
145368 -                       size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
145369 -                       if (HUF_isError(errorCode))
145370 -                               return errorCode;
145371 -               }
145372 -               {
145373 -                       size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
145374 -                       if (HUF_isError(errorCode))
145375 -                               return errorCode;
145376 -               }
145377 -               {
145378 -                       size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
145379 -                       if (HUF_isError(errorCode))
145380 -                               return errorCode;
145381 -               }
145382 -               {
145383 -                       size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
145384 -                       if (HUF_isError(errorCode))
145385 -                               return errorCode;
145386 -               }
145388 -               /* 16-32 symbols per loop (4-8 symbols per stream) */
145389 -               endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
145390 -               for (; (endSignal == BIT_DStream_unfinished) & (op4 < (oend - (sizeof(bitD4.bitContainer) - 1)));) {
145391 -                       HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
145392 -                       HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
145393 -                       HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
145394 -                       HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
145395 -                       HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
145396 -                       HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
145397 -                       HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
145398 -                       HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
145399 -                       HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
145400 -                       HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
145401 -                       HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
145402 -                       HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
145403 -                       HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
145404 -                       HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
145405 -                       HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
145406 -                       HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
145408 -                       endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
145409 -               }
145411 -               /* check corruption */
145412 -               if (op1 > opStart2)
145413 -                       return ERROR(corruption_detected);
145414 -               if (op2 > opStart3)
145415 -                       return ERROR(corruption_detected);
145416 -               if (op3 > opStart4)
145417 -                       return ERROR(corruption_detected);
145418 -               /* note : op4 already verified within main loop */
145420 -               /* finish bitStreams one by one */
145421 -               HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
145422 -               HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
145423 -               HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
145424 -               HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
145426 -               /* check */
145427 -               {
145428 -                       U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
145429 -                       if (!endCheck)
145430 -                               return ERROR(corruption_detected);
145431 -               }
145433 -               /* decoded size */
145434 -               return dstSize;
145435 -       }
145436 -}
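The framing undone above is compact: a 6-byte jump table of three little-endian 16-bit stream sizes, with the fourth stream running to the end of the block, which is why 10 bytes (6 header bytes plus at least one byte per stream) is the strict minimum. A standalone sketch of just that header split, applying the same checks; names are illustrative:

#include <stddef.h>
#include <stdint.h>

static uint16_t read_le16(const uint8_t *p) { return (uint16_t)(p[0] | (p[1] << 8)); }

/* Split a 4-stream Huffman block into its substreams.
 * Returns 0 on success, -1 if the header claims more data than exists. */
static int split4(const uint8_t *src, size_t srcSize,
		  const uint8_t *start[4], size_t len[4])
{
	if (srcSize < 10) return -1;   /* jump table + >= 1 byte per stream */
	len[0] = read_le16(src);
	len[1] = read_le16(src + 2);
	len[2] = read_le16(src + 4);
	if (6 + len[0] + len[1] + len[2] > srcSize) return -1;
	len[3] = srcSize - 6 - len[0] - len[1] - len[2];
	start[0] = src + 6;
	start[1] = start[0] + len[0];
	start[2] = start[1] + len[1];
	start[3] = start[2] + len[2];
	return 0;
}

int main(void)
{
	/* jump table declares streams of 1, 2 and 3 bytes; 4th takes the rest */
	const uint8_t blk[16] = { 1, 0, 2, 0, 3, 0 /* payload follows */ };
	const uint8_t *start[4];
	size_t len[4];

	return split4(blk, sizeof(blk), start, len); /* len[3] == 4 */
}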
145438 -size_t HUF_decompress4X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
145440 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
145441 -       if (dtd.tableType != 1)
145442 -               return ERROR(GENERIC);
145443 -       return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
145446 -size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
145448 -       const BYTE *ip = (const BYTE *)cSrc;
145450 -       size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
145451 -       if (HUF_isError(hSize))
145452 -               return hSize;
145453 -       if (hSize >= cSrcSize)
145454 -               return ERROR(srcSize_wrong);
145455 -       ip += hSize;
145456 -       cSrcSize -= hSize;
145458 -       return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
145461 -/* ********************************/
145462 -/* Generic decompression selector */
145463 -/* ********************************/
145465 -size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
145467 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
145468 -       return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
145469 -                            : HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
145472 -size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
145474 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
145475 -       return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
145476 -                            : HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
145479 -typedef struct {
145480 -       U32 tableTime;
145481 -       U32 decode256Time;
145482 -} algo_time_t;
145483 -static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = {
145484 -    /* single, double, quad */
145485 -    {{0, 0}, {1, 1}, {2, 2}},               /* Q==0 : impossible */
145486 -    {{0, 0}, {1, 1}, {2, 2}},               /* Q==1 : impossible */
145487 -    {{38, 130}, {1313, 74}, {2151, 38}},     /* Q == 2 : 12-18% */
145488 -    {{448, 128}, {1353, 74}, {2238, 41}},    /* Q == 3 : 18-25% */
145489 -    {{556, 128}, {1353, 74}, {2238, 47}},    /* Q == 4 : 25-32% */
145490 -    {{714, 128}, {1418, 74}, {2436, 53}},    /* Q == 5 : 32-38% */
145491 -    {{883, 128}, {1437, 74}, {2464, 61}},    /* Q == 6 : 38-44% */
145492 -    {{897, 128}, {1515, 75}, {2622, 68}},    /* Q == 7 : 44-50% */
145493 -    {{926, 128}, {1613, 75}, {2730, 75}},    /* Q == 8 : 50-56% */
145494 -    {{947, 128}, {1729, 77}, {3359, 77}},    /* Q == 9 : 56-62% */
145495 -    {{1107, 128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
145496 -    {{1177, 128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
145497 -    {{1242, 128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
145498 -    {{1349, 128}, {2644, 106}, {5260, 106}}, /* Q ==13 : 81-87% */
145499 -    {{1455, 128}, {2422, 124}, {4174, 124}}, /* Q ==14 : 87-93% */
145500 -    {{722, 128}, {1891, 145}, {1936, 146}},  /* Q ==15 : 93-99% */
145501 -};
145503 -/** HUF_selectDecoder() :
145504 -*   Tells which decoder is likely to decode faster,
145505 -*   based on a set of pre-determined metrics.
145506 -*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
145507 -*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
145508 -U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize)
145510 -       /* decoder timing evaluation */
145511 -       U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
145512 -       U32 const D256 = (U32)(dstSize >> 8);
145513 -       U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
145514 -       U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
145515 -       DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */
145517 -       return DTime1 < DTime0;
145518 -}
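For a concrete feel of the heuristic: with dstSize = 64 KB and cSrcSize = 32 KB, Q = 32768 * 16 / 65536 = 8 and D256 = 256, so DTime0 = 926 + 128 * 256 = 33694 and DTime1 = 1613 + 75 * 256 = 20813, which grows to 23414 after the one-eighth cache penalty; the double-symbol decoder (return value 1) wins. A user-space sketch reproducing that arithmetic from the two table columns it needs:

#include <stdio.h>
#include <stddef.h>

typedef struct { unsigned tableTime, decode256Time; } algo_time;

/* Columns 0 (single-symbol) and 1 (double-symbol) of the table above. */
static const algo_time t[16][2] = {
	{{0,0},{1,1}}, {{0,0},{1,1}}, {{38,130},{1313,74}}, {{448,128},{1353,74}},
	{{556,128},{1353,74}}, {{714,128},{1418,74}}, {{883,128},{1437,74}},
	{{897,128},{1515,75}}, {{926,128},{1613,75}}, {{947,128},{1729,77}},
	{{1107,128},{2083,81}}, {{1177,128},{2379,87}}, {{1242,128},{2415,93}},
	{{1349,128},{2644,106}}, {{1455,128},{2422,124}}, {{722,128},{1891,145}},
};

static unsigned select_decoder(size_t dstSize, size_t cSrcSize)
{
	unsigned Q = (unsigned)(cSrcSize * 16 / dstSize); /* < 16: cSrcSize < dstSize */
	unsigned D256 = (unsigned)(dstSize >> 8);
	unsigned t0 = t[Q][0].tableTime + t[Q][0].decode256Time * D256;
	unsigned t1 = t[Q][1].tableTime + t[Q][1].decode256Time * D256;

	t1 += t1 >> 3; /* bias toward the smaller, cache-friendlier table */
	return t1 < t0;
}

int main(void)
{
	printf("%u\n", select_decoder(65536, 32768)); /* prints 1: double-symbol */
	return 0;
}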
145520 -typedef size_t (*decompressionAlgo)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize);
145522 -size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
145524 -       /* validation checks */
145525 -       if (dstSize == 0)
145526 -               return ERROR(dstSize_tooSmall);
145527 -       if (cSrcSize > dstSize)
145528 -               return ERROR(corruption_detected); /* invalid */
145529 -       if (cSrcSize == dstSize) {
145530 -               memcpy(dst, cSrc, dstSize);
145531 -               return dstSize;
145532 -       } /* not compressed */
145533 -       if (cSrcSize == 1) {
145534 -               memset(dst, *(const BYTE *)cSrc, dstSize);
145535 -               return dstSize;
145536 -       } /* RLE */
145538 -       {
145539 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
145540 -               return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
145541 -                             : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
145542 -       }
145545 -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
145547 -       /* validation checks */
145548 -       if (dstSize == 0)
145549 -               return ERROR(dstSize_tooSmall);
145550 -       if ((cSrcSize >= dstSize) || (cSrcSize <= 1))
145551 -               return ERROR(corruption_detected); /* invalid */
145553 -       {
145554 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
145555 -               return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
145556 -                             : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
145557 -       }
145560 -size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
145562 -       /* validation checks */
145563 -       if (dstSize == 0)
145564 -               return ERROR(dstSize_tooSmall);
145565 -       if (cSrcSize > dstSize)
145566 -               return ERROR(corruption_detected); /* invalid */
145567 -       if (cSrcSize == dstSize) {
145568 -               memcpy(dst, cSrc, dstSize);
145569 -               return dstSize;
145570 -       } /* not compressed */
145571 -       if (cSrcSize == 1) {
145572 -               memset(dst, *(const BYTE *)cSrc, dstSize);
145573 -               return dstSize;
145574 -       } /* RLE */
145576 -       {
145577 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
145578 -               return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
145579 -                             : HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
145580 -       }
145582 diff --git a/lib/zstd/mem.h b/lib/zstd/mem.h
145583 deleted file mode 100644
145584 index 93d7a2c377fe..000000000000
145585 --- a/lib/zstd/mem.h
145586 +++ /dev/null
145587 @@ -1,151 +0,0 @@
145589 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
145590 - * All rights reserved.
145592 - * This source code is licensed under the BSD-style license found in the
145593 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
145594 - * An additional grant of patent rights can be found in the PATENTS file in the
145595 - * same directory.
145597 - * This program is free software; you can redistribute it and/or modify it under
145598 - * the terms of the GNU General Public License version 2 as published by the
145599 - * Free Software Foundation. This program is dual-licensed; you may select
145600 - * either version 2 of the GNU General Public License ("GPL") or BSD license
145601 - * ("BSD").
145602 - */
145604 -#ifndef MEM_H_MODULE
145605 -#define MEM_H_MODULE
145607 -/*-****************************************
145608 -*  Dependencies
145609 -******************************************/
145610 -#include <asm/unaligned.h>
145611 -#include <linux/string.h> /* memcpy */
145612 -#include <linux/types.h>  /* size_t, ptrdiff_t */
145614 -/*-****************************************
145615 -*  Compiler specifics
145616 -******************************************/
145617 -#define ZSTD_STATIC static inline
145619 -/*-**************************************************************
145620 -*  Basic Types
145621 -*****************************************************************/
145622 -typedef uint8_t BYTE;
145623 -typedef uint16_t U16;
145624 -typedef int16_t S16;
145625 -typedef uint32_t U32;
145626 -typedef int32_t S32;
145627 -typedef uint64_t U64;
145628 -typedef int64_t S64;
145629 -typedef ptrdiff_t iPtrDiff;
145630 -typedef uintptr_t uPtrDiff;
145632 -/*-**************************************************************
145633 -*  Memory I/O
145634 -*****************************************************************/
145635 -ZSTD_STATIC unsigned ZSTD_32bits(void) { return sizeof(size_t) == 4; }
145636 -ZSTD_STATIC unsigned ZSTD_64bits(void) { return sizeof(size_t) == 8; }
145638 -#if defined(__LITTLE_ENDIAN)
145639 -#define ZSTD_LITTLE_ENDIAN 1
145640 -#else
145641 -#define ZSTD_LITTLE_ENDIAN 0
145642 -#endif
145644 -ZSTD_STATIC unsigned ZSTD_isLittleEndian(void) { return ZSTD_LITTLE_ENDIAN; }
145646 -ZSTD_STATIC U16 ZSTD_read16(const void *memPtr) { return get_unaligned((const U16 *)memPtr); }
145648 -ZSTD_STATIC U32 ZSTD_read32(const void *memPtr) { return get_unaligned((const U32 *)memPtr); }
145650 -ZSTD_STATIC U64 ZSTD_read64(const void *memPtr) { return get_unaligned((const U64 *)memPtr); }
145652 -ZSTD_STATIC size_t ZSTD_readST(const void *memPtr) { return get_unaligned((const size_t *)memPtr); }
145654 -ZSTD_STATIC void ZSTD_write16(void *memPtr, U16 value) { put_unaligned(value, (U16 *)memPtr); }
145656 -ZSTD_STATIC void ZSTD_write32(void *memPtr, U32 value) { put_unaligned(value, (U32 *)memPtr); }
145658 -ZSTD_STATIC void ZSTD_write64(void *memPtr, U64 value) { put_unaligned(value, (U64 *)memPtr); }
145660 -/*=== Little endian r/w ===*/
145662 -ZSTD_STATIC U16 ZSTD_readLE16(const void *memPtr) { return get_unaligned_le16(memPtr); }
145664 -ZSTD_STATIC void ZSTD_writeLE16(void *memPtr, U16 val) { put_unaligned_le16(val, memPtr); }
145666 -ZSTD_STATIC U32 ZSTD_readLE24(const void *memPtr) { return ZSTD_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); }
145668 -ZSTD_STATIC void ZSTD_writeLE24(void *memPtr, U32 val)
145670 -       ZSTD_writeLE16(memPtr, (U16)val);
145671 -       ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
145674 -ZSTD_STATIC U32 ZSTD_readLE32(const void *memPtr) { return get_unaligned_le32(memPtr); }
145676 -ZSTD_STATIC void ZSTD_writeLE32(void *memPtr, U32 val32) { put_unaligned_le32(val32, memPtr); }
145678 -ZSTD_STATIC U64 ZSTD_readLE64(const void *memPtr) { return get_unaligned_le64(memPtr); }
145680 -ZSTD_STATIC void ZSTD_writeLE64(void *memPtr, U64 val64) { put_unaligned_le64(val64, memPtr); }
145682 -ZSTD_STATIC size_t ZSTD_readLEST(const void *memPtr)
145684 -       if (ZSTD_32bits())
145685 -               return (size_t)ZSTD_readLE32(memPtr);
145686 -       else
145687 -               return (size_t)ZSTD_readLE64(memPtr);
145690 -ZSTD_STATIC void ZSTD_writeLEST(void *memPtr, size_t val)
145692 -       if (ZSTD_32bits())
145693 -               ZSTD_writeLE32(memPtr, (U32)val);
145694 -       else
145695 -               ZSTD_writeLE64(memPtr, (U64)val);
145698 -/*=== Big endian r/w ===*/
145700 -ZSTD_STATIC U32 ZSTD_readBE32(const void *memPtr) { return get_unaligned_be32(memPtr); }
145702 -ZSTD_STATIC void ZSTD_writeBE32(void *memPtr, U32 val32) { put_unaligned_be32(val32, memPtr); }
145704 -ZSTD_STATIC U64 ZSTD_readBE64(const void *memPtr) { return get_unaligned_be64(memPtr); }
145706 -ZSTD_STATIC void ZSTD_writeBE64(void *memPtr, U64 val64) { put_unaligned_be64(val64, memPtr); }
145708 -ZSTD_STATIC size_t ZSTD_readBEST(const void *memPtr)
145710 -       if (ZSTD_32bits())
145711 -               return (size_t)ZSTD_readBE32(memPtr);
145712 -       else
145713 -               return (size_t)ZSTD_readBE64(memPtr);
145716 -ZSTD_STATIC void ZSTD_writeBEST(void *memPtr, size_t val)
145718 -       if (ZSTD_32bits())
145719 -               ZSTD_writeBE32(memPtr, (U32)val);
145720 -       else
145721 -               ZSTD_writeBE64(memPtr, (U64)val);
145724 -/* function safe only for comparisons */
145725 -ZSTD_STATIC U32 ZSTD_readMINMATCH(const void *memPtr, U32 length)
145727 -       switch (length) {
145728 -       default:
145729 -       case 4: return ZSTD_read32(memPtr);
145730 -       case 3:
145731 -               if (ZSTD_isLittleEndian())
145732 -                       return ZSTD_read32(memPtr) << 8;
145733 -               else
145734 -                       return ZSTD_read32(memPtr) >> 8;
145735 -       }
145738 -#endif /* MEM_H_MODULE */
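The deleted mem.h is a thin wrapper over the kernel's get_unaligned_le*/put_unaligned_le* accessors. Outside the kernel the same contract is typically met with plain byte arithmetic; a portable sketch of two of the helpers (assumed user-space equivalents, not the kernel API):

#include <stdint.h>

static uint16_t readLE16(const void *p)
{
	const uint8_t *b = (const uint8_t *)p;
	return (uint16_t)(b[0] | (b[1] << 8));
}

static void writeLE16(void *p, uint16_t v)
{
	uint8_t *b = (uint8_t *)p;
	b[0] = (uint8_t)v;
	b[1] = (uint8_t)(v >> 8);
}

/* 24-bit little-endian read, built from the 16-bit helper exactly as
 * ZSTD_readLE24 above builds on ZSTD_readLE16. */
static uint32_t readLE24(const void *p)
{
	return readLE16(p) + ((uint32_t)((const uint8_t *)p)[2] << 16);
}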
145739 diff --git a/lib/zstd/zstd_common.c b/lib/zstd/zstd_common.c
145740 deleted file mode 100644
145741 index a282624ee155..000000000000
145742 --- a/lib/zstd/zstd_common.c
145743 +++ /dev/null
145744 @@ -1,75 +0,0 @@
145746 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
145747 - * All rights reserved.
145749 - * This source code is licensed under the BSD-style license found in the
145750 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
145751 - * An additional grant of patent rights can be found in the PATENTS file in the
145752 - * same directory.
145754 - * This program is free software; you can redistribute it and/or modify it under
145755 - * the terms of the GNU General Public License version 2 as published by the
145756 - * Free Software Foundation. This program is dual-licensed; you may select
145757 - * either version 2 of the GNU General Public License ("GPL") or BSD license
145758 - * ("BSD").
145759 - */
145761 -/*-*************************************
145762 -*  Dependencies
145763 -***************************************/
145764 -#include "error_private.h"
145765 -#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
145766 -#include <linux/kernel.h>
145768 -/*=**************************************************************
145769 -*  Custom allocator
145770 -****************************************************************/
145772 -#define stack_push(stack, size)                                 \
145773 -       ({                                                      \
145774 -               void *const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \
145775 -               (stack)->ptr = (char *)ptr + (size);            \
145776 -               (stack)->ptr <= (stack)->end ? ptr : NULL;      \
145777 -       })
145779 -ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize)
145781 -       ZSTD_customMem stackMem = {ZSTD_stackAlloc, ZSTD_stackFree, workspace};
145782 -       ZSTD_stack *stack = (ZSTD_stack *)workspace;
145783 -       /* Verify preconditions */
145784 -       if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) {
145785 -               ZSTD_customMem error = {NULL, NULL, NULL};
145786 -               return error;
145787 -       }
145788 -       /* Initialize the stack */
145789 -       stack->ptr = workspace;
145790 -       stack->end = (char *)workspace + workspaceSize;
145791 -       stack_push(stack, sizeof(ZSTD_stack));
145792 -       return stackMem;
145795 -void *ZSTD_stackAllocAll(void *opaque, size_t *size)
145797 -       ZSTD_stack *stack = (ZSTD_stack *)opaque;
145798 -       *size = (BYTE const *)stack->end - (BYTE *)ZSTD_PTR_ALIGN(stack->ptr);
145799 -       return stack_push(stack, *size);
145802 -void *ZSTD_stackAlloc(void *opaque, size_t size)
145804 -       ZSTD_stack *stack = (ZSTD_stack *)opaque;
145805 -       return stack_push(stack, size);
145807 -void ZSTD_stackFree(void *opaque, void *address)
145809 -       (void)opaque;
145810 -       (void)address;
145813 -void *ZSTD_malloc(size_t size, ZSTD_customMem customMem) { return customMem.customAlloc(customMem.opaque, size); }
145815 -void ZSTD_free(void *ptr, ZSTD_customMem customMem)
145817 -       if (ptr != NULL)
145818 -               customMem.customFree(customMem.opaque, ptr);
145819 -}
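The allocator deleted here is a bump allocator over a caller-supplied workspace: stack_push aligns the cursor, advances it by the request, and yields NULL once the cursor passes the end; freeing is a no-op because the whole workspace is reclaimed at once. A user-space sketch of the same idea, with illustrative names:

#include <stddef.h>
#include <stdint.h>

typedef struct {
	char *ptr; /* bump cursor */
	char *end; /* one past the end of the workspace */
} bump_stack;

/* Align the cursor, hand out `size` bytes, or return NULL when the
 * workspace is exhausted. There is no per-allocation free: like
 * ZSTD_stackFree above, release is deliberately a no-op. */
static void *bump_alloc(bump_stack *s, size_t size)
{
	uintptr_t a = sizeof(size_t) - 1;
	char *p = (char *)(((uintptr_t)s->ptr + a) & ~a);

	if (p > s->end || (size_t)(s->end - p) < size)
		return NULL;
	s->ptr = p + size;
	return p;
}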
145820 diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
145821 new file mode 100644
145822 index 000000000000..37d08ff43e6e
145823 --- /dev/null
145824 +++ b/lib/zstd/zstd_compress_module.c
145825 @@ -0,0 +1,124 @@
145826 +// SPDX-License-Identifier: GPL-2.0-only
145828 + * Copyright (c) Facebook, Inc.
145829 + * All rights reserved.
145831 + * This source code is licensed under both the BSD-style license (found in the
145832 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
145833 + * in the COPYING file in the root directory of this source tree).
145834 + * You may select, at your option, one of the above-listed licenses.
145835 + */
145837 +#include <linux/kernel.h>
145838 +#include <linux/module.h>
145839 +#include <linux/string.h>
145840 +#include <linux/zstd.h>
145842 +#include "common/zstd_deps.h"
145843 +#include "common/zstd_internal.h"
145845 +int zstd_min_clevel(void)
145847 +       return ZSTD_minCLevel();
145849 +EXPORT_SYMBOL(zstd_min_clevel);
145851 +int zstd_max_clevel(void)
145853 +       return ZSTD_maxCLevel();
145855 +EXPORT_SYMBOL(zstd_max_clevel);
145857 +size_t zstd_compress_bound(size_t src_size)
145859 +       return ZSTD_compressBound(src_size);
145861 +EXPORT_SYMBOL(zstd_compress_bound);
145863 +zstd_parameters zstd_get_params(int level,
145864 +       unsigned long long estimated_src_size)
145866 +       return ZSTD_getParams(level, estimated_src_size, 0);
145868 +EXPORT_SYMBOL(zstd_get_params);
145870 +size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
145872 +       return ZSTD_estimateCCtxSize_usingCParams(*cparams);
145874 +EXPORT_SYMBOL(zstd_cctx_workspace_bound);
145876 +zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
145878 +       if (workspace == NULL)
145879 +               return NULL;
145880 +       return ZSTD_initStaticCCtx(workspace, workspace_size);
145882 +EXPORT_SYMBOL(zstd_init_cctx);
145884 +size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
145885 +       const void *src, size_t src_size, const zstd_parameters *parameters)
145887 +       return ZSTD_compress_advanced(cctx, dst, dst_capacity, src, src_size, NULL, 0, *parameters);
145889 +EXPORT_SYMBOL(zstd_compress_cctx);
145891 +size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
145893 +       return ZSTD_estimateCStreamSize_usingCParams(*cparams);
145895 +EXPORT_SYMBOL(zstd_cstream_workspace_bound);
145897 +zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
145898 +       unsigned long long pledged_src_size, void *workspace, size_t workspace_size)
145900 +       zstd_cstream *cstream;
145901 +       size_t ret;
145903 +       if (workspace == NULL)
145904 +               return NULL;
145906 +       cstream = ZSTD_initStaticCStream(workspace, workspace_size);
145907 +       if (cstream == NULL)
145908 +               return NULL;
145910 +       /* 0 means unknown in linux zstd API but means 0 in new zstd API */
145911 +       if (pledged_src_size == 0)
145912 +               pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
145914 +       ret = ZSTD_initCStream_advanced(cstream, NULL, 0, *parameters, pledged_src_size);
145915 +       if (ZSTD_isError(ret))
145916 +               return NULL;
145918 +       return cstream;
145920 +EXPORT_SYMBOL(zstd_init_cstream);
145922 +size_t zstd_reset_cstream(zstd_cstream *cstream,
145923 +       unsigned long long pledged_src_size)
145925 +       return ZSTD_resetCStream(cstream, pledged_src_size);
145927 +EXPORT_SYMBOL(zstd_reset_cstream);
145929 +size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
145930 +       zstd_in_buffer *input)
145932 +       return ZSTD_compressStream(cstream, output, input);
145934 +EXPORT_SYMBOL(zstd_compress_stream);
145936 +size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output)
145938 +       return ZSTD_flushStream(cstream, output);
145940 +EXPORT_SYMBOL(zstd_flush_stream);
145942 +size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output)
145944 +       return ZSTD_endStream(cstream, output);
145946 +EXPORT_SYMBOL(zstd_end_stream);
145948 +MODULE_LICENSE("Dual BSD/GPL");
145949 +MODULE_DESCRIPTION("Zstd Compressor");
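A sketch of how a kernel caller might drive this one-shot interface, using only the zstd_* entry points defined in this file (plus zstd_is_error from the decompressor module it depends on); the surrounding function, buffer names, and error mapping are hypothetical:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/zstd.h>

static int example_compress(const void *src, size_t src_len,
			    void *dst, size_t dst_cap, size_t *out_len)
{
	zstd_parameters params = zstd_get_params(3, src_len);
	size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_cctx *cctx;
	size_t ret;

	if (!wksp)
		return -ENOMEM;
	cctx = zstd_init_cctx(wksp, wksp_size);
	if (!cctx) {
		kvfree(wksp);
		return -EINVAL;
	}
	ret = zstd_compress_cctx(cctx, dst, dst_cap, src, src_len, &params);
	kvfree(wksp);
	if (zstd_is_error(ret))
		return -EIO;
	*out_len = ret;
	return 0;
}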
145950 diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
145951 new file mode 100644
145952 index 000000000000..15005cdb9eca
145953 --- /dev/null
145954 +++ b/lib/zstd/zstd_decompress_module.c
145955 @@ -0,0 +1,105 @@
145956 +// SPDX-License-Identifier: GPL-2.0-only
145958 + * Copyright (c) Facebook, Inc.
145959 + * All rights reserved.
145961 + * This source code is licensed under both the BSD-style license (found in the
145962 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
145963 + * in the COPYING file in the root directory of this source tree).
145964 + * You may select, at your option, one of the above-listed licenses.
145965 + */
145967 +#include <linux/kernel.h>
145968 +#include <linux/module.h>
145969 +#include <linux/string.h>
145970 +#include <linux/zstd.h>
145972 +#include "common/zstd_deps.h"
145974 +/* Common symbols. zstd_compress must depend on zstd_decompress. */
145976 +unsigned int zstd_is_error(size_t code)
145978 +       return ZSTD_isError(code);
145980 +EXPORT_SYMBOL(zstd_is_error);
145982 +zstd_error_code zstd_get_error_code(size_t code)
145984 +       return ZSTD_getErrorCode(code);
145986 +EXPORT_SYMBOL(zstd_get_error_code);
145988 +const char *zstd_get_error_name(size_t code)
145990 +       return ZSTD_getErrorName(code);
145992 +EXPORT_SYMBOL(zstd_get_error_name);
145994 +/* Decompression symbols. */
145996 +size_t zstd_dctx_workspace_bound(void)
145998 +       return ZSTD_estimateDCtxSize();
146000 +EXPORT_SYMBOL(zstd_dctx_workspace_bound);
146002 +zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size)
146004 +       if (workspace == NULL)
146005 +               return NULL;
146006 +       return ZSTD_initStaticDCtx(workspace, workspace_size);
146008 +EXPORT_SYMBOL(zstd_init_dctx);
146010 +size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
146011 +       const void *src, size_t src_size)
146013 +       return ZSTD_decompressDCtx(dctx, dst, dst_capacity, src, src_size);
146015 +EXPORT_SYMBOL(zstd_decompress_dctx);
146017 +size_t zstd_dstream_workspace_bound(size_t max_window_size)
146019 +       return ZSTD_estimateDStreamSize(max_window_size);
146021 +EXPORT_SYMBOL(zstd_dstream_workspace_bound);
146023 +zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
146024 +       size_t workspace_size)
146026 +       if (workspace == NULL)
146027 +               return NULL;
146028 +       (void)max_window_size;
146029 +       return ZSTD_initStaticDStream(workspace, workspace_size);
146031 +EXPORT_SYMBOL(zstd_init_dstream);
146033 +size_t zstd_reset_dstream(zstd_dstream *dstream)
146035 +       return ZSTD_resetDStream(dstream);
146037 +EXPORT_SYMBOL(zstd_reset_dstream);
146039 +size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
146040 +       zstd_in_buffer *input)
146042 +       return ZSTD_decompressStream(dstream, output, input);
146044 +EXPORT_SYMBOL(zstd_decompress_stream);
146046 +size_t zstd_find_frame_compressed_size(const void *src, size_t src_size)
146048 +       return ZSTD_findFrameCompressedSize(src, src_size);
146050 +EXPORT_SYMBOL(zstd_find_frame_compressed_size);
146052 +size_t zstd_get_frame_header(zstd_frame_header *header, const void *src,
146053 +       size_t src_size)
146055 +       return ZSTD_getFrameHeader(header, src, src_size);
146057 +EXPORT_SYMBOL(zstd_get_frame_header);
146059 +MODULE_LICENSE("Dual BSD/GPL");
146060 +MODULE_DESCRIPTION("Zstd Decompressor");
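And the matching decompression side, again with a hypothetical caller wrapped around the zstd_* entry points this file exports:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/zstd.h>

static int example_decompress(const void *src, size_t src_len,
			      void *dst, size_t dst_cap, size_t *out_len)
{
	size_t wksp_size = zstd_dctx_workspace_bound();
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_dctx *dctx;
	size_t ret;

	if (!wksp)
		return -ENOMEM;
	dctx = zstd_init_dctx(wksp, wksp_size);
	if (!dctx) {
		kvfree(wksp);
		return -EINVAL;
	}
	ret = zstd_decompress_dctx(dctx, dst, dst_cap, src, src_len);
	kvfree(wksp);
	if (zstd_is_error(ret))
		return -EIO;
	*out_len = ret;
	return 0;
}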
146061 diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h
146062 deleted file mode 100644
146063 index dac753397f86..000000000000
146064 --- a/lib/zstd/zstd_internal.h
146065 +++ /dev/null
146066 @@ -1,273 +0,0 @@
146068 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
146069 - * All rights reserved.
146071 - * This source code is licensed under the BSD-style license found in the
146072 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
146073 - * An additional grant of patent rights can be found in the PATENTS file in the
146074 - * same directory.
146076 - * This program is free software; you can redistribute it and/or modify it under
146077 - * the terms of the GNU General Public License version 2 as published by the
146078 - * Free Software Foundation. This program is dual-licensed; you may select
146079 - * either version 2 of the GNU General Public License ("GPL") or BSD license
146080 - * ("BSD").
146081 - */
146083 -#ifndef ZSTD_CCOMMON_H_MODULE
146084 -#define ZSTD_CCOMMON_H_MODULE
146086 -/*-*******************************************************
146087 -*  Compiler specifics
146088 -*********************************************************/
146089 -#define FORCE_INLINE static __always_inline
146090 -#define FORCE_NOINLINE static noinline
146092 -/*-*************************************
146093 -*  Dependencies
146094 -***************************************/
146095 -#include "error_private.h"
146096 -#include "mem.h"
146097 -#include <linux/compiler.h>
146098 -#include <linux/kernel.h>
146099 -#include <linux/xxhash.h>
146100 -#include <linux/zstd.h>
146102 -/*-*************************************
146103 -*  shared macros
146104 -***************************************/
146105 -#define MIN(a, b) ((a) < (b) ? (a) : (b))
146106 -#define MAX(a, b) ((a) > (b) ? (a) : (b))
146107 -#define CHECK_F(f)                       \
146108 -       {                                \
146109 -               size_t const errcod = f; \
146110 -               if (ERR_isError(errcod)) \
146111 -                       return errcod;   \
146112 -       } /* check and Forward error code */
146113 -#define CHECK_E(f, e)                    \
146114 -       {                                \
146115 -               size_t const errcod = f; \
146116 -               if (ERR_isError(errcod)) \
146117 -                       return ERROR(e); \
146118 -       } /* check and send Error code */
146119 -#define ZSTD_STATIC_ASSERT(c)                                   \
146120 -       {                                                       \
146121 -               enum { ZSTD_static_assert = 1 / (int)(!!(c)) }; \
146122 -       }
146124 -/*-*************************************
146125 -*  Common constants
146126 -***************************************/
146127 -#define ZSTD_OPT_NUM (1 << 12)
146128 -#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */
146130 -#define ZSTD_REP_NUM 3               /* number of repcodes */
146131 -#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */
146132 -#define ZSTD_REP_MOVE (ZSTD_REP_NUM - 1)
146133 -#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM)
146134 -static const U32 repStartValue[ZSTD_REP_NUM] = {1, 4, 8};
146136 -#define KB *(1 << 10)
146137 -#define MB *(1 << 20)
146138 -#define GB *(1U << 30)
146140 -#define BIT7 128
146141 -#define BIT6 64
146142 -#define BIT5 32
146143 -#define BIT4 16
146144 -#define BIT1 2
146145 -#define BIT0 1
146147 -#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
146148 -static const size_t ZSTD_fcs_fieldSize[4] = {0, 2, 4, 8};
146149 -static const size_t ZSTD_did_fieldSize[4] = {0, 1, 2, 4};
146151 -#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
146152 -static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
146153 -typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
146155 -#define MIN_SEQUENCES_SIZE 1                                                                     /* nbSeq==0 */
146156 -#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
146158 -#define HufLog 12
146159 -typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
146161 -#define LONGNBSEQ 0x7F00
146163 -#define MINMATCH 3
146164 -#define EQUAL_READ32 4
146166 -#define Litbits 8
146167 -#define MaxLit ((1 << Litbits) - 1)
146168 -#define MaxML 52
146169 -#define MaxLL 35
146170 -#define MaxOff 28
146171 -#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
146172 -#define MLFSELog 9
146173 -#define LLFSELog 9
146174 -#define OffFSELog 8
146176 -static const U32 LL_bits[MaxLL + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
146177 -static const S16 LL_defaultNorm[MaxLL + 1] = {4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1};
146178 -#define LL_DEFAULTNORMLOG 6 /* for static allocation */
146179 -static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
146181 -static const U32 ML_bits[MaxML + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0, 0,
146182 -                                      0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
146183 -static const S16 ML_defaultNorm[MaxML + 1] = {1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  1,  1,  1,  1,  1,  1, 1,
146184 -                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1};
146185 -#define ML_DEFAULTNORMLOG 6 /* for static allocation */
146186 -static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
146188 -static const S16 OF_defaultNorm[MaxOff + 1] = {1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1};
146189 -#define OF_DEFAULTNORMLOG 5 /* for static allocation */
146190 -static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
146192 -/*-*******************************************
146193 -*  Shared functions to include for inlining
146194 -*********************************************/
146195 -ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) {
146196 -       /*
146197 -        * zstd relies heavily on gcc being able to analyze and inline this
146198 -        * memcpy() call, since it is called in a tight loop. Preboot mode
146199 -        * is compiled in freestanding mode, which stops gcc from analyzing
146200 -        * memcpy(). Use __builtin_memcpy() to tell gcc to analyze this as a
146201 -        * regular memcpy().
146202 -        */
146203 -       __builtin_memcpy(dst, src, 8);
146205 -/*! ZSTD_wildcopy() :
146206 -*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
146207 -#define WILDCOPY_OVERLENGTH 8
146208 -ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length)
146210 -       const BYTE* ip = (const BYTE*)src;
146211 -       BYTE* op = (BYTE*)dst;
146212 -       BYTE* const oend = op + length;
146213 -#if defined(GCC_VERSION) && GCC_VERSION >= 70000 && GCC_VERSION < 70200
146214 -       /*
146215 -        * Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388.
146216 -        * Avoid the bad case where the loop only runs once by handling the
146217 -        * special case separately. This doesn't trigger the bug because it
146218 -        * doesn't involve pointer/integer overflow.
146219 -        */
146220 -       if (length <= 8)
146221 -               return ZSTD_copy8(dst, src);
146222 -#endif
146223 -       do {
146224 -               ZSTD_copy8(op, ip);
146225 -               op += 8;
146226 -               ip += 8;
146227 -       } while (op < oend);
146228 -}
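The contract above deserves spelling out: because ZSTD_wildcopy moves whole 8-byte blocks, it may write up to WILDCOPY_OVERLENGTH bytes past dst + length (and read the same margin past src + length), so every buffer the codec hands it reserves that slack. A user-space restatement with the caller-side obligation made explicit:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define WILDCOPY_OVERLENGTH 8

static void wildcopy(void *dst, const void *src, ptrdiff_t length)
{
	const char *ip = (const char *)src;
	char *op = (char *)dst;
	char *const oend = op + length;

	do { /* copies 8 bytes even when length == 0 */
		memcpy(op, ip, 8);
		op += 8;
		ip += 8;
	} while (op < oend);
}

int main(void)
{
	const char src[16] = "overcopy demo!"; /* readable out to 16 bytes */
	/* reserve the slack the copier is allowed to scribble into */
	char *dst = malloc(14 + WILDCOPY_OVERLENGTH);

	if (!dst)
		return 1;
	wildcopy(dst, src, 14);
	free(dst);
	return 0;
}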
146230 -/*-*******************************************
146231 -*  Private interfaces
146232 -*********************************************/
146233 -typedef struct ZSTD_stats_s ZSTD_stats_t;
146235 -typedef struct {
146236 -       U32 off;
146237 -       U32 len;
146238 -} ZSTD_match_t;
146240 -typedef struct {
146241 -       U32 price;
146242 -       U32 off;
146243 -       U32 mlen;
146244 -       U32 litlen;
146245 -       U32 rep[ZSTD_REP_NUM];
146246 -} ZSTD_optimal_t;
146248 -typedef struct seqDef_s {
146249 -       U32 offset;
146250 -       U16 litLength;
146251 -       U16 matchLength;
146252 -} seqDef;
146254 -typedef struct {
146255 -       seqDef *sequencesStart;
146256 -       seqDef *sequences;
146257 -       BYTE *litStart;
146258 -       BYTE *lit;
146259 -       BYTE *llCode;
146260 -       BYTE *mlCode;
146261 -       BYTE *ofCode;
146262 -       U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
146263 -       U32 longLengthPos;
146264 -       /* opt */
146265 -       ZSTD_optimal_t *priceTable;
146266 -       ZSTD_match_t *matchTable;
146267 -       U32 *matchLengthFreq;
146268 -       U32 *litLengthFreq;
146269 -       U32 *litFreq;
146270 -       U32 *offCodeFreq;
146271 -       U32 matchLengthSum;
146272 -       U32 matchSum;
146273 -       U32 litLengthSum;
146274 -       U32 litSum;
146275 -       U32 offCodeSum;
146276 -       U32 log2matchLengthSum;
146277 -       U32 log2matchSum;
146278 -       U32 log2litLengthSum;
146279 -       U32 log2litSum;
146280 -       U32 log2offCodeSum;
146281 -       U32 factor;
146282 -       U32 staticPrices;
146283 -       U32 cachedPrice;
146284 -       U32 cachedLitLength;
146285 -       const BYTE *cachedLiterals;
146286 -} seqStore_t;
146288 -const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx);
146289 -void ZSTD_seqToCodes(const seqStore_t *seqStorePtr);
146290 -int ZSTD_isSkipFrame(ZSTD_DCtx *dctx);
146292 -/*= Custom memory allocation functions */
146293 -typedef void *(*ZSTD_allocFunction)(void *opaque, size_t size);
146294 -typedef void (*ZSTD_freeFunction)(void *opaque, void *address);
146295 -typedef struct {
146296 -       ZSTD_allocFunction customAlloc;
146297 -       ZSTD_freeFunction customFree;
146298 -       void *opaque;
146299 -} ZSTD_customMem;
146301 -void *ZSTD_malloc(size_t size, ZSTD_customMem customMem);
146302 -void ZSTD_free(void *ptr, ZSTD_customMem customMem);
146304 -/*====== stack allocation  ======*/
146306 -typedef struct {
146307 -       void *ptr;
146308 -       const void *end;
146309 -} ZSTD_stack;
146311 -#define ZSTD_ALIGN(x) ALIGN(x, sizeof(size_t))
146312 -#define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t))
146314 -ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize);
146316 -void *ZSTD_stackAllocAll(void *opaque, size_t *size);
146317 -void *ZSTD_stackAlloc(void *opaque, size_t size);
146318 -void ZSTD_stackFree(void *opaque, void *address);
146320 -/*======  common function  ======*/
146322 -ZSTD_STATIC U32 ZSTD_highbit32(U32 val) { return 31 - __builtin_clz(val); }
146324 -/* hidden functions */
146326 -/* ZSTD_invalidateRepCodes() :
146327 - * ensures next compression will not use repcodes from previous block.
146328 - * Note : only works with regular variant;
146329 - *        do not use with extDict variant ! */
146330 -void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx);
146332 -size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx);
146333 -size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx);
146334 -size_t ZSTD_freeCDict(ZSTD_CDict *cdict);
146335 -size_t ZSTD_freeDDict(ZSTD_DDict *ddict);
146336 -size_t ZSTD_freeCStream(ZSTD_CStream *zcs);
146337 -size_t ZSTD_freeDStream(ZSTD_DStream *zds);
146339 -#endif /* ZSTD_CCOMMON_H_MODULE */
146340 diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h
146341 deleted file mode 100644
146342 index 55e1b4cba808..000000000000
146343 --- a/lib/zstd/zstd_opt.h
146344 +++ /dev/null
146345 @@ -1,1014 +0,0 @@
146347 - * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
146348 - * All rights reserved.
146350 - * This source code is licensed under the BSD-style license found in the
146351 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
146352 - * An additional grant of patent rights can be found in the PATENTS file in the
146353 - * same directory.
146355 - * This program is free software; you can redistribute it and/or modify it under
146356 - * the terms of the GNU General Public License version 2 as published by the
146357 - * Free Software Foundation. This program is dual-licensed; you may select
146358 - * either version 2 of the GNU General Public License ("GPL") or BSD license
146359 - * ("BSD").
146360 - */
146362 -/* Note : this file is intended to be included within zstd_compress.c */
146364 -#ifndef ZSTD_OPT_H_91842398743
146365 -#define ZSTD_OPT_H_91842398743
146367 -#define ZSTD_LITFREQ_ADD 2
146368 -#define ZSTD_FREQ_DIV 4
146369 -#define ZSTD_MAX_PRICE (1 << 30)
146371 -/*-*************************************
146372 -*  Price functions for optimal parser
146373 -***************************************/
146374 -FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t *ssPtr)
146376 -       ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum + 1);
146377 -       ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum + 1);
146378 -       ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum + 1);
146379 -       ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum + 1);
146380 -       ssPtr->factor = 1 + ((ssPtr->litSum >> 5) / ssPtr->litLengthSum) + ((ssPtr->litSum << 1) / (ssPtr->litSum + ssPtr->matchSum));
146383 -ZSTD_STATIC void ZSTD_rescaleFreqs(seqStore_t *ssPtr, const BYTE *src, size_t srcSize)
146385 -       unsigned u;
146387 -       ssPtr->cachedLiterals = NULL;
146388 -       ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
146389 -       ssPtr->staticPrices = 0;
146391 -       if (ssPtr->litLengthSum == 0) {
146392 -               if (srcSize <= 1024)
146393 -                       ssPtr->staticPrices = 1;
146395 -               for (u = 0; u <= MaxLit; u++)
146396 -                       ssPtr->litFreq[u] = 0;
146397 -               for (u = 0; u < srcSize; u++)
146398 -                       ssPtr->litFreq[src[u]]++;
146400 -               ssPtr->litSum = 0;
146401 -               ssPtr->litLengthSum = MaxLL + 1;
146402 -               ssPtr->matchLengthSum = MaxML + 1;
146403 -               ssPtr->offCodeSum = (MaxOff + 1);
146404 -               ssPtr->matchSum = (ZSTD_LITFREQ_ADD << Litbits);
146406 -               for (u = 0; u <= MaxLit; u++) {
146407 -                       ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> ZSTD_FREQ_DIV);
146408 -                       ssPtr->litSum += ssPtr->litFreq[u];
146409 -               }
146410 -               for (u = 0; u <= MaxLL; u++)
146411 -                       ssPtr->litLengthFreq[u] = 1;
146412 -               for (u = 0; u <= MaxML; u++)
146413 -                       ssPtr->matchLengthFreq[u] = 1;
146414 -               for (u = 0; u <= MaxOff; u++)
146415 -                       ssPtr->offCodeFreq[u] = 1;
146416 -       } else {
146417 -               ssPtr->matchLengthSum = 0;
146418 -               ssPtr->litLengthSum = 0;
146419 -               ssPtr->offCodeSum = 0;
146420 -               ssPtr->matchSum = 0;
146421 -               ssPtr->litSum = 0;
146423 -               for (u = 0; u <= MaxLit; u++) {
146424 -                       ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> (ZSTD_FREQ_DIV + 1));
146425 -                       ssPtr->litSum += ssPtr->litFreq[u];
146426 -               }
146427 -               for (u = 0; u <= MaxLL; u++) {
146428 -                       ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u] >> (ZSTD_FREQ_DIV + 1));
146429 -                       ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
146430 -               }
146431 -               for (u = 0; u <= MaxML; u++) {
146432 -                       ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u] >> ZSTD_FREQ_DIV);
146433 -                       ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
146434 -                       ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
146435 -               }
146436 -               ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
146437 -               for (u = 0; u <= MaxOff; u++) {
146438 -                       ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u] >> ZSTD_FREQ_DIV);
146439 -                       ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
146440 -               }
146441 -       }
146443 -       ZSTD_setLog2Prices(ssPtr);
146444 -}
146446 -FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t *ssPtr, U32 litLength, const BYTE *literals)
146447 -{
146448 -       U32 price, u;
146450 -       if (ssPtr->staticPrices)
146451 -               return ZSTD_highbit32((U32)litLength + 1) + (litLength * 6);
146453 -       if (litLength == 0)
146454 -               return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0] + 1);
146456 -       /* literals */
146457 -       if (ssPtr->cachedLiterals == literals) {
146458 -               U32 const additional = litLength - ssPtr->cachedLitLength;
146459 -               const BYTE *literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength;
146460 -               price = ssPtr->cachedPrice + additional * ssPtr->log2litSum;
146461 -               for (u = 0; u < additional; u++)
146462 -                       price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]] + 1);
146463 -               ssPtr->cachedPrice = price;
146464 -               ssPtr->cachedLitLength = litLength;
146465 -       } else {
146466 -               price = litLength * ssPtr->log2litSum;
146467 -               for (u = 0; u < litLength; u++)
146468 -                       price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]] + 1);
146470 -               if (litLength >= 12) {
146471 -                       ssPtr->cachedLiterals = literals;
146472 -                       ssPtr->cachedPrice = price;
146473 -                       ssPtr->cachedLitLength = litLength;
146474 -               }
146475 -       }
146477 -       /* literal Length */
146478 -       {
146479 -               const BYTE LL_deltaCode = 19;
146480 -               const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
146481 -               price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode] + 1);
146482 -       }
146484 -       return price;
146485 -}
146487 -FORCE_INLINE U32 ZSTD_getPrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength, const int ultra)
146488 -{
146489 -       /* offset */
146490 -       U32 price;
146491 -       BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
146493 -       if (seqStorePtr->staticPrices)
146494 -               return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength + 1) + 16 + offCode;
146496 -       price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode] + 1);
146497 -       if (!ultra && offCode >= 20)
146498 -               price += (offCode - 19) * 2;
146500 -       /* match Length */
146501 -       {
146502 -               const BYTE ML_deltaCode = 36;
146503 -               const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
146504 -               price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode] + 1);
146505 -       }
146507 -       return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor;
146508 -}
146510 -ZSTD_STATIC void ZSTD_updatePrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength)
146511 -{
146512 -       U32 u;
146514 -       /* literals */
146515 -       seqStorePtr->litSum += litLength * ZSTD_LITFREQ_ADD;
146516 -       for (u = 0; u < litLength; u++)
146517 -               seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
146519 -       /* literal Length */
146520 -       {
146521 -               const BYTE LL_deltaCode = 19;
146522 -               const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
146523 -               seqStorePtr->litLengthFreq[llCode]++;
146524 -               seqStorePtr->litLengthSum++;
146525 -       }
146527 -       /* match offset */
146528 -       {
146529 -               BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
146530 -               seqStorePtr->offCodeSum++;
146531 -               seqStorePtr->offCodeFreq[offCode]++;
146532 -       }
146534 -       /* match Length */
146535 -       {
146536 -               const BYTE ML_deltaCode = 36;
146537 -               const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
146538 -               seqStorePtr->matchLengthFreq[mlCode]++;
146539 -               seqStorePtr->matchLengthSum++;
146540 -       }
146542 -       ZSTD_setLog2Prices(seqStorePtr);
146543 -}
146545 -#define SET_PRICE(pos, mlen_, offset_, litlen_, price_)           \
146546 -       {                                                         \
146547 -               while (last_pos < pos) {                          \
146548 -                       opt[last_pos + 1].price = ZSTD_MAX_PRICE; \
146549 -                       last_pos++;                               \
146550 -               }                                                 \
146551 -               opt[pos].mlen = mlen_;                            \
146552 -               opt[pos].off = offset_;                           \
146553 -               opt[pos].litlen = litlen_;                        \
146554 -               opt[pos].price = price_;                          \
146555 -       }
146557 -/* Update hashTable3 up to ip (excluded)
146558 -   Assumption : always within prefix (i.e. not within extDict) */
146559 -FORCE_INLINE
146560 -U32 ZSTD_insertAndFindFirstIndexHash3(ZSTD_CCtx *zc, const BYTE *ip)
146561 -{
146562 -       U32 *const hashTable3 = zc->hashTable3;
146563 -       U32 const hashLog3 = zc->hashLog3;
146564 -       const BYTE *const base = zc->base;
146565 -       U32 idx = zc->nextToUpdate3;
146566 -       const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
146567 -       const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);
146569 -       while (idx < target) {
146570 -               hashTable3[ZSTD_hash3Ptr(base + idx, hashLog3)] = idx;
146571 -               idx++;
146572 -       }
146574 -       return hashTable3[hash3];
146575 -}
146577 -/*-*************************************
146578 -*  Binary Tree search
146579 -***************************************/
146580 -static U32 ZSTD_insertBtAndGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, U32 nbCompares, const U32 mls, U32 extDict,
146581 -                                        ZSTD_match_t *matches, const U32 minMatchLen)
146582 -{
146583 -       const BYTE *const base = zc->base;
146584 -       const U32 curr = (U32)(ip - base);
146585 -       const U32 hashLog = zc->params.cParams.hashLog;
146586 -       const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
146587 -       U32 *const hashTable = zc->hashTable;
146588 -       U32 matchIndex = hashTable[h];
146589 -       U32 *const bt = zc->chainTable;
146590 -       const U32 btLog = zc->params.cParams.chainLog - 1;
146591 -       const U32 btMask = (1U << btLog) - 1;
146592 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
146593 -       const BYTE *const dictBase = zc->dictBase;
146594 -       const U32 dictLimit = zc->dictLimit;
146595 -       const BYTE *const dictEnd = dictBase + dictLimit;
146596 -       const BYTE *const prefixStart = base + dictLimit;
146597 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
146598 -       const U32 windowLow = zc->lowLimit;
146599 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
146600 -       U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
146601 -       U32 matchEndIdx = curr + 8;
146602 -       U32 dummy32; /* to be nullified at the end */
146603 -       U32 mnum = 0;
146605 -       const U32 minMatch = (mls == 3) ? 3 : 4;
146606 -       size_t bestLength = minMatchLen - 1;
146608 -       if (minMatch == 3) { /* HC3 match finder */
146609 -               U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(zc, ip);
146610 -               if (matchIndex3 > windowLow && (curr - matchIndex3 < (1 << 18))) {
146611 -                       const BYTE *match;
146612 -                       size_t currMl = 0;
146613 -                       if ((!extDict) || matchIndex3 >= dictLimit) {
146614 -                               match = base + matchIndex3;
146615 -                               if (match[bestLength] == ip[bestLength])
146616 -                                       currMl = ZSTD_count(ip, match, iLimit);
146617 -                       } else {
146618 -                               match = dictBase + matchIndex3;
146619 -                               if (ZSTD_readMINMATCH(match, MINMATCH) ==
146620 -                                   ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
146621 -                                       currMl = ZSTD_count_2segments(ip + MINMATCH, match + MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
146622 -                       }
146624 -                       /* save best solution */
146625 -                       if (currMl > bestLength) {
146626 -                               bestLength = currMl;
146627 -                               matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3;
146628 -                               matches[mnum].len = (U32)currMl;
146629 -                               mnum++;
146630 -                               if (currMl > ZSTD_OPT_NUM)
146631 -                                       goto update;
146632 -                               if (ip + currMl == iLimit)
146633 -                                       goto update; /* best possible, and avoid read overflow*/
146634 -                       }
146635 -               }
146636 -       }
146638 -       hashTable[h] = curr; /* Update Hash Table */
146640 -       while (nbCompares-- && (matchIndex > windowLow)) {
146641 -               U32 *nextPtr = bt + 2 * (matchIndex & btMask);
146642 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
146643 -               const BYTE *match;
146645 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
146646 -                       match = base + matchIndex;
146647 -                       if (match[matchLength] == ip[matchLength]) {
146648 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iLimit) + 1;
146649 -                       }
146650 -               } else {
146651 -                       match = dictBase + matchIndex;
146652 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart);
146653 -                       if (matchIndex + matchLength >= dictLimit)
146654 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
146655 -               }
146657 -               if (matchLength > bestLength) {
146658 -                       if (matchLength > matchEndIdx - matchIndex)
146659 -                               matchEndIdx = matchIndex + (U32)matchLength;
146660 -                       bestLength = matchLength;
146661 -                       matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex;
146662 -                       matches[mnum].len = (U32)matchLength;
146663 -                       mnum++;
146664 -                       if (matchLength > ZSTD_OPT_NUM)
146665 -                               break;
146666 -                       if (ip + matchLength == iLimit) /* equal : no way to know if inf or sup */
146667 -                               break;                  /* drop, to guarantee consistency (miss a little bit of compression) */
146668 -               }
146670 -               if (match[matchLength] < ip[matchLength]) {
146671 -                       /* match is smaller than curr */
146672 -                       *smallerPtr = matchIndex;         /* update smaller idx */
146673 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
146674 -                       if (matchIndex <= btLow) {
146675 -                               smallerPtr = &dummy32;
146676 -                               break;
146677 -                       }                         /* beyond tree size, stop the search */
146678 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
146679 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
146680 -               } else {
146681 -                       /* match is larger than curr */
146682 -                       *largerPtr = matchIndex;
146683 -                       commonLengthLarger = matchLength;
146684 -                       if (matchIndex <= btLow) {
146685 -                               largerPtr = &dummy32;
146686 -                               break;
146687 -                       } /* beyond tree size, stop the search */
146688 -                       largerPtr = nextPtr;
146689 -                       matchIndex = nextPtr[0];
146690 -               }
146691 -       }
146693 -       *smallerPtr = *largerPtr = 0;
146695 -update:
146696 -       zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
146697 -       return mnum;
146698 -}
146700 -/** Tree updater, providing best match */
146701 -static U32 ZSTD_BtGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, ZSTD_match_t *matches,
146702 -                               const U32 minMatchLen)
146703 -{
146704 -       if (ip < zc->base + zc->nextToUpdate)
146705 -               return 0; /* skipped area */
146706 -       ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
146707 -       return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
146708 -}
146710 -static U32 ZSTD_BtGetAllMatches_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
146711 -                                         const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
146712 -                                         ZSTD_match_t *matches, const U32 minMatchLen)
146713 -{
146714 -       switch (matchLengthSearch) {
146715 -       case 3: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
146716 -       default:
146717 -       case 4: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
146718 -       case 5: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
146719 -       case 7:
146720 -       case 6: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
146721 -       }
146722 -}
146724 -/** Tree updater, providing best match */
146725 -static U32 ZSTD_BtGetAllMatches_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls,
146726 -                                       ZSTD_match_t *matches, const U32 minMatchLen)
146727 -{
146728 -       if (ip < zc->base + zc->nextToUpdate)
146729 -               return 0; /* skipped area */
146730 -       ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
146731 -       return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
146732 -}
146734 -static U32 ZSTD_BtGetAllMatches_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
146735 -                                                 const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
146736 -                                                 ZSTD_match_t *matches, const U32 minMatchLen)
146737 -{
146738 -       switch (matchLengthSearch) {
146739 -       case 3: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
146740 -       default:
146741 -       case 4: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
146742 -       case 5: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
146743 -       case 7:
146744 -       case 6: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
146745 -       }
146746 -}
146748 -/*-*******************************
146749 -*  Optimal parser
146750 -*********************************/
146751 -FORCE_INLINE
146752 -void ZSTD_compressBlock_opt_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
146753 -{
146754 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
146755 -       const BYTE *const istart = (const BYTE *)src;
146756 -       const BYTE *ip = istart;
146757 -       const BYTE *anchor = istart;
146758 -       const BYTE *const iend = istart + srcSize;
146759 -       const BYTE *const ilimit = iend - 8;
146760 -       const BYTE *const base = ctx->base;
146761 -       const BYTE *const prefixStart = base + ctx->dictLimit;
146763 -       const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
146764 -       const U32 sufficient_len = ctx->params.cParams.targetLength;
146765 -       const U32 mls = ctx->params.cParams.searchLength;
146766 -       const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
146768 -       ZSTD_optimal_t *opt = seqStorePtr->priceTable;
146769 -       ZSTD_match_t *matches = seqStorePtr->matchTable;
146770 -       const BYTE *inr;
146771 -       U32 offset, rep[ZSTD_REP_NUM];
146773 -       /* init */
146774 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
146775 -       ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
146776 -       ip += (ip == prefixStart);
146777 -       {
146778 -               U32 i;
146779 -               for (i = 0; i < ZSTD_REP_NUM; i++)
146780 -                       rep[i] = ctx->rep[i];
146781 -       }
146783 -       /* Match Loop */
146784 -       while (ip < ilimit) {
146785 -               U32 cur, match_num, last_pos, litlen, price;
146786 -               U32 u, mlen, best_mlen, best_off, litLength;
146787 -               memset(opt, 0, sizeof(ZSTD_optimal_t));
146788 -               last_pos = 0;
146789 -               litlen = (U32)(ip - anchor);
146791 -               /* check repCode */
146792 -               {
146793 -                       U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
146794 -                       for (i = (ip == anchor); i < last_i; i++) {
146795 -                               const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
146796 -                               if ((repCur > 0) && (repCur < (S32)(ip - prefixStart)) &&
146797 -                                   (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
146798 -                                       mlen = (U32)ZSTD_count(ip + minMatch, ip + minMatch - repCur, iend) + minMatch;
146799 -                                       if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
146800 -                                               best_mlen = mlen;
146801 -                                               best_off = i;
146802 -                                               cur = 0;
146803 -                                               last_pos = 1;
146804 -                                               goto _storeSequence;
146805 -                                       }
146806 -                                       best_off = i - (ip == anchor);
146807 -                                       do {
146808 -                                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
146809 -                                               if (mlen > last_pos || price < opt[mlen].price)
146810 -                                                       SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
146811 -                                               mlen--;
146812 -                                       } while (mlen >= minMatch);
146813 -                               }
146814 -                       }
146815 -               }
146817 -               match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
146819 -               if (!last_pos && !match_num) {
146820 -                       ip++;
146821 -                       continue;
146822 -               }
146824 -               if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
146825 -                       best_mlen = matches[match_num - 1].len;
146826 -                       best_off = matches[match_num - 1].off;
146827 -                       cur = 0;
146828 -                       last_pos = 1;
146829 -                       goto _storeSequence;
146830 -               }
146832 -               /* set prices using matches at position = 0 */
146833 -               best_mlen = (last_pos) ? last_pos : minMatch;
146834 -               for (u = 0; u < match_num; u++) {
146835 -                       mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
146836 -                       best_mlen = matches[u].len;
146837 -                       while (mlen <= best_mlen) {
146838 -                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
146839 -                               if (mlen > last_pos || price < opt[mlen].price)
146840 -                                       SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
146841 -                               mlen++;
146842 -                       }
146843 -               }
146845 -               if (last_pos < minMatch) {
146846 -                       ip++;
146847 -                       continue;
146848 -               }
146850 -               /* initialize opt[0] */
146851 -               {
146852 -                       U32 i;
146853 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
146854 -                               opt[0].rep[i] = rep[i];
146855 -               }
146856 -               opt[0].mlen = 1;
146857 -               opt[0].litlen = litlen;
146859 -               /* check further positions */
146860 -               for (cur = 1; cur <= last_pos; cur++) {
146861 -                       inr = ip + cur;
146863 -                       if (opt[cur - 1].mlen == 1) {
146864 -                               litlen = opt[cur - 1].litlen + 1;
146865 -                               if (cur > litlen) {
146866 -                                       price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
146867 -                               } else
146868 -                                       price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
146869 -                       } else {
146870 -                               litlen = 1;
146871 -                               price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
146872 -                       }
146874 -                       if (cur > last_pos || price <= opt[cur].price)
146875 -                               SET_PRICE(cur, 1, 0, litlen, price);
146877 -                       if (cur == last_pos)
146878 -                               break;
146880 -                       if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
146881 -                               continue;
146883 -                       mlen = opt[cur].mlen;
146884 -                       if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
146885 -                               opt[cur].rep[2] = opt[cur - mlen].rep[1];
146886 -                               opt[cur].rep[1] = opt[cur - mlen].rep[0];
146887 -                               opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
146888 -                       } else {
146889 -                               opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
146890 -                               opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
146891 -                               opt[cur].rep[0] =
146892 -                                   ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
146893 -                       }
146895 -                       best_mlen = minMatch;
146896 -                       {
146897 -                               U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
146898 -                               for (i = (opt[cur].mlen != 1); i < last_i; i++) { /* check rep */
146899 -                                       const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
146900 -                                       if ((repCur > 0) && (repCur < (S32)(inr - prefixStart)) &&
146901 -                                           (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) {
146902 -                                               mlen = (U32)ZSTD_count(inr + minMatch, inr + minMatch - repCur, iend) + minMatch;
146904 -                                               if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
146905 -                                                       best_mlen = mlen;
146906 -                                                       best_off = i;
146907 -                                                       last_pos = cur + 1;
146908 -                                                       goto _storeSequence;
146909 -                                               }
146911 -                                               best_off = i - (opt[cur].mlen != 1);
146912 -                                               if (mlen > best_mlen)
146913 -                                                       best_mlen = mlen;
146915 -                                               do {
146916 -                                                       if (opt[cur].mlen == 1) {
146917 -                                                               litlen = opt[cur].litlen;
146918 -                                                               if (cur > litlen) {
146919 -                                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
146920 -                                                                                                                       best_off, mlen - MINMATCH, ultra);
146921 -                                                               } else
146922 -                                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
146923 -                                                       } else {
146924 -                                                               litlen = 0;
146925 -                                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
146926 -                                                       }
146928 -                                                       if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
146929 -                                                               SET_PRICE(cur + mlen, mlen, i, litlen, price);
146930 -                                                       mlen--;
146931 -                                               } while (mlen >= minMatch);
146932 -                                       }
146933 -                               }
146934 -                       }
146936 -                       match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);
146938 -                       if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
146939 -                               best_mlen = matches[match_num - 1].len;
146940 -                               best_off = matches[match_num - 1].off;
146941 -                               last_pos = cur + 1;
146942 -                               goto _storeSequence;
146943 -                       }
146945 -                       /* set prices using matches at position = cur */
146946 -                       for (u = 0; u < match_num; u++) {
146947 -                               mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
146948 -                               best_mlen = matches[u].len;
146950 -                               while (mlen <= best_mlen) {
146951 -                                       if (opt[cur].mlen == 1) {
146952 -                                               litlen = opt[cur].litlen;
146953 -                                               if (cur > litlen)
146954 -                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
146955 -                                                                                                       matches[u].off - 1, mlen - MINMATCH, ultra);
146956 -                                               else
146957 -                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
146958 -                                       } else {
146959 -                                               litlen = 0;
146960 -                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
146961 -                                       }
146963 -                                       if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
146964 -                                               SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
146966 -                                       mlen++;
146967 -                               }
146968 -                       }
146969 -               }
146971 -               best_mlen = opt[last_pos].mlen;
146972 -               best_off = opt[last_pos].off;
146973 -               cur = last_pos - best_mlen;
146975 -       /* store sequence */
146976 -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
146977 -               opt[0].mlen = 1;
146979 -               while (1) {
146980 -                       mlen = opt[cur].mlen;
146981 -                       offset = opt[cur].off;
146982 -                       opt[cur].mlen = best_mlen;
146983 -                       opt[cur].off = best_off;
146984 -                       best_mlen = mlen;
146985 -                       best_off = offset;
146986 -                       if (mlen > cur)
146987 -                               break;
146988 -                       cur -= mlen;
146989 -               }
146991 -               for (u = 0; u <= last_pos;) {
146992 -                       u += opt[u].mlen;
146993 -               }
146995 -               for (cur = 0; cur < last_pos;) {
146996 -                       mlen = opt[cur].mlen;
146997 -                       if (mlen == 1) {
146998 -                               ip++;
146999 -                               cur++;
147000 -                               continue;
147001 -                       }
147002 -                       offset = opt[cur].off;
147003 -                       cur += mlen;
147004 -                       litLength = (U32)(ip - anchor);
147006 -                       if (offset > ZSTD_REP_MOVE_OPT) {
147007 -                               rep[2] = rep[1];
147008 -                               rep[1] = rep[0];
147009 -                               rep[0] = offset - ZSTD_REP_MOVE_OPT;
147010 -                               offset--;
147011 -                       } else {
147012 -                               if (offset != 0) {
147013 -                                       best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
147014 -                                       if (offset != 1)
147015 -                                               rep[2] = rep[1];
147016 -                                       rep[1] = rep[0];
147017 -                                       rep[0] = best_off;
147018 -                               }
147019 -                               if (litLength == 0)
147020 -                                       offset--;
147021 -                       }
147023 -                       ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
147024 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
147025 -                       anchor = ip = ip + mlen;
147026 -               }
147027 -       } /* for (cur=0; cur < last_pos; ) */
147029 -       /* Save reps for next block */
147030 -       {
147031 -               int i;
147032 -               for (i = 0; i < ZSTD_REP_NUM; i++)
147033 -                       ctx->repToConfirm[i] = rep[i];
147034 -       }
147036 -       /* Last Literals */
147037 -       {
147038 -               size_t const lastLLSize = iend - anchor;
147039 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
147040 -               seqStorePtr->lit += lastLLSize;
147041 -       }
147042 -}
147044 -FORCE_INLINE
147045 -void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
147046 -{
147047 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
147048 -       const BYTE *const istart = (const BYTE *)src;
147049 -       const BYTE *ip = istart;
147050 -       const BYTE *anchor = istart;
147051 -       const BYTE *const iend = istart + srcSize;
147052 -       const BYTE *const ilimit = iend - 8;
147053 -       const BYTE *const base = ctx->base;
147054 -       const U32 lowestIndex = ctx->lowLimit;
147055 -       const U32 dictLimit = ctx->dictLimit;
147056 -       const BYTE *const prefixStart = base + dictLimit;
147057 -       const BYTE *const dictBase = ctx->dictBase;
147058 -       const BYTE *const dictEnd = dictBase + dictLimit;
147060 -       const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
147061 -       const U32 sufficient_len = ctx->params.cParams.targetLength;
147062 -       const U32 mls = ctx->params.cParams.searchLength;
147063 -       const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
147065 -       ZSTD_optimal_t *opt = seqStorePtr->priceTable;
147066 -       ZSTD_match_t *matches = seqStorePtr->matchTable;
147067 -       const BYTE *inr;
147069 -       /* init */
147070 -       U32 offset, rep[ZSTD_REP_NUM];
147071 -       {
147072 -               U32 i;
147073 -               for (i = 0; i < ZSTD_REP_NUM; i++)
147074 -                       rep[i] = ctx->rep[i];
147075 -       }
147077 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
147078 -       ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
147079 -       ip += (ip == prefixStart);
147081 -       /* Match Loop */
147082 -       while (ip < ilimit) {
147083 -               U32 cur, match_num, last_pos, litlen, price;
147084 -               U32 u, mlen, best_mlen, best_off, litLength;
147085 -               U32 curr = (U32)(ip - base);
147086 -               memset(opt, 0, sizeof(ZSTD_optimal_t));
147087 -               last_pos = 0;
147088 -               opt[0].litlen = (U32)(ip - anchor);
147090 -               /* check repCode */
147091 -               {
147092 -                       U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
147093 -                       for (i = (ip == anchor); i < last_i; i++) {
147094 -                               const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
147095 -                               const U32 repIndex = (U32)(curr - repCur);
147096 -                               const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
147097 -                               const BYTE *const repMatch = repBase + repIndex;
147098 -                               if ((repCur > 0 && repCur <= (S32)curr) &&
147099 -                                   (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
147100 -                                   && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
147101 -                                       /* repcode detected we should take it */
147102 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
147103 -                                       mlen = (U32)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
147105 -                                       if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
147106 -                                               best_mlen = mlen;
147107 -                                               best_off = i;
147108 -                                               cur = 0;
147109 -                                               last_pos = 1;
147110 -                                               goto _storeSequence;
147111 -                                       }
147113 -                                       best_off = i - (ip == anchor);
147114 -                                       litlen = opt[0].litlen;
147115 -                                       do {
147116 -                                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
147117 -                                               if (mlen > last_pos || price < opt[mlen].price)
147118 -                                                       SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
147119 -                                               mlen--;
147120 -                                       } while (mlen >= minMatch);
147121 -                               }
147122 -                       }
147123 -               }
147125 -               match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */
147127 -               if (!last_pos && !match_num) {
147128 -                       ip++;
147129 -                       continue;
147130 -               }
147132 -               {
147133 -                       U32 i;
147134 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
147135 -                               opt[0].rep[i] = rep[i];
147136 -               }
147137 -               opt[0].mlen = 1;
147139 -               if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
147140 -                       best_mlen = matches[match_num - 1].len;
147141 -                       best_off = matches[match_num - 1].off;
147142 -                       cur = 0;
147143 -                       last_pos = 1;
147144 -                       goto _storeSequence;
147145 -               }
147147 -               best_mlen = (last_pos) ? last_pos : minMatch;
147149 -               /* set prices using matches at position = 0 */
147150 -               for (u = 0; u < match_num; u++) {
147151 -                       mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
147152 -                       best_mlen = matches[u].len;
147153 -                       litlen = opt[0].litlen;
147154 -                       while (mlen <= best_mlen) {
147155 -                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
147156 -                               if (mlen > last_pos || price < opt[mlen].price)
147157 -                                       SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
147158 -                               mlen++;
147159 -                       }
147160 -               }
147162 -               if (last_pos < minMatch) {
147163 -                       ip++;
147164 -                       continue;
147165 -               }
147167 -               /* check further positions */
147168 -               for (cur = 1; cur <= last_pos; cur++) {
147169 -                       inr = ip + cur;
147171 -                       if (opt[cur - 1].mlen == 1) {
147172 -                               litlen = opt[cur - 1].litlen + 1;
147173 -                               if (cur > litlen) {
147174 -                                       price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
147175 -                               } else
147176 -                                       price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
147177 -                       } else {
147178 -                               litlen = 1;
147179 -                               price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
147180 -                       }
147182 -                       if (cur > last_pos || price <= opt[cur].price)
147183 -                               SET_PRICE(cur, 1, 0, litlen, price);
147185 -                       if (cur == last_pos)
147186 -                               break;
147188 -                       if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
147189 -                               continue;
147191 -                       mlen = opt[cur].mlen;
147192 -                       if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
147193 -                               opt[cur].rep[2] = opt[cur - mlen].rep[1];
147194 -                               opt[cur].rep[1] = opt[cur - mlen].rep[0];
147195 -                               opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
147196 -                       } else {
147197 -                               opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
147198 -                               opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
147199 -                               opt[cur].rep[0] =
147200 -                                   ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
147201 -                       }
147203 -                       best_mlen = minMatch;
147204 -                       {
147205 -                               U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
147206 -                               for (i = (mlen != 1); i < last_i; i++) {
147207 -                                       const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
147208 -                                       const U32 repIndex = (U32)(curr + cur - repCur);
147209 -                                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
147210 -                                       const BYTE *const repMatch = repBase + repIndex;
147211 -                                       if ((repCur > 0 && repCur <= (S32)(curr + cur)) &&
147212 -                                           (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
147213 -                                           && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
147214 -                                               /* repcode detected */
147215 -                                               const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
147216 -                                               mlen = (U32)ZSTD_count_2segments(inr + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
147218 -                                               if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
147219 -                                                       best_mlen = mlen;
147220 -                                                       best_off = i;
147221 -                                                       last_pos = cur + 1;
147222 -                                                       goto _storeSequence;
147223 -                                               }
147225 -                                               best_off = i - (opt[cur].mlen != 1);
147226 -                                               if (mlen > best_mlen)
147227 -                                                       best_mlen = mlen;
147229 -                                               do {
147230 -                                                       if (opt[cur].mlen == 1) {
147231 -                                                               litlen = opt[cur].litlen;
147232 -                                                               if (cur > litlen) {
147233 -                                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
147234 -                                                                                                                       best_off, mlen - MINMATCH, ultra);
147235 -                                                               } else
147236 -                                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
147237 -                                                       } else {
147238 -                                                               litlen = 0;
147239 -                                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
147240 -                                                       }
147242 -                                                       if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
147243 -                                                               SET_PRICE(cur + mlen, mlen, i, litlen, price);
147244 -                                                       mlen--;
147245 -                                               } while (mlen >= minMatch);
147246 -                                       }
147247 -                               }
147248 -                       }
147250 -                       match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
147252 -                       if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
147253 -                               best_mlen = matches[match_num - 1].len;
147254 -                               best_off = matches[match_num - 1].off;
147255 -                               last_pos = cur + 1;
147256 -                               goto _storeSequence;
147257 -                       }
147259 -                       /* set prices using matches at position = cur */
147260 -                       for (u = 0; u < match_num; u++) {
147261 -                               mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
147262 -                               best_mlen = matches[u].len;
147264 -                               while (mlen <= best_mlen) {
147265 -                                       if (opt[cur].mlen == 1) {
147266 -                                               litlen = opt[cur].litlen;
147267 -                                               if (cur > litlen)
147268 -                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
147269 -                                                                                                       matches[u].off - 1, mlen - MINMATCH, ultra);
147270 -                                               else
147271 -                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
147272 -                                       } else {
147273 -                                               litlen = 0;
147274 -                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
147275 -                                       }
147277 -                                       if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
147278 -                                               SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
147280 -                                       mlen++;
147281 -                               }
147282 -                       }
147283 -               } /* for (cur = 1; cur <= last_pos; cur++) */
147285 -               best_mlen = opt[last_pos].mlen;
147286 -               best_off = opt[last_pos].off;
147287 -               cur = last_pos - best_mlen;
147289 -       /* store sequence */
147290 -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
147291 -               opt[0].mlen = 1;
147293 -               while (1) {
147294 -                       mlen = opt[cur].mlen;
147295 -                       offset = opt[cur].off;
147296 -                       opt[cur].mlen = best_mlen;
147297 -                       opt[cur].off = best_off;
147298 -                       best_mlen = mlen;
147299 -                       best_off = offset;
147300 -                       if (mlen > cur)
147301 -                               break;
147302 -                       cur -= mlen;
147303 -               }
147305 -               for (u = 0; u <= last_pos;) {
147306 -                       u += opt[u].mlen;
147307 -               }
147309 -               for (cur = 0; cur < last_pos;) {
147310 -                       mlen = opt[cur].mlen;
147311 -                       if (mlen == 1) {
147312 -                               ip++;
147313 -                               cur++;
147314 -                               continue;
147315 -                       }
147316 -                       offset = opt[cur].off;
147317 -                       cur += mlen;
147318 -                       litLength = (U32)(ip - anchor);
147320 -                       if (offset > ZSTD_REP_MOVE_OPT) {
147321 -                               rep[2] = rep[1];
147322 -                               rep[1] = rep[0];
147323 -                               rep[0] = offset - ZSTD_REP_MOVE_OPT;
147324 -                               offset--;
147325 -                       } else {
147326 -                               if (offset != 0) {
147327 -                                       best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
147328 -                                       if (offset != 1)
147329 -                                               rep[2] = rep[1];
147330 -                                       rep[1] = rep[0];
147331 -                                       rep[0] = best_off;
147332 -                               }
147334 -                               if (litLength == 0)
147335 -                                       offset--;
147336 -                       }
147338 -                       ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
147339 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
147340 -                       anchor = ip = ip + mlen;
147341 -               }
147342 -       } /* for (cur=0; cur < last_pos; ) */
147344 -       /* Save reps for next block */
147345 -       {
147346 -               int i;
147347 -               for (i = 0; i < ZSTD_REP_NUM; i++)
147348 -                       ctx->repToConfirm[i] = rep[i];
147349 -       }
147351 -       /* Last Literals */
147352 -       {
147353 -               size_t lastLLSize = iend - anchor;
147354 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
147355 -               seqStorePtr->lit += lastLLSize;
147356 -       }
147357 -}
147359 -#endif /* ZSTD_OPT_H_91842398743 */
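For orientation: the zstd_opt.h code removed above is zstd's optimal ("btopt") parser. It keeps per-symbol frequency tables, prices every candidate literal/match in approximate bits, and fills a dynamic-programming table (opt[], via SET_PRICE) with the cheapest way to reach each position. The costing trick is that ZSTD_highbit32(x) is floor(log2(x)), so a symbol drawn from a +1-smoothed frequency table costs about log2(totalCount) - log2(count) bits. A minimal user-space sketch of that idea; highbit32 and literal_price are illustrative names for this example, not kernel APIs:

#include <stdint.h>
#include <stdio.h>

/* Mirrors ZSTD_highbit32(): floor(log2(v)), v must be non-zero. */
static unsigned highbit32(uint32_t v)
{
        return 31 - __builtin_clz(v);
}

/*
 * Approximate bit cost of literal c: log2(litSum + 1) - log2(litFreq[c] + 1),
 * the same smoothed integer estimate used by the deleted price functions.
 */
static uint32_t literal_price(const uint32_t *litFreq, uint32_t litSum,
                              uint8_t c)
{
        return highbit32(litSum + 1) - highbit32(litFreq[c] + 1);
}

int main(void)
{
        uint32_t freq[256] = { ['e'] = 120, ['z'] = 2 };
        uint32_t sum = 122;

        printf("price('e') = %u bits\n", literal_price(freq, sum, 'e')); /* 0 */
        printf("price('z') = %u bits\n", literal_price(freq, sum, 'z')); /* 5 */
        return 0;
}

With these sample counts a frequent literal prices at 0 bits and a rare one at 5 bits, which is the gradient the optimal parser trades against match costs.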
147360 diff --git a/localversion b/localversion
147361 new file mode 100644
147362 index 000000000000..c21af2f75ee0
147363 --- /dev/null
147364 +++ b/localversion
147365 @@ -0,0 +1 @@
147366 +-xanmod1
147367 diff --git a/localversion-cacule b/localversion-cacule
147368 new file mode 100644
147369 index 000000000000..585f177f9bba
147370 --- /dev/null
147371 +++ b/localversion-cacule
147372 @@ -0,0 +1 @@
147373 +-cacule
147374 diff --git a/mm/Kconfig b/mm/Kconfig
147375 index 24c045b24b95..5650c2d3c9c2 100644
147376 --- a/mm/Kconfig
147377 +++ b/mm/Kconfig
147378 @@ -122,6 +122,41 @@ config SPARSEMEM_VMEMMAP
147379           pfn_to_page and page_to_pfn operations.  This is the most
147380           efficient option when sufficient kernel resources are available.
147382 +config CLEAN_LOW_KBYTES
147383 +       int "Default value for vm.clean_low_kbytes"
147384 +       depends on SYSCTL
147385 +       default "150000"
147386 +       help
147387 +         The vm.clean_low_kbytes sysctl knob provides *best-effort*
147388 +         protection of clean file pages. The clean file pages on the current
147389 +         node won't be reclaimed under memory pressure when their amount is
147390 +         below vm.clean_low_kbytes *unless* we threaten to OOM or have
147391 +         no free swap space or vm.swappiness=0.
147393 +         Protection of clean file pages may be used to prevent thrashing and
147394 +         reduce I/O under low-memory conditions.
147396 +         Setting it to a high value may result in an early eviction of anonymous
147397 +         pages into the swap space by attempting to hold the protected amount of
147398 +         clean file pages in memory.
147400 +config CLEAN_MIN_KBYTES
147401 +       int "Default value for vm.clean_min_kbytes"
147402 +       depends on SYSCTL
147403 +       default "0"
147404 +       help
147405 +         The vm.clean_min_kbytes sysctl knob provides *hard* protection
147406 +         of clean file pages. The clean file pages on the current node won't be
147407 +         reclaimed under memory pressure when their amount is below
147408 +         vm.clean_min_kbytes.
147410 +         Hard protection of clean file pages may be used to avoid high latency and
147411 +         prevent livelock in near-OOM conditions.
147413 +         Setting it to a high value may result in an early out-of-memory condition
147414 +         due to the inability to reclaim the protected amount of clean file pages
147415 +         when other types of pages cannot be reclaimed.
147417  config HAVE_MEMBLOCK_PHYS_MAP
147418         bool
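The CLEAN_LOW_KBYTES and CLEAN_MIN_KBYTES entries above only choose build-time defaults; the thresholds themselves are the runtime sysctls vm.clean_low_kbytes and vm.clean_min_kbytes described in their help texts. A short sketch of inspecting and raising them from user space, assuming the conventional sysctl-to-procfs mapping under /proc/sys/vm/:

#include <stdio.h>

static long read_knob(const char *path)
{
        FILE *f = fopen(path, "r");
        long v = -1;

        if (!f)
                return -1;
        if (fscanf(f, "%ld", &v) != 1)
                v = -1;
        fclose(f);
        return v;
}

static int write_knob(const char *path, long v)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%ld\n", v);
        return fclose(f);
}

int main(void)
{
        printf("vm.clean_low_kbytes = %ld\n",
               read_knob("/proc/sys/vm/clean_low_kbytes"));
        printf("vm.clean_min_kbytes = %ld\n",
               read_knob("/proc/sys/vm/clean_min_kbytes"));

        /* Raise the best-effort floor to ~256 MiB; requires root. */
        if (write_knob("/proc/sys/vm/clean_low_kbytes", 262144) != 0)
                perror("vm.clean_low_kbytes");
        return 0;
}

Raising clean_low_kbytes grows the best-effort protected pool of clean file pages at the cost of earlier anonymous-page eviction, as the help text warns.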
147420 @@ -872,4 +907,59 @@ config MAPPING_DIRTY_HELPERS
147421  config KMAP_LOCAL
147422         bool
147424 +config LRU_GEN
147425 +       bool "Multigenerational LRU"
147426 +       depends on MMU
147427 +       help
147428 +         A high performance LRU implementation for heavily overcommitted
147429 +         workloads that are not IO bound. See Documentation/vm/multigen_lru.rst
147430 +         for details.
147432 +         Warning: do not enable this option unless you plan to use it because
147433 +         it introduces a small per-process, per-memcg and per-node memory
147434 +         overhead.
147436 +config NR_LRU_GENS
147437 +       int "Max number of generations"
147438 +       depends on LRU_GEN
147439 +       range 4 31
147440 +       default 7
147441 +       help
147442 +         This will use order_base_2(N+1) spare bits from page flags.
147444 +         Warning: do not use numbers larger than necessary because each
147445 +         generation introduces a small per-node and per-memcg memory overhead.
147447 +config TIERS_PER_GEN
147448 +       int "Number of tiers per generation"
147449 +       depends on LRU_GEN
147450 +       range 2 5
147451 +       default 4
147452 +       help
147453 +         This will use N-2 spare bits from page flags.
147455 +         Higher values generally offer better protection to active pages under
147456 +         heavy buffered I/O workloads.
147458 +config LRU_GEN_ENABLED
147459 +       bool "Turn on by default"
147460 +       depends on LRU_GEN
147461 +       help
147462 +         The default value of /sys/kernel/mm/lru_gen/enabled is 0. This option
147463 +         changes it to 1.
147465 +         Warning: the default value is the fast path. See
147466 +         Documentation/static-keys.txt for details.
147468 +config LRU_GEN_STATS
147469 +       bool "Full stats for debugging"
147470 +       depends on LRU_GEN
147471 +       help
147472 +         This option keeps full stats for each generation, which can be read
147473 +         from /sys/kernel/debug/lru_gen_full.
147475 +         Warning: do not enable this option unless you plan to use it, because
147476 +         it introduces an additional small per-process, per-memcg, and
147477 +         per-node memory overhead.
147479  endmenu
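On a kernel carrying this patch, the two knobs above surface as /proc/sys/vm/clean_low_kbytes and /proc/sys/vm/clean_min_kbytes (path names assumed from the standard sysctl mapping; they exist only with this patch applied). A minimal sketch for reading them at runtime:

#include <stdio.h>

static long read_knob(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("vm.clean_low_kbytes = %ld\n",
	       read_knob("/proc/sys/vm/clean_low_kbytes"));
	printf("vm.clean_min_kbytes = %ld\n",
	       read_knob("/proc/sys/vm/clean_min_kbytes"));
	return 0;
}

Writing a value back through the same paths (as root) takes effect immediately; the Kconfig defaults above only seed the initial values.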
147480 diff --git a/mm/gup.c b/mm/gup.c
147481 index ef7d2da9f03f..4164a70160e3 100644
147482 --- a/mm/gup.c
147483 +++ b/mm/gup.c
147484 @@ -1535,10 +1535,6 @@ struct page *get_dump_page(unsigned long addr)
147485                                       FOLL_FORCE | FOLL_DUMP | FOLL_GET);
147486         if (locked)
147487                 mmap_read_unlock(mm);
147489 -       if (ret == 1 && is_page_poisoned(page))
147490 -               return NULL;
147492         return (ret == 1) ? page : NULL;
147494  #endif /* CONFIG_ELF_CORE */
147495 @@ -1551,54 +1547,60 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
147496                                         struct vm_area_struct **vmas,
147497                                         unsigned int gup_flags)
147499 -       unsigned long i;
147500 -       unsigned long step;
147501 -       bool drain_allow = true;
147502 -       bool migrate_allow = true;
147503 +       unsigned long i, isolation_error_count;
147504 +       bool drain_allow;
147505         LIST_HEAD(cma_page_list);
147506         long ret = nr_pages;
147507 +       struct page *prev_head, *head;
147508         struct migration_target_control mtc = {
147509                 .nid = NUMA_NO_NODE,
147510                 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
147511         };
147513  check_again:
147514 -       for (i = 0; i < nr_pages;) {
147516 -               struct page *head = compound_head(pages[i]);
147518 -               /*
147519 -                * gup may start from a tail page. Advance step by the left
147520 -                * part.
147521 -                */
147522 -               step = compound_nr(head) - (pages[i] - head);
147523 +       prev_head = NULL;
147524 +       isolation_error_count = 0;
147525 +       drain_allow = true;
147526 +       for (i = 0; i < nr_pages; i++) {
147527 +               head = compound_head(pages[i]);
147528 +               if (head == prev_head)
147529 +                       continue;
147530 +               prev_head = head;
147531                 /*
147532                  * If we get a page from the CMA zone, since we are going to
147533                  * be pinning these entries, we might as well move them out
147534                  * of the CMA zone if possible.
147535                  */
147536                 if (is_migrate_cma_page(head)) {
147537 -                       if (PageHuge(head))
147538 -                               isolate_huge_page(head, &cma_page_list);
147539 -                       else {
147540 +                       if (PageHuge(head)) {
147541 +                               if (!isolate_huge_page(head, &cma_page_list))
147542 +                                       isolation_error_count++;
147543 +                       } else {
147544                                 if (!PageLRU(head) && drain_allow) {
147545                                         lru_add_drain_all();
147546                                         drain_allow = false;
147547                                 }
147549 -                               if (!isolate_lru_page(head)) {
147550 -                                       list_add_tail(&head->lru, &cma_page_list);
147551 -                                       mod_node_page_state(page_pgdat(head),
147552 -                                                           NR_ISOLATED_ANON +
147553 -                                                           page_is_file_lru(head),
147554 -                                                           thp_nr_pages(head));
147555 +                               if (isolate_lru_page(head)) {
147556 +                                       isolation_error_count++;
147557 +                                       continue;
147558                                 }
147559 +                               list_add_tail(&head->lru, &cma_page_list);
147560 +                               mod_node_page_state(page_pgdat(head),
147561 +                                                   NR_ISOLATED_ANON +
147562 +                                                   page_is_file_lru(head),
147563 +                                                   thp_nr_pages(head));
147564                         }
147565                 }
147567 -               i += step;
147568         }
147570 +       /*
147571 +        * If the list is empty and there were no isolation errors, all pages
147572 +        * are already in the correct zone.
147573 +        */
147574 +       if (list_empty(&cma_page_list) && !isolation_error_count)
147575 +               return ret;
147577         if (!list_empty(&cma_page_list)) {
147578                 /*
147579                  * drop the above get_user_pages reference.
147580 @@ -1609,34 +1611,28 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
147581                         for (i = 0; i < nr_pages; i++)
147582                                 put_page(pages[i]);
147584 -               if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
147585 -                       (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
147586 -                       /*
147587 -                        * some of the pages failed migration. Do get_user_pages
147588 -                        * without migration.
147589 -                        */
147590 -                       migrate_allow = false;
147592 +               ret = migrate_pages(&cma_page_list, alloc_migration_target,
147593 +                                   NULL, (unsigned long)&mtc, MIGRATE_SYNC,
147594 +                                   MR_CONTIG_RANGE);
147595 +               if (ret) {
147596                         if (!list_empty(&cma_page_list))
147597                                 putback_movable_pages(&cma_page_list);
147598 +                       return ret > 0 ? -ENOMEM : ret;
147599                 }
147600 -               /*
147601 -                * We did migrate all the pages, Try to get the page references
147602 -                * again migrating any new CMA pages which we failed to isolate
147603 -                * earlier.
147604 -                */
147605 -               ret = __get_user_pages_locked(mm, start, nr_pages,
147606 -                                                  pages, vmas, NULL,
147607 -                                                  gup_flags);
147609 -               if ((ret > 0) && migrate_allow) {
147610 -                       nr_pages = ret;
147611 -                       drain_allow = true;
147612 -                       goto check_again;
147613 -               }
147615 +               /* We unpinned the pages before migration; pin them again */
147616 +               ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
147617 +                                             NULL, gup_flags);
147618 +               if (ret <= 0)
147619 +                       return ret;
147620 +               nr_pages = ret;
147621         }
147623 -       return ret;
147624 +       /*
147625 +        * check again because pages were unpinned, and we also might have
147626 +        * had isolation errors and need more pages to migrate.
147627 +        */
147628 +       goto check_again;
147630  #else
147631  static long check_and_migrate_cma_pages(struct mm_struct *mm,
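The rewritten loop in check_and_migrate_cma_pages() above replaces step-based advancement over compound pages with a visit-every-entry loop that skips repeats of the same compound head. A standalone sketch of just that loop control, with compound_head() modeled as integer rounding (illustration only, not kernel code):

#include <stdio.h>

#define NR_PAGES 8

int main(void)
{
	/* pretend pages 0..7 belong to 4-page compounds: heads are 0 and 4 */
	long pages[NR_PAGES] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	long prev_head = -1;

	for (int i = 0; i < NR_PAGES; i++) {
		long head = pages[i] / 4 * 4;   /* stand-in for compound_head() */

		if (head == prev_head)
			continue;               /* tail of the same compound */
		prev_head = head;
		printf("process head page %ld\n", head);
	}
	return 0;
}

The same dedup works even when gup starts from a tail page, which is what the old "advance by the remaining step" arithmetic was trying to handle.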
147632 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
147633 index ae907a9c2050..2cf46270c84b 100644
147634 --- a/mm/huge_memory.c
147635 +++ b/mm/huge_memory.c
147636 @@ -637,7 +637,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
147637                 entry = mk_huge_pmd(page, vma->vm_page_prot);
147638                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
147639                 page_add_new_anon_rmap(page, vma, haddr, true);
147640 -               lru_cache_add_inactive_or_unevictable(page, vma);
147641 +               lru_cache_add_page_vma(page, vma, true);
147642                 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
147643                 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
147644                 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
147645 @@ -2418,7 +2418,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
147646  #ifdef CONFIG_64BIT
147647                          (1L << PG_arch_2) |
147648  #endif
147649 -                        (1L << PG_dirty)));
147650 +                        (1L << PG_dirty) |
147651 +                        LRU_GEN_MASK | LRU_USAGE_MASK));
147653         /* ->mapping in first tail page is compound_mapcount */
147654         VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
147655 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
147656 index a86a58ef132d..96b722af092e 100644
147657 --- a/mm/hugetlb.c
147658 +++ b/mm/hugetlb.c
147659 @@ -743,13 +743,20 @@ void hugetlb_fix_reserve_counts(struct inode *inode)
147661         struct hugepage_subpool *spool = subpool_inode(inode);
147662         long rsv_adjust;
147663 +       bool reserved = false;
147665         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
147666 -       if (rsv_adjust) {
147667 +       if (rsv_adjust > 0) {
147668                 struct hstate *h = hstate_inode(inode);
147670 -               hugetlb_acct_memory(h, 1);
147671 +               if (!hugetlb_acct_memory(h, 1))
147672 +                       reserved = true;
147673 +       } else if (!rsv_adjust) {
147674 +               reserved = true;
147675         }
147677 +       if (!reserved)
147678 +               pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
147682 @@ -3898,6 +3905,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
147683                                  * See Documentation/vm/mmu_notifier.rst
147684                                  */
147685                                 huge_ptep_set_wrprotect(src, addr, src_pte);
147686 +                               entry = huge_pte_wrprotect(entry);
147687                         }
147689                         page_dup_rmap(ptepage, true);
147690 diff --git a/mm/internal.h b/mm/internal.h
147691 index cb3c5e0a7799..1432feec62df 100644
147692 --- a/mm/internal.h
147693 +++ b/mm/internal.h
147694 @@ -97,26 +97,6 @@ static inline void set_page_refcounted(struct page *page)
147695         set_page_count(page, 1);
147699 - * When kernel touch the user page, the user page may be have been marked
147700 - * poison but still mapped in user space, if without this page, the kernel
147701 - * can guarantee the data integrity and operation success, the kernel is
147702 - * better to check the posion status and avoid touching it, be good not to
147703 - * panic, coredump for process fatal signal is a sample case matching this
147704 - * scenario. Or if kernel can't guarantee the data integrity, it's better
147705 - * not to call this function, let kernel touch the poison page and get to
147706 - * panic.
147707 - */
147708 -static inline bool is_page_poisoned(struct page *page)
147710 -       if (PageHWPoison(page))
147711 -               return true;
147712 -       else if (PageHuge(page) && PageHWPoison(compound_head(page)))
147713 -               return true;
147715 -       return false;
147718  extern unsigned long highest_memmap_pfn;
147721 diff --git a/mm/kfence/core.c b/mm/kfence/core.c
147722 index d53c91f881a4..f0be2c5038b5 100644
147723 --- a/mm/kfence/core.c
147724 +++ b/mm/kfence/core.c
147725 @@ -10,6 +10,7 @@
147726  #include <linux/atomic.h>
147727  #include <linux/bug.h>
147728  #include <linux/debugfs.h>
147729 +#include <linux/irq_work.h>
147730  #include <linux/kcsan-checks.h>
147731  #include <linux/kfence.h>
147732  #include <linux/kmemleak.h>
147733 @@ -586,6 +587,17 @@ late_initcall(kfence_debugfs_init);
147735  /* === Allocation Gate Timer ================================================ */
147737 +#ifdef CONFIG_KFENCE_STATIC_KEYS
147738 +/* Wait queue to wake up allocation-gate timer task. */
147739 +static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
147741 +static void wake_up_kfence_timer(struct irq_work *work)
147743 +       wake_up(&allocation_wait);
147745 +static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
147746 +#endif
147749   * Set up delayed work, which will enable and disable the static key. We need to
147750   * use a work queue (rather than a simple timer), since enabling and disabling a
147751 @@ -603,25 +615,13 @@ static void toggle_allocation_gate(struct work_struct *work)
147752         if (!READ_ONCE(kfence_enabled))
147753                 return;
147755 -       /* Enable static key, and await allocation to happen. */
147756         atomic_set(&kfence_allocation_gate, 0);
147757  #ifdef CONFIG_KFENCE_STATIC_KEYS
147758 +       /* Enable static key, and await allocation to happen. */
147759         static_branch_enable(&kfence_allocation_key);
147760 -       /*
147761 -        * Await an allocation. Timeout after 1 second, in case the kernel stops
147762 -        * doing allocations, to avoid stalling this worker task for too long.
147763 -        */
147764 -       {
147765 -               unsigned long end_wait = jiffies + HZ;
147767 -               do {
147768 -                       set_current_state(TASK_UNINTERRUPTIBLE);
147769 -                       if (atomic_read(&kfence_allocation_gate) != 0)
147770 -                               break;
147771 -                       schedule_timeout(1);
147772 -               } while (time_before(jiffies, end_wait));
147773 -               __set_current_state(TASK_RUNNING);
147774 -       }
147776 +       wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), HZ);
147778         /* Disable static key and reset timer. */
147779         static_branch_disable(&kfence_allocation_key);
147780  #endif
147781 @@ -728,6 +728,19 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
147782          */
147783         if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
147784                 return NULL;
147785 +#ifdef CONFIG_KFENCE_STATIC_KEYS
147786 +       /*
147787 +        * waitqueue_active() is fully ordered after the update of
147788 +        * kfence_allocation_gate per atomic_inc_return().
147789 +        */
147790 +       if (waitqueue_active(&allocation_wait)) {
147791 +               /*
147792 +                * Calling wake_up() here may deadlock when allocations happen
147793 +                * from within timer code. Use an irq_work to defer it.
147794 +                */
147795 +               irq_work_queue(&wake_up_kfence_timer_work);
147796 +       }
147797 +#endif
147799         if (!READ_ONCE(kfence_enabled))
147800                 return NULL;
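The kfence hunk above swaps a poll-and-sleep loop for wait_event_timeout() plus an irq_work that performs the wake-up, since calling wake_up() directly from allocation context could deadlock inside timer code. A rough userspace analogue of the wait/wake half, using POSIX threads in place of the kernel primitives (compile with -pthread; an analogy, not the kernel mechanism itself):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int allocation_happened;

static void *allocator(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	allocation_happened = 1;        /* like bumping kfence_allocation_gate */
	pthread_cond_signal(&cond);     /* like irq_work_queue() -> wake_up() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;           /* like the HZ timeout */

	pthread_create(&t, NULL, allocator, NULL);

	pthread_mutex_lock(&lock);
	while (!allocation_happened) {
		if (pthread_cond_timedwait(&cond, &lock, &deadline))
			break;          /* timed out */
	}
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);

	printf("gate observed: %d\n", allocation_happened);
	return 0;
}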
147801 diff --git a/mm/khugepaged.c b/mm/khugepaged.c
147802 index a7d6cb912b05..fd14b1e3c6f1 100644
147803 --- a/mm/khugepaged.c
147804 +++ b/mm/khugepaged.c
147805 @@ -716,17 +716,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
147806                 if (pte_write(pteval))
147807                         writable = true;
147808         }
147809 -       if (likely(writable)) {
147810 -               if (likely(referenced)) {
147811 -                       result = SCAN_SUCCEED;
147812 -                       trace_mm_collapse_huge_page_isolate(page, none_or_zero,
147813 -                                                           referenced, writable, result);
147814 -                       return 1;
147815 -               }
147816 -       } else {
147818 +       if (unlikely(!writable)) {
147819                 result = SCAN_PAGE_RO;
147820 +       } else if (unlikely(!referenced)) {
147821 +               result = SCAN_LACK_REFERENCED_PAGE;
147822 +       } else {
147823 +               result = SCAN_SUCCEED;
147824 +               trace_mm_collapse_huge_page_isolate(page, none_or_zero,
147825 +                                                   referenced, writable, result);
147826 +               return 1;
147827         }
147829  out:
147830         release_pte_pages(pte, _pte, compound_pagelist);
147831         trace_mm_collapse_huge_page_isolate(page, none_or_zero,
147832 @@ -1199,7 +1199,7 @@ static void collapse_huge_page(struct mm_struct *mm,
147833         spin_lock(pmd_ptl);
147834         BUG_ON(!pmd_none(*pmd));
147835         page_add_new_anon_rmap(new_page, vma, address, true);
147836 -       lru_cache_add_inactive_or_unevictable(new_page, vma);
147837 +       lru_cache_add_page_vma(new_page, vma, true);
147838         pgtable_trans_huge_deposit(mm, pmd, pgtable);
147839         set_pmd_at(mm, address, pmd, _pmd);
147840         update_mmu_cache_pmd(vma, address, pmd);
147841 diff --git a/mm/ksm.c b/mm/ksm.c
147842 index 9694ee2c71de..b32391ccf6d5 100644
147843 --- a/mm/ksm.c
147844 +++ b/mm/ksm.c
147845 @@ -794,6 +794,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
147846                 stable_node->rmap_hlist_len--;
147848                 put_anon_vma(rmap_item->anon_vma);
147849 +               rmap_item->head = NULL;
147850                 rmap_item->address &= PAGE_MASK;
147852         } else if (rmap_item->address & UNSTABLE_FLAG) {
147853 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
147854 index e064ac0d850a..594f99eba9c0 100644
147855 --- a/mm/memcontrol.c
147856 +++ b/mm/memcontrol.c
147857 @@ -3181,9 +3181,17 @@ static void drain_obj_stock(struct memcg_stock_pcp *stock)
147858                 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
147860                 if (nr_pages) {
147861 +                       struct mem_cgroup *memcg;
147863                         rcu_read_lock();
147864 -                       __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
147865 +retry:
147866 +                       memcg = obj_cgroup_memcg(old);
147867 +                       if (unlikely(!css_tryget(&memcg->css)))
147868 +                               goto retry;
147869                         rcu_read_unlock();
147871 +                       __memcg_kmem_uncharge(memcg, nr_pages);
147872 +                       css_put(&memcg->css);
147873                 }
147875                 /*
147876 @@ -5206,6 +5214,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
147877                 free_mem_cgroup_per_node_info(memcg, node);
147878         free_percpu(memcg->vmstats_percpu);
147879         free_percpu(memcg->vmstats_local);
147880 +       lru_gen_free_mm_list(memcg);
147881         kfree(memcg);
147884 @@ -5258,6 +5267,9 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
147885                 if (alloc_mem_cgroup_per_node_info(memcg, node))
147886                         goto fail;
147888 +       if (lru_gen_alloc_mm_list(memcg))
147889 +               goto fail;
147891         if (memcg_wb_domain_init(memcg, GFP_KERNEL))
147892                 goto fail;
147894 @@ -6162,6 +6174,29 @@ static void mem_cgroup_move_task(void)
147896  #endif
147898 +#ifdef CONFIG_LRU_GEN
147899 +static void mem_cgroup_attach(struct cgroup_taskset *tset)
147901 +       struct cgroup_subsys_state *css;
147902 +       struct task_struct *task = NULL;
147904 +       cgroup_taskset_for_each_leader(task, css, tset)
147905 +               ;
147907 +       if (!task)
147908 +               return;
147910 +       task_lock(task);
147911 +       if (task->mm && task->mm->owner == task)
147912 +               lru_gen_migrate_mm(task->mm);
147913 +       task_unlock(task);
147915 +#else
147916 +static void mem_cgroup_attach(struct cgroup_taskset *tset)
147919 +#endif
147921  static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
147923         if (value == PAGE_COUNTER_MAX)
147924 @@ -6502,6 +6537,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
147925         .css_free = mem_cgroup_css_free,
147926         .css_reset = mem_cgroup_css_reset,
147927         .can_attach = mem_cgroup_can_attach,
147928 +       .attach = mem_cgroup_attach,
147929         .cancel_attach = mem_cgroup_cancel_attach,
147930         .post_attach = mem_cgroup_move_task,
147931         .dfl_cftypes = memory_files,
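The drain_obj_stock() fix above only uncharges against a memcg whose reference could actually be taken: css_tryget() can fail for a dying cgroup, in which case the pointer is re-read under RCU and the attempt repeats. A small sketch of that tryget-or-retry idea, with C11 atomics standing in for css refcounting (an analogy, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcnt;
};

static bool tryget(struct obj *o)
{
	int old = atomic_load(&o->refcnt);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;    /* reference taken */
	}
	return false;                   /* already dying; caller must retry */
}

int main(void)
{
	struct obj live = { .refcnt = 1 };
	struct obj dying = { .refcnt = 0 };

	printf("tryget(live)  = %d\n", tryget(&live));   /* 1 */
	printf("tryget(dying) = %d\n", tryget(&dying));  /* 0 -> re-read, retry */
	return 0;
}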
147932 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
147933 index 24210c9bd843..bd3945446d47 100644
147934 --- a/mm/memory-failure.c
147935 +++ b/mm/memory-failure.c
147936 @@ -1368,7 +1368,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
147937                  * communicated in siginfo, see kill_proc()
147938                  */
147939                 start = (page->index << PAGE_SHIFT) & ~(size - 1);
147940 -               unmap_mapping_range(page->mapping, start, start + size, 0);
147941 +               unmap_mapping_range(page->mapping, start, size, 0);
147942         }
147943         kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
147944         rc = 0;
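The one-line memory-failure fix above rests on unmap_mapping_range() taking a hole length, not an end offset, as its third argument; passing start + size unmapped roughly twice the intended range. A quick standalone check of the arithmetic with assumed sample values:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long index = 0x345;            /* page->index */
	unsigned long size = 2UL << 20;         /* dev_pagemap geometry, 2 MiB */
	unsigned long start = (index << PAGE_SHIFT) & ~(size - 1);

	printf("holebegin = %#lx\n", start);
	printf("holelen   = %#lx (correct: the range size)\n", size);
	printf("old bug   = %#lx (start + size passed as a length)\n",
	       start + size);
	return 0;
}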
147945 diff --git a/mm/memory.c b/mm/memory.c
147946 index 550405fc3b5e..a1332ba9c0da 100644
147947 --- a/mm/memory.c
147948 +++ b/mm/memory.c
147949 @@ -73,6 +73,7 @@
147950  #include <linux/perf_event.h>
147951  #include <linux/ptrace.h>
147952  #include <linux/vmalloc.h>
147953 +#include <linux/mm_inline.h>
147955  #include <trace/events/kmem.h>
147957 @@ -839,7 +840,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
147958         copy_user_highpage(new_page, page, addr, src_vma);
147959         __SetPageUptodate(new_page);
147960         page_add_new_anon_rmap(new_page, dst_vma, addr, false);
147961 -       lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
147962 +       lru_cache_add_page_vma(new_page, dst_vma, false);
147963         rss[mm_counter(new_page)]++;
147965         /* All done, just insert the new page copy in the child */
147966 @@ -1548,6 +1549,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
147967         mmu_notifier_invalidate_range_end(&range);
147968         tlb_finish_mmu(&tlb);
147970 +EXPORT_SYMBOL(zap_page_range);
147972  /**
147973   * zap_page_range_single - remove user pages in a given range
147974 @@ -2907,7 +2909,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
147975                  */
147976                 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
147977                 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
147978 -               lru_cache_add_inactive_or_unevictable(new_page, vma);
147979 +               lru_cache_add_page_vma(new_page, vma, true);
147980                 /*
147981                  * We call the notify macro here because, when using secondary
147982                  * mmu page tables (such as kvm shadow page tables), we want the
147983 @@ -3438,9 +3440,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
147984         /* ksm created a completely new copy */
147985         if (unlikely(page != swapcache && swapcache)) {
147986                 page_add_new_anon_rmap(page, vma, vmf->address, false);
147987 -               lru_cache_add_inactive_or_unevictable(page, vma);
147988 +               lru_cache_add_page_vma(page, vma, true);
147989         } else {
147990                 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
147991 +               lru_gen_activation(page, vma);
147992         }
147994         swap_free(entry);
147995 @@ -3584,7 +3587,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
147997         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
147998         page_add_new_anon_rmap(page, vma, vmf->address, false);
147999 -       lru_cache_add_inactive_or_unevictable(page, vma);
148000 +       lru_cache_add_page_vma(page, vma, true);
148001  setpte:
148002         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
148004 @@ -3709,6 +3712,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
148006         add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
148007         page_add_file_rmap(page, true);
148008 +       lru_gen_activation(page, vma);
148009         /*
148010          * deposit and withdraw with pmd lock held
148011          */
148012 @@ -3752,10 +3756,11 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
148013         if (write && !(vma->vm_flags & VM_SHARED)) {
148014                 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
148015                 page_add_new_anon_rmap(page, vma, addr, false);
148016 -               lru_cache_add_inactive_or_unevictable(page, vma);
148017 +               lru_cache_add_page_vma(page, vma, true);
148018         } else {
148019                 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
148020                 page_add_file_rmap(page, false);
148021 +               lru_gen_activation(page, vma);
148022         }
148023         set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
148025 diff --git a/mm/migrate.c b/mm/migrate.c
148026 index 62b81d5257aa..9a50fd026236 100644
148027 --- a/mm/migrate.c
148028 +++ b/mm/migrate.c
148029 @@ -2973,6 +2973,13 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
148031                         swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
148032                         entry = swp_entry_to_pte(swp_entry);
148033 +               } else {
148034 +                       /*
148035 +                        * For now we only support migrating to un-addressable
148036 +                        * device memory.
148037 +                        */
148038 +                       pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
148039 +                       goto abort;
148040                 }
148041         } else {
148042                 entry = mk_pte(page, vma->vm_page_prot);
148043 @@ -3004,7 +3011,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
148044         inc_mm_counter(mm, MM_ANONPAGES);
148045         page_add_new_anon_rmap(page, vma, addr, false);
148046         if (!is_zone_device_page(page))
148047 -               lru_cache_add_inactive_or_unevictable(page, vma);
148048 +               lru_cache_add_page_vma(page, vma, false);
148049         get_page(page);
148051         if (flush) {
148052 diff --git a/mm/mm_init.c b/mm/mm_init.c
148053 index 8e02e865cc65..6303ed7aa511 100644
148054 --- a/mm/mm_init.c
148055 +++ b/mm/mm_init.c
148056 @@ -71,27 +71,33 @@ void __init mminit_verify_pageflags_layout(void)
148057         width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
148058                 - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
148059         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
148060 -               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
148061 +               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d lru gen %d tier %d Flags %d\n",
148062                 SECTIONS_WIDTH,
148063                 NODES_WIDTH,
148064                 ZONES_WIDTH,
148065                 LAST_CPUPID_WIDTH,
148066                 KASAN_TAG_WIDTH,
148067 +               LRU_GEN_WIDTH,
148068 +               LRU_USAGE_WIDTH,
148069                 NR_PAGEFLAGS);
148070         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
148071 -               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
148072 +               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d lru gen %d tier %d\n",
148073                 SECTIONS_SHIFT,
148074                 NODES_SHIFT,
148075                 ZONES_SHIFT,
148076                 LAST_CPUPID_SHIFT,
148077 -               KASAN_TAG_WIDTH);
148078 +               KASAN_TAG_WIDTH,
148079 +               LRU_GEN_WIDTH,
148080 +               LRU_USAGE_WIDTH);
148081         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
148082 -               "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
148083 +               "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu lru gen %lu tier %lu\n",
148084                 (unsigned long)SECTIONS_PGSHIFT,
148085                 (unsigned long)NODES_PGSHIFT,
148086                 (unsigned long)ZONES_PGSHIFT,
148087                 (unsigned long)LAST_CPUPID_PGSHIFT,
148088 -               (unsigned long)KASAN_TAG_PGSHIFT);
148089 +               (unsigned long)KASAN_TAG_PGSHIFT,
148090 +               (unsigned long)LRU_GEN_PGOFF,
148091 +               (unsigned long)LRU_USAGE_PGOFF);
148092         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
148093                 "Node/Zone ID: %lu -> %lu\n",
148094                 (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
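The extra fields printed above consume spare page-flag bits. Per the Kconfig help earlier in this patch, LRU_GEN uses order_base_2(N+1) bits for N generations and N-2 bits for N tiers; a quick standalone check with the defaults (7 generations, 4 tiers):

#include <stdio.h>

static int order_base_2(unsigned int n)
{
	int bits = 0;

	while ((1U << bits) < n)
		bits++;
	return bits;
}

int main(void)
{
	int nr_lru_gens = 7, tiers_per_gen = 4;

	printf("LRU_GEN_WIDTH   = %d\n", order_base_2(nr_lru_gens + 1)); /* 3 */
	printf("LRU_USAGE_WIDTH = %d\n", tiers_per_gen - 2);             /* 2 */
	return 0;
}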
148095 diff --git a/mm/mmzone.c b/mm/mmzone.c
148096 index eb89d6e018e2..2ec0d7793424 100644
148097 --- a/mm/mmzone.c
148098 +++ b/mm/mmzone.c
148099 @@ -81,6 +81,8 @@ void lruvec_init(struct lruvec *lruvec)
148101         for_each_lru(lru)
148102                 INIT_LIST_HEAD(&lruvec->lists[lru]);
148104 +       lru_gen_init_lruvec(lruvec);
148107  #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
148108 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
148109 index cfc72873961d..4bb3cdfc47f8 100644
148110 --- a/mm/page_alloc.c
148111 +++ b/mm/page_alloc.c
148112 @@ -764,32 +764,36 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
148113   */
148114  void init_mem_debugging_and_hardening(void)
148116 +       bool page_poisoning_requested = false;
148118 +#ifdef CONFIG_PAGE_POISONING
148119 +       /*
148120 +        * Page poisoning is debug page alloc for some arches. If
148121 +        * either of those options are enabled, enable poisoning.
148122 +        */
148123 +       if (page_poisoning_enabled() ||
148124 +            (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
148125 +             debug_pagealloc_enabled())) {
148126 +               static_branch_enable(&_page_poisoning_enabled);
148127 +               page_poisoning_requested = true;
148128 +       }
148129 +#endif
148131         if (_init_on_alloc_enabled_early) {
148132 -               if (page_poisoning_enabled())
148133 +               if (page_poisoning_requested)
148134                         pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
148135                                 "will take precedence over init_on_alloc\n");
148136                 else
148137                         static_branch_enable(&init_on_alloc);
148138         }
148139         if (_init_on_free_enabled_early) {
148140 -               if (page_poisoning_enabled())
148141 +               if (page_poisoning_requested)
148142                         pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
148143                                 "will take precedence over init_on_free\n");
148144                 else
148145                         static_branch_enable(&init_on_free);
148146         }
148148 -#ifdef CONFIG_PAGE_POISONING
148149 -       /*
148150 -        * Page poisoning is debug page alloc for some arches. If
148151 -        * either of those options are enabled, enable poisoning.
148152 -        */
148153 -       if (page_poisoning_enabled() ||
148154 -            (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
148155 -             debug_pagealloc_enabled()))
148156 -               static_branch_enable(&_page_poisoning_enabled);
148157 -#endif
148159  #ifdef CONFIG_DEBUG_PAGEALLOC
148160         if (!debug_pagealloc_enabled())
148161                 return;
148162 diff --git a/mm/rmap.c b/mm/rmap.c
148163 index b0fc27e77d6d..d600b282ced5 100644
148164 --- a/mm/rmap.c
148165 +++ b/mm/rmap.c
148166 @@ -72,6 +72,7 @@
148167  #include <linux/page_idle.h>
148168  #include <linux/memremap.h>
148169  #include <linux/userfaultfd_k.h>
148170 +#include <linux/mm_inline.h>
148172  #include <asm/tlbflush.h>
148174 @@ -792,6 +793,11 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
148175                 }
148177                 if (pvmw.pte) {
148178 +                       /* the multigenerational lru exploits spatial locality */
148179 +                       if (lru_gen_enabled() && pte_young(*pvmw.pte)) {
148180 +                               lru_gen_scan_around(&pvmw);
148181 +                               referenced++;
148182 +                       }
148183                         if (ptep_clear_flush_young_notify(vma, address,
148184                                                 pvmw.pte)) {
148185                                 /*
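lru_gen_scan_around(), called above, builds on spatial locality: when one PTE is found young, its neighbors in the same page table are likely young too, so a window around the hit is scanned as well. A toy sketch of that heuristic over an array of accessed bits (the window size and layout are invented for illustration):

#include <stdio.h>

#define WINDOW 4

int main(void)
{
	int accessed[16] = { 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0 };
	int hit = 5;    /* index where pte_young() fired */
	int lo = hit - WINDOW < 0 ? 0 : hit - WINDOW;
	int hi = hit + WINDOW >= 16 ? 15 : hit + WINDOW;

	for (int i = lo; i <= hi; i++)
		if (accessed[i])
			printf("index %d is young, promote its page\n", i);
	return 0;
}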
148186 diff --git a/mm/shmem.c b/mm/shmem.c
148187 index b2db4ed0fbc7..9dd24a2f0b7a 100644
148188 --- a/mm/shmem.c
148189 +++ b/mm/shmem.c
148190 @@ -2258,25 +2258,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
148191  static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
148193         struct shmem_inode_info *info = SHMEM_I(file_inode(file));
148194 +       int ret;
148196 -       if (info->seals & F_SEAL_FUTURE_WRITE) {
148197 -               /*
148198 -                * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
148199 -                * "future write" seal active.
148200 -                */
148201 -               if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
148202 -                       return -EPERM;
148204 -               /*
148205 -                * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
148206 -                * MAP_SHARED and read-only, take care to not allow mprotect to
148207 -                * revert protections on such mappings. Do this only for shared
148208 -                * mappings. For private mappings, don't need to mask
148209 -                * VM_MAYWRITE as we still want them to be COW-writable.
148210 -                */
148211 -               if (vma->vm_flags & VM_SHARED)
148212 -                       vma->vm_flags &= ~(VM_MAYWRITE);
148213 -       }
148214 +       ret = seal_check_future_write(info->seals, vma);
148215 +       if (ret)
148216 +               return ret;
148218         /* arm64 - allow memory tagging on RAM-based files */
148219         vma->vm_flags |= VM_MTE_ALLOWED;
148220 @@ -2375,8 +2361,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
148221         pgoff_t offset, max_off;
148223         ret = -ENOMEM;
148224 -       if (!shmem_inode_acct_block(inode, 1))
148225 +       if (!shmem_inode_acct_block(inode, 1)) {
148226 +               /*
148227 +                * We may have got a page, returned -ENOENT triggering a retry,
148228 +                * and now we find ourselves with -ENOMEM. Release the page, to
148229 +                * avoid a BUG_ON in our caller.
148230 +                */
148231 +               if (unlikely(*pagep)) {
148232 +                       put_page(*pagep);
148233 +                       *pagep = NULL;
148234 +               }
148235                 goto out;
148236 +       }
148238         if (!*pagep) {
148239                 page = shmem_alloc_page(gfp, info, pgoff);
148240 @@ -4233,6 +4229,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
148242         return 0;
148244 +EXPORT_SYMBOL_GPL(shmem_zero_setup);
148246  /**
148247   * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
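The shmem hunk above folds the open-coded F_SEAL_FUTURE_WRITE handling into the shared seal_check_future_write() helper. The user-visible contract it preserves can be exercised from userspace (requires Linux 5.1+ and a glibc that defines F_SEAL_FUTURE_WRITE):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("demo", MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0)
		return 1;

	/* New shared+writable mappings must now fail with EPERM. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		perror("mmap");         /* expected: Operation not permitted */
	close(fd);
	return 0;
}

Private (MAP_PRIVATE) writable mappings still succeed, which is why only VM_SHARED mappings have VM_MAYWRITE masked off.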
148248 diff --git a/mm/sparse.c b/mm/sparse.c
148249 index 7bd23f9d6cef..33406ea2ecc4 100644
148250 --- a/mm/sparse.c
148251 +++ b/mm/sparse.c
148252 @@ -547,6 +547,7 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
148253                         pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
148254                                __func__, nid);
148255                         pnum_begin = pnum;
148256 +                       sparse_buffer_fini();
148257                         goto failed;
148258                 }
148259                 check_usemap_section_nr(nid, usage);
148260 diff --git a/mm/swap.c b/mm/swap.c
148261 index 31b844d4ed94..d6458ee1e9f8 100644
148262 --- a/mm/swap.c
148263 +++ b/mm/swap.c
148264 @@ -306,7 +306,7 @@ void lru_note_cost_page(struct page *page)
148266  static void __activate_page(struct page *page, struct lruvec *lruvec)
148268 -       if (!PageActive(page) && !PageUnevictable(page)) {
148269 +       if (!PageUnevictable(page) && !page_is_active(page, lruvec)) {
148270                 int nr_pages = thp_nr_pages(page);
148272                 del_page_from_lru_list(page, lruvec);
148273 @@ -334,10 +334,10 @@ static bool need_activate_page_drain(int cpu)
148274         return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
148277 -static void activate_page(struct page *page)
148278 +static void activate_page_on_lru(struct page *page)
148280         page = compound_head(page);
148281 -       if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
148282 +       if (PageLRU(page) && !PageUnevictable(page) && !page_is_active(page, NULL)) {
148283                 struct pagevec *pvec;
148285                 local_lock(&lru_pvecs.lock);
148286 @@ -354,7 +354,7 @@ static inline void activate_page_drain(int cpu)
148290 -static void activate_page(struct page *page)
148291 +static void activate_page_on_lru(struct page *page)
148293         struct lruvec *lruvec;
148295 @@ -368,11 +368,22 @@ static void activate_page(struct page *page)
148297  #endif
148299 -static void __lru_cache_activate_page(struct page *page)
148301 + * If the page is on the LRU, queue it for activation via
148302 + * lru_pvecs.activate_page. Otherwise, assume the page is on a
148303 + * pagevec, mark it active and it'll be moved to the active
148304 + * LRU on the next drain.
148305 + */
148306 +void activate_page(struct page *page)
148308         struct pagevec *pvec;
148309         int i;
148311 +       if (PageLRU(page)) {
148312 +               activate_page_on_lru(page);
148313 +               return;
148314 +       }
148316         local_lock(&lru_pvecs.lock);
148317         pvec = this_cpu_ptr(&lru_pvecs.lru_add);
148319 @@ -420,17 +431,8 @@ void mark_page_accessed(struct page *page)
148320                  * this list is never rotated or maintained, so marking an
148321                  * evictable page accessed has no effect.
148322                  */
148323 -       } else if (!PageActive(page)) {
148324 -               /*
148325 -                * If the page is on the LRU, queue it for activation via
148326 -                * lru_pvecs.activate_page. Otherwise, assume the page is on a
148327 -                * pagevec, mark it active and it'll be moved to the active
148328 -                * LRU on the next drain.
148329 -                */
148330 -               if (PageLRU(page))
148331 -                       activate_page(page);
148332 -               else
148333 -                       __lru_cache_activate_page(page);
148334 +       } else if (!page_inc_usage(page)) {
148335 +               activate_page(page);
148336                 ClearPageReferenced(page);
148337                 workingset_activation(page);
148338         }
148339 @@ -465,15 +467,14 @@ void lru_cache_add(struct page *page)
148340  EXPORT_SYMBOL(lru_cache_add);
148342  /**
148343 - * lru_cache_add_inactive_or_unevictable
148344 + * lru_cache_add_page_vma
148345   * @page:  the page to be added to LRU
148346   * @vma:   vma in which page is mapped for determining reclaimability
148347   *
148348 - * Place @page on the inactive or unevictable LRU list, depending on its
148349 - * evictability.
148350 + * Place @page on an LRU list, depending on its evictability.
148351   */
148352 -void lru_cache_add_inactive_or_unevictable(struct page *page,
148353 -                                        struct vm_area_struct *vma)
148354 +void lru_cache_add_page_vma(struct page *page, struct vm_area_struct *vma,
148355 +                           bool faulting)
148357         bool unevictable;
148359 @@ -490,6 +491,11 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
148360                 __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
148361                 count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
148362         }
148364 +       /* tell the multigenerational lru that the page is being faulted in */
148365 +       if (lru_gen_enabled() && !unevictable && faulting)
148366 +               SetPageActive(page);
148368         lru_cache_add(page);
148371 @@ -516,7 +522,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
148372   */
148373  static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
148375 -       bool active = PageActive(page);
148376 +       bool active = page_is_active(page, lruvec);
148377         int nr_pages = thp_nr_pages(page);
148379         if (PageUnevictable(page))
148380 @@ -556,7 +562,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
148382  static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
148384 -       if (PageActive(page) && !PageUnevictable(page)) {
148385 +       if (!PageUnevictable(page) && page_is_active(page, lruvec)) {
148386                 int nr_pages = thp_nr_pages(page);
148388                 del_page_from_lru_list(page, lruvec);
148389 @@ -670,7 +676,7 @@ void deactivate_file_page(struct page *page)
148390   */
148391  void deactivate_page(struct page *page)
148393 -       if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
148394 +       if (PageLRU(page) && !PageUnevictable(page) && page_is_active(page, NULL)) {
148395                 struct pagevec *pvec;
148397                 local_lock(&lru_pvecs.lock);
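mark_page_accessed() above now defers to page_inc_usage(), which is defined elsewhere in this patch series. A hedged guess at its shape, inferred only from the call site and the TIERS_PER_GEN help text: bump a small saturating usage counter kept in spare page flags, returning false only when the multigenerational LRU is disabled so the caller falls back to activate_page():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define USAGE_MASK 0x3          /* 2 low bits -> tiers 0..3, as with 4 tiers */

static bool lru_gen_on = true;  /* stand-in for lru_gen_enabled() */

static bool page_inc_usage(atomic_uint *flags)
{
	unsigned int old = atomic_load(flags);

	if (!lru_gen_on)
		return false;   /* caller falls back to activate_page() */
	do {
		if ((old & USAGE_MASK) == USAGE_MASK)
			return true;    /* already saturated at the top tier */
	} while (!atomic_compare_exchange_weak(flags, &old, old + 1));
	return true;
}

int main(void)
{
	atomic_uint flags = 0;

	for (int i = 0; i < 5; i++) {
		page_inc_usage(&flags);
		printf("access %d -> usage tier %u\n", i + 1, flags & USAGE_MASK);
	}
	return 0;
}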
148398 diff --git a/mm/swapfile.c b/mm/swapfile.c
148399 index 084a5b9a18e5..ab3b5ca404fd 100644
148400 --- a/mm/swapfile.c
148401 +++ b/mm/swapfile.c
148402 @@ -1936,7 +1936,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
148403                 page_add_anon_rmap(page, vma, addr, false);
148404         } else { /* ksm created a completely new copy */
148405                 page_add_new_anon_rmap(page, vma, addr, false);
148406 -               lru_cache_add_inactive_or_unevictable(page, vma);
148407 +               lru_cache_add_page_vma(page, vma, false);
148408         }
148409         swap_free(entry);
148410  out:
148411 @@ -2702,6 +2702,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
148412         err = 0;
148413         atomic_inc(&proc_poll_event);
148414         wake_up_interruptible(&proc_poll_wait);
148415 +       /* stop tracking anon if the multigenerational lru is enabled */
148416 +       lru_gen_set_state(false, false, true);
148418  out_dput:
148419         filp_close(victim, NULL);
148420 @@ -3348,6 +3350,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
148421         mutex_unlock(&swapon_mutex);
148422         atomic_inc(&proc_poll_event);
148423         wake_up_interruptible(&proc_poll_wait);
148424 +       /* start tracking anon if the multigenerational lru is enabled */
148425 +       lru_gen_set_state(true, false, true);
148427         error = 0;
148428         goto out;
148429 diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
148430 index 9a3d451402d7..d7382fd886cc 100644
148431 --- a/mm/userfaultfd.c
148432 +++ b/mm/userfaultfd.c
148433 @@ -123,7 +123,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
148435         inc_mm_counter(dst_mm, MM_ANONPAGES);
148436         page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
148437 -       lru_cache_add_inactive_or_unevictable(page, dst_vma);
148438 +       lru_cache_add_page_vma(page, dst_vma, true);
148440         set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
148442 @@ -362,38 +362,38 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
148443                  * If a reservation for the page existed in the reservation
148444                  * map of a private mapping, the map was modified to indicate
148445                  * the reservation was consumed when the page was allocated.
148446 -                * We clear the PagePrivate flag now so that the global
148447 +                * We clear the HPageRestoreReserve flag now so that the global
148448                  * reserve count will not be incremented in free_huge_page.
148449                  * The reservation map will still indicate the reservation
148450                  * was consumed and possibly prevent later page allocation.
148451                  * This is better than leaking a global reservation.  If no
148452 -                * reservation existed, it is still safe to clear PagePrivate
148453 -                * as no adjustments to reservation counts were made during
148454 -                * allocation.
148455 +                * reservation existed, it is still safe to clear
148456 +                * HPageRestoreReserve as no adjustments to reservation counts
148457 +                * were made during allocation.
148458                  *
148459                  * The reservation map for shared mappings indicates which
148460                  * pages have reservations.  When a huge page is allocated
148461                  * for an address with a reservation, no change is made to
148462 -                * the reserve map.  In this case PagePrivate will be set
148463 -                * to indicate that the global reservation count should be
148464 +                * the reserve map.  In this case HPageRestoreReserve will be
148465 +                * set to indicate that the global reservation count should be
148466                  * incremented when the page is freed.  This is the desired
148467                  * behavior.  However, when a huge page is allocated for an
148468                  * address without a reservation a reservation entry is added
148469 -                * to the reservation map, and PagePrivate will not be set.
148470 -                * When the page is freed, the global reserve count will NOT
148471 -                * be incremented and it will appear as though we have leaked
148472 -                * reserved page.  In this case, set PagePrivate so that the
148473 -                * global reserve count will be incremented to match the
148474 -                * reservation map entry which was created.
148475 +                * to the reservation map, and HPageRestoreReserve will not be
148476 +                * set. When the page is freed, the global reserve count will
148477 +                * NOT be incremented and it will appear as though we have
148478 +                * leaked reserved page.  In this case, set HPageRestoreReserve
148479 +                * so that the global reserve count will be incremented to
148480 +                * match the reservation map entry which was created.
148481                  *
148482                  * Note that vm_alloc_shared is based on the flags of the vma
148483                  * for which the page was originally allocated.  dst_vma could
148484                  * be different or NULL on error.
148485                  */
148486                 if (vm_alloc_shared)
148487 -                       SetPagePrivate(page);
148488 +                       SetHPageRestoreReserve(page);
148489                 else
148490 -                       ClearPagePrivate(page);
148491 +                       ClearHPageRestoreReserve(page);
148492                 put_page(page);
148493         }
148494         BUG_ON(copied < 0);
148495 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
148496 index 4f5f8c907897..64ab133ee816 100644
148497 --- a/mm/vmalloc.c
148498 +++ b/mm/vmalloc.c
148499 @@ -316,6 +316,7 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
148501         return 0;
148503 +EXPORT_SYMBOL(map_kernel_range_noflush);
148505  int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
148506                 struct page **pages)
148507 @@ -2131,6 +2132,7 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
148508                                   NUMA_NO_NODE, GFP_KERNEL,
148509                                   __builtin_return_address(0));
148511 +EXPORT_SYMBOL(get_vm_area);
148513  struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
148514                                 const void *caller)
148515 diff --git a/mm/vmscan.c b/mm/vmscan.c
148516 index 562e87cbd7a1..4a34cc622681 100644
148517 --- a/mm/vmscan.c
148518 +++ b/mm/vmscan.c
148519 @@ -49,6 +49,11 @@
148520  #include <linux/printk.h>
148521  #include <linux/dax.h>
148522  #include <linux/psi.h>
148523 +#include <linux/memory.h>
148524 +#include <linux/pagewalk.h>
148525 +#include <linux/shmem_fs.h>
148526 +#include <linux/ctype.h>
148527 +#include <linux/debugfs.h>
148529  #include <asm/tlbflush.h>
148530  #include <asm/div64.h>
148531 @@ -118,6 +123,19 @@ struct scan_control {
148532         /* The file pages on the current node are dangerously low */
148533         unsigned int file_is_tiny:1;
148535 +       /*
148536 +        * The clean file pages on the current node won't be reclaimed when
148537 +        * their amount is below vm.clean_low_kbytes *unless* we threaten
148538 +        * to OOM or have no free swap space or vm.swappiness=0.
148539 +        */
148540 +       unsigned int clean_below_low:1;
148542 +       /*
148543 +        * The clean file pages on the current node won't be reclaimed when
148544 +        * their amount is below vm.clean_min_kbytes.
148545 +        */
148546 +       unsigned int clean_below_min:1;
148548         /* Allocation order */
148549         s8 order;
148551 @@ -164,10 +182,21 @@ struct scan_control {
148552  #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
148553  #endif
148555 +#if CONFIG_CLEAN_LOW_KBYTES < 0
148556 +#error "CONFIG_CLEAN_LOW_KBYTES must be >= 0"
148557 +#endif
148559 +#if CONFIG_CLEAN_MIN_KBYTES < 0
148560 +#error "CONFIG_CLEAN_MIN_KBYTES must be >= 0"
148561 +#endif
148563 +unsigned long sysctl_clean_low_kbytes __read_mostly = CONFIG_CLEAN_LOW_KBYTES;
148564 +unsigned long sysctl_clean_min_kbytes __read_mostly = CONFIG_CLEAN_MIN_KBYTES;
148567   * From 0 .. 200.  Higher means more swappy.
148568   */
148569 -int vm_swappiness = 60;
148570 +int vm_swappiness = 30;
148572  static void set_task_reclaim_state(struct task_struct *task,
148573                                    struct reclaim_state *rs)
148574 @@ -897,9 +926,11 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
148576         if (PageSwapCache(page)) {
148577                 swp_entry_t swap = { .val = page_private(page) };
148578 -               mem_cgroup_swapout(page, swap);
148580 +               /* get a shadow entry before page_memcg() is cleared */
148581                 if (reclaimed && !mapping_exiting(mapping))
148582                         shadow = workingset_eviction(page, target_memcg);
148583 +               mem_cgroup_swapout(page, swap);
148584                 __delete_from_swap_cache(page, swap, shadow);
148585                 xa_unlock_irqrestore(&mapping->i_pages, flags);
148586                 put_swap_page(page, swap);
148587 @@ -1110,6 +1141,10 @@ static unsigned int shrink_page_list(struct list_head *page_list,
148588                 if (!sc->may_unmap && page_mapped(page))
148589                         goto keep_locked;
148591 +               /* in case the page was found accessed by lru_gen_scan_around() */
148592 +               if (lru_gen_enabled() && !ignore_references && PageReferenced(page))
148593 +                       goto keep_locked;
148595                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
148596                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
148598 @@ -2224,6 +2259,135 @@ enum scan_balance {
148599         SCAN_FILE,
148602 +static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
148604 +       unsigned long file;
148605 +       struct lruvec *target_lruvec;
148607 +       /* the multigenerational lru doesn't use these counters */
148608 +       if (lru_gen_enabled())
148609 +               return;
148611 +       target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
148613 +       /*
148614 +        * Determine the scan balance between anon and file LRUs.
148615 +        */
148616 +       spin_lock_irq(&target_lruvec->lru_lock);
148617 +       sc->anon_cost = target_lruvec->anon_cost;
148618 +       sc->file_cost = target_lruvec->file_cost;
148619 +       spin_unlock_irq(&target_lruvec->lru_lock);
148621 +       /*
148622 +        * Target desirable inactive:active list ratios for the anon
148623 +        * and file LRU lists.
148624 +        */
148625 +       if (!sc->force_deactivate) {
148626 +               unsigned long refaults;
148628 +               refaults = lruvec_page_state(target_lruvec,
148629 +                               WORKINGSET_ACTIVATE_ANON);
148630 +               if (refaults != target_lruvec->refaults[0] ||
148631 +                       inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
148632 +                       sc->may_deactivate |= DEACTIVATE_ANON;
148633 +               else
148634 +                       sc->may_deactivate &= ~DEACTIVATE_ANON;
148636 +               /*
148637 +                * When refaults are being observed, it means a new
148638 +                * workingset is being established. Deactivate to get
148639 +                * rid of any stale active pages quickly.
148640 +                */
148641 +               refaults = lruvec_page_state(target_lruvec,
148642 +                               WORKINGSET_ACTIVATE_FILE);
148643 +               if (refaults != target_lruvec->refaults[1] ||
148644 +                   inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
148645 +                       sc->may_deactivate |= DEACTIVATE_FILE;
148646 +               else
148647 +                       sc->may_deactivate &= ~DEACTIVATE_FILE;
148648 +       } else
148649 +               sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
148651 +       /*
148652 +        * If we have plenty of inactive file pages that aren't
148653 +        * thrashing, try to reclaim those first before touching
148654 +        * anonymous pages.
148655 +        */
148656 +       file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
148657 +       if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
148658 +               sc->cache_trim_mode = 1;
148659 +       else
148660 +               sc->cache_trim_mode = 0;
148662 +       /*
148663 +        * Prevent the reclaimer from falling into the cache trap: as
148664 +        * cache pages start out inactive, every cache fault will tip
148665 +        * the scan balance towards the file LRU.  And as the file LRU
148666 +        * shrinks, so does the window for rotation from references.
148667 +        * This means we have a runaway feedback loop where a tiny
148668 +        * thrashing file LRU becomes infinitely more attractive than
148669 +        * anon pages.  Try to detect this based on file LRU size.
148670 +        */
148671 +       if (!cgroup_reclaim(sc)) {
148672 +               unsigned long total_high_wmark = 0;
148673 +               unsigned long free, anon;
148674 +               int z;
148676 +               free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
148677 +               file = node_page_state(pgdat, NR_ACTIVE_FILE) +
148678 +                          node_page_state(pgdat, NR_INACTIVE_FILE);
148680 +               for (z = 0; z < MAX_NR_ZONES; z++) {
148681 +                       struct zone *zone = &pgdat->node_zones[z];
148683 +                       if (!managed_zone(zone))
148684 +                               continue;
148686 +                       total_high_wmark += high_wmark_pages(zone);
148687 +               }
148689 +               /*
148690 +                * Consider anon: if that's low too, this isn't a
148691 +                * runaway file reclaim problem, but rather just
148692 +                * extreme pressure. Reclaim as per usual then.
148693 +                */
148694 +               anon = node_page_state(pgdat, NR_INACTIVE_ANON);
148696 +               sc->file_is_tiny =
148697 +                       file + free <= total_high_wmark &&
148698 +                       !(sc->may_deactivate & DEACTIVATE_ANON) &&
148699 +                       anon >> sc->priority;
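+               /*
+                * E.g. at priority 12, "anon >> sc->priority" requires at
+                * least 4096 inactive anon pages; together with the checks
+                * above, file_is_tiny is set only when the file LRU plus
+                * free pages can't even fill the high watermarks.
+                */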
148701 +               /*
148702 +                * Check the amount of clean file pages to protect them from
148703 +                * being reclaimed when it falls below the specified thresholds.
148704 +                */
148705 +               if (sysctl_clean_low_kbytes || sysctl_clean_min_kbytes) {
148706 +                       unsigned long reclaimable_file, dirty, clean;
148708 +                       reclaimable_file =
148709 +                               node_page_state(pgdat, NR_ACTIVE_FILE) +
148710 +                               node_page_state(pgdat, NR_INACTIVE_FILE) +
148711 +                               node_page_state(pgdat, NR_ISOLATED_FILE);
148712 +                       dirty = node_page_state(pgdat, NR_FILE_DIRTY);
148713 +                       /*
148714 +                        * The node_page_state() sum can go out of sync
148715 +                        * since the values are not all read at once.
148716 +                        */
148717 +                       if (likely(reclaimable_file > dirty))
148718 +                               clean = (reclaimable_file - dirty) << (PAGE_SHIFT - 10);
148719 +                       else
148720 +                               clean = 0;
148722 +                       sc->clean_below_low = clean < sysctl_clean_low_kbytes;
148723 +                       sc->clean_below_min = clean < sysctl_clean_min_kbytes;
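+                       /*
+                        * Illustrative numbers, assuming 4 KiB pages
+                        * (PAGE_SHIFT == 12): 250000 reclaimable file pages
+                        * of which 50000 are dirty yield
+                        * clean = 200000 << 2 = 800000 KiB, which is then
+                        * compared against the two sysctl thresholds above.
+                        */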
148724 +               } else {
148725 +                       sc->clean_below_low = false;
148726 +                       sc->clean_below_min = false;
148727 +               }
148728 +       }
148729 +}
148731  /*
148732   * Determine how aggressively the anon and file LRU lists should be
148733   * scanned.  The relative value of each set of LRU lists is determined
148734 @@ -2281,6 +2445,16 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
148735                 goto out;
148736         }
148738 +       /*
148739 +        * Force-scan anon if the amount of clean file pages is under
148740 +        * vm.clean_min_kbytes or vm.clean_low_kbytes (unless swappiness
148741 +        * disables swapping entirely).
148742 +        */
148743 +       if ((sc->clean_below_low || sc->clean_below_min) && swappiness) {
148744 +               scan_balance = SCAN_ANON;
148745 +               goto out;
148746 +       }
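+       /*
+        * E.g. with a hypothetical vm.clean_low_kbytes of 262144 (256 MiB),
+        * reclaim is steered towards anon pages as soon as clean file memory
+        * drops below 256 MiB, provided swappiness allows swapping at all.
+        */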
148748         /*
148749          * If there is enough inactive page cache, we do not reclaim
148750          * anything from the anonymous working right now.
148751 @@ -2417,10 +2591,30 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
148752                         BUG();
148753                 }
148755 +               /*
148756 +                * Don't reclaim clean file pages when their amount is below
148757 +                * vm.clean_min_kbytes.
148758 +                */
148759 +               if (file && sc->clean_below_min)
148760 +                       scan = 0;
148762                 nr[lru] = scan;
148763         }
148764  }
148766 +#ifdef CONFIG_LRU_GEN
148767 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc);
148768 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc);
148769 +#else
148770 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc)
148771 +{
148772 +}
148774 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc)
148775 +{
148776 +}
148777 +#endif
148779  static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
148780  {
148781         unsigned long nr[NR_LRU_LISTS];
148782 @@ -2432,6 +2626,11 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
148783         struct blk_plug plug;
148784         bool scan_adjusted;
148786 +       if (lru_gen_enabled()) {
148787 +               shrink_lru_gens(lruvec, sc);
148788 +               return;
148789 +       }
148791         get_scan_count(lruvec, sc, nr);
148793         /* Record the original scan target for proportional adjustments later */
148794 @@ -2669,7 +2868,6 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
148795         unsigned long nr_reclaimed, nr_scanned;
148796         struct lruvec *target_lruvec;
148797         bool reclaimable = false;
148798 -       unsigned long file;
148800         target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
148802 @@ -2679,93 +2877,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
148803         nr_reclaimed = sc->nr_reclaimed;
148804         nr_scanned = sc->nr_scanned;
148806 -       /*
148807 -        * Determine the scan balance between anon and file LRUs.
148808 -        */
148809 -       spin_lock_irq(&target_lruvec->lru_lock);
148810 -       sc->anon_cost = target_lruvec->anon_cost;
148811 -       sc->file_cost = target_lruvec->file_cost;
148812 -       spin_unlock_irq(&target_lruvec->lru_lock);
148814 -       /*
148815 -        * Target desirable inactive:active list ratios for the anon
148816 -        * and file LRU lists.
148817 -        */
148818 -       if (!sc->force_deactivate) {
148819 -               unsigned long refaults;
148821 -               refaults = lruvec_page_state(target_lruvec,
148822 -                               WORKINGSET_ACTIVATE_ANON);
148823 -               if (refaults != target_lruvec->refaults[0] ||
148824 -                       inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
148825 -                       sc->may_deactivate |= DEACTIVATE_ANON;
148826 -               else
148827 -                       sc->may_deactivate &= ~DEACTIVATE_ANON;
148829 -               /*
148830 -                * When refaults are being observed, it means a new
148831 -                * workingset is being established. Deactivate to get
148832 -                * rid of any stale active pages quickly.
148833 -                */
148834 -               refaults = lruvec_page_state(target_lruvec,
148835 -                               WORKINGSET_ACTIVATE_FILE);
148836 -               if (refaults != target_lruvec->refaults[1] ||
148837 -                   inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
148838 -                       sc->may_deactivate |= DEACTIVATE_FILE;
148839 -               else
148840 -                       sc->may_deactivate &= ~DEACTIVATE_FILE;
148841 -       } else
148842 -               sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
148844 -       /*
148845 -        * If we have plenty of inactive file pages that aren't
148846 -        * thrashing, try to reclaim those first before touching
148847 -        * anonymous pages.
148848 -        */
148849 -       file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
148850 -       if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
148851 -               sc->cache_trim_mode = 1;
148852 -       else
148853 -               sc->cache_trim_mode = 0;
148855 -       /*
148856 -        * Prevent the reclaimer from falling into the cache trap: as
148857 -        * cache pages start out inactive, every cache fault will tip
148858 -        * the scan balance towards the file LRU.  And as the file LRU
148859 -        * shrinks, so does the window for rotation from references.
148860 -        * This means we have a runaway feedback loop where a tiny
148861 -        * thrashing file LRU becomes infinitely more attractive than
148862 -        * anon pages.  Try to detect this based on file LRU size.
148863 -        */
148864 -       if (!cgroup_reclaim(sc)) {
148865 -               unsigned long total_high_wmark = 0;
148866 -               unsigned long free, anon;
148867 -               int z;
148869 -               free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
148870 -               file = node_page_state(pgdat, NR_ACTIVE_FILE) +
148871 -                          node_page_state(pgdat, NR_INACTIVE_FILE);
148873 -               for (z = 0; z < MAX_NR_ZONES; z++) {
148874 -                       struct zone *zone = &pgdat->node_zones[z];
148875 -                       if (!managed_zone(zone))
148876 -                               continue;
148878 -                       total_high_wmark += high_wmark_pages(zone);
148879 -               }
148881 -               /*
148882 -                * Consider anon: if that's low too, this isn't a
148883 -                * runaway file reclaim problem, but rather just
148884 -                * extreme pressure. Reclaim as per usual then.
148885 -                */
148886 -               anon = node_page_state(pgdat, NR_INACTIVE_ANON);
148888 -               sc->file_is_tiny =
148889 -                       file + free <= total_high_wmark &&
148890 -                       !(sc->may_deactivate & DEACTIVATE_ANON) &&
148891 -                       anon >> sc->priority;
148892 -       }
148893 +       prepare_scan_count(pgdat, sc);
148895         shrink_node_memcgs(pgdat, sc);
148897 @@ -2985,6 +3097,10 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
148898         struct lruvec *target_lruvec;
148899         unsigned long refaults;
148901 +       /* the multigenerational lru doesn't use these counters */
148902 +       if (lru_gen_enabled())
148903 +               return;
148905         target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
148906         refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
148907         target_lruvec->refaults[0] = refaults;
148908 @@ -3359,6 +3475,11 @@ static void age_active_anon(struct pglist_data *pgdat,
148909         struct mem_cgroup *memcg;
148910         struct lruvec *lruvec;
148912 +       if (lru_gen_enabled()) {
148913 +               age_lru_gens(pgdat, sc);
148914 +               return;
148915 +       }
148917         if (!total_swap_pages)
148918                 return;
148920 @@ -4304,3 +4425,2365 @@ void check_move_unevictable_pages(struct pagevec *pvec)
148921         }
148922  }
148923  EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
148925 +#ifdef CONFIG_LRU_GEN
148927 +/*
148928 + * After pages are faulted in, the aging must scan them twice before the
148929 + * eviction can evict them. The first scan clears the accessed bit set
148930 + * during the initial faults, and the second scan makes sure they haven't
148931 + * been used since the first one.
148932 + */
148933 +#define MIN_NR_GENS    2
148935 +#define MAX_BATCH_SIZE 8192
148937 +/******************************************************************************
148938 + *                          shorthand helpers
148939 + ******************************************************************************/
148941 +#define DEFINE_MAX_SEQ()                                               \
148942 +       unsigned long max_seq = READ_ONCE(lruvec->evictable.max_seq)
148944 +#define DEFINE_MIN_SEQ()                                               \
148945 +       unsigned long min_seq[ANON_AND_FILE] = {                        \
148946 +               READ_ONCE(lruvec->evictable.min_seq[0]),                \
148947 +               READ_ONCE(lruvec->evictable.min_seq[1]),                \
148948 +       }
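+/*
+ * These expand to local snapshots taken with READ_ONCE(), e.g.:
+ *
+ *     DEFINE_MAX_SEQ();
+ *     DEFINE_MIN_SEQ();
+ *
+ * after which max_seq and min_seq[] can be used like ordinary locals.
+ */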
148950 +#define for_each_type_zone(file, zone)                                 \
148951 +       for ((file) = 0; (file) < ANON_AND_FILE; (file)++)              \
148952 +               for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
148954 +#define for_each_gen_type_zone(gen, file, zone)                                \
148955 +       for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++)                   \
148956 +               for ((file) = 0; (file) < ANON_AND_FILE; (file)++)      \
148957 +                       for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
148959 +static int get_nr_gens(struct lruvec *lruvec, int file)
148960 +{
148961 +       return lruvec->evictable.max_seq - lruvec->evictable.min_seq[file] + 1;
148962 +}
148964 +static int min_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
148965 +{
148966 +       return max_seq - max(min_seq[!swappiness], min_seq[1]) + 1;
148967 +}
148969 +static int max_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
148970 +{
148971 +       return max_seq - min(min_seq[!swappiness], min_seq[1]) + 1;
148972 +}
148974 +static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
148975 +{
148976 +       lockdep_assert_held(&lruvec->lru_lock);
148978 +       return get_nr_gens(lruvec, 0) >= MIN_NR_GENS &&
148979 +              get_nr_gens(lruvec, 0) <= MAX_NR_GENS &&
148980 +              get_nr_gens(lruvec, 1) >= MIN_NR_GENS &&
148981 +              get_nr_gens(lruvec, 1) <= MAX_NR_GENS;
148982 +}
148984 +/******************************************************************************
148985 + *                          refault feedback loop
148986 + ******************************************************************************/
148988 +/*
148989 + * A feedback loop modeled after the PID controller. Currently supports the
148990 + * proportional (P) and the integral (I) terms; the derivative (D) term can be
148991 + * added if necessary. The setpoint (SP) is the desired position; the process
148992 + * variable (PV) is the measured position. The error is the difference between
148993 + * the SP and the PV. A positive error results in a positive control output
148994 + * correction, which, in our case, is to allow eviction.
148996 + * The P term is the current refault rate refaulted/(evicted+activated), which
148997 + * has a weight of 1. The I term is a moving average of the last N refault
148998 + * rates, weighted by the geometric series 1/2, 1/4, ..., 1/(1<<N).
149000 + * Our goal is to make sure upper tiers have similar refault rates as the base
149001 + * tier. That is, we try to be fair to all tiers by maintaining similar refault
149002 + * rates across them.
149003 + */
149004 +struct controller_pos {
149005 +       unsigned long refaulted;
149006 +       unsigned long total;
149007 +       int gain;
149008 +};
149010 +static void read_controller_pos(struct controller_pos *pos, struct lruvec *lruvec,
149011 +                               int file, int tier, int gain)
149012 +{
149013 +       struct lrugen *lrugen = &lruvec->evictable;
149014 +       int sid = sid_from_seq_or_gen(lrugen->min_seq[file]);
149016 +       pos->refaulted = lrugen->avg_refaulted[file][tier] +
149017 +                        atomic_long_read(&lrugen->refaulted[sid][file][tier]);
149018 +       pos->total = lrugen->avg_total[file][tier] +
149019 +                    atomic_long_read(&lrugen->evicted[sid][file][tier]);
149020 +       if (tier)
149021 +               pos->total += lrugen->activated[sid][file][tier - 1];
149022 +       pos->gain = gain;
149023 +}
149025 +static void reset_controller_pos(struct lruvec *lruvec, int gen, int file)
149026 +{
149027 +       int tier;
149028 +       int sid = sid_from_seq_or_gen(gen);
149029 +       struct lrugen *lrugen = &lruvec->evictable;
149030 +       bool carryover = gen == lru_gen_from_seq(lrugen->min_seq[file]);
149032 +       if (!carryover && NR_STAT_GENS == 1)
149033 +               return;
149035 +       for (tier = 0; tier < MAX_NR_TIERS; tier++) {
149036 +               if (carryover) {
149037 +                       unsigned long sum;
149039 +                       sum = lrugen->avg_refaulted[file][tier] +
149040 +                             atomic_long_read(&lrugen->refaulted[sid][file][tier]);
149041 +                       WRITE_ONCE(lrugen->avg_refaulted[file][tier], sum >> 1);
149043 +                       sum = lrugen->avg_total[file][tier] +
149044 +                             atomic_long_read(&lrugen->evicted[sid][file][tier]);
149045 +                       if (tier)
149046 +                               sum += lrugen->activated[sid][file][tier - 1];
149047 +                       WRITE_ONCE(lrugen->avg_total[file][tier], sum >> 1);
149049 +                       if (NR_STAT_GENS > 1)
149050 +                               continue;
149051 +               }
149053 +               atomic_long_set(&lrugen->refaulted[sid][file][tier], 0);
149054 +               atomic_long_set(&lrugen->evicted[sid][file][tier], 0);
149055 +               if (tier)
149056 +                       WRITE_ONCE(lrugen->activated[sid][file][tier - 1], 0);
149057 +       }
149058 +}
149060 +static bool positive_ctrl_err(struct controller_pos *sp, struct controller_pos *pv)
149061 +{
149062 +       /*
149063 +        * Allow eviction if the PV has a limited number of refaulted pages or a
149064 +        * lower refault rate than the SP.
149065 +        */
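+       /*
+        * A worked example: with sp = {10, 100, 1} (a 10% refault rate) and
+        * pv = {40, 200, 1} (20%), 40 >= SWAP_CLUSTER_MAX (32) and
+        * 40 * 100 * 1 > 10 * 200 * 1, so this returns false and eviction
+        * from the measured tier is withheld.
+        */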
149066 +       return pv->refaulted < SWAP_CLUSTER_MAX ||
149067 +              pv->refaulted * max(sp->total, 1UL) * sp->gain <=
149068 +              sp->refaulted * max(pv->total, 1UL) * pv->gain;
149069 +}
149071 +/******************************************************************************
149072 + *                          mm_struct list
149073 + ******************************************************************************/
149075 +enum {
149076 +       MM_SCHED_ACTIVE,        /* running processes */
149077 +       MM_SCHED_INACTIVE,      /* sleeping processes */
149078 +       MM_LOCK_CONTENTION,     /* lock contentions */
149079 +       MM_VMA_INTERVAL,        /* VMAs within the range of current table */
149080 +       MM_LEAF_OTHER_NODE,     /* entries not from node under reclaim */
149081 +       MM_LEAF_OTHER_MEMCG,    /* entries not from memcg under reclaim */
149082 +       MM_LEAF_OLD,            /* old entries */
149083 +       MM_LEAF_YOUNG,          /* young entries */
149084 +       MM_LEAF_DIRTY,          /* dirty entries */
149085 +       MM_LEAF_HOLE,           /* non-present entries */
149086 +       MM_NONLEAF_OLD,         /* old non-leaf pmd entries */
149087 +       MM_NONLEAF_YOUNG,       /* young non-leaf pmd entries */
149088 +       NR_MM_STATS
149089 +};
149091 +/* mnemonic codes for the stats above */
149092 +#define MM_STAT_CODES          "aicvnmoydhlu"
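+/*
+ * One letter per stat, in enum order: a(ctive), i(nactive), c(ontention),
+ * v(ma), n(ode), m(emcg), o(ld), y(oung), d(irty), h(ole), then "l" and "u"
+ * for the old and young non-leaf PMD entries.
+ */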
149094 +struct lru_gen_mm_list {
149095 +       /* the head of a global or per-memcg mm_struct list */
149096 +       struct list_head head;
149097 +       /* protects the list */
149098 +       spinlock_t lock;
149099 +       struct {
149100 +               /* set to max_seq after each round of walk */
149101 +               unsigned long cur_seq;
149102 +               /* the next mm on the list to walk */
149103 +               struct list_head *iter;
149104 +               /* to wait for the last worker to finish */
149105 +               struct wait_queue_head wait;
149106 +               /* the number of concurrent workers */
149107 +               int nr_workers;
149108 +               /* stats for debugging */
149109 +               unsigned long stats[NR_STAT_GENS][NR_MM_STATS];
149110 +       } nodes[0];
149111 +};
149113 +static struct lru_gen_mm_list *global_mm_list;
149115 +static struct lru_gen_mm_list *alloc_mm_list(void)
149116 +{
149117 +       int nid;
149118 +       struct lru_gen_mm_list *mm_list;
149120 +       mm_list = kzalloc(struct_size(mm_list, nodes, nr_node_ids), GFP_KERNEL);
149121 +       if (!mm_list)
149122 +               return NULL;
149124 +       INIT_LIST_HEAD(&mm_list->head);
149125 +       spin_lock_init(&mm_list->lock);
149127 +       for_each_node(nid) {
149128 +               mm_list->nodes[nid].cur_seq = MIN_NR_GENS;
149129 +               mm_list->nodes[nid].iter = &mm_list->head;
149130 +               init_waitqueue_head(&mm_list->nodes[nid].wait);
149131 +       }
149133 +       return mm_list;
149134 +}
149136 +static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
149137 +{
149138 +#ifdef CONFIG_MEMCG
149139 +       if (!mem_cgroup_disabled())
149140 +               return memcg ? memcg->mm_list : root_mem_cgroup->mm_list;
149141 +#endif
149142 +       VM_BUG_ON(memcg);
149144 +       return global_mm_list;
149145 +}
149147 +void lru_gen_init_mm(struct mm_struct *mm)
149148 +{
149149 +       int file;
149151 +       INIT_LIST_HEAD(&mm->lrugen.list);
149152 +#ifdef CONFIG_MEMCG
149153 +       mm->lrugen.memcg = NULL;
149154 +#endif
149155 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
149156 +       atomic_set(&mm->lrugen.nr_cpus, 0);
149157 +#endif
149158 +       for (file = 0; file < ANON_AND_FILE; file++)
149159 +               nodes_clear(mm->lrugen.nodes[file]);
149160 +}
149162 +void lru_gen_add_mm(struct mm_struct *mm)
149163 +{
149164 +       struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
149165 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
149167 +       VM_BUG_ON_MM(!list_empty(&mm->lrugen.list), mm);
149168 +#ifdef CONFIG_MEMCG
149169 +       VM_BUG_ON_MM(mm->lrugen.memcg, mm);
149170 +       WRITE_ONCE(mm->lrugen.memcg, memcg);
149171 +#endif
149172 +       spin_lock(&mm_list->lock);
149173 +       list_add_tail(&mm->lrugen.list, &mm_list->head);
149174 +       spin_unlock(&mm_list->lock);
149175 +}
149177 +void lru_gen_del_mm(struct mm_struct *mm)
149178 +{
149179 +       int nid;
149180 +#ifdef CONFIG_MEMCG
149181 +       struct lru_gen_mm_list *mm_list = get_mm_list(mm->lrugen.memcg);
149182 +#else
149183 +       struct lru_gen_mm_list *mm_list = get_mm_list(NULL);
149184 +#endif
149186 +       spin_lock(&mm_list->lock);
149188 +       for_each_node(nid) {
149189 +               if (mm_list->nodes[nid].iter != &mm->lrugen.list)
149190 +                       continue;
149192 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
149193 +               if (mm_list->nodes[nid].iter == &mm_list->head)
149194 +                       WRITE_ONCE(mm_list->nodes[nid].cur_seq,
149195 +                                  mm_list->nodes[nid].cur_seq + 1);
149196 +       }
149198 +       list_del_init(&mm->lrugen.list);
149200 +       spin_unlock(&mm_list->lock);
149202 +#ifdef CONFIG_MEMCG
149203 +       mem_cgroup_put(mm->lrugen.memcg);
149204 +       WRITE_ONCE(mm->lrugen.memcg, NULL);
149205 +#endif
149206 +}
149208 +#ifdef CONFIG_MEMCG
149209 +int lru_gen_alloc_mm_list(struct mem_cgroup *memcg)
149210 +{
149211 +       if (mem_cgroup_disabled())
149212 +               return 0;
149214 +       memcg->mm_list = alloc_mm_list();
149216 +       return memcg->mm_list ? 0 : -ENOMEM;
149217 +}
149219 +void lru_gen_free_mm_list(struct mem_cgroup *memcg)
149220 +{
149221 +       kfree(memcg->mm_list);
149222 +       memcg->mm_list = NULL;
149223 +}
149225 +void lru_gen_migrate_mm(struct mm_struct *mm)
149226 +{
149227 +       struct mem_cgroup *memcg;
149229 +       lockdep_assert_held(&mm->owner->alloc_lock);
149231 +       if (mem_cgroup_disabled())
149232 +               return;
149234 +       rcu_read_lock();
149235 +       memcg = mem_cgroup_from_task(mm->owner);
149236 +       rcu_read_unlock();
149237 +       if (memcg == mm->lrugen.memcg)
149238 +               return;
149240 +       VM_BUG_ON_MM(!mm->lrugen.memcg, mm);
149241 +       VM_BUG_ON_MM(list_empty(&mm->lrugen.list), mm);
149243 +       lru_gen_del_mm(mm);
149244 +       lru_gen_add_mm(mm);
149245 +}
149247 +static bool mm_has_migrated(struct mm_struct *mm, struct mem_cgroup *memcg)
149248 +{
149249 +       return READ_ONCE(mm->lrugen.memcg) != memcg;
149250 +}
149251 +#else
149252 +static bool mm_has_migrated(struct mm_struct *mm, struct mem_cgroup *memcg)
149253 +{
149254 +       return false;
149255 +}
149256 +#endif
149258 +struct mm_walk_args {
149259 +       struct mem_cgroup *memcg;
149260 +       unsigned long max_seq;
149261 +       unsigned long next_addr;
149262 +       unsigned long start_pfn;
149263 +       unsigned long end_pfn;
149264 +       int node_id;
149265 +       int batch_size;
149266 +       int mm_stats[NR_MM_STATS];
149267 +       int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
149268 +       bool should_walk[ANON_AND_FILE];
149269 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
149270 +       unsigned long bitmap[BITS_TO_LONGS(PTRS_PER_PMD)];
149271 +#endif
149272 +};
149274 +static void reset_mm_stats(struct lru_gen_mm_list *mm_list, bool last,
149275 +                          struct mm_walk_args *args)
149276 +{
149277 +       int i;
149278 +       int nid = args->node_id;
149279 +       int sid = sid_from_seq_or_gen(args->max_seq);
149281 +       lockdep_assert_held(&mm_list->lock);
149283 +       for (i = 0; i < NR_MM_STATS; i++) {
149284 +               WRITE_ONCE(mm_list->nodes[nid].stats[sid][i],
149285 +                          mm_list->nodes[nid].stats[sid][i] + args->mm_stats[i]);
149286 +               args->mm_stats[i] = 0;
149287 +       }
149289 +       if (!last || NR_STAT_GENS == 1)
149290 +               return;
149292 +       sid = sid_from_seq_or_gen(args->max_seq + 1);
149293 +       for (i = 0; i < NR_MM_STATS; i++)
149294 +               WRITE_ONCE(mm_list->nodes[nid].stats[sid][i], 0);
149295 +}
149297 +static bool should_skip_mm(struct mm_struct *mm, int nid, int swappiness)
149298 +{
149299 +       int file;
149300 +       unsigned long size = 0;
149302 +       if (mm_is_oom_victim(mm))
149303 +               return true;
149305 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
149306 +               if (lru_gen_mm_is_active(mm) || node_isset(nid, mm->lrugen.nodes[file]))
149307 +                       size += file ? get_mm_counter(mm, MM_FILEPAGES) :
149308 +                                      get_mm_counter(mm, MM_ANONPAGES) +
149309 +                                      get_mm_counter(mm, MM_SHMEMPAGES);
149310 +       }
149312 +       /* leave the legwork to the rmap if mapped pages are too sparse */
149313 +       if (size < max(SWAP_CLUSTER_MAX, mm_pgtables_bytes(mm) / PAGE_SIZE))
149314 +               return true;
149316 +       return !mmget_not_zero(mm);
149317 +}
149319 +/* To support multiple workers that concurrently walk the mm_struct list. */
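+/*
+ * The iteration pattern, as used in walk_mm_list() below:
+ *
+ *     struct mm_struct *mm = NULL;
+ *
+ *     do {
+ *             last = get_next_mm(args, swappiness, &mm);
+ *             if (mm)
+ *                     walk_mm(args, swappiness, mm);
+ *             cond_resched();
+ *     } while (mm);
+ */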
149320 +static bool get_next_mm(struct mm_walk_args *args, int swappiness, struct mm_struct **iter)
149321 +{
149322 +       bool last = true;
149323 +       struct mm_struct *mm = NULL;
149324 +       int nid = args->node_id;
149325 +       struct lru_gen_mm_list *mm_list = get_mm_list(args->memcg);
149327 +       if (*iter)
149328 +               mmput_async(*iter);
149329 +       else if (args->max_seq <= READ_ONCE(mm_list->nodes[nid].cur_seq))
149330 +               return false;
149332 +       spin_lock(&mm_list->lock);
149334 +       VM_BUG_ON(args->max_seq > mm_list->nodes[nid].cur_seq + 1);
149335 +       VM_BUG_ON(*iter && args->max_seq < mm_list->nodes[nid].cur_seq);
149336 +       VM_BUG_ON(*iter && !mm_list->nodes[nid].nr_workers);
149338 +       if (args->max_seq <= mm_list->nodes[nid].cur_seq) {
149339 +               last = *iter;
149340 +               goto done;
149341 +       }
149343 +       if (mm_list->nodes[nid].iter == &mm_list->head) {
149344 +               VM_BUG_ON(*iter || mm_list->nodes[nid].nr_workers);
149345 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
149346 +       }
149348 +       while (!mm && mm_list->nodes[nid].iter != &mm_list->head) {
149349 +               mm = list_entry(mm_list->nodes[nid].iter, struct mm_struct, lrugen.list);
149350 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
149351 +               if (should_skip_mm(mm, nid, swappiness))
149352 +                       mm = NULL;
149354 +               args->mm_stats[mm ? MM_SCHED_ACTIVE : MM_SCHED_INACTIVE]++;
149355 +       }
149357 +       if (mm_list->nodes[nid].iter == &mm_list->head)
149358 +               WRITE_ONCE(mm_list->nodes[nid].cur_seq,
149359 +                          mm_list->nodes[nid].cur_seq + 1);
149360 +done:
149361 +       if (*iter && !mm)
149362 +               mm_list->nodes[nid].nr_workers--;
149363 +       if (!*iter && mm)
149364 +               mm_list->nodes[nid].nr_workers++;
149366 +       last = last && !mm_list->nodes[nid].nr_workers &&
149367 +              mm_list->nodes[nid].iter == &mm_list->head;
149369 +       reset_mm_stats(mm_list, last, args);
149371 +       spin_unlock(&mm_list->lock);
149373 +       *iter = mm;
149375 +       return last;
149376 +}
149378 +/******************************************************************************
149379 + *                          the aging
149380 + ******************************************************************************/
149382 +static void update_batch_size(struct page *page, int old_gen, int new_gen,
149383 +                             struct mm_walk_args *args)
149384 +{
149385 +       int file = page_is_file_lru(page);
149386 +       int zone = page_zonenum(page);
149387 +       int delta = thp_nr_pages(page);
149389 +       VM_BUG_ON(old_gen >= MAX_NR_GENS);
149390 +       VM_BUG_ON(new_gen >= MAX_NR_GENS);
149392 +       args->batch_size++;
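+       /*
+        * The per-generation deltas accumulated here are folded back into
+        * the lruvec under lru_lock by reset_batch_size().
+        */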
149394 +       args->nr_pages[old_gen][file][zone] -= delta;
149395 +       args->nr_pages[new_gen][file][zone] += delta;
149396 +}
149398 +static void reset_batch_size(struct lruvec *lruvec, struct mm_walk_args *args)
149399 +{
149400 +       int gen, file, zone;
149401 +       struct lrugen *lrugen = &lruvec->evictable;
149403 +       args->batch_size = 0;
149405 +       spin_lock_irq(&lruvec->lru_lock);
149407 +       for_each_gen_type_zone(gen, file, zone) {
149408 +               enum lru_list lru = LRU_FILE * file;
149409 +               int total = args->nr_pages[gen][file][zone];
149411 +               if (!total)
149412 +                       continue;
149414 +               args->nr_pages[gen][file][zone] = 0;
149415 +               WRITE_ONCE(lrugen->sizes[gen][file][zone],
149416 +                          lrugen->sizes[gen][file][zone] + total);
149418 +               if (lru_gen_is_active(lruvec, gen))
149419 +                       lru += LRU_ACTIVE;
149420 +               update_lru_size(lruvec, lru, zone, total);
149421 +       }
149423 +       spin_unlock_irq(&lruvec->lru_lock);
149424 +}
149426 +static int page_update_gen(struct page *page, int new_gen)
149427 +{
149428 +       int old_gen;
149429 +       unsigned long old_flags, new_flags;
149431 +       VM_BUG_ON(new_gen >= MAX_NR_GENS);
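+       /*
+        * page->flags stores gen + 1 in LRU_GEN_MASK, so a raw value of 0
+        * (old_gen == -1 below) means the page isn't on a generation list
+        * yet; in that case only PG_referenced is set.
+        */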
149433 +       do {
149434 +               old_flags = READ_ONCE(page->flags);
149436 +               old_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
149437 +               if (old_gen < 0)
149438 +                       new_flags = old_flags | BIT(PG_referenced);
149439 +               else
149440 +                       new_flags = (old_flags & ~(LRU_GEN_MASK | LRU_USAGE_MASK |
149441 +                                    LRU_TIER_FLAGS)) | ((new_gen + 1UL) << LRU_GEN_PGOFF);
149443 +               if (old_flags == new_flags)
149444 +                       break;
149445 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
149447 +       return old_gen;
149448 +}
149450 +static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *walk)
149451 +{
149452 +       struct address_space *mapping;
149453 +       struct vm_area_struct *vma = walk->vma;
149454 +       struct mm_walk_args *args = walk->private;
149456 +       if (!vma_is_accessible(vma) || is_vm_hugetlb_page(vma) ||
149457 +           (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)))
149458 +               return true;
149460 +       if (vma_is_anonymous(vma))
149461 +               return !args->should_walk[0];
149463 +       if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
149464 +               return true;
149466 +       mapping = vma->vm_file->f_mapping;
149467 +       if (!mapping->a_ops->writepage)
149468 +               return true;
149470 +       if (shmem_mapping(mapping))
149471 +               return !args->should_walk[0] ||
149472 +                      mapping_unevictable(vma->vm_file->f_mapping);
149474 +       return !args->should_walk[1] || mapping_unevictable(mapping);
149475 +}
149477 +/*
149478 + * Some userspace memory allocators create many single-page VMAs. So instead
149479 + * of returning to the PGD table for each such VMA, we finish at least an
149480 + * entire PMD table and therefore avoid many zigzags. This optimizes page
149481 + * table walks for workloads that have large numbers of tiny VMAs.
149483 + * We scan PMD tables in two passes. The first pass reaches into the PTE
149484 + * tables and doesn't take the PMD lock. The second pass clears the accessed
149485 + * bit on PMD entries and needs to take the PMD lock. The second pass is only
149486 + * done on the PMD entries in which the first pass found the accessed bit set,
149487 + * and they must be:
149488 + *   1) leaf entries mapping huge pages from the node under reclaim
149489 + *   2) non-leaf entries whose leaf entries only map pages from the node under
149490 + *   reclaim, when CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG=y.
149491 + */
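+/*
+ * For instance, on x86_64 with 4 KiB base pages, each PTE table maps
+ * 512 * 4 KiB = 2 MiB of virtual address space, so hundreds of single-page
+ * VMAs in such a range can be aged without returning to the upper levels
+ * of the page tables.
+ */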
149492 +static bool get_next_interval(struct mm_walk *walk, unsigned long mask, unsigned long size,
149493 +                             unsigned long *start, unsigned long *end)
149494 +{
149495 +       unsigned long next = round_up(*end, size);
149496 +       struct mm_walk_args *args = walk->private;
149498 +       VM_BUG_ON(mask & size);
149499 +       VM_BUG_ON(*start != *end);
149500 +       VM_BUG_ON(!(*end & ~mask));
149501 +       VM_BUG_ON((*end & mask) != (next & mask));
149503 +       while (walk->vma) {
149504 +               if (next >= walk->vma->vm_end) {
149505 +                       walk->vma = walk->vma->vm_next;
149506 +                       continue;
149507 +               }
149509 +               if ((next & mask) != (walk->vma->vm_start & mask))
149510 +                       return false;
149512 +               if (should_skip_vma(walk->vma->vm_start, walk->vma->vm_end, walk)) {
149513 +                       walk->vma = walk->vma->vm_next;
149514 +                       continue;
149515 +               }
149517 +               args->mm_stats[MM_VMA_INTERVAL]++;
149519 +               *start = max(next, walk->vma->vm_start);
149520 +               next = (next | ~mask) + 1;
149521 +               /* rounded-up boundaries can wrap to 0 */
149522 +               *end = next && next < walk->vma->vm_end ? next : walk->vma->vm_end;
149524 +               return true;
149525 +       }
149527 +       return false;
149528 +}
149530 +static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
149531 +                          struct mm_walk *walk)
149532 +{
149533 +       int i;
149534 +       pte_t *pte;
149535 +       spinlock_t *ptl;
149536 +       int remote = 0;
149537 +       struct mm_walk_args *args = walk->private;
149538 +       int old_gen, new_gen = lru_gen_from_seq(args->max_seq);
149540 +       VM_BUG_ON(pmd_leaf(*pmd));
149542 +       pte = pte_offset_map_lock(walk->mm, pmd, start & PMD_MASK, &ptl);
149543 +       arch_enter_lazy_mmu_mode();
149544 +restart:
149545 +       for (i = pte_index(start); start != end; i++, start += PAGE_SIZE) {
149546 +               struct page *page;
149547 +               unsigned long pfn = pte_pfn(pte[i]);
149549 +               if (!pte_present(pte[i]) || is_zero_pfn(pfn)) {
149550 +                       args->mm_stats[MM_LEAF_HOLE]++;
149551 +                       continue;
149552 +               }
149554 +               if (WARN_ON_ONCE(pte_devmap(pte[i]) || pte_special(pte[i])))
149555 +                       continue;
149557 +               if (!pte_young(pte[i])) {
149558 +                       args->mm_stats[MM_LEAF_OLD]++;
149559 +                       continue;
149560 +               }
149562 +               if (pfn < args->start_pfn || pfn >= args->end_pfn) {
149563 +                       remote++;
149564 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
149565 +                       continue;
149566 +               }
149568 +               page = compound_head(pfn_to_page(pfn));
149569 +               if (page_to_nid(page) != args->node_id) {
149570 +                       remote++;
149571 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
149572 +                       continue;
149573 +               }
149575 +               if (!ptep_test_and_clear_young(walk->vma, start, pte + i))
149576 +                       continue;
149578 +               if (pte_dirty(pte[i]) && !PageDirty(page) &&
149579 +                   !(PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page))) {
149580 +                       set_page_dirty(page);
149581 +                       args->mm_stats[MM_LEAF_DIRTY]++;
149582 +               }
149584 +               if (page_memcg_rcu(page) != args->memcg) {
149585 +                       args->mm_stats[MM_LEAF_OTHER_MEMCG]++;
149586 +                       continue;
149587 +               }
149589 +               old_gen = page_update_gen(page, new_gen);
149590 +               if (old_gen >= 0 && old_gen != new_gen)
149591 +                       update_batch_size(page, old_gen, new_gen, args);
149592 +               args->mm_stats[MM_LEAF_YOUNG]++;
149593 +       }
149595 +       if (i < PTRS_PER_PTE && get_next_interval(walk, PMD_MASK, PAGE_SIZE, &start, &end))
149596 +               goto restart;
149598 +       arch_leave_lazy_mmu_mode();
149599 +       pte_unmap_unlock(pte, ptl);
149601 +       return !remote;
149602 +}
149604 +static bool walk_pmd_range_unlocked(pud_t *pud, unsigned long start, unsigned long end,
149605 +                                   struct mm_walk *walk)
149606 +{
149607 +       int i;
149608 +       pmd_t *pmd;
149609 +       unsigned long next;
149610 +       int young = 0;
149611 +       struct mm_walk_args *args = walk->private;
149613 +       VM_BUG_ON(pud_leaf(*pud));
149615 +       pmd = pmd_offset(pud, start & PUD_MASK);
149616 +restart:
149617 +       for (i = pmd_index(start); start != end; i++, start = next) {
149618 +               pmd_t val = pmd_read_atomic(pmd + i);
149620 +               next = pmd_addr_end(start, end);
149622 +               barrier();
149623 +               if (!pmd_present(val) || is_huge_zero_pmd(val)) {
149624 +                       args->mm_stats[MM_LEAF_HOLE]++;
149625 +                       continue;
149626 +               }
149628 +               if (pmd_trans_huge(val)) {
149629 +                       unsigned long pfn = pmd_pfn(val);
149631 +                       if (!pmd_young(val)) {
149632 +                               args->mm_stats[MM_LEAF_OLD]++;
149633 +                               continue;
149634 +                       }
149636 +                       if (pfn < args->start_pfn || pfn >= args->end_pfn) {
149637 +                               args->mm_stats[MM_LEAF_OTHER_NODE]++;
149638 +                               continue;
149639 +                       }
149641 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
149642 +                       young++;
149643 +                       __set_bit(i, args->bitmap);
149644 +#endif
149645 +                       continue;
149646 +               }
149648 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
149649 +               if (!pmd_young(val)) {
149650 +                       args->mm_stats[MM_NONLEAF_OLD]++;
149651 +                       continue;
149652 +               }
149653 +#endif
149655 +               if (walk_pte_range(&val, start, next, walk)) {
149656 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
149657 +                       young++;
149658 +                       __set_bit(i, args->bitmap);
149659 +#endif
149660 +               }
149661 +       }
149663 +       if (i < PTRS_PER_PMD && get_next_interval(walk, PUD_MASK, PMD_SIZE, &start, &end))
149664 +               goto restart;
149666 +       return young;
149667 +}
149669 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
149670 +static void walk_pmd_range_locked(pud_t *pud, unsigned long start, unsigned long end,
149671 +                                 struct mm_walk *walk)
149672 +{
149673 +       int i;
149674 +       pmd_t *pmd;
149675 +       spinlock_t *ptl;
149676 +       struct mm_walk_args *args = walk->private;
149677 +       int old_gen, new_gen = lru_gen_from_seq(args->max_seq);
149679 +       VM_BUG_ON(pud_leaf(*pud));
149681 +       start &= PUD_MASK;
149682 +       pmd = pmd_offset(pud, start);
149683 +       ptl = pmd_lock(walk->mm, pmd);
149684 +       arch_enter_lazy_mmu_mode();
149686 +       for_each_set_bit(i, args->bitmap, PTRS_PER_PMD) {
149687 +               struct page *page;
149688 +               unsigned long pfn = pmd_pfn(pmd[i]);
149689 +               unsigned long addr = start + PMD_SIZE * i;
149691 +               if (!pmd_present(pmd[i]) || is_huge_zero_pmd(pmd[i])) {
149692 +                       args->mm_stats[MM_LEAF_HOLE]++;
149693 +                       continue;
149694 +               }
149696 +               if (WARN_ON_ONCE(pmd_devmap(pmd[i])))
149697 +                       continue;
149699 +               if (!pmd_young(pmd[i])) {
149700 +                       args->mm_stats[MM_LEAF_OLD]++;
149701 +                       continue;
149702 +               }
149704 +               if (!pmd_trans_huge(pmd[i])) {
149705 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
149706 +                       args->mm_stats[MM_NONLEAF_YOUNG]++;
149707 +                       pmdp_test_and_clear_young(walk->vma, addr, pmd + i);
149708 +#endif
149709 +                       continue;
149710 +               }
149712 +               if (pfn < args->start_pfn || pfn >= args->end_pfn) {
149713 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
149714 +                       continue;
149715 +               }
149717 +               page = pfn_to_page(pfn);
149718 +               VM_BUG_ON_PAGE(PageTail(page), page);
149719 +               if (page_to_nid(page) != args->node_id) {
149720 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
149721 +                       continue;
149722 +               }
149724 +               if (!pmdp_test_and_clear_young(walk->vma, addr, pmd + i))
149725 +                       continue;
149727 +               if (pmd_dirty(pmd[i]) && !PageDirty(page) &&
149728 +                   !(PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page))) {
149729 +                       set_page_dirty(page);
149730 +                       args->mm_stats[MM_LEAF_DIRTY]++;
149731 +               }
149733 +               if (page_memcg_rcu(page) != args->memcg) {
149734 +                       args->mm_stats[MM_LEAF_OTHER_MEMCG]++;
149735 +                       continue;
149736 +               }
149738 +               old_gen = page_update_gen(page, new_gen);
149739 +               if (old_gen >= 0 && old_gen != new_gen)
149740 +                       update_batch_size(page, old_gen, new_gen, args);
149741 +               args->mm_stats[MM_LEAF_YOUNG]++;
149742 +       }
149744 +       arch_leave_lazy_mmu_mode();
149745 +       spin_unlock(ptl);
149747 +       memset(args->bitmap, 0, sizeof(args->bitmap));
149748 +}
149749 +#else
149750 +static void walk_pmd_range_locked(pud_t *pud, unsigned long start, unsigned long end,
149751 +                                 struct mm_walk *walk)
149752 +{
149753 +}
149754 +#endif
149756 +static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
149757 +                         struct mm_walk *walk)
149758 +{
149759 +       int i;
149760 +       pud_t *pud;
149761 +       unsigned long next;
149762 +       struct mm_walk_args *args = walk->private;
149764 +       VM_BUG_ON(p4d_leaf(*p4d));
149766 +       pud = pud_offset(p4d, start & P4D_MASK);
149767 +restart:
149768 +       for (i = pud_index(start); start != end; i++, start = next) {
149769 +               pud_t val = READ_ONCE(pud[i]);
149771 +               next = pud_addr_end(start, end);
149773 +               if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
149774 +                       continue;
149776 +               if (walk_pmd_range_unlocked(&val, start, next, walk))
149777 +                       walk_pmd_range_locked(&val, start, next, walk);
149779 +               if (args->batch_size >= MAX_BATCH_SIZE) {
149780 +                       end = (start | ~PUD_MASK) + 1;
149781 +                       goto done;
149782 +               }
149783 +       }
149785 +       if (i < PTRS_PER_PUD && get_next_interval(walk, P4D_MASK, PUD_SIZE, &start, &end))
149786 +               goto restart;
149788 +       end = round_up(end, P4D_SIZE);
149789 +done:
149790 +       /* rounded-up boundaries can wrap to 0 */
149791 +       args->next_addr = end && walk->vma ? max(end, walk->vma->vm_start) : 0;
149793 +       return -EAGAIN;
149794 +}
149796 +static void walk_mm(struct mm_walk_args *args, int swappiness, struct mm_struct *mm)
149797 +{
149798 +       static const struct mm_walk_ops mm_walk_ops = {
149799 +               .test_walk = should_skip_vma,
149800 +               .p4d_entry = walk_pud_range,
149801 +       };
149803 +       int err;
149804 +       int file;
149805 +       int nid = args->node_id;
149806 +       struct mem_cgroup *memcg = args->memcg;
149807 +       struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
149809 +       args->next_addr = FIRST_USER_ADDRESS;
149810 +       for (file = !swappiness; file < ANON_AND_FILE; file++)
149811 +               args->should_walk[file] = lru_gen_mm_is_active(mm) ||
149812 +                                         node_isset(nid, mm->lrugen.nodes[file]);
149814 +       do {
149815 +               unsigned long start = args->next_addr;
149816 +               unsigned long end = mm->highest_vm_end;
149818 +               err = -EBUSY;
149820 +               preempt_disable();
149821 +               rcu_read_lock();
149823 +#ifdef CONFIG_MEMCG
149824 +               if (memcg && atomic_read(&memcg->moving_account)) {
149825 +                       args->mm_stats[MM_LOCK_CONTENTION]++;
149826 +                       goto contended;
149827 +               }
149828 +#endif
149829 +               if (!mmap_read_trylock(mm)) {
149830 +                       args->mm_stats[MM_LOCK_CONTENTION]++;
149831 +                       goto contended;
149832 +               }
149834 +               err = walk_page_range(mm, start, end, &mm_walk_ops, args);
149836 +               mmap_read_unlock(mm);
149838 +               if (args->batch_size)
149839 +                       reset_batch_size(lruvec, args);
149840 +contended:
149841 +               rcu_read_unlock();
149842 +               preempt_enable();
149844 +               cond_resched();
149845 +       } while (err == -EAGAIN && args->next_addr &&
149846 +                !mm_is_oom_victim(mm) && !mm_has_migrated(mm, memcg));
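+       /*
+        * walk_pud_range() returns -EAGAIN with the resume point in
+        * args->next_addr, either after MAX_BATCH_SIZE page updates or when
+        * the walk is complete (next_addr == 0); the loop above drops and
+        * re-takes the locks between rounds.
+        */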
149848 +       if (err == -EBUSY)
149849 +               return;
149851 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
149852 +               if (args->should_walk[file])
149853 +                       node_clear(nid, mm->lrugen.nodes[file]);
149854 +       }
149855 +}
149857 +static void page_inc_gen(struct page *page, struct lruvec *lruvec, bool front)
149858 +{
149859 +       int old_gen, new_gen;
149860 +       unsigned long old_flags, new_flags;
149861 +       int file = page_is_file_lru(page);
149862 +       int zone = page_zonenum(page);
149863 +       struct lrugen *lrugen = &lruvec->evictable;
149865 +       old_gen = lru_gen_from_seq(lrugen->min_seq[file]);
149867 +       do {
149868 +               old_flags = READ_ONCE(page->flags);
149869 +               new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
149870 +               VM_BUG_ON_PAGE(new_gen < 0, page);
149871 +               if (new_gen >= 0 && new_gen != old_gen)
149872 +                       goto sort;
149874 +               new_gen = (old_gen + 1) % MAX_NR_GENS;
149875 +               new_flags = (old_flags & ~(LRU_GEN_MASK | LRU_USAGE_MASK | LRU_TIER_FLAGS)) |
149876 +                           ((new_gen + 1UL) << LRU_GEN_PGOFF);
149877 +               /* mark the page for reclaim if it's pending writeback */
149878 +               if (front)
149879 +                       new_flags |= BIT(PG_reclaim);
149880 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
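+       /*
+        * The cmpxchg loop above copes with concurrent flag updates, e.g.
+        * from page_update_gen(): if page->flags changed underneath us, the
+        * new value is re-read and the update retried.
+        */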
149882 +       lru_gen_update_size(page, lruvec, old_gen, new_gen);
149883 +sort:
149884 +       if (front)
149885 +               list_move(&page->lru, &lrugen->lists[new_gen][file][zone]);
149886 +       else
149887 +               list_move_tail(&page->lru, &lrugen->lists[new_gen][file][zone]);
149888 +}
149890 +static bool try_inc_min_seq(struct lruvec *lruvec, int file)
149891 +{
149892 +       int gen, zone;
149893 +       bool success = false;
149894 +       struct lrugen *lrugen = &lruvec->evictable;
149896 +       VM_BUG_ON(!seq_is_valid(lruvec));
149898 +       while (get_nr_gens(lruvec, file) > MIN_NR_GENS) {
149899 +               gen = lru_gen_from_seq(lrugen->min_seq[file]);
149901 +               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
149902 +                       if (!list_empty(&lrugen->lists[gen][file][zone]))
149903 +                               return success;
149904 +               }
149906 +               reset_controller_pos(lruvec, gen, file);
149907 +               WRITE_ONCE(lrugen->min_seq[file], lrugen->min_seq[file] + 1);
149909 +               success = true;
149910 +       }
149912 +       return success;
149913 +}
149915 +static bool inc_min_seq(struct lruvec *lruvec, int file)
149916 +{
149917 +       int gen, zone;
149918 +       int batch_size = 0;
149919 +       struct lrugen *lrugen = &lruvec->evictable;
149921 +       VM_BUG_ON(!seq_is_valid(lruvec));
149923 +       if (get_nr_gens(lruvec, file) != MAX_NR_GENS)
149924 +               return true;
149926 +       gen = lru_gen_from_seq(lrugen->min_seq[file]);
149928 +       for (zone = 0; zone < MAX_NR_ZONES; zone++) {
149929 +               struct list_head *head = &lrugen->lists[gen][file][zone];
149931 +               while (!list_empty(head)) {
149932 +                       struct page *page = lru_to_page(head);
149934 +                       VM_BUG_ON_PAGE(PageTail(page), page);
149935 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
149936 +                       VM_BUG_ON_PAGE(PageActive(page), page);
149937 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
149938 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
149940 +                       prefetchw_prev_lru_page(page, head, flags);
149942 +                       page_inc_gen(page, lruvec, false);
149944 +                       if (++batch_size == MAX_BATCH_SIZE)
149945 +                               return false;
149946 +               }
149948 +               VM_BUG_ON(lrugen->sizes[gen][file][zone]);
149949 +       }
149951 +       reset_controller_pos(lruvec, gen, file);
149952 +       WRITE_ONCE(lrugen->min_seq[file], lrugen->min_seq[file] + 1);
149954 +       return true;
149955 +}
149957 +static void inc_max_seq(struct lruvec *lruvec)
149958 +{
149959 +       int gen, file, zone;
149960 +       struct lrugen *lrugen = &lruvec->evictable;
149962 +       spin_lock_irq(&lruvec->lru_lock);
149964 +       VM_BUG_ON(!seq_is_valid(lruvec));
149966 +       for (file = 0; file < ANON_AND_FILE; file++) {
149967 +               if (try_inc_min_seq(lruvec, file))
149968 +                       continue;
149970 +               while (!inc_min_seq(lruvec, file)) {
149971 +                       spin_unlock_irq(&lruvec->lru_lock);
149972 +                       cond_resched();
149973 +                       spin_lock_irq(&lruvec->lru_lock);
149974 +               }
149975 +       }
149977 +       gen = lru_gen_from_seq(lrugen->max_seq - 1);
149978 +       for_each_type_zone(file, zone) {
149979 +               enum lru_list lru = LRU_FILE * file;
149980 +               long total = lrugen->sizes[gen][file][zone];
149982 +               if (!total)
149983 +                       continue;
149985 +               WARN_ON_ONCE(total != (int)total);
149987 +               update_lru_size(lruvec, lru, zone, total);
149988 +               update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -total);
149989 +       }
149991 +       gen = lru_gen_from_seq(lrugen->max_seq + 1);
149992 +       for_each_type_zone(file, zone) {
149993 +               VM_BUG_ON(lrugen->sizes[gen][file][zone]);
149994 +               VM_BUG_ON(!list_empty(&lrugen->lists[gen][file][zone]));
149995 +       }
149997 +       for (file = 0; file < ANON_AND_FILE; file++)
149998 +               reset_controller_pos(lruvec, gen, file);
150000 +       WRITE_ONCE(lrugen->timestamps[gen], jiffies);
150001 +       /* make sure all preceding modifications appear first */
150002 +       smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
150004 +       spin_unlock_irq(&lruvec->lru_lock);
150005 +}
150007 +/* Main function used by foreground, background and user-triggered aging. */
150008 +static bool walk_mm_list(struct lruvec *lruvec, unsigned long max_seq,
150009 +                        struct scan_control *sc, int swappiness, struct mm_walk_args *args)
150010 +{
150011 +       bool last;
150012 +       bool alloc = !args;
150013 +       struct mm_struct *mm = NULL;
150014 +       struct lrugen *lrugen = &lruvec->evictable;
150015 +       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
150016 +       int nid = pgdat->node_id;
150017 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
150018 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
150020 +       VM_BUG_ON(max_seq > READ_ONCE(lrugen->max_seq));
150022 +       /*
150023 +        * For each walk of the mm_struct list of a memcg, we decrement the
150024 +        * priority of its lrugen. For each walk of all memcgs in kswapd, we
150025 +        * increment the priority of every lrugen.
150026 +        *
150027 +        * So if this lrugen has a higher priority (smaller value), it means
150028 +        * other concurrent reclaimers have walked its mm list, and we skip it
150029 +        * for this priority in order to balance the pressure on all memcgs.
150030 +        */
+        */
150031 +       if (!mem_cgroup_disabled() && !cgroup_reclaim(sc) &&
150032 +           sc->priority > atomic_read(&lrugen->priority))
150033 +               return false;
150035 +       if (alloc) {
150036 +               args = kvzalloc_node(sizeof(*args), GFP_KERNEL, nid);
150037 +               if (!args)
150038 +                       return false;
150039 +       }
150041 +       args->memcg = memcg;
150042 +       args->max_seq = max_seq;
150043 +       args->start_pfn = pgdat->node_start_pfn;
150044 +       args->end_pfn = pgdat_end_pfn(pgdat);
150045 +       args->node_id = nid;
150047 +       do {
150048 +               last = get_next_mm(args, swappiness, &mm);
150049 +               if (mm)
150050 +                       walk_mm(args, swappiness, mm);
150052 +               cond_resched();
150053 +       } while (mm);
150055 +       if (alloc)
150056 +               kvfree(args);
150058 +       if (!last) {
150059 +               /* foreground aging prefers not to wait unless "necessary" */
150060 +               if (!current_is_kswapd() && sc->priority < DEF_PRIORITY - 2)
150061 +                       wait_event_killable(mm_list->nodes[nid].wait,
150062 +                                           max_seq < READ_ONCE(lrugen->max_seq));
150064 +               return max_seq < READ_ONCE(lrugen->max_seq);
150065 +       }
150067 +       VM_BUG_ON(max_seq != READ_ONCE(lrugen->max_seq));
150069 +       inc_max_seq(lruvec);
150071 +       if (!mem_cgroup_disabled())
150072 +               atomic_add_unless(&lrugen->priority, -1, 0);
150074 +       /* order against inc_max_seq() */
150075 +       smp_mb();
150076 +       /* either we see any waiters or they will see the updated max_seq */
150077 +       if (waitqueue_active(&mm_list->nodes[nid].wait))
150078 +               wake_up_all(&mm_list->nodes[nid].wait);
150080 +       wakeup_flusher_threads(WB_REASON_VMSCAN);
150082 +       return true;
150083 +}
150085 +void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw)
150086 +{
150087 +       pte_t *pte;
150088 +       unsigned long start, end;
150089 +       int old_gen, new_gen;
150090 +       unsigned long flags;
150091 +       struct lruvec *lruvec;
150092 +       struct mem_cgroup *memcg;
150093 +       struct pglist_data *pgdat = page_pgdat(pvmw->page);
150095 +       lockdep_assert_held(pvmw->ptl);
150097 +       start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
150098 +       end = pmd_addr_end(pvmw->address, pvmw->vma->vm_end);
150099 +       pte = pvmw->pte - ((pvmw->address - start) >> PAGE_SHIFT);
150101 +       memcg = lock_page_memcg(pvmw->page);
150102 +       lruvec = lock_page_lruvec_irqsave(pvmw->page, &flags);
150104 +       new_gen = lru_gen_from_seq(lruvec->evictable.max_seq);
150106 +       for (; start != end; pte++, start += PAGE_SIZE) {
150107 +               struct page *page;
150108 +               unsigned long pfn = pte_pfn(*pte);
150110 +               if (!pte_present(*pte) || !pte_young(*pte) || is_zero_pfn(pfn))
150111 +                       continue;
150113 +               if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
150114 +                       continue;
150116 +               page = compound_head(pfn_to_page(pfn));
150117 +               if (page_to_nid(page) != pgdat->node_id)
150118 +                       continue;
150120 +               if (page_memcg_rcu(page) != memcg)
150121 +                       continue;
150122 +               /*
150123 +                * We may be holding many locks, so try to finish as fast as
150124 +                * possible and leave the accessed and dirty bits to the page
150125 +                * table walks.
150126 +                */
150127 +               old_gen = page_update_gen(page, new_gen);
150128 +               if (old_gen >= 0 && old_gen != new_gen)
150129 +                       lru_gen_update_size(page, lruvec, old_gen, new_gen);
150130 +       }
150132 +       unlock_page_lruvec_irqrestore(lruvec, flags);
150133 +       unlock_page_memcg(pvmw->page);
150136 +/******************************************************************************
150137 + *                          the eviction
150138 + ******************************************************************************/
150140 +static bool sort_page(struct page *page, struct lruvec *lruvec, int tier_to_isolate)
150142 +       bool success;
150143 +       int gen = page_lru_gen(page);
150144 +       int file = page_is_file_lru(page);
150145 +       int zone = page_zonenum(page);
150146 +       int tier = lru_tier_from_usage(page_tier_usage(page));
150147 +       struct lrugen *lrugen = &lruvec->evictable;
150149 +       VM_BUG_ON_PAGE(gen == -1, page);
150150 +       VM_BUG_ON_PAGE(tier_to_isolate < 0, page);
150152 +       /* a lazy-free page that has been written into? */
150153 +       if (file && PageDirty(page) && PageAnon(page)) {
150154 +               success = lru_gen_deletion(page, lruvec);
150155 +               VM_BUG_ON_PAGE(!success, page);
150156 +               SetPageSwapBacked(page);
150157 +               add_page_to_lru_list_tail(page, lruvec);
150158 +               return true;
150159 +       }
150161 +       /* page_update_gen() has updated the page? */
150162 +       if (gen != lru_gen_from_seq(lrugen->min_seq[file])) {
150163 +               list_move(&page->lru, &lrugen->lists[gen][file][zone]);
150164 +               return true;
150165 +       }
150167 +       /* activate the page if its tier has a higher refault rate */
150168 +       if (tier_to_isolate < tier) {
150169 +               int sid = sid_from_seq_or_gen(gen);
150171 +               page_inc_gen(page, lruvec, false);
150172 +               WRITE_ONCE(lrugen->activated[sid][file][tier - 1],
150173 +                          lrugen->activated[sid][file][tier - 1] + thp_nr_pages(page));
150174 +               inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);
150175 +               return true;
150176 +       }
150178 +       /*
150179 +        * The page can't be evicted immediately: page_inc_gen() will mark it
150180 +        * for reclaim, and hopefully writeback will write it out soon if it's dirty.
150181 +        */
150182 +       if (PageLocked(page) || PageWriteback(page) || (file && PageDirty(page))) {
150183 +               page_inc_gen(page, lruvec, true);
150184 +               return true;
150185 +       }
150187 +       return false;
150190 +static bool should_skip_page(struct page *page, struct scan_control *sc)
150192 +       if (!sc->may_unmap && page_mapped(page))
150193 +               return true;
150195 +       if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) &&
150196 +           (PageDirty(page) || (PageAnon(page) && !PageSwapCache(page))))
150197 +               return true;
150199 +       if (!get_page_unless_zero(page))
150200 +               return true;
150202 +       if (!TestClearPageLRU(page)) {
150203 +               put_page(page);
150204 +               return true;
150205 +       }
150207 +       return false;
150210 +static void isolate_page(struct page *page, struct lruvec *lruvec)
150212 +       bool success;
150214 +       success = lru_gen_deletion(page, lruvec);
150215 +       VM_BUG_ON_PAGE(!success, page);
150217 +       if (PageActive(page)) {
150218 +               ClearPageActive(page);
150219 +               /* make sure shrink_page_list() rejects this page */
150220 +               SetPageReferenced(page);
150221 +               return;
150222 +       }
150224 +       /* make sure shrink_page_list() doesn't try to write this page */
150225 +       ClearPageReclaim(page);
150226 +       /* make sure shrink_page_list() doesn't reject this page */
150227 +       ClearPageReferenced(page);
150230 +static int scan_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
150231 +                             long *nr_to_scan, int file, int tier,
150232 +                             struct list_head *list)
150234 +       bool success;
150235 +       int gen, zone;
150236 +       enum vm_event_item item;
150237 +       int sorted = 0;
150238 +       int scanned = 0;
150239 +       int isolated = 0;
150240 +       int batch_size = 0;
150241 +       struct lrugen *lrugen = &lruvec->evictable;
150243 +       VM_BUG_ON(!list_empty(list));
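+       /* nothing to evict if this type is already down to the minimum number of generations */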
150245 +       if (get_nr_gens(lruvec, file) == MIN_NR_GENS)
150246 +               return -ENOENT;
150248 +       gen = lru_gen_from_seq(lrugen->min_seq[file]);
150250 +       for (zone = sc->reclaim_idx; zone >= 0; zone--) {
150251 +               LIST_HEAD(moved);
150252 +               int skipped = 0;
150253 +               struct list_head *head = &lrugen->lists[gen][file][zone];
150255 +               while (!list_empty(head)) {
150256 +                       struct page *page = lru_to_page(head);
150257 +                       int delta = thp_nr_pages(page);
150259 +                       VM_BUG_ON_PAGE(PageTail(page), page);
150260 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
150261 +                       VM_BUG_ON_PAGE(PageActive(page), page);
150262 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
150263 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
150265 +                       prefetchw_prev_lru_page(page, head, flags);
150267 +                       scanned += delta;
150269 +                       if (sort_page(page, lruvec, tier))
150270 +                               sorted += delta;
150271 +                       else if (should_skip_page(page, sc)) {
150272 +                               list_move(&page->lru, &moved);
150273 +                               skipped += delta;
150274 +                       } else {
150275 +                               isolate_page(page, lruvec);
150276 +                               list_add(&page->lru, list);
150277 +                               isolated += delta;
150278 +                       }
150280 +                       if (scanned >= *nr_to_scan || isolated >= SWAP_CLUSTER_MAX ||
150281 +                           ++batch_size == MAX_BATCH_SIZE)
150282 +                               break;
150283 +               }
150285 +               list_splice(&moved, head);
150286 +               __count_zid_vm_events(PGSCAN_SKIP, zone, skipped);
150288 +               if (scanned >= *nr_to_scan || isolated >= SWAP_CLUSTER_MAX ||
150289 +                   batch_size == MAX_BATCH_SIZE)
150290 +                       break;
150291 +       }
150293 +       success = try_inc_min_seq(lruvec, file);
150295 +       item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
150296 +       if (!cgroup_reclaim(sc))
150297 +               __count_vm_events(item, scanned);
150298 +       __count_memcg_events(lruvec_memcg(lruvec), item, scanned);
150299 +       __count_vm_events(PGSCAN_ANON + file, scanned);
150301 +       *nr_to_scan -= scanned;
150303 +       if (*nr_to_scan <= 0 || success || isolated)
150304 +               return isolated;
150305 +       /*
150306 +        * We may have trouble finding eligible pages due to reclaim_idx,
150307 +        * may_unmap and may_writepage. The following check makes sure we won't
150308 +        * be stuck if we aren't making enough progress.
150309 +        */
150310 +       return batch_size == MAX_BATCH_SIZE && sorted >= SWAP_CLUSTER_MAX ? 0 : -ENOENT;
150313 +static int get_tier_to_isolate(struct lruvec *lruvec, int file)
150315 +       int tier;
150316 +       struct controller_pos sp, pv;
150318 +       /*
150319 +        * Ideally we don't want to evict upper tiers that have higher refault
150320 +        * rates. However, we need to leave some margin for the fluctuation in
150321 +        * refault rates. So we use a larger gain factor to make sure upper
150322 +        * tiers are indeed more active. We choose 2 because the lowest upper
150323 +        * tier would have twice the refault rate of the base tier, according
150324 +        * to their numbers of accesses.
150325 +        */
150326 +       read_controller_pos(&sp, lruvec, file, 0, 1);
150327 +       for (tier = 1; tier < MAX_NR_TIERS; tier++) {
150328 +               read_controller_pos(&pv, lruvec, file, tier, 2);
150329 +               if (!positive_ctrl_err(&sp, &pv))
150330 +                       break;
150331 +       }
150333 +       return tier - 1;
150336 +static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_to_isolate)
150338 +       int file, tier;
150339 +       struct controller_pos sp, pv;
150340 +       int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness };
150342 +       /*
150343 +        * Compare the refault rates between the base tiers of anon and file to
150344 +        * determine which type to evict. We also need to compare the refault rates
150345 +        * of the upper tiers of the selected type with those of the base tier to
150346 +        * determine which tier of the selected type to evict.
150347 +        */
150348 +       read_controller_pos(&sp, lruvec, 0, 0, gain[0]);
150349 +       read_controller_pos(&pv, lruvec, 1, 0, gain[1]);
150350 +       file = positive_ctrl_err(&sp, &pv);
150352 +       read_controller_pos(&sp, lruvec, !file, 0, gain[!file]);
150353 +       for (tier = 1; tier < MAX_NR_TIERS; tier++) {
150354 +               read_controller_pos(&pv, lruvec, file, tier, gain[file]);
150355 +               if (!positive_ctrl_err(&sp, &pv))
150356 +                       break;
150357 +       }
150359 +       *tier_to_isolate = tier - 1;
150361 +       return file;
150364 +static int isolate_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
150365 +                                int swappiness, long *nr_to_scan, int *type_to_scan,
150366 +                                struct list_head *list)
150368 +       int i;
150369 +       int file;
150370 +       int isolated;
150371 +       int tier = -1;
150372 +       DEFINE_MAX_SEQ();
150373 +       DEFINE_MIN_SEQ();
150375 +       VM_BUG_ON(!seq_is_valid(lruvec));
150377 +       if (max_nr_gens(max_seq, min_seq, swappiness) == MIN_NR_GENS)
150378 +               return 0;
150379 +       /*
150380 +        * Try to select a type based on generations and swappiness, and if that
150381 +        * fails, fall back to get_type_to_scan(). When anon and file are both
150382 +        * available from the same generation, swappiness 200 is interpreted as
150383 +        * anon first and swappiness 1 is interpreted as file first.
150384 +        */
150385 +       file = !swappiness || min_seq[0] > min_seq[1] ||
150386 +              (min_seq[0] == min_seq[1] && swappiness != 200 &&
150387 +               (swappiness == 1 || get_type_to_scan(lruvec, swappiness, &tier)));
150389 +       if (tier == -1)
150390 +               tier = get_tier_to_isolate(lruvec, file);
150392 +       for (i = !swappiness; i < ANON_AND_FILE; i++) {
150393 +               isolated = scan_lru_gen_pages(lruvec, sc, nr_to_scan, file, tier, list);
150394 +               if (isolated >= 0)
150395 +                       break;
150397 +               file = !file;
150398 +               tier = get_tier_to_isolate(lruvec, file);
150399 +       }
150401 +       if (isolated < 0)
150402 +               isolated = *nr_to_scan = 0;
150404 +       *type_to_scan = file;
150406 +       return isolated;
150409 +/* Main function used by foreground, background and user-triggered eviction. */
150410 +static bool evict_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
150411 +                               int swappiness, long *nr_to_scan)
150413 +       int file;
150414 +       int isolated;
150415 +       int reclaimed;
150416 +       LIST_HEAD(list);
150417 +       struct page *page;
150418 +       enum vm_event_item item;
150419 +       struct reclaim_stat stat;
150420 +       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
150422 +       spin_lock_irq(&lruvec->lru_lock);
150424 +       isolated = isolate_lru_gen_pages(lruvec, sc, swappiness, nr_to_scan, &file, &list);
150425 +       VM_BUG_ON(list_empty(&list) == !!isolated);
150427 +       if (isolated)
150428 +               __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, isolated);
150430 +       spin_unlock_irq(&lruvec->lru_lock);
150432 +       if (!isolated)
150433 +               goto done;
150435 +       reclaimed = shrink_page_list(&list, pgdat, sc, &stat, false);
150436 +       /*
150437 +        * We need to prevent rejected pages from being added back to the same
150438 +        * lists they were isolated from. Otherwise we risk looping on them
150439 +        * forever. We use PageActive() or (!PageReferenced() && PageWorkingset())
150440 +        * to tell lru_gen_addition() not to add them to the oldest generation.
150441 +        */
150442 +       list_for_each_entry(page, &list, lru) {
150443 +               if (PageMlocked(page))
150444 +                       continue;
150446 +               if (PageReferenced(page)) {
150447 +                       SetPageActive(page);
150448 +                       ClearPageReferenced(page);
150449 +               } else {
150450 +                       ClearPageActive(page);
150451 +                       SetPageWorkingset(page);
150452 +               }
150453 +       }
150455 +       spin_lock_irq(&lruvec->lru_lock);
150457 +       move_pages_to_lru(lruvec, &list);
150459 +       __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -isolated);
150461 +       item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
150462 +       if (!cgroup_reclaim(sc))
150463 +               __count_vm_events(item, reclaimed);
150464 +       __count_memcg_events(lruvec_memcg(lruvec), item, reclaimed);
150465 +       __count_vm_events(PGSTEAL_ANON + file, reclaimed);
150467 +       spin_unlock_irq(&lruvec->lru_lock);
150469 +       mem_cgroup_uncharge_list(&list);
150470 +       free_unref_page_list(&list);
150472 +       sc->nr_reclaimed += reclaimed;
150473 +done:
150474 +       return *nr_to_scan > 0 && sc->nr_reclaimed < sc->nr_to_reclaim;
150477 +/******************************************************************************
150478 + *                          page reclaim
150479 + ******************************************************************************/
150481 +static int get_swappiness(struct lruvec *lruvec)
150483 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
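+       /* avoid swapping if fewer than SWAP_CLUSTER_MAX swap pages are left */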
150484 +       int swappiness = mem_cgroup_get_nr_swap_pages(memcg) >= (long)SWAP_CLUSTER_MAX ?
150485 +                        mem_cgroup_swappiness(memcg) : 0;
150487 +       VM_BUG_ON(swappiness > 200U);
150489 +       return swappiness;
150492 +static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
150493 +                                   int swappiness)
150495 +       int gen, file, zone;
150496 +       long nr_to_scan = 0;
150497 +       struct lrugen *lrugen = &lruvec->evictable;
150498 +       DEFINE_MAX_SEQ();
150499 +       DEFINE_MIN_SEQ();
150501 +       lru_add_drain();
150503 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
150504 +               unsigned long seq;
150506 +               for (seq = min_seq[file]; seq <= max_seq; seq++) {
150507 +                       gen = lru_gen_from_seq(seq);
150509 +                       for (zone = 0; zone <= sc->reclaim_idx; zone++)
150510 +                               nr_to_scan += READ_ONCE(lrugen->sizes[gen][file][zone]);
150511 +               }
150512 +       }
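+       /* clamp the total, then scale it by the reclaim priority in whole SWAP_CLUSTER_MAX batches */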
150514 +       nr_to_scan = max(nr_to_scan, 0L);
150515 +       nr_to_scan = round_up(nr_to_scan >> sc->priority, SWAP_CLUSTER_MAX);
150517 +       if (max_nr_gens(max_seq, min_seq, swappiness) > MIN_NR_GENS)
150518 +               return nr_to_scan;
150520 +       /* kswapd uses age_lru_gens() */
150521 +       if (current_is_kswapd())
150522 +               return 0;
150524 +       return walk_mm_list(lruvec, max_seq, sc, swappiness, NULL) ? nr_to_scan : 0;
150527 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc)
150529 +       struct blk_plug plug;
150530 +       unsigned long scanned = 0;
150531 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
150533 +       blk_start_plug(&plug);
150535 +       while (true) {
150536 +               long nr_to_scan;
150537 +               int swappiness = sc->may_swap ? get_swappiness(lruvec) : 0;
150539 +               nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness) - scanned;
150540 +               if (nr_to_scan < (long)SWAP_CLUSTER_MAX)
150541 +                       break;
150543 +               scanned += nr_to_scan;
150545 +               if (!evict_lru_gen_pages(lruvec, sc, swappiness, &nr_to_scan))
150546 +                       break;
150548 +               scanned -= nr_to_scan;
150550 +               if (mem_cgroup_below_min(memcg) ||
150551 +                   (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
150552 +                       break;
150554 +               cond_resched();
150555 +       }
150557 +       blk_finish_plug(&plug);
150560 +/******************************************************************************
150561 + *                          the background aging
150562 + ******************************************************************************/
150564 +static int lru_gen_spread = MIN_NR_GENS;
150566 +static void try_walk_mm_list(struct lruvec *lruvec, struct scan_control *sc)
150568 +       int gen, file, zone;
150569 +       long old_and_young[2] = {};
150570 +       struct mm_walk_args args = {};
150571 +       int spread = READ_ONCE(lru_gen_spread);
150572 +       int swappiness = get_swappiness(lruvec);
150573 +       struct lrugen *lrugen = &lruvec->evictable;
150574 +       DEFINE_MAX_SEQ();
150575 +       DEFINE_MIN_SEQ();
150577 +       lru_add_drain();
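+       /* tally the youngest generation (old_and_young[1]) against all older ones (old_and_young[0]) */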
150579 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
150580 +               unsigned long seq;
150582 +               for (seq = min_seq[file]; seq <= max_seq; seq++) {
150583 +                       gen = lru_gen_from_seq(seq);
150585 +                       for (zone = 0; zone < MAX_NR_ZONES; zone++)
150586 +                               old_and_young[seq == max_seq] +=
150587 +                                       READ_ONCE(lrugen->sizes[gen][file][zone]);
150588 +               }
150589 +       }
150591 +       old_and_young[0] = max(old_and_young[0], 0L);
150592 +       old_and_young[1] = max(old_and_young[1], 0L);
150594 +       if (old_and_young[0] + old_and_young[1] < SWAP_CLUSTER_MAX)
150595 +               return;
150597 +       /* try to spread pages out across spread+1 generations */
150598 +       if (old_and_young[0] >= old_and_young[1] * spread &&
150599 +           min_nr_gens(max_seq, min_seq, swappiness) > max(spread, MIN_NR_GENS))
150600 +               return;
150602 +       walk_mm_list(lruvec, max_seq, sc, swappiness, &args);
150605 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc)
150607 +       struct mem_cgroup *memcg;
150609 +       VM_BUG_ON(!current_is_kswapd());
150611 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
150612 +       do {
150613 +               struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
150614 +               struct lrugen *lrugen = &lruvec->evictable;
150616 +               if (!mem_cgroup_below_min(memcg) &&
150617 +                   (!mem_cgroup_below_low(memcg) || sc->memcg_low_reclaim))
150618 +                       try_walk_mm_list(lruvec, sc);
150620 +               if (!mem_cgroup_disabled())
150621 +                       atomic_add_unless(&lrugen->priority, 1, DEF_PRIORITY);
150623 +               cond_resched();
150624 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
150627 +/******************************************************************************
150628 + *                          state change
150629 + ******************************************************************************/
150631 +#ifdef CONFIG_LRU_GEN_ENABLED
150632 +DEFINE_STATIC_KEY_TRUE(lru_gen_static_key);
150633 +#else
150634 +DEFINE_STATIC_KEY_FALSE(lru_gen_static_key);
150635 +#endif
150637 +static DEFINE_MUTEX(lru_gen_state_mutex);
150638 +static int lru_gen_nr_swapfiles __read_mostly;
150640 +static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
150642 +       int gen, file, zone;
150643 +       enum lru_list lru;
150644 +       struct lrugen *lrugen = &lruvec->evictable;
150646 +       for_each_evictable_lru(lru) {
150647 +               file = is_file_lru(lru);
150649 +               if (lrugen->enabled[file] && !list_empty(&lruvec->lists[lru]))
150650 +                       return false;
150651 +       }
150653 +       for_each_gen_type_zone(gen, file, zone) {
150654 +               if (!lrugen->enabled[file] && !list_empty(&lrugen->lists[gen][file][zone]))
150655 +                       return false;
150657 +               VM_WARN_ONCE(!lrugen->enabled[file] && lrugen->sizes[gen][file][zone],
150658 +                            "lru_gen: possible unbalanced number of pages");
150659 +       }
150661 +       return true;
150664 +static bool fill_lru_gen_lists(struct lruvec *lruvec)
150666 +       enum lru_list lru;
150667 +       int batch_size = 0;
150669 +       for_each_evictable_lru(lru) {
150670 +               int file = is_file_lru(lru);
150671 +               bool active = is_active_lru(lru);
150672 +               struct list_head *head = &lruvec->lists[lru];
150674 +               if (!lruvec->evictable.enabled[file])
150675 +                       continue;
150677 +               while (!list_empty(head)) {
150678 +                       bool success;
150679 +                       struct page *page = lru_to_page(head);
150681 +                       VM_BUG_ON_PAGE(PageTail(page), page);
150682 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
150683 +                       VM_BUG_ON_PAGE(PageActive(page) != active, page);
150684 +                       VM_BUG_ON_PAGE(page_lru_gen(page) != -1, page);
150685 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
150687 +                       prefetchw_prev_lru_page(page, head, flags);
150689 +                       del_page_from_lru_list(page, lruvec);
150690 +                       success = lru_gen_addition(page, lruvec, true);
150691 +                       VM_BUG_ON(!success);
150693 +                       if (++batch_size == MAX_BATCH_SIZE)
150694 +                               return false;
150695 +               }
150696 +       }
150698 +       return true;
150701 +static bool drain_lru_gen_lists(struct lruvec *lruvec)
150703 +       int gen, file, zone;
150704 +       int batch_size = 0;
150706 +       for_each_gen_type_zone(gen, file, zone) {
150707 +               struct list_head *head = &lruvec->evictable.lists[gen][file][zone];
150709 +               if (lruvec->evictable.enabled[file])
150710 +                       continue;
150712 +               while (!list_empty(head)) {
150713 +                       bool success;
150714 +                       struct page *page = lru_to_page(head);
150716 +                       VM_BUG_ON_PAGE(PageTail(page), page);
150717 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
150718 +                       VM_BUG_ON_PAGE(PageActive(page), page);
150719 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
150720 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
150722 +                       prefetchw_prev_lru_page(page, head, flags);
150724 +                       success = lru_gen_deletion(page, lruvec);
150725 +                       VM_BUG_ON(!success);
150726 +                       add_page_to_lru_list(page, lruvec);
150728 +                       if (++batch_size == MAX_BATCH_SIZE)
150729 +                               return false;
150730 +               }
150731 +       }
150733 +       return true;
150737 + * For file page tracking, we enable/disable it according to the main switch.
150738 + * For anon page tracking, we only enable it when the main switch is on and
150739 + * there is at least one swapfile; we disable it when there are no swapfiles
150740 + * regardless of the value of the main switch. Otherwise, we will eventually
150741 + * reach the max size of the sliding window and have to call inc_min_seq(),
150742 + * which incurs unnecessary overhead.
150743 + */
150744 +void lru_gen_set_state(bool enable, bool main, bool swap)
150746 +       struct mem_cgroup *memcg;
150748 +       mem_hotplug_begin();
150749 +       mutex_lock(&lru_gen_state_mutex);
150750 +       cgroup_lock();
150752 +       main = main && enable != lru_gen_enabled();
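+       /* only adding the first swapfile or removing the last one toggles anon tracking */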
150753 +       swap = swap && !(enable ? lru_gen_nr_swapfiles++ : --lru_gen_nr_swapfiles);
150754 +       swap = swap && lru_gen_enabled();
150755 +       if (!main && !swap)
150756 +               goto unlock;
150758 +       if (main) {
150759 +               if (enable)
150760 +                       static_branch_enable(&lru_gen_static_key);
150761 +               else
150762 +                       static_branch_disable(&lru_gen_static_key);
150763 +       }
150765 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
150766 +       do {
150767 +               int nid;
150769 +               for_each_node_state(nid, N_MEMORY) {
150770 +                       struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
150771 +                       struct lrugen *lrugen = &lruvec->evictable;
150773 +                       spin_lock_irq(&lruvec->lru_lock);
150775 +                       VM_BUG_ON(!seq_is_valid(lruvec));
150776 +                       VM_BUG_ON(!state_is_valid(lruvec));
150778 +                       WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
150779 +                       WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
150781 +                       while (!(enable ? fill_lru_gen_lists(lruvec) :
150782 +                                         drain_lru_gen_lists(lruvec))) {
150783 +                               spin_unlock_irq(&lruvec->lru_lock);
150784 +                               cond_resched();
150785 +                               spin_lock_irq(&lruvec->lru_lock);
150786 +                       }
150788 +                       spin_unlock_irq(&lruvec->lru_lock);
150789 +               }
150791 +               cond_resched();
150792 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
150793 +unlock:
150794 +       cgroup_unlock();
150795 +       mutex_unlock(&lru_gen_state_mutex);
150796 +       mem_hotplug_done();
150799 +static int __meminit __maybe_unused lru_gen_online_mem(struct notifier_block *self,
150800 +                                                      unsigned long action, void *arg)
150802 +       struct mem_cgroup *memcg;
150803 +       struct memory_notify *mnb = arg;
150804 +       int nid = mnb->status_change_nid;
150806 +       if (action != MEM_GOING_ONLINE || nid == NUMA_NO_NODE)
150807 +               return NOTIFY_DONE;
150809 +       mutex_lock(&lru_gen_state_mutex);
150810 +       cgroup_lock();
150812 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
150813 +       do {
150814 +               struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
150815 +               struct lrugen *lrugen = &lruvec->evictable;
150817 +               VM_BUG_ON(!seq_is_valid(lruvec));
150818 +               VM_BUG_ON(!state_is_valid(lruvec));
150820 +               WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
150821 +               WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
150822 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
150824 +       cgroup_unlock();
150825 +       mutex_unlock(&lru_gen_state_mutex);
150827 +       return NOTIFY_DONE;
150830 +/******************************************************************************
150831 + *                          sysfs interface
150832 + ******************************************************************************/
150834 +static ssize_t show_lru_gen_spread(struct kobject *kobj, struct kobj_attribute *attr,
150835 +                                  char *buf)
150837 +       return sprintf(buf, "%d\n", READ_ONCE(lru_gen_spread));
150840 +static ssize_t store_lru_gen_spread(struct kobject *kobj, struct kobj_attribute *attr,
150841 +                                   const char *buf, size_t len)
150843 +       int spread;
150845 +       if (kstrtoint(buf, 10, &spread) || spread >= MAX_NR_GENS)
150846 +               return -EINVAL;
150848 +       WRITE_ONCE(lru_gen_spread, spread);
150850 +       return len;
150853 +static struct kobj_attribute lru_gen_spread_attr = __ATTR(
150854 +       spread, 0644, show_lru_gen_spread, store_lru_gen_spread
150857 +static ssize_t show_lru_gen_enabled(struct kobject *kobj, struct kobj_attribute *attr,
150858 +                                   char *buf)
150860 +       return snprintf(buf, PAGE_SIZE, "%ld\n", lru_gen_enabled());
150863 +static ssize_t store_lru_gen_enabled(struct kobject *kobj, struct kobj_attribute *attr,
150864 +                                    const char *buf, size_t len)
150866 +       int enable;
150868 +       if (kstrtoint(buf, 10, &enable))
150869 +               return -EINVAL;
150871 +       lru_gen_set_state(enable, true, false);
150873 +       return len;
150876 +static struct kobj_attribute lru_gen_enabled_attr = __ATTR(
150877 +       enabled, 0644, show_lru_gen_enabled, store_lru_gen_enabled
150880 +static struct attribute *lru_gen_attrs[] = {
150881 +       &lru_gen_spread_attr.attr,
150882 +       &lru_gen_enabled_attr.attr,
150883 +       NULL
150886 +static struct attribute_group lru_gen_attr_group = {
150887 +       .name = "lru_gen",
150888 +       .attrs = lru_gen_attrs,
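+/*
+ * The group is registered on mm_kobj, so these knobs should show up as
+ * /sys/kernel/mm/lru_gen/spread and /sys/kernel/mm/lru_gen/enabled; e.g.,
+ * "echo 1 >/sys/kernel/mm/lru_gen/enabled" presumably enables the feature
+ * at runtime.
+ */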
150891 +/******************************************************************************
150892 + *                          debugfs interface
150893 + ******************************************************************************/
150895 +static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
150897 +       struct mem_cgroup *memcg;
150898 +       loff_t nr_to_skip = *pos;
150900 +       m->private = kzalloc(PATH_MAX, GFP_KERNEL);
150901 +       if (!m->private)
150902 +               return ERR_PTR(-ENOMEM);
150904 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
150905 +       do {
150906 +               int nid;
150908 +               for_each_node_state(nid, N_MEMORY) {
150909 +                       if (!nr_to_skip--)
150910 +                               return mem_cgroup_lruvec(memcg, NODE_DATA(nid));
150911 +               }
150912 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
150914 +       return NULL;
150917 +static void lru_gen_seq_stop(struct seq_file *m, void *v)
150919 +       if (!IS_ERR_OR_NULL(v))
150920 +               mem_cgroup_iter_break(NULL, lruvec_memcg(v));
150922 +       kfree(m->private);
150923 +       m->private = NULL;
150926 +static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
150928 +       int nid = lruvec_pgdat(v)->node_id;
150929 +       struct mem_cgroup *memcg = lruvec_memcg(v);
150931 +       ++*pos;
150933 +       nid = next_memory_node(nid);
150934 +       if (nid == MAX_NUMNODES) {
150935 +               memcg = mem_cgroup_iter(NULL, memcg, NULL);
150936 +               if (!memcg)
150937 +                       return NULL;
150939 +               nid = first_memory_node;
150940 +       }
150942 +       return mem_cgroup_lruvec(memcg, NODE_DATA(nid));
150945 +static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
150946 +                                 unsigned long max_seq, unsigned long *min_seq,
150947 +                                 unsigned long seq)
150949 +       int i;
150950 +       int file, tier;
150951 +       int sid = sid_from_seq_or_gen(seq);
150952 +       struct lrugen *lrugen = &lruvec->evictable;
150953 +       int nid = lruvec_pgdat(lruvec)->node_id;
150954 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
150955 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
150957 +       for (tier = 0; tier < MAX_NR_TIERS; tier++) {
150958 +               seq_printf(m, "            %10d", tier);
150959 +               for (file = 0; file < ANON_AND_FILE; file++) {
150960 +                       unsigned long n[3] = {};
150962 +                       if (seq == max_seq) {
150963 +                               n[0] = READ_ONCE(lrugen->avg_refaulted[file][tier]);
150964 +                               n[1] = READ_ONCE(lrugen->avg_total[file][tier]);
150966 +                               seq_printf(m, " %10luR %10luT %10lu ", n[0], n[1], n[2]);
150967 +                       } else if (seq == min_seq[file] || NR_STAT_GENS > 1) {
150968 +                               n[0] = atomic_long_read(&lrugen->refaulted[sid][file][tier]);
150969 +                               n[1] = atomic_long_read(&lrugen->evicted[sid][file][tier]);
150970 +                               if (tier)
150971 +                                       n[2] = READ_ONCE(lrugen->activated[sid][file][tier - 1]);
150973 +                               seq_printf(m, " %10lur %10lue %10lua", n[0], n[1], n[2]);
150974 +                       } else
150975 +                               seq_puts(m, "          0           0           0 ");
150976 +               }
150977 +               seq_putc(m, '\n');
150978 +       }
150980 +       seq_puts(m, "                      ");
150981 +       for (i = 0; i < NR_MM_STATS; i++) {
150982 +               if (seq == max_seq && NR_STAT_GENS == 1)
150983 +                       seq_printf(m, " %10lu%c", READ_ONCE(mm_list->nodes[nid].stats[sid][i]),
150984 +                                  toupper(MM_STAT_CODES[i]));
150985 +               else if (seq != max_seq && NR_STAT_GENS > 1)
150986 +                       seq_printf(m, " %10lu%c", READ_ONCE(mm_list->nodes[nid].stats[sid][i]),
150987 +                                  MM_STAT_CODES[i]);
150988 +               else
150989 +                       seq_puts(m, "          0 ");
150990 +       }
150991 +       seq_putc(m, '\n');
150994 +static int lru_gen_seq_show(struct seq_file *m, void *v)
150996 +       unsigned long seq;
150997 +       bool full = !debugfs_real_fops(m->file)->write;
150998 +       struct lruvec *lruvec = v;
150999 +       struct lrugen *lrugen = &lruvec->evictable;
151000 +       int nid = lruvec_pgdat(lruvec)->node_id;
151001 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
151002 +       DEFINE_MAX_SEQ();
151003 +       DEFINE_MIN_SEQ();
151005 +       if (nid == first_memory_node) {
151006 +#ifdef CONFIG_MEMCG
151007 +               if (memcg)
151008 +                       cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
151009 +#endif
151010 +               seq_printf(m, "memcg %5hu %s\n",
151011 +                          mem_cgroup_id(memcg), (char *)m->private);
151012 +       }
151014 +       seq_printf(m, " node %5d %10d\n", nid, atomic_read(&lrugen->priority));
151016 +       seq = full ? (max_seq < MAX_NR_GENS ? 0 : max_seq - MAX_NR_GENS + 1) :
151017 +                    min(min_seq[0], min_seq[1]);
151019 +       for (; seq <= max_seq; seq++) {
151020 +               int gen, file, zone;
151021 +               unsigned int msecs;
151023 +               gen = lru_gen_from_seq(seq);
151024 +               msecs = jiffies_to_msecs(jiffies - READ_ONCE(lrugen->timestamps[gen]));
151026 +               seq_printf(m, " %10lu %10u", seq, msecs);
151028 +               for (file = 0; file < ANON_AND_FILE; file++) {
151029 +                       long size = 0;
151031 +                       if (seq < min_seq[file]) {
151032 +                               seq_puts(m, "         -0 ");
151033 +                               continue;
151034 +                       }
151036 +                       for (zone = 0; zone < MAX_NR_ZONES; zone++)
151037 +                               size += READ_ONCE(lrugen->sizes[gen][file][zone]);
151039 +                       seq_printf(m, " %10lu ", max(size, 0L));
151040 +               }
151042 +               seq_putc(m, '\n');
151044 +               if (full)
151045 +                       lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
151046 +       }
151048 +       return 0;
151051 +static const struct seq_operations lru_gen_seq_ops = {
151052 +       .start = lru_gen_seq_start,
151053 +       .stop = lru_gen_seq_stop,
151054 +       .next = lru_gen_seq_next,
151055 +       .show = lru_gen_seq_show,
151058 +static int advance_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness)
151060 +       struct mm_walk_args args = {};
151061 +       struct scan_control sc = {
151062 +               .target_mem_cgroup = lruvec_memcg(lruvec),
151063 +       };
151064 +       DEFINE_MAX_SEQ();
151066 +       if (seq == max_seq)
151067 +               walk_mm_list(lruvec, max_seq, &sc, swappiness, &args);
151069 +       return seq > max_seq ? -EINVAL : 0;
151072 +static int advance_min_seq(struct lruvec *lruvec, unsigned long seq, int swappiness,
151073 +                          unsigned long nr_to_reclaim)
151075 +       struct blk_plug plug;
151076 +       int err = -EINTR;
151077 +       long nr_to_scan = LONG_MAX;
151078 +       struct scan_control sc = {
151079 +               .nr_to_reclaim = nr_to_reclaim,
151080 +               .target_mem_cgroup = lruvec_memcg(lruvec),
151081 +               .may_writepage = 1,
151082 +               .may_unmap = 1,
151083 +               .may_swap = 1,
151084 +               .reclaim_idx = MAX_NR_ZONES - 1,
151085 +               .gfp_mask = GFP_KERNEL,
151086 +       };
151087 +       DEFINE_MAX_SEQ();
151089 +       if (seq >= max_seq - 1)
151090 +               return -EINVAL;
151092 +       blk_start_plug(&plug);
151094 +       while (!signal_pending(current)) {
151095 +               DEFINE_MIN_SEQ();
151097 +               if (seq < min(min_seq[!swappiness], min_seq[swappiness < 200]) ||
151098 +                   !evict_lru_gen_pages(lruvec, &sc, swappiness, &nr_to_scan)) {
151099 +                       err = 0;
151100 +                       break;
151101 +               }
151103 +               cond_resched();
151104 +       }
151106 +       blk_finish_plug(&plug);
151108 +       return err;
151111 +static int advance_seq(char cmd, int memcg_id, int nid, unsigned long seq,
151112 +                      int swappiness, unsigned long nr_to_reclaim)
151114 +       struct lruvec *lruvec;
151115 +       int err = -EINVAL;
151116 +       struct mem_cgroup *memcg = NULL;
151118 +       if (!mem_cgroup_disabled()) {
151119 +               rcu_read_lock();
151120 +               memcg = mem_cgroup_from_id(memcg_id);
151121 +#ifdef CONFIG_MEMCG
151122 +               if (memcg && !css_tryget(&memcg->css))
151123 +                       memcg = NULL;
151124 +#endif
151125 +               rcu_read_unlock();
151127 +               if (!memcg)
151128 +                       goto done;
151129 +       }
151130 +       if (memcg_id != mem_cgroup_id(memcg))
151131 +               goto done;
151133 +       if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
151134 +               goto done;
151136 +       lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
151138 +       if (swappiness == -1)
151139 +               swappiness = get_swappiness(lruvec);
151140 +       else if (swappiness > 200U)
151141 +               goto done;
151143 +       switch (cmd) {
151144 +       case '+':
151145 +               err = advance_max_seq(lruvec, seq, swappiness);
151146 +               break;
151147 +       case '-':
151148 +               err = advance_min_seq(lruvec, seq, swappiness, nr_to_reclaim);
151149 +               break;
151150 +       }
151151 +done:
151152 +       mem_cgroup_put(memcg);
151154 +       return err;
151157 +static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
151158 +                                size_t len, loff_t *pos)
151160 +       void *buf;
151161 +       char *cur, *next;
151162 +       int err = 0;
151164 +       buf = kvmalloc(len + 1, GFP_USER);
151165 +       if (!buf)
151166 +               return -ENOMEM;
151168 +       if (copy_from_user(buf, src, len)) {
151169 +               kvfree(buf);
151170 +               return -EFAULT;
151171 +       }
151173 +       next = buf;
151174 +       next[len] = '\0';
151176 +       while ((cur = strsep(&next, ",;\n"))) {
151177 +               int n;
151178 +               int end;
151179 +               char cmd;
151180 +               int memcg_id;
151181 +               int nid;
151182 +               unsigned long seq;
151183 +               int swappiness = -1;
151184 +               unsigned long nr_to_reclaim = -1;
151186 +               cur = skip_spaces(cur);
151187 +               if (!*cur)
151188 +                       continue;
151190 +               n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
151191 +                          &seq, &end, &swappiness, &end, &nr_to_reclaim, &end);
151192 +               if (n < 4 || cur[end]) {
151193 +                       err = -EINVAL;
151194 +                       break;
151195 +               }
151197 +               err = advance_seq(cmd, memcg_id, nid, seq, swappiness, nr_to_reclaim);
151198 +               if (err)
151199 +                       break;
151200 +       }
151202 +       kvfree(buf);
151204 +       return err ? : len;
151207 +static int lru_gen_seq_open(struct inode *inode, struct file *file)
151209 +       return seq_open(file, &lru_gen_seq_ops);
151212 +static const struct file_operations lru_gen_rw_fops = {
151213 +       .open = lru_gen_seq_open,
151214 +       .read = seq_read,
151215 +       .write = lru_gen_seq_write,
151216 +       .llseek = seq_lseek,
151217 +       .release = seq_release,
151220 +static const struct file_operations lru_gen_ro_fops = {
151221 +       .open = lru_gen_seq_open,
151222 +       .read = seq_read,
151223 +       .llseek = seq_lseek,
151224 +       .release = seq_release,
151227 +/******************************************************************************
151228 + *                          initialization
151229 + ******************************************************************************/
151231 +void lru_gen_init_lruvec(struct lruvec *lruvec)
151233 +       int i;
151234 +       int gen, file, zone;
151235 +       struct lrugen *lrugen = &lruvec->evictable;
151237 +       atomic_set(&lrugen->priority, DEF_PRIORITY);
151239 +       lrugen->max_seq = MIN_NR_GENS + 1;
151240 +       lrugen->enabled[0] = lru_gen_enabled() && lru_gen_nr_swapfiles;
151241 +       lrugen->enabled[1] = lru_gen_enabled();
151243 +       for (i = 0; i <= MIN_NR_GENS + 1; i++)
151244 +               lrugen->timestamps[i] = jiffies;
151246 +       for_each_gen_type_zone(gen, file, zone)
151247 +               INIT_LIST_HEAD(&lrugen->lists[gen][file][zone]);
151250 +static int __init init_lru_gen(void)
151252 +       BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
151253 +       BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
151254 +       BUILD_BUG_ON(sizeof(MM_STAT_CODES) != NR_MM_STATS + 1);
151255 +       BUILD_BUG_ON(PMD_SIZE / PAGE_SIZE != PTRS_PER_PTE);
151256 +       BUILD_BUG_ON(PUD_SIZE / PMD_SIZE != PTRS_PER_PMD);
151257 +       BUILD_BUG_ON(P4D_SIZE / PUD_SIZE != PTRS_PER_PUD);
151259 +       if (mem_cgroup_disabled()) {
151260 +               global_mm_list = alloc_mm_list();
151261 +               if (!global_mm_list) {
151262 +                       pr_err("lru_gen: failed to allocate global mm_struct list\n");
151263 +                       return -ENOMEM;
151264 +               }
151265 +       }
151267 +       if (hotplug_memory_notifier(lru_gen_online_mem, 0))
151268 +               pr_err("lru_gen: failed to subscribe hotplug notifications\n");
151270 +       if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
151271 +               pr_err("lru_gen: failed to create sysfs group\n");
151273 +       debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
151274 +       debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
151276 +       return 0;
151279 + * We want to run as early as possible because some debug code, e.g.,
151280 + * dma_resv_lockdep(), calls mm_alloc() and mmput(). We only depend on mm_kobj,
151281 + * which is initialized one stage earlier.
151282 + */
151283 +arch_initcall(init_lru_gen);
151285 +#endif /* CONFIG_LRU_GEN */
151286 diff --git a/mm/workingset.c b/mm/workingset.c
151287 index cd39902c1062..df363f9419fc 100644
151288 --- a/mm/workingset.c
151289 +++ b/mm/workingset.c
151290 @@ -168,9 +168,9 @@
151291   * refault distance will immediately activate the refaulting page.
151292   */
151294 -#define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) +  \
151295 -                        1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
151296 -#define EVICTION_MASK  (~0UL >> EVICTION_SHIFT)
151297 +#define EVICTION_SHIFT         (BITS_PER_XA_VALUE - MEM_CGROUP_ID_SHIFT - NODES_SHIFT)
151298 +#define EVICTION_MASK          (BIT(EVICTION_SHIFT) - 1)
151299 +#define WORKINGSET_WIDTH       1
151302   * Eviction timestamps need to be able to cover the full range of
151303 @@ -182,38 +182,139 @@
151304   */
151305  static unsigned int bucket_order __read_mostly;
151307 -static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
151308 -                        bool workingset)
151309 +static void *pack_shadow(int memcg_id, struct pglist_data *pgdat, unsigned long val)
151311 -       eviction >>= bucket_order;
151312 -       eviction &= EVICTION_MASK;
151313 -       eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
151314 -       eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
151315 -       eviction = (eviction << 1) | workingset;
151316 +       val = (val << MEM_CGROUP_ID_SHIFT) | memcg_id;
151317 +       val = (val << NODES_SHIFT) | pgdat->node_id;
151319 -       return xa_mk_value(eviction);
151320 +       return xa_mk_value(val);
151323 -static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
151324 -                         unsigned long *evictionp, bool *workingsetp)
151325 +static unsigned long unpack_shadow(void *shadow, int *memcg_id, struct pglist_data **pgdat)
151327 -       unsigned long entry = xa_to_value(shadow);
151328 -       int memcgid, nid;
151329 -       bool workingset;
151330 +       unsigned long val = xa_to_value(shadow);
151332 +       *pgdat = NODE_DATA(val & (BIT(NODES_SHIFT) - 1));
151333 +       val >>= NODES_SHIFT;
151334 +       *memcg_id = val & (BIT(MEM_CGROUP_ID_SHIFT) - 1);
151336 +       return val >> MEM_CGROUP_ID_SHIFT;
151339 +#ifdef CONFIG_LRU_GEN
151341 +#if LRU_GEN_SHIFT + LRU_USAGE_SHIFT >= EVICTION_SHIFT
151342 +#error "Please try smaller NODES_SHIFT, NR_LRU_GENS and TIERS_PER_GEN configurations"
151343 +#endif
151345 +static void page_set_usage(struct page *page, int usage)
151347 +       unsigned long old_flags, new_flags;
151349 +       VM_BUG_ON(usage > BIT(LRU_USAGE_WIDTH));
151351 +       if (!usage)
151352 +               return;
151354 +       do {
151355 +               old_flags = READ_ONCE(page->flags);
151356 +               new_flags = (old_flags & ~LRU_USAGE_MASK) | LRU_TIER_FLAGS |
151357 +                           ((usage - 1UL) << LRU_USAGE_PGOFF);
151358 +               if (old_flags == new_flags)
151359 +                       break;
151360 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
151363 +/* Return a token to be stored in the shadow entry of a page being evicted. */
151364 +static void *lru_gen_eviction(struct page *page)
151366 +       int sid, tier;
151367 +       unsigned long token;
151368 +       unsigned long min_seq;
151369 +       struct lruvec *lruvec;
151370 +       struct lrugen *lrugen;
151371 +       int file = page_is_file_lru(page);
151372 +       int usage = page_tier_usage(page);
151373 +       struct mem_cgroup *memcg = page_memcg(page);
151374 +       struct pglist_data *pgdat = page_pgdat(page);
151376 +       if (!lru_gen_enabled())
151377 +               return NULL;
151379 +       lruvec = mem_cgroup_lruvec(memcg, pgdat);
151380 +       lrugen = &lruvec->evictable;
151381 +       min_seq = READ_ONCE(lrugen->min_seq[file]);
151382 +       token = (min_seq << LRU_USAGE_SHIFT) | usage;
151384 +       sid = sid_from_seq_or_gen(min_seq);
151385 +       tier = lru_tier_from_usage(usage);
151386 +       atomic_long_add(thp_nr_pages(page), &lrugen->evicted[sid][file][tier]);
151388 +       return pack_shadow(mem_cgroup_id(memcg), pgdat, token);
151391 +/* Account a refaulted page based on the token stored in its shadow entry. */
151392 +static bool lru_gen_refault(struct page *page, void *shadow)
151394 +       int sid, tier, usage;
151395 +       int memcg_id;
151396 +       unsigned long token;
151397 +       unsigned long min_seq;
151398 +       struct lruvec *lruvec;
151399 +       struct lrugen *lrugen;
151400 +       struct pglist_data *pgdat;
151401 +       struct mem_cgroup *memcg;
151402 +       int file = page_is_file_lru(page);
151404 +       if (!lru_gen_enabled())
151405 +               return false;
151407 +       token = unpack_shadow(shadow, &memcg_id, &pgdat);
151408 +       if (page_pgdat(page) != pgdat)
151409 +               return true;
151411 +       rcu_read_lock();
151412 +       memcg = page_memcg_rcu(page);
151413 +       if (mem_cgroup_id(memcg) != memcg_id)
151414 +               goto unlock;
151416 +       usage = token & (BIT(LRU_USAGE_SHIFT) - 1);
151417 +       token >>= LRU_USAGE_SHIFT;
151419 +       lruvec = mem_cgroup_lruvec(memcg, pgdat);
151420 +       lrugen = &lruvec->evictable;
151421 +       min_seq = READ_ONCE(lrugen->min_seq[file]);
151422 +       if (token != (min_seq & (EVICTION_MASK >> LRU_USAGE_SHIFT)))
151423 +               goto unlock;
151425 -       workingset = entry & 1;
151426 -       entry >>= 1;
151427 -       nid = entry & ((1UL << NODES_SHIFT) - 1);
151428 -       entry >>= NODES_SHIFT;
151429 -       memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
151430 -       entry >>= MEM_CGROUP_ID_SHIFT;
151432 -       *memcgidp = memcgid;
151433 -       *pgdat = NODE_DATA(nid);
151434 -       *evictionp = entry << bucket_order;
151435 -       *workingsetp = workingset;
151436 +       page_set_usage(page, usage);
151438 +       sid = sid_from_seq_or_gen(min_seq);
151439 +       tier = lru_tier_from_usage(usage);
151440 +       atomic_long_add(thp_nr_pages(page), &lrugen->refaulted[sid][file][tier]);
151441 +       inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
151442 +       if (tier)
151443 +               inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
151444 +unlock:
151445 +       rcu_read_unlock();
151447 +       return true;
151450 +#else /* CONFIG_LRU_GEN */
151452 +static void *lru_gen_eviction(struct page *page)
151454 +       return NULL;
151457 +static bool lru_gen_refault(struct page *page, void *shadow)
151459 +       return false;
151462 +#endif /* CONFIG_LRU_GEN */
151464  /**
151465   * workingset_age_nonresident - age non-resident entries as LRU ages
151466   * @lruvec: the lruvec that was aged
151467 @@ -256,18 +357,25 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
151468         unsigned long eviction;
151469         struct lruvec *lruvec;
151470         int memcgid;
151471 +       void *shadow;
151473         /* Page is fully exclusive and pins page's memory cgroup pointer */
151474         VM_BUG_ON_PAGE(PageLRU(page), page);
151475         VM_BUG_ON_PAGE(page_count(page), page);
151476         VM_BUG_ON_PAGE(!PageLocked(page), page);
151478 +       shadow = lru_gen_eviction(page);
151479 +       if (shadow)
151480 +               return shadow;
151482         lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
151483         /* XXX: target_memcg can be NULL, go through lruvec */
151484         memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
151485         eviction = atomic_long_read(&lruvec->nonresident_age);
151486 +       eviction >>= bucket_order;
151487 +       eviction = (eviction << WORKINGSET_WIDTH) | PageWorkingset(page);
151488         workingset_age_nonresident(lruvec, thp_nr_pages(page));
151489 -       return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
151490 +       return pack_shadow(memcgid, pgdat, eviction);
151493  /**
151494 @@ -294,7 +402,10 @@ void workingset_refault(struct page *page, void *shadow)
151495         bool workingset;
151496         int memcgid;
151498 -       unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
151499 +       if (lru_gen_refault(page, shadow))
151500 +               return;
151502 +       eviction = unpack_shadow(shadow, &memcgid, &pgdat);
151504         rcu_read_lock();
151505         /*
151506 @@ -318,6 +429,8 @@ void workingset_refault(struct page *page, void *shadow)
151507                 goto out;
151508         eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
151509         refault = atomic_long_read(&eviction_lruvec->nonresident_age);
151510 +       workingset = eviction & (BIT(WORKINGSET_WIDTH) - 1);
151511 +       eviction = (eviction >> WORKINGSET_WIDTH) << bucket_order;
151513         /*
151514          * Calculate the refault distance
151515 @@ -335,7 +448,7 @@ void workingset_refault(struct page *page, void *shadow)
151516          * longest time, so the occasional inappropriate activation
151517          * leading to pressure on the active list is not a problem.
151518          */
151519 -       refault_distance = (refault - eviction) & EVICTION_MASK;
151520 +       refault_distance = (refault - eviction) & (EVICTION_MASK >> WORKINGSET_WIDTH);
151522         /*
151523          * The activation decision for this page is made at the level
151524 @@ -594,7 +707,7 @@ static int __init workingset_init(void)
151525         unsigned int max_order;
151526         int ret;
151528 -       BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
151529 +       BUILD_BUG_ON(EVICTION_SHIFT < WORKINGSET_WIDTH);
151530         /*
151531          * Calculate the eviction bucket size to cover the longest
151532          * actionable refault distance, which is currently half of
151533 @@ -602,7 +715,7 @@ static int __init workingset_init(void)
151534          * some more pages at runtime, so keep working with up to
151535          * double the initial memory by using totalram_pages as-is.
151536          */
151537 -       timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
151538 +       timestamp_bits = EVICTION_SHIFT - WORKINGSET_WIDTH;
151539         max_order = fls_long(totalram_pages() - 1);
151540         if (max_order > timestamp_bits)
151541                 bucket_order = max_order - timestamp_bits;
151542 diff --git a/net/bluetooth/ecdh_helper.h b/net/bluetooth/ecdh_helper.h
151543 index a6f8d03d4aaf..830723971cf8 100644
151544 --- a/net/bluetooth/ecdh_helper.h
151545 +++ b/net/bluetooth/ecdh_helper.h
151546 @@ -25,6 +25,6 @@
151548  int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 pair_public_key[64],
151549                         u8 secret[32]);
151550 -int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 *private_key);
151551 +int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]);
151552  int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]);
151553  int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]);
151554 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
151555 index 6ffa89e3ba0a..f72646690539 100644
151556 --- a/net/bluetooth/hci_conn.c
151557 +++ b/net/bluetooth/hci_conn.c
151558 @@ -1830,8 +1830,6 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
151560         u32 phys = 0;
151562 -       hci_dev_lock(conn->hdev);
151564         /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
151565          * Table 6.2: Packets defined for synchronous, asynchronous, and
151566          * CSB logical transport types.
151567 @@ -1928,7 +1926,5 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
151568                 break;
151569         }
151571 -       hci_dev_unlock(conn->hdev);
151573         return phys;
151575 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
151576 index 67668be3461e..82f4973a011d 100644
151577 --- a/net/bluetooth/hci_event.c
151578 +++ b/net/bluetooth/hci_event.c
151579 @@ -5005,6 +5005,7 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
151580                 return;
151582         hchan->handle = le16_to_cpu(ev->handle);
151583 +       hchan->amp = true;
151585         BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
151587 @@ -5037,7 +5038,7 @@ static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
151588         hci_dev_lock(hdev);
151590         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
151591 -       if (!hchan)
151592 +       if (!hchan || !hchan->amp)
151593                 goto unlock;
151595         amp_destroy_logical_link(hchan, ev->reason);
151596 @@ -5911,7 +5912,7 @@ static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
151598         BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
151600 -       if (!ev->status)
151601 +       if (ev->status)
151602                 return;
151604         hci_dev_lock(hdev);
151605 diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
151606 index e55976db4403..805ce546b813 100644
151607 --- a/net/bluetooth/hci_request.c
151608 +++ b/net/bluetooth/hci_request.c
151609 @@ -272,12 +272,16 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
151611         int ret;
151613 -       if (!test_bit(HCI_UP, &hdev->flags))
151614 -               return -ENETDOWN;
151616         /* Serialize all requests */
151617         hci_req_sync_lock(hdev);
151618 -       ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
151619 +       /* check the state after obtaining the lock to protect the HCI_UP
151620 +        * against any races from hci_dev_do_close when the controller
151621 +        * gets removed.
151622 +        */
151623 +       if (test_bit(HCI_UP, &hdev->flags))
151624 +               ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
151625 +       else
151626 +               ret = -ENETDOWN;
151627         hci_req_sync_unlock(hdev);
151629         return ret;
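
The hci_req_sync() change above is the check-under-lock pattern: testing HCI_UP before taking the request lock left a window in which hci_dev_do_close() could clear the flag after the test passed. A generic userspace sketch of the same pattern; the names and the pthread mutex standing in for the HCI request lock are illustrative only:

#include <pthread.h>
#include <errno.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool device_up;          /* stands in for the HCI_UP bit */

/* Racy: device_up may flip between the test and the locked section. */
int request_racy(void)
{
        if (!device_up)
                return -ENETDOWN;
        pthread_mutex_lock(&lock);
        /* ... issue request ... */
        pthread_mutex_unlock(&lock);
        return 0;
}

/* Fixed, mirroring the patched hci_req_sync(): re-check under the lock. */
int request_safe(void)
{
        int ret;

        pthread_mutex_lock(&lock);
        if (device_up)
                ret = 0;        /* ... issue request ... */
        else
                ret = -ENETDOWN;
        pthread_mutex_unlock(&lock);
        return ret;
}
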
151630 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
151631 index 72c2f5226d67..53ddbee459b9 100644
151632 --- a/net/bluetooth/l2cap_core.c
151633 +++ b/net/bluetooth/l2cap_core.c
151634 @@ -451,6 +451,8 @@ struct l2cap_chan *l2cap_chan_create(void)
151635         if (!chan)
151636                 return NULL;
151638 +       skb_queue_head_init(&chan->tx_q);
151639 +       skb_queue_head_init(&chan->srej_q);
151640         mutex_init(&chan->lock);
151642         /* Set default lock nesting level */
151643 @@ -516,7 +518,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
151644         chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
151645         chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
151646         chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
151648         chan->conf_state = 0;
151649 +       set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
151651         set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
151653 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
151654 index f1b1edd0b697..c99d65ef13b1 100644
151655 --- a/net/bluetooth/l2cap_sock.c
151656 +++ b/net/bluetooth/l2cap_sock.c
151657 @@ -179,9 +179,17 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
151658         struct l2cap_chan *chan = l2cap_pi(sk)->chan;
151659         struct sockaddr_l2 la;
151660         int len, err = 0;
151661 +       bool zapped;
151663         BT_DBG("sk %p", sk);
151665 +       lock_sock(sk);
151666 +       zapped = sock_flag(sk, SOCK_ZAPPED);
151667 +       release_sock(sk);
151669 +       if (zapped)
151670 +               return -EINVAL;
151672         if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
151673             addr->sa_family != AF_BLUETOOTH)
151674                 return -EINVAL;
151675 diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
151676 index 74971b4bd457..939c6f77fecc 100644
151677 --- a/net/bluetooth/mgmt.c
151678 +++ b/net/bluetooth/mgmt.c
151679 @@ -7976,7 +7976,6 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
151680                 goto unlock;
151681         }
151683 -       hdev->cur_adv_instance = cp->instance;
151684         /* Submit request for advertising params if ext adv available */
151685         if (ext_adv_capable(hdev)) {
151686                 hci_req_init(&req, hdev);
151687 diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
151688 index b0c1ee110eff..e03cc284161c 100644
151689 --- a/net/bluetooth/smp.c
151690 +++ b/net/bluetooth/smp.c
151691 @@ -2732,6 +2732,15 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
151692         if (skb->len < sizeof(*key))
151693                 return SMP_INVALID_PARAMS;
151695 +       /* Check if remote and local public keys are the same and debug key is
151696 +        * not in use.
151697 +        */
151698 +       if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) &&
151699 +           !crypto_memneq(key, smp->local_pk, 64)) {
151700 +               bt_dev_err(hdev, "Remote and local public keys are identical");
151701 +               return SMP_UNSPECIFIED;
151702 +       }
151704         memcpy(smp->remote_pk, key, 64);
151706         if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) {
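
The SMP hunk above rejects a reflected public key with crypto_memneq(), which compares in constant time so the check cannot leak where two keys first differ. A userspace sketch of such a comparator; ct_memneq() is a hypothetical stand-in, not the kernel API:

#include <stdint.h>
#include <stdio.h>

/* Constant-time inequality: nonzero iff the buffers differ. Unlike
 * memcmp(), the run time does not depend on the first differing byte,
 * so the comparison leaks nothing useful through timing. */
static int ct_memneq(const void *a, const void *b, size_t n)
{
        const uint8_t *pa = a, *pb = b;
        uint8_t diff = 0;

        for (size_t i = 0; i < n; i++)
                diff |= pa[i] ^ pb[i];
        return diff;
}

int main(void)
{
        uint8_t local[64] = { 1 }, remote[64] = { 1 };

        /* Mirrors the patched check: identical keys, no debug key => reject. */
        if (!ct_memneq(remote, local, sizeof(remote)))
                puts("remote and local public keys are identical: reject");
        return 0;
}
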
151707 diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
151708 index dfec65eca8a6..3db1def4437b 100644
151709 --- a/net/bridge/br_arp_nd_proxy.c
151710 +++ b/net/bridge/br_arp_nd_proxy.c
151711 @@ -160,7 +160,9 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
151712         if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
151713                 if (p && (p->flags & BR_NEIGH_SUPPRESS))
151714                         return;
151715 -               if (ipv4_is_zeronet(sip) || sip == tip) {
151716 +               if (parp->ar_op != htons(ARPOP_RREQUEST) &&
151717 +                   parp->ar_op != htons(ARPOP_RREPLY) &&
151718 +                   (ipv4_is_zeronet(sip) || sip == tip)) {
151719                         /* prevent flooding to neigh suppress ports */
151720                         BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
151721                         return;
151722 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
151723 index 9d265447d654..226bb05c3b42 100644
151724 --- a/net/bridge/br_multicast.c
151725 +++ b/net/bridge/br_multicast.c
151726 @@ -1593,7 +1593,8 @@ static void br_multicast_port_group_rexmit(struct timer_list *t)
151727         spin_unlock(&br->multicast_lock);
151730 -static void br_mc_disabled_update(struct net_device *dev, bool value)
151731 +static int br_mc_disabled_update(struct net_device *dev, bool value,
151732 +                                struct netlink_ext_ack *extack)
151734         struct switchdev_attr attr = {
151735                 .orig_dev = dev,
151736 @@ -1602,11 +1603,13 @@ static void br_mc_disabled_update(struct net_device *dev, bool value)
151737                 .u.mc_disabled = !value,
151738         };
151740 -       switchdev_port_attr_set(dev, &attr, NULL);
151741 +       return switchdev_port_attr_set(dev, &attr, extack);
151744  int br_multicast_add_port(struct net_bridge_port *port)
151746 +       int err;
151748         port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
151749         port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
151751 @@ -1618,8 +1621,12 @@ int br_multicast_add_port(struct net_bridge_port *port)
151752         timer_setup(&port->ip6_own_query.timer,
151753                     br_ip6_multicast_port_query_expired, 0);
151754  #endif
151755 -       br_mc_disabled_update(port->dev,
151756 -                             br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
151757 +       err = br_mc_disabled_update(port->dev,
151758 +                                   br_opt_get(port->br,
151759 +                                              BROPT_MULTICAST_ENABLED),
151760 +                                   NULL);
151761 +       if (err && err != -EOPNOTSUPP)
151762 +               return err;
151764         port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
151765         if (!port->mcast_stats)
151766 @@ -3152,25 +3159,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
151769  #if IS_ENABLED(CONFIG_IPV6)
151770 -static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
151771 -                                   struct net_bridge_port *port,
151772 -                                   struct sk_buff *skb)
151773 +static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
151774 +                                    struct net_bridge_port *port,
151775 +                                    struct sk_buff *skb)
151777 -       int ret;
151779 -       if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
151780 -               return -ENOMSG;
151782 -       ret = ipv6_mc_check_icmpv6(skb);
151783 -       if (ret < 0)
151784 -               return ret;
151786         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
151787 -               return -ENOMSG;
151788 +               return;
151790         br_multicast_mark_router(br, port);
151792 -       return 0;
151795  static int br_multicast_ipv6_rcv(struct net_bridge *br,
151796 @@ -3184,18 +3180,12 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
151798         err = ipv6_mc_check_mld(skb);
151800 -       if (err == -ENOMSG) {
151801 +       if (err == -ENOMSG || err == -ENODATA) {
151802                 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
151803                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
151805 -               if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
151806 -                       err = br_ip6_multicast_mrd_rcv(br, port, skb);
151808 -                       if (err < 0 && err != -ENOMSG) {
151809 -                               br_multicast_err_count(br, port, skb->protocol);
151810 -                               return err;
151811 -                       }
151812 -               }
151813 +               if (err == -ENODATA &&
151814 +                   ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
151815 +                       br_ip6_multicast_mrd_rcv(br, port, skb);
151817                 return 0;
151818         } else if (err < 0) {
151819 @@ -3560,16 +3550,23 @@ static void br_multicast_start_querier(struct net_bridge *br,
151820         rcu_read_unlock();
151823 -int br_multicast_toggle(struct net_bridge *br, unsigned long val)
151824 +int br_multicast_toggle(struct net_bridge *br, unsigned long val,
151825 +                       struct netlink_ext_ack *extack)
151827         struct net_bridge_port *port;
151828         bool change_snoopers = false;
151829 +       int err = 0;
151831         spin_lock_bh(&br->multicast_lock);
151832         if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
151833                 goto unlock;
151835 -       br_mc_disabled_update(br->dev, val);
151836 +       err = br_mc_disabled_update(br->dev, val, extack);
151837 +       if (err == -EOPNOTSUPP)
151838 +               err = 0;
151839 +       if (err)
151840 +               goto unlock;
151842         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
151843         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
151844                 change_snoopers = true;
151845 @@ -3607,7 +3604,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
151846                         br_multicast_leave_snoopers(br);
151847         }
151849 -       return 0;
151850 +       return err;
151853  bool br_multicast_enabled(const struct net_device *dev)
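
br_mc_disabled_update() now returns the switchdev result, and both callers treat -EOPNOTSUPP as success so bridges without offloading hardware keep working while real driver errors propagate. A sketch of that tolerance pattern; the offload hook is hypothetical and stands in for switchdev_port_attr_set():

#include <errno.h>

/* Hypothetical offload hook; like switchdev_port_attr_set(), it returns
 * -EOPNOTSUPP when no hardware driver claims the port. */
static int offload_set_mc_disabled(int port, int disabled)
{
        (void)port;
        (void)disabled;
        return -EOPNOTSUPP;     /* pretend there is no switchdev driver */
}

static int set_mc_disabled(int port, int disabled)
{
        int err = offload_set_mc_disabled(port, disabled);

        if (err == -EOPNOTSUPP) /* no offload: software path is enough */
                err = 0;
        if (err)                /* real hardware failure: propagate */
                return err;

        /* ... update software forwarding state ... */
        return 0;
}

int main(void)
{
        return set_mc_disabled(0, 1);   /* succeeds despite no offload */
}
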
151854 diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
151855 index f2b1343f8332..e4e6e991313e 100644
151856 --- a/net/bridge/br_netlink.c
151857 +++ b/net/bridge/br_netlink.c
151858 @@ -103,8 +103,9 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
151860         rcu_read_lock();
151861         if (netif_is_bridge_port(dev)) {
151862 -               p = br_port_get_rcu(dev);
151863 -               vg = nbp_vlan_group_rcu(p);
151864 +               p = br_port_get_check_rcu(dev);
151865 +               if (p)
151866 +                       vg = nbp_vlan_group_rcu(p);
151867         } else if (dev->priv_flags & IFF_EBRIDGE) {
151868                 br = netdev_priv(dev);
151869                 vg = br_vlan_group_rcu(br);
151870 @@ -1293,7 +1294,9 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
151871         if (data[IFLA_BR_MCAST_SNOOPING]) {
151872                 u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
151874 -               br_multicast_toggle(br, mcast_snooping);
151875 +               err = br_multicast_toggle(br, mcast_snooping, extack);
151876 +               if (err)
151877 +                       return err;
151878         }
151880         if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
151881 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
151882 index d7d167e10b70..af3430c2d6ea 100644
151883 --- a/net/bridge/br_private.h
151884 +++ b/net/bridge/br_private.h
151885 @@ -810,7 +810,8 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
151886                         struct sk_buff *skb, bool local_rcv, bool local_orig);
151887  int br_multicast_set_router(struct net_bridge *br, unsigned long val);
151888  int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
151889 -int br_multicast_toggle(struct net_bridge *br, unsigned long val);
151890 +int br_multicast_toggle(struct net_bridge *br, unsigned long val,
151891 +                       struct netlink_ext_ack *extack);
151892  int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
151893  int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
151894  int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val);
151895 diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
151896 index 072e29840082..381467b691d5 100644
151897 --- a/net/bridge/br_sysfs_br.c
151898 +++ b/net/bridge/br_sysfs_br.c
151899 @@ -409,17 +409,11 @@ static ssize_t multicast_snooping_show(struct device *d,
151900         return sprintf(buf, "%d\n", br_opt_get(br, BROPT_MULTICAST_ENABLED));
151903 -static int toggle_multicast(struct net_bridge *br, unsigned long val,
151904 -                           struct netlink_ext_ack *extack)
151906 -       return br_multicast_toggle(br, val);
151909  static ssize_t multicast_snooping_store(struct device *d,
151910                                         struct device_attribute *attr,
151911                                         const char *buf, size_t len)
151913 -       return store_bridge_parm(d, buf, len, toggle_multicast);
151914 +       return store_bridge_parm(d, buf, len, br_multicast_toggle);
151916  static DEVICE_ATTR_RW(multicast_snooping);
151918 diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
151919 index ca44c327bace..79641c4afee9 100644
151920 --- a/net/ceph/auth_x.c
151921 +++ b/net/ceph/auth_x.c
151922 @@ -526,7 +526,7 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
151923                 if (ret < 0)
151924                         return ret;
151926 -               auth->struct_v = 2;  /* nautilus+ */
151927 +               auth->struct_v = 3;  /* nautilus+ */
151928                 auth->key = 0;
151929                 for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++)
151930                         auth->key ^= *(__le64 *)u;
151931 diff --git a/net/ceph/decode.c b/net/ceph/decode.c
151932 index b44f7651be04..bc109a1a4616 100644
151933 --- a/net/ceph/decode.c
151934 +++ b/net/ceph/decode.c
151935 @@ -4,6 +4,7 @@
151936  #include <linux/inet.h>
151938  #include <linux/ceph/decode.h>
151939 +#include <linux/ceph/messenger.h>  /* for ceph_pr_addr() */
151941  static int
151942  ceph_decode_entity_addr_versioned(void **p, void *end,
151943 @@ -110,6 +111,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
151944         }
151946         ceph_decode_32_safe(p, end, addr_cnt, e_inval);
151947 +       dout("%s addr_cnt %d\n", __func__, addr_cnt);
151949         found = false;
151950         for (i = 0; i < addr_cnt; i++) {
151951 @@ -117,6 +119,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
151952                 if (ret)
151953                         return ret;
151955 +               dout("%s i %d addr %s\n", __func__, i, ceph_pr_addr(&tmp_addr));
151956                 if (tmp_addr.type == my_type) {
151957                         if (found) {
151958                                 pr_err("another match of type %d in addrvec\n",
151959 @@ -128,13 +131,18 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
151960                         found = true;
151961                 }
151962         }
151963 -       if (!found && addr_cnt != 0) {
151964 -               pr_err("no match of type %d in addrvec\n",
151965 -                      le32_to_cpu(my_type));
151966 -               return -ENOENT;
151967 -       }
151969 -       return 0;
151970 +       if (found)
151971 +               return 0;
151973 +       if (!addr_cnt)
151974 +               return 0;  /* normal -- e.g. unused OSD id/slot */
151976 +       if (addr_cnt == 1 && !memchr_inv(&tmp_addr, 0, sizeof(tmp_addr)))
151977 +               return 0;  /* weird but effectively the same as !addr_cnt */
151979 +       pr_err("no match of type %d in addrvec\n", le32_to_cpu(my_type));
151980 +       return -ENOENT;
151982  e_inval:
151983         return -EINVAL;
151984 diff --git a/net/core/dev.c b/net/core/dev.c
151985 index 1f79b9aa9a3f..70829c568645 100644
151986 --- a/net/core/dev.c
151987 +++ b/net/core/dev.c
151988 @@ -4672,10 +4672,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
151989         void *orig_data, *orig_data_end, *hard_start;
151990         struct netdev_rx_queue *rxqueue;
151991         u32 metalen, act = XDP_DROP;
151992 +       bool orig_bcast, orig_host;
151993         u32 mac_len, frame_sz;
151994         __be16 orig_eth_type;
151995         struct ethhdr *eth;
151996 -       bool orig_bcast;
151997         int off;
151999         /* Reinjected packets coming from act_mirred or similar should
152000 @@ -4722,6 +4722,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
152001         orig_data_end = xdp->data_end;
152002         orig_data = xdp->data;
152003         eth = (struct ethhdr *)xdp->data;
152004 +       orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
152005         orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
152006         orig_eth_type = eth->h_proto;
152008 @@ -4749,8 +4750,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
152009         /* check if XDP changed eth hdr such SKB needs update */
152010         eth = (struct ethhdr *)xdp->data;
152011         if ((orig_eth_type != eth->h_proto) ||
152012 +           (orig_host != ether_addr_equal_64bits(eth->h_dest,
152013 +                                                 skb->dev->dev_addr)) ||
152014             (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
152015                 __skb_push(skb, ETH_HLEN);
152016 +               skb->pkt_type = PACKET_HOST;
152017                 skb->protocol = eth_type_trans(skb, skb->dev);
152018         }
152020 @@ -5914,7 +5918,7 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
152021         return head;
152024 -static void skb_gro_reset_offset(struct sk_buff *skb)
152025 +static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
152027         const struct skb_shared_info *pinfo = skb_shinfo(skb);
152028         const skb_frag_t *frag0 = &pinfo->frags[0];
152029 @@ -5925,7 +5929,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
152031         if (!skb_headlen(skb) && pinfo->nr_frags &&
152032             !PageHighMem(skb_frag_page(frag0)) &&
152033 -           (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
152034 +           (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
152035                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
152036                 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
152037                                                     skb_frag_size(frag0),
152038 @@ -6143,7 +6147,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
152039         skb_mark_napi_id(skb, napi);
152040         trace_napi_gro_receive_entry(skb);
152042 -       skb_gro_reset_offset(skb);
152043 +       skb_gro_reset_offset(skb, 0);
152045         ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
152046         trace_napi_gro_receive_exit(ret);
152047 @@ -6232,7 +6236,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
152048         napi->skb = NULL;
152050         skb_reset_mac_header(skb);
152051 -       skb_gro_reset_offset(skb);
152052 +       skb_gro_reset_offset(skb, hlen);
152054         if (unlikely(skb_gro_header_hard(skb, hlen))) {
152055                 eth = skb_gro_header_slow(skb, hlen, 0);
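
The skb_gro_reset_offset() change matters for napi_frags_skb(), which pulls the Ethernet header out of frag0 before GRO runs: the usable-frag0 test must check alignment at the frag offset plus that pull (nhoff), not at the frag start alone. A small illustration of the 4-byte alignment test, relevant where NET_IP_ALIGN is nonzero:

#include <stdio.h>

/* frag0 is only usable for the GRO fast path if the header that will be
 * read from it is 4-byte aligned. */
static int frag0_usable(unsigned int frag_off, unsigned int nhoff)
{
        return ((frag_off + nhoff) & 3) == 0;
}

int main(void)
{
        unsigned int frag_off = 0;   /* fragment itself starts aligned */
        unsigned int eth_hlen = 14;  /* pulled off by napi_frags_skb() */

        printf("old check (offset only):  %d\n", frag0_usable(frag_off, 0));
        printf("new check (offset+nhoff): %d\n", frag0_usable(frag_off, eth_hlen));
        return 0;
}
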
152056 diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
152057 index a96a4f5de0ce..3f36b04d86a0 100644
152058 --- a/net/core/flow_dissector.c
152059 +++ b/net/core/flow_dissector.c
152060 @@ -828,8 +828,10 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
152061                 key_addrs = skb_flow_dissector_target(flow_dissector,
152062                                                       FLOW_DISSECTOR_KEY_IPV6_ADDRS,
152063                                                       target_container);
152064 -               memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src,
152065 -                      sizeof(key_addrs->v6addrs));
152066 +               memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
152067 +                      sizeof(key_addrs->v6addrs.src));
152068 +               memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
152069 +                      sizeof(key_addrs->v6addrs.dst));
152070                 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
152071         }
152073 diff --git a/net/core/page_pool.c b/net/core/page_pool.c
152074 index ad8b0707af04..f014fd8c19a6 100644
152075 --- a/net/core/page_pool.c
152076 +++ b/net/core/page_pool.c
152077 @@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
152078                                           struct page *page,
152079                                           unsigned int dma_sync_size)
152081 +       dma_addr_t dma_addr = page_pool_get_dma_addr(page);
152083         dma_sync_size = min(dma_sync_size, pool->p.max_len);
152084 -       dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
152085 +       dma_sync_single_range_for_device(pool->p.dev, dma_addr,
152086                                          pool->p.offset, dma_sync_size,
152087                                          pool->p.dma_dir);
152089 @@ -226,7 +228,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
152090                 put_page(page);
152091                 return NULL;
152092         }
152093 -       page->dma_addr = dma;
152094 +       page_pool_set_dma_addr(page, dma);
152096         if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
152097                 page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
152098 @@ -294,13 +296,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
152099                  */
152100                 goto skip_dma_unmap;
152102 -       dma = page->dma_addr;
152103 +       dma = page_pool_get_dma_addr(page);
152105 -       /* When page is unmapped, it cannot be returned our pool */
152106 +       /* When page is unmapped, it cannot be returned to our pool */
152107         dma_unmap_page_attrs(pool->p.dev, dma,
152108                              PAGE_SIZE << pool->p.order, pool->p.dma_dir,
152109                              DMA_ATTR_SKIP_CPU_SYNC);
152110 -       page->dma_addr = 0;
152111 +       page_pool_set_dma_addr(page, 0);
152112  skip_dma_unmap:
152113         /* This may be the last page returned, releasing the pool, so
152114          * it is not safe to reference pool afterwards.
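
Funneling every page->dma_addr access through page_pool_get_dma_addr() and page_pool_set_dma_addr() leaves one place to handle layouts where dma_addr_t is wider than unsigned long (64-bit DMA on a 32-bit kernel). A sketch of that accessor idea using a hypothetical two-word layout, not the actual struct page definition:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: when the DMA address does not fit in one
 * unsigned long, split it across two words behind the accessors. */
struct fake_page {
        unsigned long dma_addr[2];
};

static void page_dma_addr_set(struct fake_page *p, uint64_t addr)
{
        p->dma_addr[0] = (unsigned long)addr;
        if (sizeof(unsigned long) < sizeof(uint64_t))
                p->dma_addr[1] = (unsigned long)(addr >> 32);
}

static uint64_t page_dma_addr_get(const struct fake_page *p)
{
        uint64_t addr = p->dma_addr[0];

        if (sizeof(unsigned long) < sizeof(uint64_t))
                addr |= (uint64_t)p->dma_addr[1] << 32;
        return addr;
}

int main(void)
{
        struct fake_page page = { { 0, 0 } };

        page_dma_addr_set(&page, 0x12345678ull);
        printf("dma_addr=%#llx\n", (unsigned long long)page_dma_addr_get(&page));
        return 0;
}
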
152115 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
152116 index 3fba429f1f57..9a3a9a6eb837 100644
152117 --- a/net/core/pktgen.c
152118 +++ b/net/core/pktgen.c
152119 @@ -1894,7 +1894,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
152120                 mutex_unlock(&pktgen_thread_lock);
152121                 pr_debug("%s: waiting for %s to disappear....\n",
152122                          __func__, ifname);
152123 -               schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
152124 +               schedule_msec_hrtimeout_interruptible((msec_per_try));
152125                 mutex_lock(&pktgen_thread_lock);
152127                 if (++i >= max_tries) {
152128 diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
152129 index 771688e1b0da..2603966da904 100644
152130 --- a/net/ethtool/ioctl.c
152131 +++ b/net/ethtool/ioctl.c
152132 @@ -489,7 +489,7 @@ store_link_ksettings_for_user(void __user *to,
152134         struct ethtool_link_usettings link_usettings;
152136 -       memcpy(&link_usettings.base, &from->base, sizeof(link_usettings));
152137 +       memcpy(&link_usettings, from, sizeof(link_usettings));
152138         bitmap_to_arr32(link_usettings.link_modes.supported,
152139                         from->link_modes.supported,
152140                         __ETHTOOL_LINK_MODE_MASK_NBITS);
152141 diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
152142 index 50d3c8896f91..25a55086d2b6 100644
152143 --- a/net/ethtool/netlink.c
152144 +++ b/net/ethtool/netlink.c
152145 @@ -384,7 +384,8 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
152146         int ret;
152148         ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
152149 -                          &ethtool_genl_family, 0, ctx->ops->reply_cmd);
152150 +                          &ethtool_genl_family, NLM_F_MULTI,
152151 +                          ctx->ops->reply_cmd);
152152         if (!ehdr)
152153                 return -EMSGSIZE;
152155 diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
152156 index b218e4594009..6852e9bccf5b 100644
152157 --- a/net/hsr/hsr_forward.c
152158 +++ b/net/hsr/hsr_forward.c
152159 @@ -520,6 +520,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
152160         struct ethhdr *ethhdr;
152161         __be16 proto;
152163 +       /* Check if skb contains hsr_ethhdr */
152164 +       if (skb->mac_len < sizeof(struct hsr_ethhdr))
152165 +               return -EINVAL;
152167         memset(frame, 0, sizeof(*frame));
152168         frame->is_supervision = is_supervision_frame(port->hsr, skb);
152169         frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
152170 diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
152171 index 87983e70f03f..a833a7a67ce7 100644
152172 --- a/net/ipv4/Kconfig
152173 +++ b/net/ipv4/Kconfig
152174 @@ -669,6 +669,24 @@ config TCP_CONG_BBR
152175           AQM schemes that do not provide a delay signal. It requires the fq
152176           ("Fair Queue") pacing packet scheduler.
152178 +config TCP_CONG_BBR2
152179 +       tristate "BBR2 TCP"
152180 +       default n
152181 +       help
152183 +       BBR2 TCP congestion control is a model-based congestion control
152184 +       algorithm that aims to maximize network utilization, keep queues and
152185 +       retransmit rates low, and coexist with Reno/CUBIC in
152186 +       common scenarios. It builds an explicit model of the network path.  It
152187 +       tolerates a targeted degree of random packet loss and delay that are
152188 +       unrelated to congestion. It can operate over LAN, WAN, cellular, wifi,
152189 +       or cable modem links, and can use DCTCP-L4S-style ECN signals.  It can
152190 +       coexist with flows that use loss-based congestion control, and can
152191 +       operate with shallow buffers, deep buffers, bufferbloat, policers, or
152192 +       AQM schemes that do not provide a delay signal. It requires pacing,
152193 +       using either TCP internal pacing or the fq ("Fair Queue") pacing packet
152194 +       scheduler.
152196  choice
152197         prompt "Default TCP congestion control"
152198         default DEFAULT_CUBIC
152199 @@ -706,6 +724,9 @@ choice
152200         config DEFAULT_BBR
152201                 bool "BBR" if TCP_CONG_BBR=y
152203 +       config DEFAULT_BBR2
152204 +               bool "BBR2" if TCP_CONG_BBR2=y
152206         config DEFAULT_RENO
152207                 bool "Reno"
152208  endchoice
152209 @@ -730,6 +751,7 @@ config DEFAULT_TCP_CONG
152210         default "dctcp" if DEFAULT_DCTCP
152211         default "cdg" if DEFAULT_CDG
152212         default "bbr" if DEFAULT_BBR
152213 +       default "bbr2" if DEFAULT_BBR2
152214         default "cubic"
152216  config TCP_MD5SIG
152217 diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
152218 index 5b77a46885b9..8c5779dba462 100644
152219 --- a/net/ipv4/Makefile
152220 +++ b/net/ipv4/Makefile
152221 @@ -46,6 +46,7 @@ obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
152222  obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
152223  obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o
152224  obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o
152225 +obj-$(CONFIG_TCP_CONG_BBR2) += tcp_bbr2.o
152226  obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
152227  obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
152228  obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
152229 diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
152230 index d520e61649c8..22129c1c56a2 100644
152231 --- a/net/ipv4/bpf_tcp_ca.c
152232 +++ b/net/ipv4/bpf_tcp_ca.c
152233 @@ -16,7 +16,7 @@ static u32 optional_ops[] = {
152234         offsetof(struct tcp_congestion_ops, cwnd_event),
152235         offsetof(struct tcp_congestion_ops, in_ack_event),
152236         offsetof(struct tcp_congestion_ops, pkts_acked),
152237 -       offsetof(struct tcp_congestion_ops, min_tso_segs),
152238 +       offsetof(struct tcp_congestion_ops, tso_segs),
152239         offsetof(struct tcp_congestion_ops, sndbuf_expand),
152240         offsetof(struct tcp_congestion_ops, cong_control),
152242 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
152243 index bba150fdd265..d635b4f32d34 100644
152244 --- a/net/ipv4/route.c
152245 +++ b/net/ipv4/route.c
152246 @@ -66,6 +66,7 @@
152247  #include <linux/types.h>
152248  #include <linux/kernel.h>
152249  #include <linux/mm.h>
152250 +#include <linux/memblock.h>
152251  #include <linux/string.h>
152252  #include <linux/socket.h>
152253  #include <linux/sockios.h>
152254 @@ -478,8 +479,10 @@ static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
152255         __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
152258 -#define IP_IDENTS_SZ 2048u
152260 +/* Hash tables of size 2048..262144 depending on RAM size.
152261 + * Each bucket uses 8 bytes.
152262 + */
152263 +static u32 ip_idents_mask __read_mostly;
152264  static atomic_t *ip_idents __read_mostly;
152265  static u32 *ip_tstamps __read_mostly;
152267 @@ -489,12 +492,16 @@ static u32 *ip_tstamps __read_mostly;
152268   */
152269  u32 ip_idents_reserve(u32 hash, int segs)
152271 -       u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
152272 -       atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
152273 -       u32 old = READ_ONCE(*p_tstamp);
152274 -       u32 now = (u32)jiffies;
152275 +       u32 bucket, old, now = (u32)jiffies;
152276 +       atomic_t *p_id;
152277 +       u32 *p_tstamp;
152278         u32 delta = 0;
152280 +       bucket = hash & ip_idents_mask;
152281 +       p_tstamp = ip_tstamps + bucket;
152282 +       p_id = ip_idents + bucket;
152283 +       old = READ_ONCE(*p_tstamp);
152285         if (old != now && cmpxchg(p_tstamp, old, now) == old)
152286                 delta = prandom_u32_max(now - old);
152288 @@ -3553,18 +3560,25 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
152290  int __init ip_rt_init(void)
152292 +       void *idents_hash;
152293         int cpu;
152295 -       ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
152296 -                                 GFP_KERNEL);
152297 -       if (!ip_idents)
152298 -               panic("IP: failed to allocate ip_idents\n");
152299 +       /* For modern hosts, this will use 2 MB of memory */
152300 +       idents_hash = alloc_large_system_hash("IP idents",
152301 +                                             sizeof(*ip_idents) + sizeof(*ip_tstamps),
152302 +                                             0,
152303 +                                             16, /* one bucket per 64 KB */
152304 +                                             HASH_ZERO,
152305 +                                             NULL,
152306 +                                             &ip_idents_mask,
152307 +                                             2048,
152308 +                                             256*1024);
152310 +       ip_idents = idents_hash;
152312 -       prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
152313 +       prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
152315 -       ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
152316 -       if (!ip_tstamps)
152317 -               panic("IP: failed to allocate ip_tstamps\n");
152318 +       ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
152320         for_each_possible_cpu(cpu) {
152321                 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
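
ip_idents_reserve() now derives its bucket with a power-of-two mask rather than a fixed modulo: alloc_large_system_hash() rounds the table size to a power of two between the given limits (2048..262144 here) and returns size - 1 through the mask pointer. The bucket math, assuming the smallest table the patch requests:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* alloc_large_system_hash() picks a power-of-two size between the
         * low/high limits and hands back size - 1 as the mask. */
        uint32_t table_size = 2048;            /* assumed lower bound */
        uint32_t mask = table_size - 1;        /* 0x7ff */

        uint32_t hash = 0xdeadbeef;
        uint32_t bucket = hash & mask;         /* replaces hash % IP_IDENTS_SZ */

        printf("bucket=%u of %u\n", bucket, table_size);
        return 0;
}
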
152322 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
152323 index de7cc8445ac0..521f310f2ac1 100644
152324 --- a/net/ipv4/tcp.c
152325 +++ b/net/ipv4/tcp.c
152326 @@ -3033,6 +3033,7 @@ int tcp_disconnect(struct sock *sk, int flags)
152327         tp->rx_opt.dsack = 0;
152328         tp->rx_opt.num_sacks = 0;
152329         tp->rcv_ooopack = 0;
152330 +       tp->fast_ack_mode = 0;
152333         /* Clean up fastopen related fields */
152334 diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
152335 index 6ea3dc2e4219..8ef512fefe25 100644
152336 --- a/net/ipv4/tcp_bbr.c
152337 +++ b/net/ipv4/tcp_bbr.c
152338 @@ -292,26 +292,40 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
152339                 sk->sk_pacing_rate = rate;
152342 -/* override sysctl_tcp_min_tso_segs */
152343  static u32 bbr_min_tso_segs(struct sock *sk)
152345         return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
152348 +/* Return the number of segments BBR would like in a TSO/GSO skb, given
152349 + * a particular max gso size as a constraint.
152350 + */
152351 +static u32 bbr_tso_segs_generic(struct sock *sk, unsigned int mss_now,
152352 +                               u32 gso_max_size)
152354 +       u32 segs;
152355 +       u64 bytes;
152357 +       /* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
152358 +       bytes = sk->sk_pacing_rate >> sk->sk_pacing_shift;
152360 +       bytes = min_t(u32, bytes, gso_max_size - 1 - MAX_TCP_HEADER);
152361 +       segs = max_t(u32, bytes / mss_now, bbr_min_tso_segs(sk));
152362 +       return segs;
152365 +/* Custom tcp_tso_autosize() for BBR, used at transmit time to cap skb size. */
152366 +static u32  bbr_tso_segs(struct sock *sk, unsigned int mss_now)
152368 +       return bbr_tso_segs_generic(sk, mss_now, sk->sk_gso_max_size);
152371 +/* Like bbr_tso_segs(), using mss_cache, ignoring driver's sk_gso_max_size. */
152372  static u32 bbr_tso_segs_goal(struct sock *sk)
152374         struct tcp_sock *tp = tcp_sk(sk);
152375 -       u32 segs, bytes;
152377 -       /* Sort of tcp_tso_autosize() but ignoring
152378 -        * driver provided sk_gso_max_size.
152379 -        */
152380 -       bytes = min_t(unsigned long,
152381 -                     sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
152382 -                     GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
152383 -       segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
152385 -       return min(segs, 0x7FU);
152386 +       return  bbr_tso_segs_generic(sk, tp->mss_cache, GSO_MAX_SIZE);
152389  /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
152390 @@ -1147,7 +1161,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
152391         .undo_cwnd      = bbr_undo_cwnd,
152392         .cwnd_event     = bbr_cwnd_event,
152393         .ssthresh       = bbr_ssthresh,
152394 -       .min_tso_segs   = bbr_min_tso_segs,
152395 +       .tso_segs       = bbr_tso_segs,
152396         .get_info       = bbr_get_info,
152397         .set_state      = bbr_set_state,
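
bbr_tso_segs_generic() above budgets each TSO/GSO burst as roughly one pacing interval's worth of bytes, capped by the GSO limit and floored at bbr_min_tso_segs(). A worked userspace example, assuming a 100 Mbit/s pacing rate, sk_pacing_shift == 10, a 1448-byte MSS, and placeholder values for MAX_TCP_HEADER and the GSO cap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t pacing_rate = 100000000 / 8;  /* 100 Mbit/s in bytes/sec */
        unsigned int pacing_shift = 10;        /* assumed sk_pacing_shift */
        unsigned int mss = 1448;
        unsigned int gso_max = 65536;          /* assumed sk_gso_max_size */
        unsigned int max_tcp_header = 320;     /* stand-in for MAX_TCP_HEADER */
        unsigned int min_segs = 2;             /* bbr_min_tso_segs() result */

        /* Budget ~1/2^shift of a second's worth of bytes per burst. */
        uint64_t bytes = pacing_rate >> pacing_shift;
        if (bytes > gso_max - 1 - max_tcp_header)
                bytes = gso_max - 1 - max_tcp_header;

        unsigned int segs = bytes / mss;
        if (segs < min_segs)
                segs = min_segs;

        printf("burst = %u segs (%llu bytes)\n",
               segs, (unsigned long long)bytes);   /* 8 segs (12207 bytes) */
        return 0;
}
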
152399 diff --git a/net/ipv4/tcp_bbr2.c b/net/ipv4/tcp_bbr2.c
152400 new file mode 100644
152401 index 000000000000..5510adc92bbb
152402 --- /dev/null
152403 +++ b/net/ipv4/tcp_bbr2.c
152404 @@ -0,0 +1,2671 @@
152405 +/* BBR (Bottleneck Bandwidth and RTT) congestion control, v2
152407 + * BBRv2 is a model-based congestion control algorithm that aims for low
152408 + * queues, low loss, and (bounded) Reno/CUBIC coexistence. To maintain a model
152409 + * of the network path, it uses measurements of bandwidth and RTT, as well as
152410 + * (if they occur) packet loss and/or DCTCP/L4S-style ECN signals.  Note that
152411 + * although it can use ECN or loss signals explicitly, it does not require
152412 + * either; it can bound its in-flight data based on its estimate of the BDP.
152414 + * The model has both higher and lower bounds for the operating range:
152415 + *   lo: bw_lo, inflight_lo: conservative short-term lower bound
152416 + *   hi: bw_hi, inflight_hi: robust long-term upper bound
152417 + * The bandwidth-probing time scale is (a) extended dynamically based on
152418 + * estimated BDP to improve coexistence with Reno/CUBIC; (b) bounded by
152419 + * an interactive wall-clock time-scale to be more scalable and responsive
152420 + * than Reno and CUBIC.
152422 + * Here is a state transition diagram for BBR:
152424 + *             |
152425 + *             V
152426 + *    +---> STARTUP  ----+
152427 + *    |        |         |
152428 + *    |        V         |
152429 + *    |      DRAIN   ----+
152430 + *    |        |         |
152431 + *    |        V         |
152432 + *    +---> PROBE_BW ----+
152433 + *    |      ^    |      |
152434 + *    |      |    |      |
152435 + *    |      +----+      |
152436 + *    |                  |
152437 + *    +---- PROBE_RTT <--+
152439 + * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
152440 + * When it estimates the pipe is full, it enters DRAIN to drain the queue.
152441 + * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
152442 + * A long-lived BBR flow spends the vast majority of its time remaining
152443 + * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
152444 + * in a fair manner, with a small, bounded queue. *If* a flow has been
152445 + * continuously sending for the entire min_rtt window, and hasn't seen an RTT
152446 + * sample that matches or decreases its min_rtt estimate for 10 seconds, then
152447 + * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
152448 + * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
152449 + * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
152450 + * otherwise we enter STARTUP to try to fill the pipe.
152452 + * BBR is described in detail in:
152453 + *   "BBR: Congestion-Based Congestion Control",
152454 + *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
152455 + *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
152457 + * There is a public e-mail list for discussing BBR development and testing:
152458 + *   https://groups.google.com/forum/#!forum/bbr-dev
152460 + * NOTE: BBR may be used with the fq qdisc ("man tc-fq") with pacing enabled;
152461 + * otherwise the TCP stack falls back to internal pacing using one
152462 + * high-resolution timer per TCP socket, which may use more resources.
152463 + */
152464 +#include <linux/module.h>
152465 +#include <net/tcp.h>
152466 +#include <linux/inet_diag.h>
152467 +#include <linux/inet.h>
152468 +#include <linux/random.h>
152470 +#include "tcp_dctcp.h"
152472 +/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
152473 + * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
152474 + * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
152475 + * Since the minimum window is >=4 packets, the lower bound isn't
152476 + * an issue. The upper bound isn't an issue with existing technologies.
152477 + */
152478 +#define BW_SCALE 24
152479 +#define BW_UNIT (1 << BW_SCALE)
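
The stated unit checks out: (1500 bytes / 1 usec) / 2^24 = 1500 * 8 * 10^6 / 2^24 bits/s, which is roughly 715 bps. A one-line verification:

#include <stdio.h>

int main(void)
{
        /* One rate unit = (1500 bytes / 1 usec) / 2^24, expressed in bps. */
        double unit_bps = 1500.0 * 8 * 1e6 / (1 << 24);
        printf("%.0f bps per unit\n", unit_bps);   /* ~715 */
        return 0;
}
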
152481 +#define BBR_SCALE 8    /* scaling factor for fractions in BBR (e.g. gains) */
152482 +#define BBR_UNIT (1 << BBR_SCALE)
152484 +#define FLAG_DEBUG_VERBOSE     0x1     /* Verbose debugging messages */
152485 +#define FLAG_DEBUG_LOOPBACK    0x2     /* Do NOT skip loopback addr */
152487 +#define CYCLE_LEN              8       /* number of phases in a pacing gain cycle */
152489 +/* BBR has the following modes for deciding how fast to send: */
152490 +enum bbr_mode {
152491 +       BBR_STARTUP,    /* ramp up sending rate rapidly to fill pipe */
152492 +       BBR_DRAIN,      /* drain any queue created during startup */
152493 +       BBR_PROBE_BW,   /* discover, share bw: pace around estimated bw */
152494 +       BBR_PROBE_RTT,  /* cut inflight to min to probe min_rtt */
152497 +/* How does the incoming ACK stream relate to our bandwidth probing? */
152498 +enum bbr_ack_phase {
152499 +       BBR_ACKS_INIT,            /* not probing; not getting probe feedback */
152500 +       BBR_ACKS_REFILLING,       /* sending at est. bw to fill pipe */
152501 +       BBR_ACKS_PROBE_STARTING,  /* inflight rising to probe bw */
152502 +       BBR_ACKS_PROBE_FEEDBACK,  /* getting feedback from bw probing */
152503 +       BBR_ACKS_PROBE_STOPPING,  /* stopped probing; still getting feedback */
152506 +/* BBR congestion control block */
152507 +struct bbr {
152508 +       u32     min_rtt_us;             /* min RTT in min_rtt_win_sec window */
152509 +       u32     min_rtt_stamp;          /* timestamp of min_rtt_us */
152510 +       u32     probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
152511 +       u32     probe_rtt_min_us;       /* min RTT in bbr_probe_rtt_win_ms window */
152512 +       u32     probe_rtt_min_stamp;    /* timestamp of probe_rtt_min_us */
152513 +       u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
152514 +       u32     prior_rcv_nxt;  /* tp->rcv_nxt when CE state last changed */
152515 +       u64     cycle_mstamp;        /* time of this cycle phase start */
152516 +       u32     mode:3,              /* current bbr_mode in state machine */
152517 +               prev_ca_state:3,     /* CA state on previous ACK */
152518 +               packet_conservation:1,  /* use packet conservation? */
152519 +               round_start:1,       /* start of packet-timed tx->ack round? */
152520 +               ce_state:1,          /* If most recent data has CE bit set */
152521 +               bw_probe_up_rounds:5,   /* cwnd-limited rounds in PROBE_UP */
152522 +               try_fast_path:1,        /* can we take fast path? */
152523 +               unused2:11,
152524 +               idle_restart:1,      /* restarting after idle? */
152525 +               probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
152526 +               cycle_idx:3,    /* current index in pacing_gain cycle array */
152527 +               has_seen_rtt:1;      /* have we seen an RTT sample yet? */
152528 +       u32     pacing_gain:11, /* current gain for setting pacing rate */
152529 +               cwnd_gain:11,   /* current gain for setting cwnd */
152530 +               full_bw_reached:1,   /* reached full bw in Startup? */
152531 +               full_bw_cnt:2,  /* number of rounds without large bw gains */
152532 +               init_cwnd:7;    /* initial cwnd */
152533 +       u32     prior_cwnd;     /* prior cwnd upon entering loss recovery */
152534 +       u32     full_bw;        /* recent bw, to estimate if pipe is full */
152536 +       /* For tracking ACK aggregation: */
152537 +       u64     ack_epoch_mstamp;       /* start of ACK sampling epoch */
152538 +       u16     extra_acked[2];         /* max excess data ACKed in epoch */
152539 +       u32     ack_epoch_acked:20,     /* packets (S)ACKed in sampling epoch */
152540 +               extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
152541 +               extra_acked_win_idx:1,  /* current index in extra_acked array */
152542 +       /* BBR v2 state: */
152543 +               unused1:2,
152544 +               startup_ecn_rounds:2,   /* consecutive hi ECN STARTUP rounds */
152545 +               loss_in_cycle:1,        /* packet loss in this cycle? */
152546 +               ecn_in_cycle:1;         /* ECN in this cycle? */
152547 +       u32     loss_round_delivered; /* scb->tx.delivered ending loss round */
152548 +       u32     undo_bw_lo;          /* bw_lo before latest losses */
152549 +       u32     undo_inflight_lo;    /* inflight_lo before latest losses */
152550 +       u32     undo_inflight_hi;    /* inflight_hi before latest losses */
152551 +       u32     bw_latest;       /* max delivered bw in last round trip */
152552 +       u32     bw_lo;           /* lower bound on sending bandwidth */
152553 +       u32     bw_hi[2];        /* upper bound of sending bandwidth range */
152554 +       u32     inflight_latest; /* max delivered data in last round trip */
152555 +       u32     inflight_lo;     /* lower bound of inflight data range */
152556 +       u32     inflight_hi;     /* upper bound of inflight data range */
152557 +       u32     bw_probe_up_cnt; /* packets delivered per inflight_hi incr */
152558 +       u32     bw_probe_up_acks;  /* packets (S)ACKed since inflight_hi incr */
152559 +       u32     probe_wait_us;   /* PROBE_DOWN until next clock-driven probe */
152560 +       u32     ecn_eligible:1, /* sender can use ECN (RTT, handshake)? */
152561 +               ecn_alpha:9,    /* EWMA delivered_ce/delivered; 0..256 */
152562 +               bw_probe_samples:1,    /* rate samples reflect bw probing? */
152563 +               prev_probe_too_high:1, /* did last PROBE_UP go too high? */
152564 +               stopped_risky_probe:1, /* last PROBE_UP stopped due to risk? */
152565 +               rounds_since_probe:8,  /* packet-timed rounds since probed bw */
152566 +               loss_round_start:1,    /* loss_round_delivered round trip? */
152567 +               loss_in_round:1,       /* loss marked in this round trip? */
152568 +               ecn_in_round:1,        /* ECN marked in this round trip? */
152569 +               ack_phase:3,           /* bbr_ack_phase: meaning of ACKs */
152570 +               loss_events_in_round:4,/* losses in STARTUP round */
152571 +               initialized:1;         /* has bbr_init() been called? */
152572 +       u32     alpha_last_delivered;    /* tp->delivered    at alpha update */
152573 +       u32     alpha_last_delivered_ce; /* tp->delivered_ce at alpha update */
152575 +       /* Params configurable using setsockopt. Refer to the corresponding
152576 +        * module param for a detailed description of each param.
152577 +        */
152578 +       struct bbr_params {
152579 +               u32     high_gain:11,           /* max allowed value: 2047 */
152580 +                       drain_gain:10,          /* max allowed value: 1023 */
152581 +                       cwnd_gain:11;           /* max allowed value: 2047 */
152582 +               u32     cwnd_min_target:4,      /* max allowed value: 15 */
152583 +                       min_rtt_win_sec:5,      /* max allowed value: 31 */
152584 +                       probe_rtt_mode_ms:9,    /* max allowed value: 511 */
152585 +                       full_bw_cnt:3,          /* max allowed value: 7 */
152586 +                       cwnd_tso_budget:1,      /* allowed values: {0, 1} */
152587 +                       unused3:6,
152588 +                       drain_to_target:1,      /* boolean */
152589 +                       precise_ece_ack:1,      /* boolean */
152590 +                       extra_acked_in_startup:1, /* allowed values: {0, 1} */
152591 +                       fast_path:1;            /* boolean */
152592 +               u32     full_bw_thresh:10,      /* max allowed value: 1023 */
152593 +                       startup_cwnd_gain:11,   /* max allowed value: 2047 */
152594 +                       bw_probe_pif_gain:9,    /* max allowed value: 511 */
152595 +                       usage_based_cwnd:1,     /* boolean */
152596 +                       unused2:1;
152597 +               u16     probe_rtt_win_ms:14,    /* max allowed value: 16383 */
152598 +                       refill_add_inc:2;       /* max allowed value: 3 */
152599 +               u16     extra_acked_gain:11,    /* max allowed value: 2047 */
152600 +                       extra_acked_win_rtts:5; /* max allowed value: 31 */
152601 +               u16     pacing_gain[CYCLE_LEN]; /* max allowed value: 1023 */
152602 +               /* Mostly BBR v2 parameters below here: */
152603 +               u32     ecn_alpha_gain:8,       /* max allowed value: 255 */
152604 +                       ecn_factor:8,           /* max allowed value: 255 */
152605 +                       ecn_thresh:8,           /* max allowed value: 255 */
152606 +                       beta:8;                 /* max allowed value: 255 */
152607 +               u32     ecn_max_rtt_us:19,      /* max allowed value: 524287 */
152608 +                       bw_probe_reno_gain:9,   /* max allowed value: 511 */
152609 +                       full_loss_cnt:4;        /* max allowed value: 15 */
152610 +               u32     probe_rtt_cwnd_gain:8,  /* max allowed value: 255 */
152611 +                       inflight_headroom:8,    /* max allowed value: 255 */
152612 +                       loss_thresh:8,          /* max allowed value: 255 */
152613 +                       bw_probe_max_rounds:8;  /* max allowed value: 255 */
152614 +               u32     bw_probe_rand_rounds:4, /* max allowed value: 15 */
152615 +                       bw_probe_base_us:26,    /* usecs: 0..2^26-1 (67 secs) */
152616 +                       full_ecn_cnt:2;         /* max allowed value: 3 */
152617 +               u32     bw_probe_rand_us:26,    /* usecs: 0..2^26-1 (67 secs) */
152618 +                       undo:1,                 /* boolean */
152619 +                       tso_rtt_shift:4,        /* max allowed value: 15 */
152620 +                       unused5:1;
152621 +               u32     ecn_reprobe_gain:9,     /* max allowed value: 511 */
152622 +                       unused1:14,
152623 +                       ecn_alpha_init:9;       /* max allowed value: 256 */
152624 +       } params;
152626 +       struct {
152627 +               u32     snd_isn; /* Initial sequence number */
152628 +               u32     rs_bw;   /* last valid rate sample bw */
152629 +               u32     target_cwnd; /* target cwnd, based on BDP */
152630 +               u8      undo:1,  /* Undo even happened but not yet logged */
152631 +                       unused:7;
152632 +               char    event;   /* single-letter event debug codes */
152633 +               u16     unused2;
152634 +       } debug;
152637 +struct bbr_context {
152638 +       u32 sample_bw;
152639 +       u32 target_cwnd;
152640 +       u32 log:1;
152643 +/* Window length of min_rtt filter (in sec). Max allowed value is 31 (0x1F) */
152644 +static u32 bbr_min_rtt_win_sec = 10;
152645 +/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode.
152646 + * Max allowed value is 511 (0x1FF).
152647 + */
152648 +static u32 bbr_probe_rtt_mode_ms = 200;
152649 +/* Window length of probe_rtt_min_us filter (in ms), and consequently the
152650 + * typical interval between PROBE_RTT mode entries.
152651 + * Note that bbr_probe_rtt_win_ms must be <= bbr_min_rtt_win_sec * MSEC_PER_SEC
152652 + */
152653 +static u32 bbr_probe_rtt_win_ms = 5000;
152654 +/* Skip TSO below the following bandwidth (bits/sec): */
152655 +static int bbr_min_tso_rate = 1200000;
152657 +/* Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
152658 + * in bigger TSO bursts. By default we cut the RTT-based allowance in half
152659 + * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
152660 + * is below 1500 bytes after 6 * ~500 usec = 3ms.
152661 + */
152662 +static u32 bbr_tso_rtt_shift = 9;  /* halve allowance per 2^9 usecs, 512us */
152664 +/* Select cwnd TSO budget approach:
152665 + *  0: padding
152666 + *  1: flooring
152667 + */
152668 +static uint bbr_cwnd_tso_budget = 1;
152670 +/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
152671 + * In order to help drive the network toward lower queues and low latency while
152672 + * maintaining high utilization, the average pacing rate aims to be slightly
152673 + * lower than the estimated bandwidth. This is an important aspect of the
152674 + * design.
152675 + */
152676 +static const int bbr_pacing_margin_percent = 1;
152678 +/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
152679 + * that will allow a smoothly increasing pacing rate that will double each RTT
152680 + * and send the same number of packets per RTT that an un-paced, slow-starting
152681 + * Reno or CUBIC flow would. Max allowed value is 2047 (0x7FF).
152682 + */
152683 +static int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
152684 +/* The gain for deriving startup cwnd. Max allowed value is 2047 (0x7FF). */
152685 +static int bbr_startup_cwnd_gain  = BBR_UNIT * 2885 / 1000 + 1;
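
With BBR_UNIT = 256, the expression above evaluates to 256 * 2885 / 1000 + 1 = 739, just above the exact 2/ln(2) * 256 ≈ 738.7, so the integer math never undershoots the target gain. A quick check (link with -lm):

#include <stdio.h>
#include <math.h>

int main(void)
{
        int unit = 256;                         /* BBR_UNIT = 1 << 8 */
        int gain = unit * 2885 / 1000 + 1;      /* integer math from the patch */

        printf("gain=%d exact=%.2f\n", gain, unit * 2.0 / log(2.0));
        return 0;
}
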
152686 +/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
152687 + * the queue created in BBR_STARTUP in a single round. Max allowed value
152688 + * is 1023 (0x3FF).
152689 + */
152690 +static int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
152691 +/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs.
152692 + * Max allowed value is 2047 (0x7FF).
152693 + */
152694 +static int bbr_cwnd_gain  = BBR_UNIT * 2;
152695 +/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw.
152696 + * Max allowed value for each element is 1023 (0x3FF).
152697 + */
152698 +enum bbr_pacing_gain_phase {
152699 +       BBR_BW_PROBE_UP         = 0,  /* push up inflight to probe for bw/vol */
152700 +       BBR_BW_PROBE_DOWN       = 1,  /* drain excess inflight from the queue */
152701 +       BBR_BW_PROBE_CRUISE     = 2,  /* use pipe, w/ headroom in queue/pipe */
152702 +       BBR_BW_PROBE_REFILL     = 3,  /* v2: refill the pipe again to 100% */
152704 +static int bbr_pacing_gain[] = {
152705 +       BBR_UNIT * 5 / 4,       /* probe for more available bw */
152706 +       BBR_UNIT * 3 / 4,       /* drain queue and/or yield bw to other flows */
152707 +       BBR_UNIT, BBR_UNIT, BBR_UNIT,   /* cruise at 1.0*bw to utilize pipe, */
152708 +       BBR_UNIT, BBR_UNIT, BBR_UNIT    /* without creating excess queue... */
152711 +/* Try to keep at least this many packets in flight, if things go smoothly. For
152712 + * smooth functioning, a sliding window protocol ACKing every other packet
152713 + * needs at least 4 packets in flight. Max allowed value is 15 (0xF).
152714 + */
152715 +static u32 bbr_cwnd_min_target = 4;
152717 +/* Cwnd to BDP proportion in PROBE_RTT mode scaled by BBR_UNIT. Default: 50%.
152718 + * Use 0 to disable. Max allowed value is 255.
152719 + */
152720 +static u32 bbr_probe_rtt_cwnd_gain = BBR_UNIT * 1 / 2;
152722 +/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
152723 +/* If bw has increased significantly (1.25x), there may be more bw available.
152724 + * Max allowed value is 1023 (0x3FF).
152725 + */
152726 +static u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
152727 +/* But after 3 rounds w/o significant bw growth, estimate pipe is full.
152728 + * Max allowed value is 7 (0x7).
152729 + */
152730 +static u32 bbr_full_bw_cnt = 3;
152732 +static u32 bbr_flags;          /* Debug flags, e.g. FLAG_DEBUG_VERBOSE/FLAG_DEBUG_LOOPBACK */
152734 +/* Whether to debug using printk.
152735 + */
152736 +static bool bbr_debug_with_printk;
152738 +/* Whether to debug using ftrace event tcp:tcp_bbr_event.
152739 + * Ignored when bbr_debug_with_printk is set.
152740 + */
152741 +static bool bbr_debug_ftrace;
152743 +/* Experiment: each cycle, try to hold sub-unity gain until inflight <= BDP. */
152744 +static bool bbr_drain_to_target = true;                /* default: enabled */
152746 +/* Experiment: Flags to control BBR with ECN behavior.
152747 + */
152748 +static bool bbr_precise_ece_ack = true;                /* default: enabled */
152750 +/* The max rwin scaling shift factor is 14 (RFC 1323), so the max sane rwin is
152751 + * (2^(16+14) B)/(1024 B/packet) = 1M packets.
152752 + */
152753 +static u32 bbr_cwnd_warn_val   = 1U << 20;
152755 +static u16 bbr_debug_port_mask;
152757 +/* BBR module parameters. These are module parameters only in Google prod.
152758 + * Upstream these are intentionally not module parameters.
152759 + */
152760 +static int bbr_pacing_gain_size = CYCLE_LEN;
152762 +/* Gain factor for adding extra_acked to target cwnd: */
152763 +static int bbr_extra_acked_gain = 256;
152765 +/* Window length of extra_acked window. Max allowed val is 31. */
152766 +static u32 bbr_extra_acked_win_rtts = 5;
152768 +/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
152769 +static u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
152771 +/* Time period for clamping cwnd increment due to ack aggregation */
152772 +static u32 bbr_extra_acked_max_us = 100 * 1000;
152774 +/* Use extra acked in startup?
152775 + * 0: disabled
152776 + * 1: use latest extra_acked value from 1-2 rtt in startup
152777 + */
152778 +static int bbr_extra_acked_in_startup = 1;             /* default: enabled */
152780 +/* Experiment: don't grow cwnd beyond twice of what we just probed. */
152781 +static bool bbr_usage_based_cwnd;              /* default: disabled */
152783 +/* For lab testing, researchers can enable BBRv2 ECN support with this flag,
152784 + * when they know that any ECN marks that the connections experience will be
152785 + * DCTCP/L4S-style ECN marks, rather than RFC3168 ECN marks.
152786 + * TODO(ncardwell): Production use of the BBRv2 ECN functionality depends on
152787 + * negotiation or configuration that is outside the scope of the BBRv2
152788 + * alpha release.
152789 + */
152790 +static bool bbr_ecn_enable = false;
152792 +module_param_named(min_tso_rate,      bbr_min_tso_rate,      int,    0644);
152793 +module_param_named(tso_rtt_shift,     bbr_tso_rtt_shift,     int,    0644);
152794 +module_param_named(high_gain,         bbr_high_gain,         int,    0644);
152795 +module_param_named(drain_gain,        bbr_drain_gain,        int,    0644);
152796 +module_param_named(startup_cwnd_gain, bbr_startup_cwnd_gain, int,    0644);
152797 +module_param_named(cwnd_gain,         bbr_cwnd_gain,         int,    0644);
152798 +module_param_array_named(pacing_gain, bbr_pacing_gain,       int,
152799 +                        &bbr_pacing_gain_size, 0644);
152800 +module_param_named(cwnd_min_target,   bbr_cwnd_min_target,   uint,   0644);
152801 +module_param_named(probe_rtt_cwnd_gain,
152802 +                  bbr_probe_rtt_cwnd_gain,                  uint,   0664);
152803 +module_param_named(cwnd_warn_val,     bbr_cwnd_warn_val,     uint,   0664);
152804 +module_param_named(debug_port_mask,   bbr_debug_port_mask,   ushort, 0644);
152805 +module_param_named(flags,             bbr_flags,             uint,   0644);
152806 +module_param_named(debug_ftrace,      bbr_debug_ftrace, bool,   0644);
152807 +module_param_named(debug_with_printk, bbr_debug_with_printk, bool,   0644);
152808 +module_param_named(min_rtt_win_sec,   bbr_min_rtt_win_sec,   uint,   0644);
152809 +module_param_named(probe_rtt_mode_ms, bbr_probe_rtt_mode_ms, uint,   0644);
152810 +module_param_named(probe_rtt_win_ms,  bbr_probe_rtt_win_ms,  uint,   0644);
152811 +module_param_named(full_bw_thresh,    bbr_full_bw_thresh,    uint,   0644);
152812 +module_param_named(full_bw_cnt,       bbr_full_bw_cnt,       uint,   0644);
152813 +module_param_named(cwnd_tso_budget,   bbr_cwnd_tso_budget,   uint,   0664);
152814 +module_param_named(extra_acked_gain,  bbr_extra_acked_gain,  int,    0664);
152815 +module_param_named(extra_acked_win_rtts,
152816 +                  bbr_extra_acked_win_rtts, uint,   0664);
152817 +module_param_named(extra_acked_max_us,
152818 +                  bbr_extra_acked_max_us, uint,   0664);
152819 +module_param_named(ack_epoch_acked_reset_thresh,
152820 +                  bbr_ack_epoch_acked_reset_thresh, uint,   0664);
152821 +module_param_named(drain_to_target,   bbr_drain_to_target,   bool,   0664);
152822 +module_param_named(precise_ece_ack,   bbr_precise_ece_ack,   bool,   0664);
152823 +module_param_named(extra_acked_in_startup,
152824 +                  bbr_extra_acked_in_startup, int, 0664);
152825 +module_param_named(usage_based_cwnd, bbr_usage_based_cwnd, bool,   0664);
152826 +module_param_named(ecn_enable,       bbr_ecn_enable,         bool,   0664);
152828 +static void bbr2_exit_probe_rtt(struct sock *sk);
152829 +static void bbr2_reset_congestion_signals(struct sock *sk);
152831 +static void bbr_check_probe_rtt_done(struct sock *sk);
152833 +/* Do we estimate that STARTUP filled the pipe? */
152834 +static bool bbr_full_bw_reached(const struct sock *sk)
152836 +       const struct bbr *bbr = inet_csk_ca(sk);
152838 +       return bbr->full_bw_reached;
152841 +/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
152842 +static u32 bbr_max_bw(const struct sock *sk)
152844 +       struct bbr *bbr = inet_csk_ca(sk);
152846 +       return max(bbr->bw_hi[0], bbr->bw_hi[1]);
152849 +/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
152850 +static u32 bbr_bw(const struct sock *sk)
152852 +       struct bbr *bbr = inet_csk_ca(sk);
152854 +       return min(bbr_max_bw(sk), bbr->bw_lo);
152857 +/* Return maximum extra acked in past k-2k round trips,
152858 + * where k = bbr_extra_acked_win_rtts.
152859 + */
152860 +static u16 bbr_extra_acked(const struct sock *sk)
152862 +       struct bbr *bbr = inet_csk_ca(sk);
152864 +       return max(bbr->extra_acked[0], bbr->extra_acked[1]);
152867 +/* Return rate in bytes per second, optionally with a gain.
152868 + * The order here is chosen carefully to avoid overflow of u64. This should
152869 + * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
152870 + */
152871 +static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain,
152872 +                                 int margin)
152874 +       unsigned int mss = tcp_sk(sk)->mss_cache;
152876 +       rate *= mss;
152877 +       rate *= gain;
152878 +       rate >>= BBR_SCALE;
152879 +       rate *= USEC_PER_SEC / 100 * (100 - margin);
152880 +       rate >>= BW_SCALE;
152881 +       rate = max(rate, 1ULL);
152882 +       return rate;
152885 +static u64 bbr_bw_bytes_per_sec(struct sock *sk, u64 rate)
152887 +       return bbr_rate_bytes_per_sec(sk, rate, BBR_UNIT, 0);
152890 +static u64 bbr_rate_kbps(struct sock *sk, u64 rate)
152892 +       rate = bbr_bw_bytes_per_sec(sk, rate);
152893 +       rate *= 8;
152894 +       do_div(rate, 1000);
152895 +       return rate;
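
A user-space sketch of the conversion chain in bbr_rate_bytes_per_sec() above,
assuming the BW_SCALE = 24 / BBR_SCALE = 8 fixed-point scales used here.
Stripping the gain's fixed point early and the bandwidth's fixed point last is
what keeps the u64 intermediates in range up to the stated ~2.9 Tbit/sec:

#include <stdio.h>
#include <stdint.h>

#define BBR_SCALE    8			/* assumed, matching this patch */
#define BW_SCALE     24
#define USEC_PER_SEC 1000000ULL

static uint64_t rate_bytes_per_sec(uint64_t bw, unsigned int mss,
				   int gain, int margin)
{
	uint64_t rate = bw;		/* bw: pkts per usec << BW_SCALE */

	rate *= mss;			/* packets -> bytes */
	rate *= gain;
	rate >>= BBR_SCALE;		/* strip gain fixed point early */
	rate *= USEC_PER_SEC / 100 * (100 - margin);
	rate >>= BW_SCALE;		/* strip bw fixed point last */
	return rate ? rate : 1;
}

int main(void)
{
	uint64_t bw = 1ULL << BW_SCALE;	/* 1 packet per usec */

	/* 1448-byte segments at 1 pkt/usec, unity gain, 1% margin: */
	printf("%llu bytes/sec\n",	/* ~1.43e9, i.e. ~11.5 Gbit/sec */
	       (unsigned long long)rate_bytes_per_sec(bw, 1448, 256, 1));
	return 0;
}
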
152898 +static u32 bbr_tso_segs_goal(struct sock *sk);
152899 +static void bbr_debug(struct sock *sk, u32 acked,
152900 +                     const struct rate_sample *rs, struct bbr_context *ctx)
152902 +       static const char ca_states[] = {
152903 +               [TCP_CA_Open]           = 'O',
152904 +               [TCP_CA_Disorder]       = 'D',
152905 +               [TCP_CA_CWR]            = 'C',
152906 +               [TCP_CA_Recovery]       = 'R',
152907 +               [TCP_CA_Loss]           = 'L',
152908 +       };
152909 +       static const char mode[] = {
152910 +               'G',  /* Growing   - BBR_STARTUP */
152911 +               'D',  /* Drain     - BBR_DRAIN */
152912 +               'W',  /* Window    - BBR_PROBE_BW */
152913 +               'M',  /* Min RTT   - BBR_PROBE_RTT */
152914 +       };
152915 +       static const char ack_phase[] = { /* bbr_ack_phase strings */
152916 +               'I',    /* BBR_ACKS_INIT           - 'Init' */
152917 +               'R',    /* BBR_ACKS_REFILLING      - 'Refilling' */
152918 +               'B',    /* BBR_ACKS_PROBE_STARTING - 'Before' */
152919 +               'F',    /* BBR_ACKS_PROBE_FEEDBACK - 'Feedback' */
152920 +               'A',    /* BBR_ACKS_PROBE_STOPPING - 'After' */
152921 +       };
152922 +       struct tcp_sock *tp = tcp_sk(sk);
152923 +       struct bbr *bbr = inet_csk_ca(sk);
152924 +       const u32 una = tp->snd_una - bbr->debug.snd_isn;
152925 +       const u32 fack = tcp_highest_sack_seq(tp);
152926 +       const u16 dport = ntohs(inet_sk(sk)->inet_dport);
152927 +       bool is_port_match = (bbr_debug_port_mask &&
152928 +                             ((dport & bbr_debug_port_mask) == 0));
152929 +       char debugmsg[320];
152931 +       if (sk->sk_state == TCP_SYN_SENT)
152932 +               return;  /* no bbr_init() yet if SYN retransmit -> CA_Loss */
152934 +       if (!tp->snd_cwnd || tp->snd_cwnd > bbr_cwnd_warn_val) {
152935 +               char addr[INET6_ADDRSTRLEN + 10] = { 0 };
152937 +               if (sk->sk_family == AF_INET)
152938 +                       snprintf(addr, sizeof(addr), "%pI4:%u",
152939 +                                &inet_sk(sk)->inet_daddr, dport);
152940 +               else if (sk->sk_family == AF_INET6)
152941 +                       snprintf(addr, sizeof(addr), "%pI6:%u",
152942 +                                &sk->sk_v6_daddr, dport);
152944 +               WARN_ONCE(1,
152945 +                       "BBR %s cwnd alert: %u "
152946 +                       "snd_una: %u ca: %d pacing_gain: %u cwnd_gain: %u "
152947 +                       "bw: %u rtt: %u min_rtt: %u "
152948 +                       "acked: %u tso_segs: %u "
152949 +                       "bw: %d %ld %d pif: %u\n",
152950 +                       addr, tp->snd_cwnd,
152951 +                       una, inet_csk(sk)->icsk_ca_state,
152952 +                       bbr->pacing_gain, bbr->cwnd_gain,
152953 +                       bbr_max_bw(sk), (tp->srtt_us >> 3), bbr->min_rtt_us,
152954 +                       acked, bbr_tso_segs_goal(sk),
152955 +                       rs->delivered, rs->interval_us, rs->is_retrans,
152956 +                       tcp_packets_in_flight(tp));
152957 +       }
152959 +       if (likely(!bbr_debug_with_printk && !bbr_debug_ftrace))
152960 +               return;
152962 +       if (!sock_flag(sk, SOCK_DBG) && !is_port_match)
152963 +               return;
152965 +       if (!ctx->log && !tp->app_limited && !(bbr_flags & FLAG_DEBUG_VERBOSE))
152966 +               return;
152968 +       if (ipv4_is_loopback(inet_sk(sk)->inet_daddr) &&
152969 +           !(bbr_flags & FLAG_DEBUG_LOOPBACK))
152970 +               return;
152972 +       snprintf(debugmsg, sizeof(debugmsg) - 1,
152973 +                "BBR %pI4:%-5u %5u,%03u:%-7u %c "
152974 +                "%c %2u br %2u cr %2d rtt %5ld d %2d i %5ld mrtt %d %cbw %llu "
152975 +                "bw %llu lb %llu ib %llu qb %llu "
152976 +                "a %u if %2u %c %c dl %u l %u al %u # %u t %u %c %c "
152977 +                "lr %d er %d ea %d bwl %lld il %d ih %d c %d "
152978 +                "v %d %c %u %c %s\n",
152979 +                &inet_sk(sk)->inet_daddr, dport,
152980 +                una / 1000, una % 1000, fack - tp->snd_una,
152981 +                ca_states[inet_csk(sk)->icsk_ca_state],
152982 +                bbr->debug.undo ? '@' : mode[bbr->mode],
152983 +                tp->snd_cwnd,
152984 +                bbr_extra_acked(sk),   /* br (legacy): extra_acked */
152985 +                rs->tx_in_flight,      /* cr (legacy): tx_inflight */
152986 +                rs->rtt_us,
152987 +                rs->delivered,
152988 +                rs->interval_us,
152989 +                bbr->min_rtt_us,
152990 +                rs->is_app_limited ? '_' : 'l',
152991 +                bbr_rate_kbps(sk, ctx->sample_bw), /* lbw: latest sample bw */
152992 +                bbr_rate_kbps(sk, bbr_max_bw(sk)), /* bw: max bw */
152993 +                0ULL,                              /* lb: [obsolete] */
152994 +                0ULL,                              /* ib: [obsolete] */
152995 +                (u64)sk->sk_pacing_rate * 8 / 1000,
152996 +                acked,
152997 +                tcp_packets_in_flight(tp),
152998 +                rs->is_ack_delayed ? 'd' : '.',
152999 +                bbr->round_start ? '*' : '.',
153000 +                tp->delivered, tp->lost,
153001 +                tp->app_limited,
153002 +                0,                                 /* #: [obsolete] */
153003 +                ctx->target_cwnd,
153004 +                tp->reord_seen ? 'r' : '.',  /* r: reordering seen? */
153005 +                ca_states[bbr->prev_ca_state],
153006 +                (rs->lost + rs->delivered) > 0 ?
153007 +                (1000 * rs->lost /
153008 +                 (rs->lost + rs->delivered)) : 0,    /* lr: loss rate x1000 */
153009 +                (rs->delivered) > 0 ?
153010 +                (1000 * rs->delivered_ce /
153011 +                 (rs->delivered)) : 0,               /* er: ECN rate x1000 */
153012 +                1000 * bbr->ecn_alpha >> BBR_SCALE,  /* ea: ECN alpha x1000 */
153013 +                bbr->bw_lo == ~0U ?
153014 +                  -1 : (s64)bbr_rate_kbps(sk, bbr->bw_lo), /* bwl */
153015 +                bbr->inflight_lo,      /* il */
153016 +                bbr->inflight_hi,      /* ih */
153017 +                bbr->bw_probe_up_cnt,  /* c */
153018 +                2,                     /* v: version */
153019 +                bbr->debug.event,
153020 +                bbr->cycle_idx,
153021 +                ack_phase[bbr->ack_phase],
153022 +                bbr->bw_probe_samples ? "Y" : "N");
153023 +       debugmsg[sizeof(debugmsg) - 1] = 0;
153025 +       /* printk takes a higher precedence. */
153026 +       if (bbr_debug_with_printk)
153027 +               printk(KERN_DEBUG "%s", debugmsg);
153029 +       if (unlikely(bbr->debug.undo))
153030 +               bbr->debug.undo = 0;
153033 +/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
153034 +static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
153036 +       u64 rate = bw;
153038 +       rate = bbr_rate_bytes_per_sec(sk, rate, gain,
153039 +                                     bbr_pacing_margin_percent);
153040 +       rate = min_t(u64, rate, sk->sk_max_pacing_rate);
153041 +       return rate;
153044 +/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
153045 +static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
153047 +       struct tcp_sock *tp = tcp_sk(sk);
153048 +       struct bbr *bbr = inet_csk_ca(sk);
153049 +       u64 bw;
153050 +       u32 rtt_us;
153052 +       if (tp->srtt_us) {              /* any RTT sample yet? */
153053 +               rtt_us = max(tp->srtt_us >> 3, 1U);
153054 +               bbr->has_seen_rtt = 1;
153055 +       } else {                         /* no RTT sample yet */
153056 +               rtt_us = USEC_PER_MSEC;  /* use nominal default RTT */
153057 +       }
153058 +       bw = (u64)tp->snd_cwnd * BW_UNIT;
153059 +       do_div(bw, rtt_us);
153060 +       sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr->params.high_gain);
153063 +/* Pace using current bw estimate and a gain factor. */
153064 +static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
153066 +       struct tcp_sock *tp = tcp_sk(sk);
153067 +       struct bbr *bbr = inet_csk_ca(sk);
153068 +       unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);
153070 +       if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
153071 +               bbr_init_pacing_rate_from_rtt(sk);
153072 +       if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
153073 +               sk->sk_pacing_rate = rate;
153076 +static u32 bbr_min_tso_segs(struct sock *sk)
153078 +       return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
153081 +/* Return the number of segments BBR would like in a TSO/GSO skb, given
153082 + * a particular max gso size as a constraint.
153083 + */
153084 +static u32 bbr_tso_segs_generic(struct sock *sk, unsigned int mss_now,
153085 +                               u32 gso_max_size)
153087 +       struct bbr *bbr = inet_csk_ca(sk);
153088 +       u32 segs, r;
153089 +       u64 bytes;
153091 +       /* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
153092 +       bytes = sk->sk_pacing_rate >> sk->sk_pacing_shift;
153094 +       /* Budget a TSO/GSO burst size allowance based on min_rtt. For every
153095 +        * K = 2^tso_rtt_shift microseconds of min_rtt, halve the burst.
153096 +        * The min_rtt-based burst allowance is: 64 KBytes / 2^(min_rtt/K)
153097 +        */
153098 +       if (bbr->params.tso_rtt_shift) {
153099 +               r = bbr->min_rtt_us >> bbr->params.tso_rtt_shift;
153100 +               if (r < BITS_PER_TYPE(u32))   /* prevent undefined behavior */
153101 +                       bytes += GSO_MAX_SIZE >> r;
153102 +       }
153104 +       bytes = min_t(u32, bytes, gso_max_size - 1 - MAX_TCP_HEADER);
153105 +       segs = max_t(u32, bytes / mss_now, bbr_min_tso_segs(sk));
153106 +       return segs;
153109 +/* Custom tcp_tso_autosize() for BBR, used at transmit time to cap skb size. */
153110 +static u32 bbr_tso_segs(struct sock *sk, unsigned int mss_now)
153112 +       return bbr_tso_segs_generic(sk, mss_now, sk->sk_gso_max_size);
153115 +/* Like bbr_tso_segs(), using mss_cache, ignoring driver's sk_gso_max_size. */
153116 +static u32 bbr_tso_segs_goal(struct sock *sk)
153118 +       struct tcp_sock *tp = tcp_sk(sk);
153120 +       return bbr_tso_segs_generic(sk, tp->mss_cache, GSO_MAX_SIZE);
153123 +/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
153124 +static void bbr_save_cwnd(struct sock *sk)
153126 +       struct tcp_sock *tp = tcp_sk(sk);
153127 +       struct bbr *bbr = inet_csk_ca(sk);
153129 +       if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
153130 +               bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
153131 +       else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
153132 +               bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
153135 +static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
153137 +       struct tcp_sock *tp = tcp_sk(sk);
153138 +       struct bbr *bbr = inet_csk_ca(sk);
153140 +       if (event == CA_EVENT_TX_START && tp->app_limited) {
153141 +               bbr->idle_restart = 1;
153142 +               bbr->ack_epoch_mstamp = tp->tcp_mstamp;
153143 +               bbr->ack_epoch_acked = 0;
153144 +               /* Avoid pointless buffer overflows: pace at est. bw if we don't
153145 +                * need more speed (we're restarting from idle and app-limited).
153146 +                */
153147 +               if (bbr->mode == BBR_PROBE_BW)
153148 +                       bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
153149 +               else if (bbr->mode == BBR_PROBE_RTT)
153150 +                       bbr_check_probe_rtt_done(sk);
153151 +       } else if ((event == CA_EVENT_ECN_IS_CE ||
153152 +                   event == CA_EVENT_ECN_NO_CE) &&
153153 +                   bbr_ecn_enable &&
153154 +                   bbr->params.precise_ece_ack) {
153155 +               u32 state = bbr->ce_state;
153156 +               dctcp_ece_ack_update(sk, event, &bbr->prior_rcv_nxt, &state);
153157 +               bbr->ce_state = state;
153158 +               if (tp->fast_ack_mode == 2 && event == CA_EVENT_ECN_IS_CE)
153159 +                       tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
153160 +       }
153163 +/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
153165 + * bdp = ceil(bw * min_rtt * gain)
153167 + * The key factor, gain, controls the amount of queue. While a small gain
153168 + * builds a smaller queue, it becomes more vulnerable to noise in RTT
153169 + * measurements (e.g., delayed ACKs or other ACK compression effects). This
153170 + * noise may cause BBR to under-estimate the rate.
153171 + */
153172 +static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
153174 +       struct bbr *bbr = inet_csk_ca(sk);
153175 +       u32 bdp;
153176 +       u64 w;
153178 +       /* If we've never had a valid RTT sample, cap cwnd at the initial
153179 +        * default. This should only happen when the connection is not using TCP
153180 +        * timestamps and has retransmitted all of the SYN/SYNACK/data packets
153181 +        * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
153182 +        * case we need to slow-start up toward something safe: initial cwnd.
153183 +        */
153184 +       if (unlikely(bbr->min_rtt_us == ~0U))    /* no valid RTT samples yet? */
153185 +               return bbr->init_cwnd;  /* be safe: cap at initial cwnd */
153187 +       w = (u64)bw * bbr->min_rtt_us;
153189 +       /* Apply a gain to the given value, remove the BW_SCALE shift, and
153190 +        * round the value up to avoid a negative feedback loop.
153191 +        */
153192 +       bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
153194 +       return bdp;
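
A worked instance of the rounded-up BDP computation above, with illustrative
numbers (BW_SCALE = 24 and BBR_UNIT = 256 assumed as elsewhere):

#include <stdio.h>
#include <stdint.h>

#define BBR_SCALE 8
#define BBR_UNIT  (1 << BBR_SCALE)
#define BW_SCALE  24
#define BW_UNIT   (1ULL << BW_SCALE)

int main(void)
{
	uint64_t bw = BW_UNIT / 10;	/* 0.1 pkts/usec: ~1.2 Gbit/s of 1500B pkts */
	uint32_t min_rtt_us = 20000;	/* 20 ms */
	int gain = BBR_UNIT * 2;	/* steady-state cwnd_gain of 2.0 */
	uint64_t w = bw * min_rtt_us;
	uint32_t bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;

	printf("bdp = %u pkts\n", bdp);	/* ~= 0.1 * 20000 * 2 = 4000 pkts */
	return 0;
}
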
153197 +/* To achieve full performance in high-speed paths, we budget enough cwnd to
153198 + * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
153199 + *   - one skb in sending host Qdisc,
153200 + *   - one skb in sending host TSO/GSO engine
153201 + *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
153202 + * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
153203 + * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
153204 + * which allows 2 outstanding 2-packet sequences, to try to keep pipe
153205 + * full even with ACK-every-other-packet delayed ACKs.
153206 + */
153207 +static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
153209 +       struct bbr *bbr = inet_csk_ca(sk);
153210 +       u32 tso_segs_goal;
153212 +       tso_segs_goal = 3 * bbr_tso_segs_goal(sk);
153214 +       /* Allow enough full-sized skbs in flight to utilize end systems. */
153215 +       if (bbr->params.cwnd_tso_budget == 1) {
153216 +               cwnd = max_t(u32, cwnd, tso_segs_goal);
153217 +               cwnd = max_t(u32, cwnd, bbr->params.cwnd_min_target);
153218 +       } else {
153219 +               cwnd += tso_segs_goal;
153220 +               cwnd = (cwnd + 1) & ~1U;
153221 +       }
153222 +       /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
153223 +       if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == BBR_BW_PROBE_UP)
153224 +               cwnd += 2;
153226 +       return cwnd;
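
For example (illustrative numbers): with a TSO goal of 2 segs, tso_segs_goal
above is 3 * 2 = 6; for cwnd = 5 the flooring approach (cwnd_tso_budget == 1)
yields max(5, 6, cwnd_min_target = 4) = 6 packets, while the padding approach
yields (5 + 6 + 1) & ~1 = 12 packets.
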
153229 +/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
153230 +static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
153232 +       u32 inflight;
153234 +       inflight = bbr_bdp(sk, bw, gain);
153235 +       inflight = bbr_quantization_budget(sk, inflight);
153237 +       return inflight;
153240 +/* With pacing at lower layers, there's often less data "in the network" than
153241 + * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
153242 + * we often have several skbs queued in the pacing layer with a pre-scheduled
153243 + * earliest departure time (EDT). BBR adapts its pacing rate based on the
153244 + * inflight level that it estimates has already been "baked in" by previous
153245 + * departure time decisions. We calculate a rough estimate of the number of our
153246 + * packets that might be in the network at the earliest departure time for the
153247 + * next skb scheduled:
153248 + *   in_network_at_edt = inflight_at_edt - (EDT - now) * bw
153249 + * If we're increasing inflight, then we want to know if the transmit of the
153250 + * EDT skb will push inflight above the target, so inflight_at_edt includes
153251 + * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
153252 + * then estimate if inflight will sink too low just before the EDT transmit.
153253 + */
153254 +static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
153256 +       struct tcp_sock *tp = tcp_sk(sk);
153257 +       struct bbr *bbr = inet_csk_ca(sk);
153258 +       u64 now_ns, edt_ns, interval_us;
153259 +       u32 interval_delivered, inflight_at_edt;
153261 +       now_ns = tp->tcp_clock_cache;
153262 +       edt_ns = max(tp->tcp_wstamp_ns, now_ns);
153263 +       interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
153264 +       interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
153265 +       inflight_at_edt = inflight_now;
153266 +       if (bbr->pacing_gain > BBR_UNIT)              /* increasing inflight */
153267 +               inflight_at_edt += bbr_tso_segs_goal(sk);  /* include EDT skb */
153268 +       if (interval_delivered >= inflight_at_edt)
153269 +               return 0;
153270 +       return inflight_at_edt - interval_delivered;
153273 +/* Find the cwnd increment based on estimate of ack aggregation */
153274 +static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
153276 +       struct bbr *bbr = inet_csk_ca(sk);
153277 +       u32 max_aggr_cwnd, aggr_cwnd = 0;
153279 +       if (bbr->params.extra_acked_gain &&
153280 +           (bbr_full_bw_reached(sk) || bbr->params.extra_acked_in_startup)) {
153281 +               max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
153282 +                               / BW_UNIT;
153283 +               aggr_cwnd = (bbr->params.extra_acked_gain * bbr_extra_acked(sk))
153284 +                            >> BBR_SCALE;
153285 +               aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
153286 +       }
153288 +       return aggr_cwnd;
153291 +/* Returns the cwnd for PROBE_RTT mode. */
153292 +static u32 bbr_probe_rtt_cwnd(struct sock *sk)
153294 +       struct bbr *bbr = inet_csk_ca(sk);
153296 +       if (bbr->params.probe_rtt_cwnd_gain == 0)
153297 +               return bbr->params.cwnd_min_target;
153298 +       return max_t(u32, bbr->params.cwnd_min_target,
153299 +                    bbr_bdp(sk, bbr_bw(sk), bbr->params.probe_rtt_cwnd_gain));
153302 +/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
153303 + * has drawn us down below target), or snap down to target if we're above it.
153304 + */
153305 +static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
153306 +                        u32 acked, u32 bw, int gain, u32 cwnd,
153307 +                        struct bbr_context *ctx)
153309 +       struct tcp_sock *tp = tcp_sk(sk);
153310 +       struct bbr *bbr = inet_csk_ca(sk);
153311 +       u32 target_cwnd = 0, prev_cwnd = tp->snd_cwnd, max_probe;
153313 +       if (!acked)
153314 +               goto done;  /* no packet fully ACKed; just apply caps */
153316 +       target_cwnd = bbr_bdp(sk, bw, gain);
153318 +       /* Increment the cwnd to account for excess ACKed data that seems
153319 +        * due to aggregation (of data and/or ACKs) visible in the ACK stream.
153320 +        */
153321 +       target_cwnd += bbr_ack_aggregation_cwnd(sk);
153322 +       target_cwnd = bbr_quantization_budget(sk, target_cwnd);
153324 +       /* If we're below target cwnd, slow start cwnd toward target cwnd. */
153325 +       bbr->debug.target_cwnd = target_cwnd;
153327 +       /* Update cwnd and enable fast path if cwnd reaches target_cwnd. */
153328 +       bbr->try_fast_path = 0;
153329 +       if (bbr_full_bw_reached(sk)) { /* only cut cwnd if we filled the pipe */
153330 +               cwnd += acked;
153331 +               if (cwnd >= target_cwnd) {
153332 +                       cwnd = target_cwnd;
153333 +                       bbr->try_fast_path = 1;
153334 +               }
153335 +       } else if (cwnd < target_cwnd || cwnd  < 2 * bbr->init_cwnd) {
153336 +               cwnd += acked;
153337 +       } else {
153338 +               bbr->try_fast_path = 1;
153339 +       }
153341 +       /* When growing cwnd, don't grow beyond twice what we just probed. */
153342 +       if (bbr->params.usage_based_cwnd) {
153343 +               max_probe = max(2 * tp->max_packets_out, tp->snd_cwnd);
153344 +               cwnd = min(cwnd, max_probe);
153345 +       }
153347 +       cwnd = max_t(u32, cwnd, bbr->params.cwnd_min_target);
153348 +done:
153349 +       tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);   /* apply global cap */
153350 +       if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
153351 +               tp->snd_cwnd = min_t(u32, tp->snd_cwnd, bbr_probe_rtt_cwnd(sk));
153353 +       ctx->target_cwnd = target_cwnd;
153354 +       ctx->log = (tp->snd_cwnd != prev_cwnd);
153357 +/* See if we have reached next round trip */
153358 +static void bbr_update_round_start(struct sock *sk,
153359 +               const struct rate_sample *rs, struct bbr_context *ctx)
153361 +       struct tcp_sock *tp = tcp_sk(sk);
153362 +       struct bbr *bbr = inet_csk_ca(sk);
153364 +       bbr->round_start = 0;
153366 +       /* See if we've reached the next RTT */
153367 +       if (rs->interval_us > 0 &&
153368 +           !before(rs->prior_delivered, bbr->next_rtt_delivered)) {
153369 +               bbr->next_rtt_delivered = tp->delivered;
153370 +               bbr->round_start = 1;
153371 +       }
153374 +/* Calculate the bandwidth based on how fast packets are delivered */
153375 +static void bbr_calculate_bw_sample(struct sock *sk,
153376 +                       const struct rate_sample *rs, struct bbr_context *ctx)
153378 +       struct bbr *bbr = inet_csk_ca(sk);
153379 +       u64 bw = 0;
153381 +       /* Divide delivered by the interval to find a (lower bound) bottleneck
153382 +        * bandwidth sample. Delivered is in packets and interval_us in uS and
153383 +        * ratio will be <<1 for most connections. So delivered is first scaled.
153384 +        * Round up to allow growth at low rates, even with integer division.
153385 +        */
153386 +       if (rs->interval_us > 0) {
153387 +               if (WARN_ONCE(rs->delivered < 0,
153388 +                             "negative delivered: %d interval_us: %ld\n",
153389 +                             rs->delivered, rs->interval_us))
153390 +                       return;
153392 +               bw = DIV_ROUND_UP_ULL((u64)rs->delivered * BW_UNIT, rs->interval_us);
153393 +       }
153395 +       ctx->sample_bw = bw;
153396 +       bbr->debug.rs_bw = bw;
153399 +/* Estimates the windowed max degree of ack aggregation.
153400 + * This is used to provision extra in-flight data to keep sending during
153401 + * inter-ACK silences.
153403 + * Degree of ack aggregation is estimated as extra data acked beyond expected.
153405 + * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
153406 + * cwnd += max_extra_acked
153408 + * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
153409 + * Max filter is an approximate sliding window of 5-10 (packet timed) round
153410 + * trips for non-startup phase, and 1-2 round trips for startup.
153411 + */
153412 +static void bbr_update_ack_aggregation(struct sock *sk,
153413 +                                      const struct rate_sample *rs)
153415 +       u32 epoch_us, expected_acked, extra_acked;
153416 +       struct bbr *bbr = inet_csk_ca(sk);
153417 +       struct tcp_sock *tp = tcp_sk(sk);
153418 +       u32 extra_acked_win_rtts_thresh = bbr->params.extra_acked_win_rtts;
153420 +       if (!bbr->params.extra_acked_gain || rs->acked_sacked <= 0 ||
153421 +           rs->delivered < 0 || rs->interval_us <= 0)
153422 +               return;
153424 +       if (bbr->round_start) {
153425 +               bbr->extra_acked_win_rtts = min(0x1F,
153426 +                                               bbr->extra_acked_win_rtts + 1);
153427 +               if (bbr->params.extra_acked_in_startup &&
153428 +                   !bbr_full_bw_reached(sk))
153429 +                       extra_acked_win_rtts_thresh = 1;
153430 +               if (bbr->extra_acked_win_rtts >=
153431 +                   extra_acked_win_rtts_thresh) {
153432 +                       bbr->extra_acked_win_rtts = 0;
153433 +                       bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
153434 +                                                  0 : 1;
153435 +                       bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
153436 +               }
153437 +       }
153439 +       /* Compute how many packets we expected to be delivered over epoch. */
153440 +       epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
153441 +                                     bbr->ack_epoch_mstamp);
153442 +       expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
153444 +       /* Reset the aggregation epoch if the ACK rate is below the expected
153445 +        * rate, or if a significantly large number of ACKs has arrived since
153446 +        * the epoch began (i.e. the epoch is potentially quite old).
153447 +        */
153448 +       if (bbr->ack_epoch_acked <= expected_acked ||
153449 +           (bbr->ack_epoch_acked + rs->acked_sacked >=
153450 +            bbr_ack_epoch_acked_reset_thresh)) {
153451 +               bbr->ack_epoch_acked = 0;
153452 +               bbr->ack_epoch_mstamp = tp->delivered_mstamp;
153453 +               expected_acked = 0;
153454 +       }
153456 +       /* Compute excess data delivered, beyond what was expected. */
153457 +       bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
153458 +                                  bbr->ack_epoch_acked + rs->acked_sacked);
153459 +       extra_acked = bbr->ack_epoch_acked - expected_acked;
153460 +       extra_acked = min(extra_acked, tp->snd_cwnd);
153461 +       if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
153462 +               bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
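
A toy walk-through of the estimator above, using round illustrative numbers
rather than real trace values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bw = 10;		/* est. bw: 10 pkts per msec */
	uint32_t epoch_ms = 5;		/* time since aggregation epoch start */
	uint32_t epoch_acked = 90;	/* pkts (S)ACKed in that epoch */
	uint32_t cwnd = 100;

	uint32_t expected_acked = bw * epoch_ms;	/* 50 pkts */
	uint32_t extra_acked = epoch_acked - expected_acked;

	if (extra_acked > cwnd)		/* clamp by cwnd, as above */
		extra_acked = cwnd;
	printf("extra_acked = %u pkts\n", extra_acked);	/* 40 pkts of headroom */
	return 0;
}
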
153465 +/* Estimate when the pipe is full, using the change in delivery rate: BBR
153466 + * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
153467 + * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
153468 + * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
153469 + * higher rwin, 3: we get higher delivery rate samples. Or transient
153470 + * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
153471 + * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
153472 + */
153473 +static void bbr_check_full_bw_reached(struct sock *sk,
153474 +                                     const struct rate_sample *rs)
153476 +       struct bbr *bbr = inet_csk_ca(sk);
153477 +       u32 bw_thresh;
153479 +       if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
153480 +               return;
153482 +       bw_thresh = (u64)bbr->full_bw * bbr->params.full_bw_thresh >> BBR_SCALE;
153483 +       if (bbr_max_bw(sk) >= bw_thresh) {
153484 +               bbr->full_bw = bbr_max_bw(sk);
153485 +               bbr->full_bw_cnt = 0;
153486 +               return;
153487 +       }
153488 +       ++bbr->full_bw_cnt;
153489 +       bbr->full_bw_reached = bbr->full_bw_cnt >= bbr->params.full_bw_cnt;
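
For instance (illustrative numbers, full_bw_thresh = 5/4): if full_bw is 100
pkt-units and a round's max bw reaches 130 >= 125, the filter re-arms
(full_bw = 130, full_bw_cnt = 0); if max bw then stays below 130 * 5/4 = 162
for three consecutive non-app-limited rounds, full_bw_cnt reaches 3 and
full_bw_reached is set.
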
153492 +/* If pipe is probably full, drain the queue and then enter steady-state. */
153493 +static bool bbr_check_drain(struct sock *sk, const struct rate_sample *rs,
153494 +                           struct bbr_context *ctx)
153496 +       struct bbr *bbr = inet_csk_ca(sk);
153498 +       if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
153499 +               bbr->mode = BBR_DRAIN;  /* drain queue we created */
153500 +               tcp_sk(sk)->snd_ssthresh =
153501 +                               bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
153502 +               bbr2_reset_congestion_signals(sk);
153503 +       }       /* fall through to check if in-flight is already small: */
153504 +       if (bbr->mode == BBR_DRAIN &&
153505 +           bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
153506 +           bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
153507 +               return true;  /* exiting DRAIN now */
153508 +       return false;
153511 +static void bbr_check_probe_rtt_done(struct sock *sk)
153513 +       struct tcp_sock *tp = tcp_sk(sk);
153514 +       struct bbr *bbr = inet_csk_ca(sk);
153516 +       if (!(bbr->probe_rtt_done_stamp &&
153517 +             after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
153518 +               return;
153520 +       bbr->probe_rtt_min_stamp = tcp_jiffies32; /* schedule next PROBE_RTT */
153521 +       tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
153522 +       bbr2_exit_probe_rtt(sk);
153525 +/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
153526 + * periodically drain the bottleneck queue, to converge to measure the true
153527 + * min_rtt (unloaded propagation delay). This allows the flows to keep queues
153528 + * small (reducing queuing delay and packet loss) and achieve fairness among
153529 + * BBR flows.
153531 + * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
153532 + * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
153533 + * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
153534 + * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
153535 + * re-enter the previous mode. BBR uses 200ms to approximately bound the
153536 + * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
153538 + * Note that flows need only pay 2% if they are busy sending over the last 10
153539 + * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
153540 + * natural silences or low-rate periods within 10 seconds where the rate is low
153541 + * enough for long enough to drain its queue in the bottleneck. We pick up
153542 + * these min RTT measurements opportunistically with our min_rtt filter. :-)
153543 + */
153544 +static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
153546 +       struct tcp_sock *tp = tcp_sk(sk);
153547 +       struct bbr *bbr = inet_csk_ca(sk);
153548 +       bool probe_rtt_expired, min_rtt_expired;
153549 +       u32 expire;
153551 +       /* Track min RTT in probe_rtt_win_ms to time next PROBE_RTT state. */
153552 +       expire = bbr->probe_rtt_min_stamp +
153553 +                msecs_to_jiffies(bbr->params.probe_rtt_win_ms);
153554 +       probe_rtt_expired = after(tcp_jiffies32, expire);
153555 +       if (rs->rtt_us >= 0 &&
153556 +           (rs->rtt_us <= bbr->probe_rtt_min_us ||
153557 +            (probe_rtt_expired && !rs->is_ack_delayed))) {
153558 +               bbr->probe_rtt_min_us = rs->rtt_us;
153559 +               bbr->probe_rtt_min_stamp = tcp_jiffies32;
153560 +       }
153561 +       /* Track min RTT seen in the min_rtt_win_sec filter window: */
153562 +       expire = bbr->min_rtt_stamp + bbr->params.min_rtt_win_sec * HZ;
153563 +       min_rtt_expired = after(tcp_jiffies32, expire);
153564 +       if (bbr->probe_rtt_min_us <= bbr->min_rtt_us ||
153565 +           min_rtt_expired) {
153566 +               bbr->min_rtt_us = bbr->probe_rtt_min_us;
153567 +               bbr->min_rtt_stamp = bbr->probe_rtt_min_stamp;
153568 +       }
153570 +       if (bbr->params.probe_rtt_mode_ms > 0 && probe_rtt_expired &&
153571 +           !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
153572 +               bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
153573 +               bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
153574 +               bbr->probe_rtt_done_stamp = 0;
153575 +               bbr->ack_phase = BBR_ACKS_PROBE_STOPPING;
153576 +               bbr->next_rtt_delivered = tp->delivered;
153577 +       }
153579 +       if (bbr->mode == BBR_PROBE_RTT) {
153580 +               /* Ignore low rate samples during this mode. */
153581 +               tp->app_limited =
153582 +                       (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
153583 +               /* Maintain min packets in flight for max(200 ms, 1 round). */
153584 +               if (!bbr->probe_rtt_done_stamp &&
153585 +                   tcp_packets_in_flight(tp) <= bbr_probe_rtt_cwnd(sk)) {
153586 +                       bbr->probe_rtt_done_stamp = tcp_jiffies32 +
153587 +                               msecs_to_jiffies(bbr->params.probe_rtt_mode_ms);
153588 +                       bbr->probe_rtt_round_done = 0;
153589 +                       bbr->next_rtt_delivered = tp->delivered;
153590 +               } else if (bbr->probe_rtt_done_stamp) {
153591 +                       if (bbr->round_start)
153592 +                               bbr->probe_rtt_round_done = 1;
153593 +                       if (bbr->probe_rtt_round_done)
153594 +                               bbr_check_probe_rtt_done(sk);
153595 +               }
153596 +       }
153597 +       /* Restart after idle ends only once we process a new S/ACK for data */
153598 +       if (rs->delivered > 0)
153599 +               bbr->idle_restart = 0;
153602 +static void bbr_update_gains(struct sock *sk)
153604 +       struct bbr *bbr = inet_csk_ca(sk);
153606 +       switch (bbr->mode) {
153607 +       case BBR_STARTUP:
153608 +               bbr->pacing_gain = bbr->params.high_gain;
153609 +               bbr->cwnd_gain   = bbr->params.startup_cwnd_gain;
153610 +               break;
153611 +       case BBR_DRAIN:
153612 +               bbr->pacing_gain = bbr->params.drain_gain;  /* slow, to drain */
153613 +               bbr->cwnd_gain = bbr->params.startup_cwnd_gain;  /* keep cwnd */
153614 +               break;
153615 +       case BBR_PROBE_BW:
153616 +               bbr->pacing_gain = bbr->params.pacing_gain[bbr->cycle_idx];
153617 +               bbr->cwnd_gain = bbr->params.cwnd_gain;
153618 +               break;
153619 +       case BBR_PROBE_RTT:
153620 +               bbr->pacing_gain = BBR_UNIT;
153621 +               bbr->cwnd_gain = BBR_UNIT;
153622 +               break;
153623 +       default:
153624 +               WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
153625 +               break;
153626 +       }
153629 +static void bbr_init(struct sock *sk)
153631 +       struct tcp_sock *tp = tcp_sk(sk);
153632 +       struct bbr *bbr = inet_csk_ca(sk);
153633 +       int i;
153635 +       WARN_ON_ONCE(tp->snd_cwnd >= bbr_cwnd_warn_val);
153637 +       bbr->initialized = 1;
153638 +       bbr->params.high_gain = min(0x7FF, bbr_high_gain);
153639 +       bbr->params.drain_gain = min(0x3FF, bbr_drain_gain);
153640 +       bbr->params.startup_cwnd_gain = min(0x7FF, bbr_startup_cwnd_gain);
153641 +       bbr->params.cwnd_gain = min(0x7FF, bbr_cwnd_gain);
153642 +       bbr->params.cwnd_tso_budget = min(0x1U, bbr_cwnd_tso_budget);
153643 +       bbr->params.cwnd_min_target = min(0xFU, bbr_cwnd_min_target);
153644 +       bbr->params.min_rtt_win_sec = min(0x1FU, bbr_min_rtt_win_sec);
153645 +       bbr->params.probe_rtt_mode_ms = min(0x1FFU, bbr_probe_rtt_mode_ms);
153646 +       bbr->params.full_bw_cnt = min(0x7U, bbr_full_bw_cnt);
153647 +       bbr->params.full_bw_thresh = min(0x3FFU, bbr_full_bw_thresh);
153648 +       bbr->params.extra_acked_gain = min(0x7FF, bbr_extra_acked_gain);
153649 +       bbr->params.extra_acked_win_rtts = min(0x1FU, bbr_extra_acked_win_rtts);
153650 +       bbr->params.drain_to_target = bbr_drain_to_target ? 1 : 0;
153651 +       bbr->params.precise_ece_ack = bbr_precise_ece_ack ? 1 : 0;
153652 +       bbr->params.extra_acked_in_startup = bbr_extra_acked_in_startup ? 1 : 0;
153653 +       bbr->params.probe_rtt_cwnd_gain = min(0xFFU, bbr_probe_rtt_cwnd_gain);
153654 +       bbr->params.probe_rtt_win_ms =
153655 +               min(0x3FFFU,
153656 +                   min_t(u32, bbr_probe_rtt_win_ms,
153657 +                         bbr->params.min_rtt_win_sec * MSEC_PER_SEC));
153658 +       for (i = 0; i < CYCLE_LEN; i++)
153659 +               bbr->params.pacing_gain[i] = min(0x3FF, bbr_pacing_gain[i]);
153660 +       bbr->params.usage_based_cwnd = bbr_usage_based_cwnd ? 1 : 0;
153661 +       bbr->params.tso_rtt_shift =  min(0xFU, bbr_tso_rtt_shift);
153663 +       bbr->debug.snd_isn = tp->snd_una;
153664 +       bbr->debug.target_cwnd = 0;
153665 +       bbr->debug.undo = 0;
153667 +       bbr->init_cwnd = min(0x7FU, tp->snd_cwnd);
153668 +       bbr->prior_cwnd = tp->prior_cwnd;
153669 +       tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
153670 +       bbr->next_rtt_delivered = 0;
153671 +       bbr->prev_ca_state = TCP_CA_Open;
153672 +       bbr->packet_conservation = 0;
153674 +       bbr->probe_rtt_done_stamp = 0;
153675 +       bbr->probe_rtt_round_done = 0;
153676 +       bbr->probe_rtt_min_us = tcp_min_rtt(tp);
153677 +       bbr->probe_rtt_min_stamp = tcp_jiffies32;
153678 +       bbr->min_rtt_us = tcp_min_rtt(tp);
153679 +       bbr->min_rtt_stamp = tcp_jiffies32;
153681 +       bbr->has_seen_rtt = 0;
153682 +       bbr_init_pacing_rate_from_rtt(sk);
153684 +       bbr->round_start = 0;
153685 +       bbr->idle_restart = 0;
153686 +       bbr->full_bw_reached = 0;
153687 +       bbr->full_bw = 0;
153688 +       bbr->full_bw_cnt = 0;
153689 +       bbr->cycle_mstamp = 0;
153690 +       bbr->cycle_idx = 0;
153691 +       bbr->mode = BBR_STARTUP;
153692 +       bbr->debug.rs_bw = 0;
153694 +       bbr->ack_epoch_mstamp = tp->tcp_mstamp;
153695 +       bbr->ack_epoch_acked = 0;
153696 +       bbr->extra_acked_win_rtts = 0;
153697 +       bbr->extra_acked_win_idx = 0;
153698 +       bbr->extra_acked[0] = 0;
153699 +       bbr->extra_acked[1] = 0;
153701 +       bbr->ce_state = 0;
153702 +       bbr->prior_rcv_nxt = tp->rcv_nxt;
153703 +       bbr->try_fast_path = 0;
153705 +       cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
153708 +static u32 bbr_sndbuf_expand(struct sock *sk)
153710 +       /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
153711 +       return 3;
153714 +/* __________________________________________________________________________
153716 + * Functions new to BBR v2 ("bbr") congestion control are below here.
153717 + * __________________________________________________________________________
153718 + */
153720 +/* Incorporate a new bw sample into the current window of our max filter. */
153721 +static void bbr2_take_bw_hi_sample(struct sock *sk, u32 bw)
153723 +       struct bbr *bbr = inet_csk_ca(sk);
153725 +       bbr->bw_hi[1] = max(bw, bbr->bw_hi[1]);
153728 +/* Keep max of last 1-2 cycles. Each PROBE_BW cycle, flip filter window. */
153729 +static void bbr2_advance_bw_hi_filter(struct sock *sk)
153731 +       struct bbr *bbr = inet_csk_ca(sk);
153733 +       if (!bbr->bw_hi[1])
153734 +               return;  /* no samples in this window; remember old window */
153735 +       bbr->bw_hi[0] = bbr->bw_hi[1];
153736 +       bbr->bw_hi[1] = 0;
153739 +/* How much do we want in flight? Our BDP, unless congestion cut cwnd. */
153740 +static u32 bbr2_target_inflight(struct sock *sk)
153742 +       u32 bdp = bbr_inflight(sk, bbr_bw(sk), BBR_UNIT);
153744 +       return min(bdp, tcp_sk(sk)->snd_cwnd);
153747 +static bool bbr2_is_probing_bandwidth(struct sock *sk)
153749 +       struct bbr *bbr = inet_csk_ca(sk);
153751 +       return (bbr->mode == BBR_STARTUP) ||
153752 +               (bbr->mode == BBR_PROBE_BW &&
153753 +                (bbr->cycle_idx == BBR_BW_PROBE_REFILL ||
153754 +                 bbr->cycle_idx == BBR_BW_PROBE_UP));
153757 +/* Has the given amount of time elapsed since we marked the phase start? */
153758 +static bool bbr2_has_elapsed_in_phase(const struct sock *sk, u32 interval_us)
153760 +       const struct tcp_sock *tp = tcp_sk(sk);
153761 +       const struct bbr *bbr = inet_csk_ca(sk);
153763 +       return tcp_stamp_us_delta(tp->tcp_mstamp,
153764 +                                 bbr->cycle_mstamp + interval_us) > 0;
153767 +static void bbr2_handle_queue_too_high_in_startup(struct sock *sk)
153769 +       struct bbr *bbr = inet_csk_ca(sk);
153771 +       bbr->full_bw_reached = 1;
153772 +       bbr->inflight_hi = bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
153775 +/* Exit STARTUP upon N consecutive rounds with ECN mark rate > ecn_thresh. */
153776 +static void bbr2_check_ecn_too_high_in_startup(struct sock *sk, u32 ce_ratio)
153778 +       struct bbr *bbr = inet_csk_ca(sk);
153780 +       if (bbr_full_bw_reached(sk) || !bbr->ecn_eligible ||
153781 +           !bbr->params.full_ecn_cnt || !bbr->params.ecn_thresh)
153782 +               return;
153784 +       if (ce_ratio >= bbr->params.ecn_thresh)
153785 +               bbr->startup_ecn_rounds++;
153786 +       else
153787 +               bbr->startup_ecn_rounds = 0;
153789 +       if (bbr->startup_ecn_rounds >= bbr->params.full_ecn_cnt) {
153790 +               bbr->debug.event = 'E';  /* ECN caused STARTUP exit */
153791 +               bbr2_handle_queue_too_high_in_startup(sk);
153792 +               return;
153793 +       }
153796 +static void bbr2_update_ecn_alpha(struct sock *sk)
153798 +       struct tcp_sock *tp = tcp_sk(sk);
153799 +       struct bbr *bbr = inet_csk_ca(sk);
153800 +       s32 delivered, delivered_ce;
153801 +       u64 alpha, ce_ratio;
153802 +       u32 gain;
153804 +       if (bbr->params.ecn_factor == 0)
153805 +               return;
153807 +       delivered = tp->delivered - bbr->alpha_last_delivered;
153808 +       delivered_ce = tp->delivered_ce - bbr->alpha_last_delivered_ce;
153810 +       if (delivered == 0 ||           /* avoid divide by zero */
153811 +           WARN_ON_ONCE(delivered < 0 || delivered_ce < 0))  /* backwards? */
153812 +               return;
153814 +       /* See if we should use ECN sender logic for this connection. */
153815 +       if (!bbr->ecn_eligible && bbr_ecn_enable &&
153816 +           (bbr->min_rtt_us <= bbr->params.ecn_max_rtt_us ||
153817 +            !bbr->params.ecn_max_rtt_us))
153818 +               bbr->ecn_eligible = 1;
153820 +       ce_ratio = (u64)delivered_ce << BBR_SCALE;
153821 +       do_div(ce_ratio, delivered);
153822 +       gain = bbr->params.ecn_alpha_gain;
153823 +       alpha = ((BBR_UNIT - gain) * bbr->ecn_alpha) >> BBR_SCALE;
153824 +       alpha += (gain * ce_ratio) >> BBR_SCALE;
153825 +       bbr->ecn_alpha = min_t(u32, alpha, BBR_UNIT);
153827 +       bbr->alpha_last_delivered = tp->delivered;
153828 +       bbr->alpha_last_delivered_ce = tp->delivered_ce;
153830 +       bbr2_check_ecn_too_high_in_startup(sk, ce_ratio);
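
A stand-alone sketch of the fixed-point EWMA above,
alpha <- (1 - gain) * alpha + gain * ce_ratio, using an illustrative gain
value (BBR_SCALE = 8 assumed):

#include <stdio.h>
#include <stdint.h>

#define BBR_SCALE 8
#define BBR_UNIT  (1 << BBR_SCALE)

int main(void)
{
	uint32_t alpha = 0;
	uint32_t gain = BBR_UNIT / 16;		/* illustrative ecn_alpha_gain */
	uint32_t ce_ratio = BBR_UNIT / 2;	/* 50% of delivered were CE-marked */

	for (int round = 1; round <= 4; round++) {
		alpha = ((BBR_UNIT - gain) * alpha) >> BBR_SCALE;
		alpha += (gain * ce_ratio) >> BBR_SCALE;
		printf("round %d: alpha = %u/256\n", round, alpha);
	}
	return 0;	/* alpha climbs geometrically toward ce_ratio (128/256) */
}
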
153833 +/* Each round trip of BBR_BW_PROBE_UP, double the volume of probing data. */
153834 +static void bbr2_raise_inflight_hi_slope(struct sock *sk)
153836 +       struct tcp_sock *tp = tcp_sk(sk);
153837 +       struct bbr *bbr = inet_csk_ca(sk);
153838 +       u32 growth_this_round, cnt;
153840 +       /* Calculate "slope": packets S/Acked per inflight_hi increment. */
153841 +       growth_this_round = 1 << bbr->bw_probe_up_rounds;
153842 +       bbr->bw_probe_up_rounds = min(bbr->bw_probe_up_rounds + 1, 30);
153843 +       cnt = tp->snd_cwnd / growth_this_round;
153844 +       cnt = max(cnt, 1U);
153845 +       bbr->bw_probe_up_cnt = cnt;
153846 +       bbr->debug.event = 'G';  /* Grow inflight_hi slope */
153849 +/* In BBR_BW_PROBE_UP, not seeing high loss/ECN/queue, so raise inflight_hi. */
153850 +static void bbr2_probe_inflight_hi_upward(struct sock *sk,
153851 +                                         const struct rate_sample *rs)
153853 +       struct tcp_sock *tp = tcp_sk(sk);
153854 +       struct bbr *bbr = inet_csk_ca(sk);
153855 +       u32 delta;
153857 +       if (!tp->is_cwnd_limited || tp->snd_cwnd < bbr->inflight_hi) {
153858 +               bbr->bw_probe_up_acks = 0;  /* don't accumulate unused credits */
153859 +               return;  /* not fully using inflight_hi, so don't grow it */
153860 +       }
153862 +       /* For each bw_probe_up_cnt packets ACKed, increase inflight_hi by 1. */
153863 +       bbr->bw_probe_up_acks += rs->acked_sacked;
153864 +       if (bbr->bw_probe_up_acks >= bbr->bw_probe_up_cnt) {
153865 +               delta = bbr->bw_probe_up_acks / bbr->bw_probe_up_cnt;
153866 +               bbr->bw_probe_up_acks -= delta * bbr->bw_probe_up_cnt;
153867 +               bbr->inflight_hi += delta;
153868 +               bbr->debug.event = 'I';  /* Increment inflight_hi */
153869 +       }
153871 +       if (bbr->round_start)
153872 +               bbr2_raise_inflight_hi_slope(sk);
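
Concretely (illustrative numbers): with cwnd = 1000, the first PROBE_UP round
sets cnt = 1000, i.e. inflight_hi grows by about one packet per round; the
next round sets cnt = 500 (two per round), then 250 (four per round), doubling
the probing volume each round until the 30-round cap above.
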
153875 +/* Does loss/ECN rate for this sample say inflight is "too high"?
153876 + * This is used by both the bbr_check_loss_too_high_in_startup() function,
153877 + * which can be used in either v1 or v2, and the PROBE_UP phase of v2, which
153878 + * uses it to notice when loss/ECN rates suggest inflight is too high.
153879 + */
153880 +static bool bbr2_is_inflight_too_high(const struct sock *sk,
153881 +                                    const struct rate_sample *rs)
153883 +       const struct bbr *bbr = inet_csk_ca(sk);
153884 +       u32 loss_thresh, ecn_thresh;
153886 +       if (rs->lost > 0 && rs->tx_in_flight) {
153887 +               loss_thresh = (u64)rs->tx_in_flight * bbr->params.loss_thresh >>
153888 +                               BBR_SCALE;
153889 +               if (rs->lost > loss_thresh)
153890 +                       return true;
153891 +       }
153893 +       if (rs->delivered_ce > 0 && rs->delivered > 0 &&
153894 +           bbr->ecn_eligible && bbr->params.ecn_thresh) {
153895 +               ecn_thresh = (u64)rs->delivered * bbr->params.ecn_thresh >>
153896 +                               BBR_SCALE;
153897 +               if (rs->delivered_ce >= ecn_thresh)
153898 +                       return true;
153899 +       }
153901 +       return false;
153904 +/* Calculate the tx_in_flight level that corresponded to excessive loss.
153905 + * We find "lost_prefix" segs of the skb where loss rate went too high,
153906 + * by solving for "lost_prefix" in the following equation:
153907 + *   lost                     /  inflight                     >= loss_thresh
153908 + *  (lost_prev + lost_prefix) / (inflight_prev + lost_prefix) >= loss_thresh
153909 + * Then we take that equation, convert it to fixed point, and
153910 + * round up to the nearest packet.
153911 + */
153912 +static u32 bbr2_inflight_hi_from_lost_skb(const struct sock *sk,
153913 +                                         const struct rate_sample *rs,
153914 +                                         const struct sk_buff *skb)
153916 +       const struct bbr *bbr = inet_csk_ca(sk);
153917 +       u32 loss_thresh  = bbr->params.loss_thresh;
153918 +       u32 pcount, divisor, inflight_hi;
153919 +       s32 inflight_prev, lost_prev;
153920 +       u64 loss_budget, lost_prefix;
153922 +       pcount = tcp_skb_pcount(skb);
153924 +       /* How much data was in flight before this skb? */
153925 +       inflight_prev = rs->tx_in_flight - pcount;
153926 +       if (WARN_ONCE(inflight_prev < 0,
153927 +                     "tx_in_flight: %u pcount: %u reneg: %u",
153928 +                     rs->tx_in_flight, pcount, tcp_sk(sk)->is_sack_reneg))
153929 +               return ~0U;
153931 +       /* How much inflight data was marked lost before this skb? */
153932 +       lost_prev = rs->lost - pcount;
153933 +       if (WARN_ON_ONCE(lost_prev < 0))
153934 +               return ~0U;
153936 +       /* At what prefix of this lost skb did loss rate exceed loss_thresh? */
153937 +       loss_budget = (u64)inflight_prev * loss_thresh + BBR_UNIT - 1;
153938 +       loss_budget >>= BBR_SCALE;
153939 +       if (lost_prev >= loss_budget) {
153940 +               lost_prefix = 0;   /* previous losses crossed loss_thresh */
153941 +       } else {
153942 +               lost_prefix = loss_budget - lost_prev;
153943 +               lost_prefix <<= BBR_SCALE;
153944 +               divisor = BBR_UNIT - loss_thresh;
153945 +               if (WARN_ON_ONCE(!divisor))  /* loss_thresh is 8 bits */
153946 +                       return ~0U;
153947 +               do_div(lost_prefix, divisor);
153948 +       }
153950 +       inflight_hi = inflight_prev + lost_prefix;
153951 +       return inflight_hi;
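
As a worked instance of the equation above, with an illustrative loss_thresh
of 2% (BBR_UNIT * 2 / 100): for inflight_prev = 1000 and lost_prev = 10, the
loss budget is 0.02 * 1000 = 20 packets, so
lost_prefix = (20 - 10) / (1 - 0.02), roughly 10 packets, and the returned
inflight_hi is roughly 1010.
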
153954 +/* If loss/ECN rates during probing indicated we may have overfilled a
153955 + * buffer, return an operating point that tries to leave unutilized headroom in
153956 + * the path for other flows, for fairness convergence and lower RTTs and loss.
153957 + */
153958 +static u32 bbr2_inflight_with_headroom(const struct sock *sk)
153960 +       struct bbr *bbr = inet_csk_ca(sk);
153961 +       u32 headroom, headroom_fraction;
153963 +       if (bbr->inflight_hi == ~0U)
153964 +               return ~0U;
153966 +       headroom_fraction = bbr->params.inflight_headroom;
153967 +       headroom = ((u64)bbr->inflight_hi * headroom_fraction) >> BBR_SCALE;
153968 +       headroom = max(headroom, 1U);
153969 +       return max_t(s32, bbr->inflight_hi - headroom,
153970 +                    bbr->params.cwnd_min_target);
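
For example (worked numbers, not in the patch): with the default inflight_headroom of BBR_UNIT * 15 / 100 = 38 (~14.8%) and inflight_hi = 100 packets, headroom = (100 * 38) >> 8 = 14, so the flow cruises at max(100 - 14, cwnd_min_target) = 86 packets, assuming the usual cwnd_min_target default of 4 from the shared v1 code.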
153973 +/* Bound cwnd to a sensible level, based on our current probing state
153974 + * machine phase and model of a good inflight level (inflight_lo, inflight_hi).
153975 + */
153976 +static void bbr2_bound_cwnd_for_inflight_model(struct sock *sk)
153978 +       struct tcp_sock *tp = tcp_sk(sk);
153979 +       struct bbr *bbr = inet_csk_ca(sk);
153980 +       u32 cap;
153982 +       /* tcp_rcv_synsent_state_process() currently calls tcp_ack()
153983 +        * and thus cong_control() without first initializing us(!).
153984 +        */
153985 +       if (!bbr->initialized)
153986 +               return;
153988 +       cap = ~0U;
153989 +       if (bbr->mode == BBR_PROBE_BW &&
153990 +           bbr->cycle_idx != BBR_BW_PROBE_CRUISE) {
153991 +               /* Probe to see if more packets fit in the path. */
153992 +               cap = bbr->inflight_hi;
153993 +       } else {
153994 +               if (bbr->mode == BBR_PROBE_RTT ||
153995 +                   (bbr->mode == BBR_PROBE_BW &&
153996 +                    bbr->cycle_idx == BBR_BW_PROBE_CRUISE))
153997 +                       cap = bbr2_inflight_with_headroom(sk);
153998 +       }
153999 +       /* Adapt to any loss/ECN since our last bw probe. */
154000 +       cap = min(cap, bbr->inflight_lo);
154002 +       cap = max_t(u32, cap, bbr->params.cwnd_min_target);
154003 +       tp->snd_cwnd = min(cap, tp->snd_cwnd);
154006 +/* Estimate a short-term lower bound on the capacity available now, based
154007 + * on measurements of the current delivery process and recent history. When we
154008 + * are seeing loss/ECN at times when we are not probing bw, then conservatively
154009 + * move toward flow balance by multiplicatively cutting our short-term
154010 + * estimated safe rate and volume of data (bw_lo and inflight_lo). We use a
154011 + * multiplicative decrease in order to converge to a lower capacity in time
154012 + * logarithmic in the magnitude of the decrease.
154014 + * However, we do not cut our short-term estimates lower than the current rate
154015 + * and volume of delivered data from this round trip, since from the current
154016 + * delivery process we can estimate the measured capacity available now.
154018 + * Anything faster than that approach would knowingly risk high loss, which can
154019 + * cause low bw for Reno/CUBIC and high loss recovery latency for
154020 + * request/response flows using any congestion control.
154021 + */
154022 +static void bbr2_adapt_lower_bounds(struct sock *sk)
154024 +       struct tcp_sock *tp = tcp_sk(sk);
154025 +       struct bbr *bbr = inet_csk_ca(sk);
154026 +       u32 ecn_cut, ecn_inflight_lo, beta;
154028 +       /* We only use lower-bound estimates when not probing bw.
154029 +        * When probing we need to push inflight higher to probe bw.
154030 +        */
154031 +       if (bbr2_is_probing_bandwidth(sk))
154032 +               return;
154034 +       /* ECN response. */
154035 +       if (bbr->ecn_in_round && bbr->ecn_eligible && bbr->params.ecn_factor) {
154036 +               /* Reduce inflight to (1 - alpha*ecn_factor). */
154037 +               ecn_cut = (BBR_UNIT -
154038 +                          ((bbr->ecn_alpha * bbr->params.ecn_factor) >>
154039 +                           BBR_SCALE));
154040 +               if (bbr->inflight_lo == ~0U)
154041 +                       bbr->inflight_lo = tp->snd_cwnd;
154042 +               ecn_inflight_lo = (u64)bbr->inflight_lo * ecn_cut >> BBR_SCALE;
154043 +       } else {
154044 +               ecn_inflight_lo = ~0U;
154045 +       }
154047 +       /* Loss response. */
154048 +       if (bbr->loss_in_round) {
154049 +               /* Reduce bw and inflight to (1 - beta). */
154050 +               if (bbr->bw_lo == ~0U)
154051 +                       bbr->bw_lo = bbr_max_bw(sk);
154052 +               if (bbr->inflight_lo == ~0U)
154053 +                       bbr->inflight_lo = tp->snd_cwnd;
154054 +               beta = bbr->params.beta;
154055 +               bbr->bw_lo =
154056 +                       max_t(u32, bbr->bw_latest,
154057 +                             (u64)bbr->bw_lo *
154058 +                             (BBR_UNIT - beta) >> BBR_SCALE);
154059 +               bbr->inflight_lo =
154060 +                       max_t(u32, bbr->inflight_latest,
154061 +                             (u64)bbr->inflight_lo *
154062 +                             (BBR_UNIT - beta) >> BBR_SCALE);
154063 +       }
154065 +       /* Adjust to the lower of the levels implied by loss or ECN. */
154066 +       bbr->inflight_lo = min(bbr->inflight_lo, ecn_inflight_lo);
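
The claim that this multiplicative decrease converges in logarithmically many rounds can be checked with a small stand-alone sketch (illustration only, not part of the patch; all numbers are invented):

        #include <stdio.h>
        #include <stdint.h>

        #define BBR_SCALE 8
        #define BBR_UNIT (1 << BBR_SCALE)

        int main(void)
        {
                /* Invented numbers: beta = 30% (the default below), bw_lo starting
                 * at 1000 units, bw_latest pinned at 100 units as the floor measured
                 * from the current delivery process.
                 */
                uint32_t beta = BBR_UNIT * 30 / 100;
                uint32_t bw_lo = 1000, bw_latest = 100;
                int rounds = 0;

                while (bw_lo > bw_latest) {
                        uint32_t cut = (uint64_t)bw_lo * (BBR_UNIT - beta) >> BBR_SCALE;

                        bw_lo = cut > bw_latest ? cut : bw_latest; /* max(latest, cut) */
                        rounds++;
                }
                /* Prints 7: a 10x cut completes in ~log(0.1)/log(0.703) ~= 6.5 rounds. */
                printf("converged to %u in %d rounds\n", bw_lo, rounds);
                return 0;
        }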
154069 +/* Reset any short-term lower-bound adaptation to congestion, so that we can
154070 + * push our inflight up.
154071 + */
154072 +static void bbr2_reset_lower_bounds(struct sock *sk)
154074 +       struct bbr *bbr = inet_csk_ca(sk);
154076 +       bbr->bw_lo = ~0U;
154077 +       bbr->inflight_lo = ~0U;
154080 +/* After bw probing (STARTUP/PROBE_UP), reset signals before entering a state
154081 + * machine phase where we adapt our lower bound based on congestion signals.
154082 + */
154083 +static void bbr2_reset_congestion_signals(struct sock *sk)
154085 +       struct bbr *bbr = inet_csk_ca(sk);
154087 +       bbr->loss_in_round = 0;
154088 +       bbr->ecn_in_round = 0;
154089 +       bbr->loss_in_cycle = 0;
154090 +       bbr->ecn_in_cycle = 0;
154091 +       bbr->bw_latest = 0;
154092 +       bbr->inflight_latest = 0;
154095 +/* Update (most of) our congestion signals: track the recent rate and volume of
154096 + * delivered data, presence of loss, and EWMA degree of ECN marking.
154097 + */
154098 +static void bbr2_update_congestion_signals(
154099 +       struct sock *sk, const struct rate_sample *rs, struct bbr_context *ctx)
154101 +       struct tcp_sock *tp = tcp_sk(sk);
154102 +       struct bbr *bbr = inet_csk_ca(sk);
154103 +       u64 bw;
154105 +       bbr->loss_round_start = 0;
154106 +       if (rs->interval_us <= 0 || !rs->acked_sacked)
154107 +               return; /* Not a valid observation */
154108 +       bw = ctx->sample_bw;
154110 +       if (!rs->is_app_limited || bw >= bbr_max_bw(sk))
154111 +               bbr2_take_bw_hi_sample(sk, bw);
154113 +       bbr->loss_in_round |= (rs->losses > 0);
154115 +       /* Update rate and volume of delivered data from latest round trip: */
154116 +       bbr->bw_latest       = max_t(u32, bbr->bw_latest,       ctx->sample_bw);
154117 +       bbr->inflight_latest = max_t(u32, bbr->inflight_latest, rs->delivered);
154119 +       if (before(rs->prior_delivered, bbr->loss_round_delivered))
154120 +               return;         /* skip the per-round-trip updates */
154121 +       /* Now do per-round-trip updates. */
154122 +       bbr->loss_round_delivered = tp->delivered;  /* mark round trip */
154123 +       bbr->loss_round_start = 1;
154124 +       bbr2_adapt_lower_bounds(sk);
154126 +       /* Update windowed "latest" (single-round-trip) filters. */
154127 +       bbr->loss_in_round = 0;
154128 +       bbr->ecn_in_round  = 0;
154129 +       bbr->bw_latest = ctx->sample_bw;
154130 +       bbr->inflight_latest = rs->delivered;
154133 +/* Bandwidth probing can cause loss. To help coexistence with loss-based
154134 + * congestion control we spread out our probing in a Reno-conscious way. Due to
154135 + * the shape of the Reno sawtooth, the time required between loss epochs for an
154136 + * idealized Reno flow is a number of round trips that is the BDP of that
154137 + * flow. We count packet-timed round trips directly, since measured RTT can
154138 + * vary widely, and Reno is driven by packet-timed round trips.
154139 + */
154140 +static bool bbr2_is_reno_coexistence_probe_time(struct sock *sk)
154142 +       struct bbr *bbr = inet_csk_ca(sk);
154143 +       u32 inflight, rounds, reno_gain, reno_rounds;
154145 +       /* Random loss can shave some small percentage off of our inflight
154146 +        * in each round. To survive this, flows need robust periodic probes.
154147 +        */
154148 +       rounds = bbr->params.bw_probe_max_rounds;
154150 +       reno_gain = bbr->params.bw_probe_reno_gain;
154151 +       if (reno_gain) {
154152 +               inflight = bbr2_target_inflight(sk);
154153 +               reno_rounds = ((u64)inflight * reno_gain) >> BBR_SCALE;
154154 +               rounds = min(rounds, reno_rounds);
154155 +       }
154156 +       return bbr->rounds_since_probe >= rounds;
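
Concretely (worked numbers, not in the patch): with the default bw_probe_reno_gain of BBR_UNIT (1.0x), reno_rounds equals the target inflight, so a flow whose target inflight is 40 packets probes at least every min(bw_probe_max_rounds, 40) = 40 packet-timed round trips, i.e. on the same spacing as the loss epochs of an idealized Reno flow with a 40-packet BDP.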
154159 +/* How long do we want to wait before probing for bandwidth (and risking
154160 + * loss)? We randomize the wait, for better mixing and fairness convergence.
154162 + * We bound the Reno-coexistence inter-bw-probe time to be 62-63 round trips.
154163 + * This is calculated to allow fairness with a 25Mbps, 30ms Reno flow,
154164 + * (e.g. 4K video to a broadband user):
154165 + *   BDP = 25Mbps * .030sec /(1514bytes) = 61.9 packets
154167 + * We bound the BBR-native inter-bw-probe wall clock time to be:
154168 + *  (a) higher than 2 sec: to try to avoid causing loss for a long enough time
154169 + *      to allow Reno at 30ms to get 4K video bw, the inter-bw-probe time must
154170 + *      be at least: 25Mbps * .030sec / (1514bytes) * 0.030sec = 1.9secs
154171 + *  (b) lower than 3 sec: to ensure flows can start probing in a reasonable
154172 + *      amount of time to discover unutilized bw on human-scale interactive
154173 + *      time-scales (e.g. perhaps traffic from a web page download that we
154174 + *      were competing with is now complete).
154175 + */
154176 +static void bbr2_pick_probe_wait(struct sock *sk)
154178 +       struct bbr *bbr = inet_csk_ca(sk);
154180 +       /* Decide the random round-trip bound for wait until probe: */
154181 +       bbr->rounds_since_probe =
154182 +               prandom_u32_max(bbr->params.bw_probe_rand_rounds);
154183 +       /* Decide the random wall clock bound for wait until probe: */
154184 +       bbr->probe_wait_us = bbr->params.bw_probe_base_us +
154185 +                            prandom_u32_max(bbr->params.bw_probe_rand_us);
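
The 1.9 sec figure in (a) above follows directly from the 25Mbps/30ms example; a stand-alone check (not part of the patch):

        #include <stdio.h>

        int main(void)
        {
                /* An idealized 25Mbps, 30ms Reno flow loses once per BDP round
                 * trips, so its inter-loss epoch is BDP * RTT:
                 */
                double bdp_pkts = 25e6 / 8 * 0.030 / 1514;  /* ~61.9 packets */
                double epoch_secs = bdp_pkts * 0.030;       /* ~1.86 secs */

                printf("BDP = %.1f pkts, Reno loss epoch = %.2f secs\n",
                       bdp_pkts, epoch_secs);
                return 0;
        }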
154188 +static void bbr2_set_cycle_idx(struct sock *sk, int cycle_idx)
154190 +       struct bbr *bbr = inet_csk_ca(sk);
154192 +       bbr->cycle_idx = cycle_idx;
154193 +       /* New phase, so need to update cwnd and pacing rate. */
154194 +       bbr->try_fast_path = 0;
154197 +/* Send at estimated bw to fill the pipe, but not queue. We need this phase
154198 + * before PROBE_UP, because as soon as we send faster than the available bw
154199 + * we will start building a queue, and if the buffer is shallow we can cause
154200 + * loss. If we do not fill the pipe before we cause this loss, our bw_hi and
154201 + * inflight_hi estimates will underestimate.
154202 + */
154203 +static void bbr2_start_bw_probe_refill(struct sock *sk, u32 bw_probe_up_rounds)
154205 +       struct tcp_sock *tp = tcp_sk(sk);
154206 +       struct bbr *bbr = inet_csk_ca(sk);
154208 +       bbr2_reset_lower_bounds(sk);
154209 +       if (bbr->inflight_hi != ~0U)
154210 +               bbr->inflight_hi += bbr->params.refill_add_inc;
154211 +       bbr->bw_probe_up_rounds = bw_probe_up_rounds;
154212 +       bbr->bw_probe_up_acks = 0;
154213 +       bbr->stopped_risky_probe = 0;
154214 +       bbr->ack_phase = BBR_ACKS_REFILLING;
154215 +       bbr->next_rtt_delivered = tp->delivered;
154216 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_REFILL);
154219 +/* Now probe max deliverable data rate and volume. */
154220 +static void bbr2_start_bw_probe_up(struct sock *sk)
154222 +       struct tcp_sock *tp = tcp_sk(sk);
154223 +       struct bbr *bbr = inet_csk_ca(sk);
154225 +       bbr->ack_phase = BBR_ACKS_PROBE_STARTING;
154226 +       bbr->next_rtt_delivered = tp->delivered;
154227 +       bbr->cycle_mstamp = tp->tcp_mstamp;
154228 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_UP);
154229 +       bbr2_raise_inflight_hi_slope(sk);
154232 +/* Start a new PROBE_BW probing cycle of some wall clock length. Pick a wall
154233 + * clock time at which to probe beyond an inflight that we think to be
154234 + * safe. This will knowingly risk packet loss, so we want to do this rarely, to
154235 + * keep packet loss rates low. Also start a round-trip counter, to probe faster
154236 + * if we estimate a Reno flow at our BDP would probe faster.
154237 + */
154238 +static void bbr2_start_bw_probe_down(struct sock *sk)
154240 +       struct tcp_sock *tp = tcp_sk(sk);
154241 +       struct bbr *bbr = inet_csk_ca(sk);
154243 +       bbr2_reset_congestion_signals(sk);
154244 +       bbr->bw_probe_up_cnt = ~0U;     /* not growing inflight_hi any more */
154245 +       bbr2_pick_probe_wait(sk);
154246 +       bbr->cycle_mstamp = tp->tcp_mstamp;             /* start wall clock */
154247 +       bbr->ack_phase = BBR_ACKS_PROBE_STOPPING;
154248 +       bbr->next_rtt_delivered = tp->delivered;
154249 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_DOWN);
154252 +/* Cruise: maintain what we estimate to be a neutral, conservative
154253 + * operating point, without attempting to probe up for bandwidth or down for
154254 + * RTT, and only reducing inflight in response to loss/ECN signals.
154255 + */
154256 +static void bbr2_start_bw_probe_cruise(struct sock *sk)
154258 +       struct bbr *bbr = inet_csk_ca(sk);
154260 +       if (bbr->inflight_lo != ~0U)
154261 +               bbr->inflight_lo = min(bbr->inflight_lo, bbr->inflight_hi);
154263 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_CRUISE);
154266 +/* Loss and/or ECN rate is too high while probing.
154267 + * Adapt (once per bw probe) by cutting inflight_hi and then restarting cycle.
154268 + */
154269 +static void bbr2_handle_inflight_too_high(struct sock *sk,
154270 +                                         const struct rate_sample *rs)
154272 +       struct bbr *bbr = inet_csk_ca(sk);
154273 +       const u32 beta = bbr->params.beta;
154275 +       bbr->prev_probe_too_high = 1;
154276 +       bbr->bw_probe_samples = 0;  /* only react once per probe */
154277 +       bbr->debug.event = 'L';     /* Loss/ECN too high */
154278 +       /* If we are app-limited then we are not robustly
154279 +        * probing the max volume of inflight data we think
154280 +        * might be safe (analogous to how app-limited bw
154281 +        * samples are not known to be robustly probing bw).
154282 +        */
154283 +       if (!rs->is_app_limited)
154284 +               bbr->inflight_hi = max_t(u32, rs->tx_in_flight,
154285 +                                        (u64)bbr2_target_inflight(sk) *
154286 +                                        (BBR_UNIT - beta) >> BBR_SCALE);
154287 +       if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == BBR_BW_PROBE_UP)
154288 +               bbr2_start_bw_probe_down(sk);
154291 +/* If we're seeing bw and loss samples reflecting our bw probing, adapt
154292 + * using the signals we see. If loss or ECN mark rate gets too high, then adapt
154293 + * inflight_hi downward. If we're able to push inflight higher without such
154294 + * signals, push higher: adapt inflight_hi upward.
154295 + */
154296 +static bool bbr2_adapt_upper_bounds(struct sock *sk,
154297 +                                  const struct rate_sample *rs)
154299 +       struct bbr *bbr = inet_csk_ca(sk);
154301 +       /* Track when we'll see bw/loss samples resulting from our bw probes. */
154302 +       if (bbr->ack_phase == BBR_ACKS_PROBE_STARTING && bbr->round_start)
154303 +               bbr->ack_phase = BBR_ACKS_PROBE_FEEDBACK;
154304 +       if (bbr->ack_phase == BBR_ACKS_PROBE_STOPPING && bbr->round_start) {
154305 +               /* End of samples from bw probing phase. */
154306 +               bbr->bw_probe_samples = 0;
154307 +               bbr->ack_phase = BBR_ACKS_INIT;
154308 +               /* At this point in the cycle, our current bw sample is also
154309 +                * our best recent chance at finding the highest available bw
154310 +                * for this flow. So now is the best time to forget the bw
154311 +                * samples from the previous cycle, by advancing the window.
154312 +                */
154313 +               if (bbr->mode == BBR_PROBE_BW && !rs->is_app_limited)
154314 +                       bbr2_advance_bw_hi_filter(sk);
154315 +               /* If we had an inflight_hi, then probed and pushed inflight all
154316 +                * the way up to hit that inflight_hi without seeing any
154317 +                * high loss/ECN in all the resulting ACKs from that probing,
154318 +                * then probe up again, this time letting inflight persist at
154319 +                * inflight_hi for a round trip, then accelerating beyond.
154320 +                */
154321 +               if (bbr->mode == BBR_PROBE_BW &&
154322 +                   bbr->stopped_risky_probe && !bbr->prev_probe_too_high) {
154323 +                       bbr->debug.event = 'R';  /* reprobe */
154324 +                       bbr2_start_bw_probe_refill(sk, 0);
154325 +                       return true;  /* yes, decided state transition */
154326 +               }
154327 +       }
154329 +       if (bbr2_is_inflight_too_high(sk, rs)) {
154330 +               if (bbr->bw_probe_samples)  /*  sample is from bw probing? */
154331 +                       bbr2_handle_inflight_too_high(sk, rs);
154332 +       } else {
154333 +               /* Loss/ECN rate is declared safe. Adjust upper bound upward. */
154334 +               if (bbr->inflight_hi == ~0U)  /* no excess queue signals yet? */
154335 +                       return false;
154337 +               /* To be resilient to random loss, we must raise inflight_hi
154338 +                * if we observe in any phase that a higher level is safe.
154339 +                */
154340 +               if (rs->tx_in_flight > bbr->inflight_hi) {
154341 +                       bbr->inflight_hi = rs->tx_in_flight;
154342 +                       bbr->debug.event = 'U';  /* raise up inflight_hi */
154343 +               }
154345 +               if (bbr->mode == BBR_PROBE_BW &&
154346 +                   bbr->cycle_idx == BBR_BW_PROBE_UP)
154347 +                       bbr2_probe_inflight_hi_upward(sk, rs);
154348 +       }
154350 +       return false;
154353 +/* Check if it's time to probe for bandwidth now, and if so, kick it off. */
154354 +static bool bbr2_check_time_to_probe_bw(struct sock *sk)
154356 +       struct bbr *bbr = inet_csk_ca(sk);
154357 +       u32 n;
154359 +       /* If we seem to be at an operating point where we are not seeing loss
154360 +        * but we are seeing ECN marks, then when the ECN marks cease we reprobe
154361 +        * quickly (in case a burst of cross-traffic has ceased and freed up bw,
154362 +        * or in case we are sharing with multiplicatively probing traffic).
154363 +        */
154364 +       if (bbr->params.ecn_reprobe_gain && bbr->ecn_eligible &&
154365 +           bbr->ecn_in_cycle && !bbr->loss_in_cycle &&
154366 +           inet_csk(sk)->icsk_ca_state == TCP_CA_Open) {
154367 +               bbr->debug.event = 'A';  /* *A*ll clear to probe *A*gain */
154368 +               /* Calculate n so that when bbr2_raise_inflight_hi_slope()
154369 +                * computes growth_this_round as 2^n it will be roughly the
154370 +                * desired volume of data (inflight_hi*ecn_reprobe_gain).
154371 +                */
154372 +               n = ilog2((((u64)bbr->inflight_hi *
154373 +                           bbr->params.ecn_reprobe_gain) >> BBR_SCALE));
154374 +               bbr2_start_bw_probe_refill(sk, n);
154375 +               return true;
154376 +       }
154378 +       if (bbr2_has_elapsed_in_phase(sk, bbr->probe_wait_us) ||
154379 +           bbr2_is_reno_coexistence_probe_time(sk)) {
154380 +               bbr2_start_bw_probe_refill(sk, 0);
154381 +               return true;
154382 +       }
154383 +       return false;
154386 +/* Is it time to transition from PROBE_DOWN to PROBE_CRUISE? */
154387 +static bool bbr2_check_time_to_cruise(struct sock *sk, u32 inflight, u32 bw)
154389 +       struct bbr *bbr = inet_csk_ca(sk);
154390 +       bool is_under_bdp, is_long_enough;
154392 +       /* Always need to pull inflight down to leave headroom in queue. */
154393 +       if (inflight > bbr2_inflight_with_headroom(sk))
154394 +               return false;
154396 +       is_under_bdp = inflight <= bbr_inflight(sk, bw, BBR_UNIT);
154397 +       if (bbr->params.drain_to_target)
154398 +               return is_under_bdp;
154400 +       is_long_enough = bbr2_has_elapsed_in_phase(sk, bbr->min_rtt_us);
154401 +       return is_under_bdp || is_long_enough;
154404 +/* PROBE_BW state machine: cruise, refill, probe for bw, or drain? */
154405 +static void bbr2_update_cycle_phase(struct sock *sk,
154406 +                                   const struct rate_sample *rs)
154408 +       struct bbr *bbr = inet_csk_ca(sk);
154409 +       bool is_risky = false, is_queuing = false;
154410 +       u32 inflight, bw;
154412 +       if (!bbr_full_bw_reached(sk))
154413 +               return;
154415 +       /* In DRAIN, PROBE_BW, or PROBE_RTT, adjust upper bounds. */
154416 +       if (bbr2_adapt_upper_bounds(sk, rs))
154417 +               return;         /* already decided state transition */
154419 +       if (bbr->mode != BBR_PROBE_BW)
154420 +               return;
154422 +       inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
154423 +       bw = bbr_max_bw(sk);
154425 +       switch (bbr->cycle_idx) {
154426 +       /* First we spend most of our time cruising with a pacing_gain of 1.0,
154427 +        * which paces at the estimated bw, to try to fully use the pipe
154428 +        * without building queue. If we encounter loss/ECN marks, we adapt
154429 +        * by slowing down.
154430 +        */
154431 +       case BBR_BW_PROBE_CRUISE:
154432 +               if (bbr2_check_time_to_probe_bw(sk))
154433 +                       return;         /* already decided state transition */
154434 +               break;
154436 +       /* After cruising, when it's time to probe, we first "refill": we send
154437 +        * at the estimated bw to fill the pipe, before probing higher and
154438 +        * knowingly risking overflowing the bottleneck buffer (causing loss).
154439 +        */
154440 +       case BBR_BW_PROBE_REFILL:
154441 +               if (bbr->round_start) {
154442 +                       /* After one full round trip of sending in REFILL, we
154443 +                        * start to see bw samples reflecting our REFILL, which
154444 +                        * may be putting too much data in flight.
154445 +                        */
154446 +                       bbr->bw_probe_samples = 1;
154447 +                       bbr2_start_bw_probe_up(sk);
154448 +               }
154449 +               break;
154451 +       /* After we refill the pipe, we probe by using a pacing_gain > 1.0, to
154452 +        * probe for bw. If we have not seen loss/ECN, we try to raise inflight
154453 +        * to at least pacing_gain*BDP; note that this may take more than
154454 +        * min_rtt if min_rtt is small (e.g. on a LAN).
154455 +        *
154456 +        * We terminate PROBE_UP bandwidth probing upon any of the following:
154457 +        *
154458 +        * (1) We've pushed inflight up to hit the inflight_hi target set in the
154459 +        *     most recent previous bw probe phase. Thus we want to start
154460 +        *     draining the queue immediately because it's very likely the most
154461 +        *     recently sent packets will fill the queue and cause drops.
154462 +        *     (checked here)
154463 +        * (2) We have probed for at least 1*min_rtt_us, and the
154464 +        *     estimated queue is high enough (inflight > 1.25 * estimated_bdp).
154465 +        *     (checked here)
154466 +        * (3) Loss filter says loss rate is "too high".
154467 +        *     (checked in bbr2_is_inflight_too_high())
154468 +        * (4) ECN filter says ECN mark rate is "too high".
154469 +        *     (checked in bbr2_is_inflight_too_high())
154470 +        */
154471 +       case BBR_BW_PROBE_UP:
154472 +               if (bbr->prev_probe_too_high &&
154473 +                   inflight >= bbr->inflight_hi) {
154474 +                       bbr->stopped_risky_probe = 1;
154475 +                       is_risky = true;
154476 +                       bbr->debug.event = 'D';   /* D for danger */
154477 +               } else if (bbr2_has_elapsed_in_phase(sk, bbr->min_rtt_us) &&
154478 +                          inflight >=
154479 +                          bbr_inflight(sk, bw,
154480 +                                       bbr->params.bw_probe_pif_gain)) {
154481 +                       is_queuing = true;
154482 +                       bbr->debug.event = 'Q'; /* building Queue */
154483 +               }
154484 +               if (is_risky || is_queuing) {
154485 +                       bbr->prev_probe_too_high = 0;  /* no loss/ECN (yet) */
154486 +                       bbr2_start_bw_probe_down(sk);  /* restart w/ down */
154487 +               }
154488 +               break;
154490 +       /* After probing in PROBE_UP, we have usually accumulated some data in
154491 +        * the bottleneck buffer (if bw probing didn't find more bw). We next
154492 +        * enter PROBE_DOWN to try to drain any excess data from the queue. To
154493 +        * do this, we use a pacing_gain < 1.0. We hold this pacing gain until
154494 +        * our inflight is less than that target cruising point, which is the
154495 +        * minimum of (a) the amount needed to leave headroom, and (b) the
154496 +        * estimated BDP. Once inflight falls to match the target, we estimate
154497 +        * the queue is drained; persisting would underutilize the pipe.
154498 +        */
154499 +       case BBR_BW_PROBE_DOWN:
154500 +               if (bbr2_check_time_to_probe_bw(sk))
154501 +                       return;         /* already decided state transition */
154502 +               if (bbr2_check_time_to_cruise(sk, inflight, bw))
154503 +                       bbr2_start_bw_probe_cruise(sk);
154504 +               break;
154506 +       default:
154507 +               WARN_ONCE(1, "BBR invalid cycle index %u\n", bbr->cycle_idx);
154508 +       }
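
In summary, the rotation implemented by this switch is REFILL -> UP -> DOWN -> CRUISE: REFILL advances to UP after one round trip of sending; UP falls to DOWN once the probe looks risky (inflight_hi reached) or a queue is clearly building; DOWN exits to CRUISE once inflight has drained to the target, or jumps straight back to REFILL if bbr2_check_time_to_probe_bw() says it is already time to probe again; and CRUISE returns to REFILL the same way. Loss/ECN handling in bbr2_adapt_upper_bounds() can additionally cut UP short at any point.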
154511 +/* Exiting PROBE_RTT, so return to bandwidth probing in STARTUP or PROBE_BW. */
154512 +static void bbr2_exit_probe_rtt(struct sock *sk)
154514 +       struct bbr *bbr = inet_csk_ca(sk);
154516 +       bbr2_reset_lower_bounds(sk);
154517 +       if (bbr_full_bw_reached(sk)) {
154518 +               bbr->mode = BBR_PROBE_BW;
154519 +               /* Raising inflight after PROBE_RTT may cause loss, so reset
154520 +                * the PROBE_BW clock and schedule the next bandwidth probe for
154521 +                * a friendly and randomized future point in time.
154522 +                */
154523 +               bbr2_start_bw_probe_down(sk);
154524 +               /* Since we are exiting PROBE_RTT, we know inflight is
154525 +                * below our estimated BDP, so it is reasonable to cruise.
154526 +                */
154527 +               bbr2_start_bw_probe_cruise(sk);
154528 +       } else {
154529 +               bbr->mode = BBR_STARTUP;
154530 +       }
154533 +/* Exit STARTUP based on loss rate > 1% and loss gaps in round >= N. Wait until
154534 + * the end of the round in recovery to get a good estimate of how many packets
154535 + * have been lost, and how many we need to drain with a low pacing rate.
154536 + */
154537 +static void bbr2_check_loss_too_high_in_startup(struct sock *sk,
154538 +                                              const struct rate_sample *rs)
154540 +       struct bbr *bbr = inet_csk_ca(sk);
154542 +       if (bbr_full_bw_reached(sk))
154543 +               return;
154545 +       /* For STARTUP exit, check the loss rate at the end of each round trip
154546 +        * of Recovery episodes in STARTUP. We check the loss rate at the end
154547 +        * of the round trip to filter out noisy/low loss and have a better
154548 +        * sense of inflight (extent of loss), so we can drain more accurately.
154549 +        */
154550 +       if (rs->losses && bbr->loss_events_in_round < 0xf)
154551 +               bbr->loss_events_in_round++;  /* update saturating counter */
154552 +       if (bbr->params.full_loss_cnt && bbr->loss_round_start &&
154553 +           inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery &&
154554 +           bbr->loss_events_in_round >= bbr->params.full_loss_cnt &&
154555 +           bbr2_is_inflight_too_high(sk, rs)) {
154556 +               bbr->debug.event = 'P';  /* Packet loss caused STARTUP exit */
154557 +               bbr2_handle_queue_too_high_in_startup(sk);
154558 +               return;
154559 +       }
154560 +       if (bbr->loss_round_start)
154561 +               bbr->loss_events_in_round = 0;
154564 +/* If we are done draining, advance into steady state operation in PROBE_BW. */
154565 +static void bbr2_check_drain(struct sock *sk, const struct rate_sample *rs,
154566 +                            struct bbr_context *ctx)
154568 +       struct bbr *bbr = inet_csk_ca(sk);
154570 +       if (bbr_check_drain(sk, rs, ctx)) {
154571 +               bbr->mode = BBR_PROBE_BW;
154572 +               bbr2_start_bw_probe_down(sk);
154573 +       }
154576 +static void bbr2_update_model(struct sock *sk, const struct rate_sample *rs,
154577 +                             struct bbr_context *ctx)
154579 +       bbr2_update_congestion_signals(sk, rs, ctx);
154580 +       bbr_update_ack_aggregation(sk, rs);
154581 +       bbr2_check_loss_too_high_in_startup(sk, rs);
154582 +       bbr_check_full_bw_reached(sk, rs);
154583 +       bbr2_check_drain(sk, rs, ctx);
154584 +       bbr2_update_cycle_phase(sk, rs);
154585 +       bbr_update_min_rtt(sk, rs);
154588 +/* Fast path for app-limited case.
154590 + * On each ack, we execute bbr state machine, which primarily consists of:
154591 + * 1) update model based on new rate sample, and
154592 + * 2) update control based on updated model or state change.
154594 + * There are certain workloads/scenarios, e.g. the app-limited case, where
154595 + * we can either skip updating the model or skip updating both the model
154596 + * and the control. This provides significant softirq cpu savings for
154597 + * processing incoming acks.
154599 + * In the app-limited case, if there is no congestion (loss/ecn) and the
154600 + * observed bw sample is less than the current estimated bw, then we can
154601 + * skip some of the computation in bbr state processing:
154603 + * - if there is no rtt/mode/phase change: in this case, since all the
154604 + *   parameters of the network model are constant, we can skip the model
154605 + *   update as well as the control update.
154607 + * - otherwise we can skip the rest of the model update, but we still need
154608 + *   to update the control to account for the new rtt/mode/phase.
154610 + * Returns whether we can take the fast path or not.
154611 + */
154612 +static bool bbr2_fast_path(struct sock *sk, bool *update_model,
154613 +               const struct rate_sample *rs, struct bbr_context *ctx)
154615 +       struct bbr *bbr = inet_csk_ca(sk);
154616 +       u32 prev_min_rtt_us, prev_mode;
154618 +       if (bbr->params.fast_path && bbr->try_fast_path &&
154619 +           rs->is_app_limited && ctx->sample_bw < bbr_max_bw(sk) &&
154620 +           !bbr->loss_in_round && !bbr->ecn_in_round) {
154621 +               prev_mode = bbr->mode;
154622 +               prev_min_rtt_us = bbr->min_rtt_us;
154623 +               bbr2_check_drain(sk, rs, ctx);
154624 +               bbr2_update_cycle_phase(sk, rs);
154625 +               bbr_update_min_rtt(sk, rs);
154627 +               if (bbr->mode == prev_mode &&
154628 +                   bbr->min_rtt_us == prev_min_rtt_us &&
154629 +                   bbr->try_fast_path)
154630 +                       return true;
154632 +               /* Skip model update, but control still needs to be updated */
154633 +               *update_model = false;
154634 +       }
154635 +       return false;
154638 +static void bbr2_main(struct sock *sk, const struct rate_sample *rs)
154640 +       struct tcp_sock *tp = tcp_sk(sk);
154641 +       struct bbr *bbr = inet_csk_ca(sk);
154642 +       struct bbr_context ctx = { 0 };
154643 +       bool update_model = true;
154644 +       u32 bw;
154646 +       bbr->debug.event = '.';  /* init to default NOP (no event yet) */
154648 +       bbr_update_round_start(sk, rs, &ctx);
154649 +       if (bbr->round_start) {
154650 +               bbr->rounds_since_probe =
154651 +                       min_t(s32, bbr->rounds_since_probe + 1, 0xFF);
154652 +               bbr2_update_ecn_alpha(sk);
154653 +       }
154655 +       bbr->ecn_in_round  |= rs->is_ece;
154656 +       bbr_calculate_bw_sample(sk, rs, &ctx);
154658 +       if (bbr2_fast_path(sk, &update_model, rs, &ctx))
154659 +               goto out;
154661 +       if (update_model)
154662 +               bbr2_update_model(sk, rs, &ctx);
154664 +       bbr_update_gains(sk);
154665 +       bw = bbr_bw(sk);
154666 +       bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
154667 +       bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain,
154668 +                    tp->snd_cwnd, &ctx);
154669 +       bbr2_bound_cwnd_for_inflight_model(sk);
154671 +out:
154672 +       bbr->prev_ca_state = inet_csk(sk)->icsk_ca_state;
154673 +       bbr->loss_in_cycle |= rs->lost > 0;
154674 +       bbr->ecn_in_cycle  |= rs->delivered_ce > 0;
154676 +       bbr_debug(sk, rs->acked_sacked, rs, &ctx);
154679 +/* Module parameters that are settable by TCP_CONGESTION_PARAMS are declared
154680 + * down here, so that the algorithm functions that use the parameters must use
154681 + * the per-socket parameters; if they accidentally use the global version
154682 + * then there will be a compile error.
154683 + * TODO(ncardwell): move all per-socket parameters down to this section.
154684 + */
154686 +/* On losses, scale down inflight and pacing rate by beta scaled by BBR_SCALE.
154687 + * No loss response when 0. Max allowed value is 255.
154688 + */
154689 +static u32 bbr_beta = BBR_UNIT * 30 / 100;
154691 +/* Gain factor for ECN mark ratio samples, scaled by BBR_SCALE.
154692 + * Max allowed value is 255.
154693 + */
154694 +static u32 bbr_ecn_alpha_gain = BBR_UNIT * 1 / 16;  /* 1/16 = 6.25% */
154696 +/* The initial value for the ecn_alpha state variable. Default and max
154697 + * BBR_UNIT (256), representing 1.0. This allows a flow to respond quickly
154698 + * to congestion if the bottleneck is congested when the flow starts up.
154699 + */
154700 +static u32 bbr_ecn_alpha_init = BBR_UNIT;      /* 1.0, to respond quickly */
154702 +/* On ECN, cut inflight_lo to (1 - ecn_factor * ecn_alpha) scaled by BBR_SCALE.
154703 + * No ECN-based bounding when 0. Max allowed value is 255.
154704 + */
154705 +static u32 bbr_ecn_factor = BBR_UNIT * 1 / 3;      /* 1/3 = 33% */
154707 +/* Estimate bw probing has gone too far if CE ratio exceeds this threshold.
154708 + * Scaled by BBR_SCALE. Disabled when 0. Max allowed is 255.
154709 + */
154710 +static u32 bbr_ecn_thresh = BBR_UNIT * 1 / 2;  /* 1/2 = 50% */
154712 +/* Max RTT (in usec) at which to use sender-side ECN logic.
154713 + * Disabled when 0 (ECN allowed at any RTT).
154714 + * Max allowed for the parameter is 524287 (0x7ffff) us, ~524 ms.
154715 + */
154716 +static u32 bbr_ecn_max_rtt_us = 5000;
154718 +/* If non-zero, if in a cycle with no losses but some ECN marks, after ECN
154719 + * clears then use a multiplicative increase to quickly reprobe bw by
154720 + * starting inflight probing at the given multiple of inflight_hi.
154721 + * Default for this experimental knob is 0 (disabled).
154722 + * Planned value for experiments: BBR_UNIT * 1 / 2 = 128, representing 0.5.
154723 + */
154724 +static u32 bbr_ecn_reprobe_gain;
154726 +/* Estimate bw probing has gone too far if loss rate exceeds this level. */
154727 +static u32 bbr_loss_thresh = BBR_UNIT * 2 / 100;  /* 2% loss */
154729 +/* Exit STARTUP if number of loss marking events in a Recovery round is >= N,
154730 + * and loss rate is higher than bbr_loss_thresh.
154731 + * Disabled if 0. Max allowed value is 15 (0xF).
154732 + */
154733 +static u32 bbr_full_loss_cnt = 8;
154735 +/* Exit STARTUP if number of round trips with ECN mark rate above ecn_thresh
154736 + * meets this count. Max allowed value is 3.
154737 + */
154738 +static u32 bbr_full_ecn_cnt = 2;
154740 +/* Fraction of unutilized headroom to try to leave in path upon high loss. */
154741 +static u32 bbr_inflight_headroom = BBR_UNIT * 15 / 100;
154743 +/* Multiplier to get target inflight (as multiple of BDP) for PROBE_UP phase.
154744 + * Default is 1.25x, as in BBR v1. Max allowed is 511.
154745 + */
154746 +static u32 bbr_bw_probe_pif_gain = BBR_UNIT * 5 / 4;
154748 +/* Multiplier to get Reno-style probe epoch duration as: k * BDP round trips.
154749 + * If zero, disables this BBR v2 Reno-style BDP-scaled coexistence mechanism.
154750 + * Max allowed is 511.
154751 + */
154752 +static u32 bbr_bw_probe_reno_gain = BBR_UNIT;
154754 +/* Max number of packet-timed rounds to wait before probing for bandwidth.  If
154755 + * we want to tolerate 1% random loss per round, and not have this cut our
154756 + * inflight too much, we must probe for bw periodically on roughly this scale.
154757 + * If low, limits Reno/CUBIC coexistence; if high, limits loss tolerance.
154758 + * We aim to be fair with Reno/CUBIC up to a BDP of at least:
154759 + *  BDP = 25Mbps * .030sec /(1514bytes) = 61.9 packets
154760 + */
154761 +static u32 bbr_bw_probe_max_rounds = 63;
154763 +/* Max amount of randomness to inject in round counting for Reno-coexistence.
154764 + * Max value is 15.
154765 + */
154766 +static u32 bbr_bw_probe_rand_rounds = 2;
154768 +/* Use BBR-native probe time scale starting at this many usec.
154769 + * We aim to be fair with Reno/CUBIC up to an inter-loss time epoch of at least:
154770 + *  BDP*RTT = 25Mbps * .030sec /(1514bytes) * 0.030sec = 1.9 secs
154771 + */
154772 +static u32 bbr_bw_probe_base_us = 2 * USEC_PER_SEC;  /* 2 secs */
154774 +/* Use BBR-native probes spread over this many usec: */
154775 +static u32 bbr_bw_probe_rand_us = 1 * USEC_PER_SEC;  /* 1 sec */
154777 +/* Undo the model changes made in loss recovery if recovery was spurious? */
154778 +static bool bbr_undo = true;
154780 +/* Use fast path if app-limited, no loss/ECN, and target cwnd was reached? */
154781 +static bool bbr_fast_path = true;      /* default: enabled */
154783 +/* Use fast ack mode ? */
154784 +static int bbr_fast_ack_mode = 1;      /* default: rwnd check off */
154786 +/* How much to additively increase inflight_hi when entering REFILL? */
154787 +static u32 bbr_refill_add_inc;         /* default: disabled */
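
Since BBR_UNIT is 256, integer truncation leaves several of these defaults slightly below their nominal fractions; a quick stand-alone check (not part of the patch):

        #include <stdio.h>

        #define BBR_UNIT 256

        int main(void)
        {
                printf("beta:              %3d/256 = %.3f (nominal 0.30)\n",
                       BBR_UNIT * 30 / 100, (BBR_UNIT * 30 / 100) / 256.0);
                printf("loss_thresh:       %3d/256 = %.3f (nominal 0.02)\n",
                       BBR_UNIT * 2 / 100, (BBR_UNIT * 2 / 100) / 256.0);
                printf("inflight_headroom: %3d/256 = %.3f (nominal 0.15)\n",
                       BBR_UNIT * 15 / 100, (BBR_UNIT * 15 / 100) / 256.0);
                return 0;
        }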
154789 +module_param_named(beta,                 bbr_beta,                 uint, 0644);
154790 +module_param_named(ecn_alpha_gain,       bbr_ecn_alpha_gain,       uint, 0644);
154791 +module_param_named(ecn_alpha_init,       bbr_ecn_alpha_init,       uint, 0644);
154792 +module_param_named(ecn_factor,           bbr_ecn_factor,           uint, 0644);
154793 +module_param_named(ecn_thresh,           bbr_ecn_thresh,           uint, 0644);
154794 +module_param_named(ecn_max_rtt_us,       bbr_ecn_max_rtt_us,       uint, 0644);
154795 +module_param_named(ecn_reprobe_gain,     bbr_ecn_reprobe_gain,     uint, 0644);
154796 +module_param_named(loss_thresh,          bbr_loss_thresh,          uint, 0664);
154797 +module_param_named(full_loss_cnt,        bbr_full_loss_cnt,        uint, 0664);
154798 +module_param_named(full_ecn_cnt,         bbr_full_ecn_cnt,         uint, 0664);
154799 +module_param_named(inflight_headroom,    bbr_inflight_headroom,    uint, 0664);
154800 +module_param_named(bw_probe_pif_gain,    bbr_bw_probe_pif_gain,    uint, 0664);
154801 +module_param_named(bw_probe_reno_gain,   bbr_bw_probe_reno_gain,   uint, 0664);
154802 +module_param_named(bw_probe_max_rounds,  bbr_bw_probe_max_rounds,  uint, 0664);
154803 +module_param_named(bw_probe_rand_rounds, bbr_bw_probe_rand_rounds, uint, 0664);
154804 +module_param_named(bw_probe_base_us,     bbr_bw_probe_base_us,     uint, 0664);
154805 +module_param_named(bw_probe_rand_us,     bbr_bw_probe_rand_us,     uint, 0664);
154806 +module_param_named(undo,                 bbr_undo,                 bool, 0664);
154807 +module_param_named(fast_path,            bbr_fast_path,            bool, 0664);
154808 +module_param_named(fast_ack_mode,        bbr_fast_ack_mode,        uint, 0664);
154809 +module_param_named(refill_add_inc,       bbr_refill_add_inc,       uint, 0664);
154811 +static void bbr2_init(struct sock *sk)
154813 +       struct tcp_sock *tp = tcp_sk(sk);
154814 +       struct bbr *bbr = inet_csk_ca(sk);
154816 +       bbr_init(sk);   /* run shared init code for v1 and v2 */
154818 +       /* BBR v2 parameters: */
154819 +       bbr->params.beta = min_t(u32, 0xFFU, bbr_beta);
154820 +       bbr->params.ecn_alpha_gain = min_t(u32, 0xFFU, bbr_ecn_alpha_gain);
154821 +       bbr->params.ecn_alpha_init = min_t(u32, BBR_UNIT, bbr_ecn_alpha_init);
154822 +       bbr->params.ecn_factor = min_t(u32, 0xFFU, bbr_ecn_factor);
154823 +       bbr->params.ecn_thresh = min_t(u32, 0xFFU, bbr_ecn_thresh);
154824 +       bbr->params.ecn_max_rtt_us = min_t(u32, 0x7ffffU, bbr_ecn_max_rtt_us);
154825 +       bbr->params.ecn_reprobe_gain = min_t(u32, 0x1FF, bbr_ecn_reprobe_gain);
154826 +       bbr->params.loss_thresh = min_t(u32, 0xFFU, bbr_loss_thresh);
154827 +       bbr->params.full_loss_cnt = min_t(u32, 0xFU, bbr_full_loss_cnt);
154828 +       bbr->params.full_ecn_cnt = min_t(u32, 0x3U, bbr_full_ecn_cnt);
154829 +       bbr->params.inflight_headroom =
154830 +               min_t(u32, 0xFFU, bbr_inflight_headroom);
154831 +       bbr->params.bw_probe_pif_gain =
154832 +               min_t(u32, 0x1FFU, bbr_bw_probe_pif_gain);
154833 +       bbr->params.bw_probe_reno_gain =
154834 +               min_t(u32, 0x1FFU, bbr_bw_probe_reno_gain);
154835 +       bbr->params.bw_probe_max_rounds =
154836 +               min_t(u32, 0xFFU, bbr_bw_probe_max_rounds);
154837 +       bbr->params.bw_probe_rand_rounds =
154838 +               min_t(u32, 0xFU, bbr_bw_probe_rand_rounds);
154839 +       bbr->params.bw_probe_base_us =
154840 +               min_t(u32, (1 << 26) - 1, bbr_bw_probe_base_us);
154841 +       bbr->params.bw_probe_rand_us =
154842 +               min_t(u32, (1 << 26) - 1, bbr_bw_probe_rand_us);
154843 +       bbr->params.undo = bbr_undo;
154844 +       bbr->params.fast_path = bbr_fast_path ? 1 : 0;
154845 +       bbr->params.refill_add_inc = min_t(u32, 0x3U, bbr_refill_add_inc);
154847 +       /* BBR v2 state: */
154848 +       bbr->initialized = 1;
154849 +       /* Start sampling ECN mark rate after first full flight is ACKed: */
154850 +       bbr->loss_round_delivered = tp->delivered + 1;
154851 +       bbr->loss_round_start = 0;
154852 +       bbr->undo_bw_lo = 0;
154853 +       bbr->undo_inflight_lo = 0;
154854 +       bbr->undo_inflight_hi = 0;
154855 +       bbr->loss_events_in_round = 0;
154856 +       bbr->startup_ecn_rounds = 0;
154857 +       bbr2_reset_congestion_signals(sk);
154858 +       bbr->bw_lo = ~0U;
154859 +       bbr->bw_hi[0] = 0;
154860 +       bbr->bw_hi[1] = 0;
154861 +       bbr->inflight_lo = ~0U;
154862 +       bbr->inflight_hi = ~0U;
154863 +       bbr->bw_probe_up_cnt = ~0U;
154864 +       bbr->bw_probe_up_acks = 0;
154865 +       bbr->bw_probe_up_rounds = 0;
154866 +       bbr->probe_wait_us = 0;
154867 +       bbr->stopped_risky_probe = 0;
154868 +       bbr->ack_phase = BBR_ACKS_INIT;
154869 +       bbr->rounds_since_probe = 0;
154870 +       bbr->bw_probe_samples = 0;
154871 +       bbr->prev_probe_too_high = 0;
154872 +       bbr->ecn_eligible = 0;
154873 +       bbr->ecn_alpha = bbr->params.ecn_alpha_init;
154874 +       bbr->alpha_last_delivered = 0;
154875 +       bbr->alpha_last_delivered_ce = 0;
154877 +       tp->fast_ack_mode = min_t(u32, 0x2U, bbr_fast_ack_mode);
154880 +/* Core TCP stack informs us that the given skb was just marked lost. */
154881 +static void bbr2_skb_marked_lost(struct sock *sk, const struct sk_buff *skb)
154883 +       struct tcp_sock *tp = tcp_sk(sk);
154884 +       struct bbr *bbr = inet_csk_ca(sk);
154885 +       struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
154886 +       struct rate_sample rs;
154888 +       /* Capture "current" data over the full round trip of loss,
154889 +        * to have a better chance to see the full capacity of the path.
154890 +        */
154891 +       if (!bbr->loss_in_round)  /* first loss in this round trip? */
154892 +               bbr->loss_round_delivered = tp->delivered;  /* set round trip */
154893 +       bbr->loss_in_round = 1;
154894 +       bbr->loss_in_cycle = 1;
154896 +       if (!bbr->bw_probe_samples)
154897 +               return;  /* not an skb sent while probing for bandwidth */
154898 +       if (unlikely(!scb->tx.delivered_mstamp))
154899 +               return;  /* skb was SACKed, reneged, marked lost; ignore it */
154900 +       /* We are probing for bandwidth. Construct a rate sample that
154901 +        * estimates what happened in the flight leading up to this lost skb,
154902 +        * then see if the loss rate went too high, and if so at which packet.
154903 +        */
154904 +       memset(&rs, 0, sizeof(rs));
154905 +       rs.tx_in_flight = scb->tx.in_flight;
154906 +       rs.lost = tp->lost - scb->tx.lost;
154907 +       rs.is_app_limited = scb->tx.is_app_limited;
154908 +       if (bbr2_is_inflight_too_high(sk, &rs)) {
154909 +               rs.tx_in_flight = bbr2_inflight_hi_from_lost_skb(sk, &rs, skb);
154910 +               bbr2_handle_inflight_too_high(sk, &rs);
154911 +       }
154914 +/* Revert short-term model if current loss recovery event was spurious. */
154915 +static u32 bbr2_undo_cwnd(struct sock *sk)
154917 +       struct tcp_sock *tp = tcp_sk(sk);
154918 +       struct bbr *bbr = inet_csk_ca(sk);
154920 +       bbr->debug.undo = 1;
154921 +       bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
154922 +       bbr->full_bw_cnt = 0;
154923 +       bbr->loss_in_round = 0;
154925 +       if (!bbr->params.undo)
154926 +               return tp->snd_cwnd;
154928 +       /* Revert to cwnd and other state saved before loss episode. */
154929 +       bbr->bw_lo = max(bbr->bw_lo, bbr->undo_bw_lo);
154930 +       bbr->inflight_lo = max(bbr->inflight_lo, bbr->undo_inflight_lo);
154931 +       bbr->inflight_hi = max(bbr->inflight_hi, bbr->undo_inflight_hi);
154932 +       return bbr->prior_cwnd;
154935 +/* Entering loss recovery, so save state for when we undo recovery. */
154936 +static u32 bbr2_ssthresh(struct sock *sk)
154938 +       struct bbr *bbr = inet_csk_ca(sk);
154940 +       bbr_save_cwnd(sk);
154941 +       /* For undo, save state that adapts based on loss signal. */
154942 +       bbr->undo_bw_lo         = bbr->bw_lo;
154943 +       bbr->undo_inflight_lo   = bbr->inflight_lo;
154944 +       bbr->undo_inflight_hi   = bbr->inflight_hi;
154945 +       return tcp_sk(sk)->snd_ssthresh;
154948 +static enum tcp_bbr2_phase bbr2_get_phase(struct bbr *bbr)
154950 +       switch (bbr->mode) {
154951 +       case BBR_STARTUP:
154952 +               return BBR2_PHASE_STARTUP;
154953 +       case BBR_DRAIN:
154954 +               return BBR2_PHASE_DRAIN;
154955 +       case BBR_PROBE_BW:
154956 +               break;
154957 +       case BBR_PROBE_RTT:
154958 +               return BBR2_PHASE_PROBE_RTT;
154959 +       default:
154960 +               return BBR2_PHASE_INVALID;
154961 +       }
154962 +       switch (bbr->cycle_idx) {
154963 +       case BBR_BW_PROBE_UP:
154964 +               return BBR2_PHASE_PROBE_BW_UP;
154965 +       case BBR_BW_PROBE_DOWN:
154966 +               return BBR2_PHASE_PROBE_BW_DOWN;
154967 +       case BBR_BW_PROBE_CRUISE:
154968 +               return BBR2_PHASE_PROBE_BW_CRUISE;
154969 +       case BBR_BW_PROBE_REFILL:
154970 +               return BBR2_PHASE_PROBE_BW_REFILL;
154971 +       default:
154972 +               return BBR2_PHASE_INVALID;
154973 +       }
154976 +static size_t bbr2_get_info(struct sock *sk, u32 ext, int *attr,
154977 +                           union tcp_cc_info *info)
154979 +       if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
154980 +           ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
154981 +               struct bbr *bbr = inet_csk_ca(sk);
154982 +               u64 bw = bbr_bw_bytes_per_sec(sk, bbr_bw(sk));
154983 +               u64 bw_hi = bbr_bw_bytes_per_sec(sk, bbr_max_bw(sk));
154984 +               u64 bw_lo = bbr->bw_lo == ~0U ?
154985 +                       ~0ULL : bbr_bw_bytes_per_sec(sk, bbr->bw_lo);
154987 +               memset(&info->bbr2, 0, sizeof(info->bbr2));
154988 +               info->bbr2.bbr_bw_lsb           = (u32)bw;
154989 +               info->bbr2.bbr_bw_msb           = (u32)(bw >> 32);
154990 +               info->bbr2.bbr_min_rtt          = bbr->min_rtt_us;
154991 +               info->bbr2.bbr_pacing_gain      = bbr->pacing_gain;
154992 +               info->bbr2.bbr_cwnd_gain        = bbr->cwnd_gain;
154993 +               info->bbr2.bbr_bw_hi_lsb        = (u32)bw_hi;
154994 +               info->bbr2.bbr_bw_hi_msb        = (u32)(bw_hi >> 32);
154995 +               info->bbr2.bbr_bw_lo_lsb        = (u32)bw_lo;
154996 +               info->bbr2.bbr_bw_lo_msb        = (u32)(bw_lo >> 32);
154997 +               info->bbr2.bbr_mode             = bbr->mode;
154998 +               info->bbr2.bbr_phase            = (__u8)bbr2_get_phase(bbr);
154999 +               info->bbr2.bbr_version          = (__u8)2;
155000 +               info->bbr2.bbr_inflight_lo      = bbr->inflight_lo;
155001 +               info->bbr2.bbr_inflight_hi      = bbr->inflight_hi;
155002 +               info->bbr2.bbr_extra_acked      = bbr_extra_acked(sk);
155003 +               *attr = INET_DIAG_BBRINFO;
155004 +               return sizeof(info->bbr2);
155005 +       }
155006 +       return 0;
155009 +static void bbr2_set_state(struct sock *sk, u8 new_state)
155011 +       struct tcp_sock *tp = tcp_sk(sk);
155012 +       struct bbr *bbr = inet_csk_ca(sk);
155014 +       if (new_state == TCP_CA_Loss) {
155015 +               struct rate_sample rs = { .losses = 1 };
155016 +               struct bbr_context ctx = { 0 };
155018 +               bbr->prev_ca_state = TCP_CA_Loss;
155019 +               bbr->full_bw = 0;
155020 +               if (!bbr2_is_probing_bandwidth(sk) && bbr->inflight_lo == ~0U) {
155021 +                       /* bbr_adapt_lower_bounds() needs cwnd before
155022 +                        * we suffered an RTO, to update inflight_lo:
155023 +                        */
155024 +                       bbr->inflight_lo =
155025 +                               max(tp->snd_cwnd, bbr->prior_cwnd);
155026 +               }
155027 +               bbr_debug(sk, 0, &rs, &ctx);
155028 +       } else if (bbr->prev_ca_state == TCP_CA_Loss &&
155029 +                  new_state != TCP_CA_Loss) {
155030 +               tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
155031 +               bbr->try_fast_path = 0; /* bound cwnd using latest model */
155032 +       }
155035 +static struct tcp_congestion_ops tcp_bbr2_cong_ops __read_mostly = {
155036 +       .flags          = TCP_CONG_NON_RESTRICTED | TCP_CONG_WANTS_CE_EVENTS,
155037 +       .name           = "bbr2",
155038 +       .owner          = THIS_MODULE,
155039 +       .init           = bbr2_init,
155040 +       .cong_control   = bbr2_main,
155041 +       .sndbuf_expand  = bbr_sndbuf_expand,
155042 +       .skb_marked_lost = bbr2_skb_marked_lost,
155043 +       .undo_cwnd      = bbr2_undo_cwnd,
155044 +       .cwnd_event     = bbr_cwnd_event,
155045 +       .ssthresh       = bbr2_ssthresh,
155046 +       .tso_segs       = bbr_tso_segs,
155047 +       .get_info       = bbr2_get_info,
155048 +       .set_state      = bbr2_set_state,
155051 +static int __init bbr_register(void)
155053 +       BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
155054 +       return tcp_register_congestion_control(&tcp_bbr2_cong_ops);
155057 +static void __exit bbr_unregister(void)
155059 +       tcp_unregister_congestion_control(&tcp_bbr2_cong_ops);
155062 +module_init(bbr_register);
155063 +module_exit(bbr_unregister);
155065 +MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
155066 +MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
155067 +MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
155068 +MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
155069 +MODULE_AUTHOR("Priyaranjan Jha <priyarjha@google.com>");
155070 +MODULE_AUTHOR("Yousuk Seung <ysseung@google.com>");
155071 +MODULE_AUTHOR("Kevin Yang <yyd@google.com>");
155072 +MODULE_AUTHOR("Arjun Roy <arjunroy@google.com>");
155074 +MODULE_LICENSE("Dual BSD/GPL");
155075 +MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");
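
Once registered, "bbr2" is selectable like any other congestion control: system-wide via the net.ipv4.tcp_congestion_control sysctl, or per socket with the TCP_CONGESTION socket option, as in this minimal user-space sketch (an illustration, not part of the patch; it assumes the module is built and loaded):

        #include <netinet/in.h>
        #include <netinet/tcp.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/socket.h>
        #include <unistd.h>

        int main(void)
        {
                const char name[] = "bbr2";
                int fd = socket(AF_INET, SOCK_STREAM, 0);

                if (fd < 0) {
                        perror("socket");
                        return 1;
                }
                /* Allowed for unprivileged sockets because the ops above are
                 * registered with TCP_CONG_NON_RESTRICTED.
                 */
                if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
                               name, strlen(name)) < 0)
                        perror("setsockopt(TCP_CONGESTION)");
                close(fd);
                return 0;
        }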
155076 diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
155077 index 563d016e7478..153ed9010c0c 100644
155078 --- a/net/ipv4/tcp_cong.c
155079 +++ b/net/ipv4/tcp_cong.c
155080 @@ -179,6 +179,7 @@ void tcp_init_congestion_control(struct sock *sk)
155081         struct inet_connection_sock *icsk = inet_csk(sk);
155083         tcp_sk(sk)->prior_ssthresh = 0;
155084 +       tcp_sk(sk)->fast_ack_mode = 0;
155085         if (icsk->icsk_ca_ops->init)
155086                 icsk->icsk_ca_ops->init(sk);
155087         if (tcp_ca_needs_ecn(sk))
155088 @@ -230,6 +231,10 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
155089                 ret = -ENOENT;
155090         } else if (!bpf_try_module_get(ca, ca->owner)) {
155091                 ret = -EBUSY;
155092 +       } else if (!net_eq(net, &init_net) &&
155093 +                       !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
155094 +               /* Only init netns can set default to a restricted algorithm */
155095 +               ret = -EPERM;
155096         } else {
155097                 prev = xchg(&net->ipv4.tcp_congestion_control, ca);
155098                 if (prev)
155099 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
155100 index 69a545db80d2..45aaba87ce8e 100644
155101 --- a/net/ipv4/tcp_input.c
155102 +++ b/net/ipv4/tcp_input.c
155103 @@ -348,7 +348,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
155104                         tcp_enter_quickack_mode(sk, 2);
155105                 break;
155106         case INET_ECN_CE:
155107 -               if (tcp_ca_needs_ecn(sk))
155108 +               if (tcp_ca_wants_ce_events(sk))
155109                         tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
155111                 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
155112 @@ -359,7 +359,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
155113                 tp->ecn_flags |= TCP_ECN_SEEN;
155114                 break;
155115         default:
155116 -               if (tcp_ca_needs_ecn(sk))
155117 +               if (tcp_ca_wants_ce_events(sk))
155118                         tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
155119                 tp->ecn_flags |= TCP_ECN_SEEN;
155120                 break;
155121 @@ -1039,7 +1039,12 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
155122   */
155123  static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb)
155125 +       struct sock *sk = (struct sock *)tp;
155126 +       const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
155128         tp->lost += tcp_skb_pcount(skb);
155129 +       if (ca_ops->skb_marked_lost)
155130 +               ca_ops->skb_marked_lost(sk, skb);
155133  void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
155134 @@ -1420,6 +1425,17 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
155135         WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
155136         tcp_skb_pcount_add(skb, -pcount);
155138 +       /* Adjust tx.in_flight as pcount is shifted from skb to prev. */
155139 +       if (WARN_ONCE(TCP_SKB_CB(skb)->tx.in_flight < pcount,
155140 +                     "prev in_flight: %u skb in_flight: %u pcount: %u",
155141 +                     TCP_SKB_CB(prev)->tx.in_flight,
155142 +                     TCP_SKB_CB(skb)->tx.in_flight,
155143 +                     pcount))
155144 +               TCP_SKB_CB(skb)->tx.in_flight = 0;
155145 +       else
155146 +               TCP_SKB_CB(skb)->tx.in_flight -= pcount;
155147 +       TCP_SKB_CB(prev)->tx.in_flight += pcount;
155149         /* When we're adding to gso_segs == 1, gso_size will be zero,
155150          * in theory this shouldn't be necessary but as long as DSACK
155151          * code can come after this skb later on it's better to keep
155152 @@ -3182,7 +3198,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
155153         long seq_rtt_us = -1L;
155154         long ca_rtt_us = -1L;
155155         u32 pkts_acked = 0;
155156 -       u32 last_in_flight = 0;
155157         bool rtt_update;
155158         int flag = 0;
155160 @@ -3218,7 +3233,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
155161                         if (!first_ackt)
155162                                 first_ackt = last_ackt;
155164 -                       last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
155165                         if (before(start_seq, reord))
155166                                 reord = start_seq;
155167                         if (!after(scb->end_seq, tp->high_seq))
155168 @@ -3284,8 +3298,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
155169                 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
155170                 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
155172 -               if (pkts_acked == 1 && last_in_flight < tp->mss_cache &&
155173 -                   last_in_flight && !prior_sacked && fully_acked &&
155174 +               if (pkts_acked == 1 && fully_acked && !prior_sacked &&
155175 +                   (tp->snd_una - prior_snd_una) < tp->mss_cache &&
155176                     sack->rate->prior_delivered + 1 == tp->delivered &&
155177                     !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
155178                         /* Conservatively mark a delayed ACK. It's typically
155179 @@ -3342,9 +3356,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
155181         if (icsk->icsk_ca_ops->pkts_acked) {
155182                 struct ack_sample sample = { .pkts_acked = pkts_acked,
155183 -                                            .rtt_us = sack->rate->rtt_us,
155184 -                                            .in_flight = last_in_flight };
155185 +                                            .rtt_us = sack->rate->rtt_us };
155187 +               sample.in_flight = tp->mss_cache *
155188 +                       (tp->delivered - sack->rate->prior_delivered);
155189                 icsk->icsk_ca_ops->pkts_acked(sk, &sample);
155190         }
155192 @@ -3742,6 +3757,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
155194         prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
155195         rs.prior_in_flight = tcp_packets_in_flight(tp);
155196 +       tcp_rate_check_app_limited(sk);
155198         /* ts_recent update must be made after we are sure that the packet
155199          * is in window.
155200 @@ -3839,6 +3855,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
155201         delivered = tcp_newly_delivered(sk, delivered, flag);
155202         lost = tp->lost - lost;                 /* freshly marked lost */
155203         rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
155204 +       rs.is_ece = !!(flag & FLAG_ECE);
155205         tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
155206         tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
155207         tcp_xmit_recovery(sk, rexmit);
155208 @@ -5399,13 +5416,14 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
155210             /* More than one full frame received... */
155211         if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
155212 +            (tp->fast_ack_mode == 1 ||
155213              /* ... and right edge of window advances far enough.
155214               * (tcp_recvmsg() will send ACK otherwise).
155215               * If application uses SO_RCVLOWAT, we want send ack now if
155216               * we have not received enough bytes to satisfy the condition.
155217               */
155218 -           (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
155219 -            __tcp_select_window(sk) >= tp->rcv_wnd)) ||
155220 +             (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
155221 +              __tcp_select_window(sk) >= tp->rcv_wnd))) ||
155222             /* We ACK each frame or... */
155223             tcp_in_quickack_mode(sk) ||
155224             /* Protocol state mandates a one-time immediate ACK */
155225 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
155226 index fbf140a770d8..90d939375b29 100644
155227 --- a/net/ipv4/tcp_output.c
155228 +++ b/net/ipv4/tcp_output.c
155229 @@ -1256,8 +1256,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
155230         tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
155231         skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
155232         if (clone_it) {
155233 -               TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
155234 -                       - tp->snd_una;
155235                 oskb = skb;
155237                 tcp_skb_tsorted_save(oskb) {
155238 @@ -1536,7 +1534,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
155240         struct tcp_sock *tp = tcp_sk(sk);
155241         struct sk_buff *buff;
155242 -       int nsize, old_factor;
155243 +       int nsize, old_factor, inflight_prev;
155244         long limit;
155245         int nlen;
155246         u8 flags;
155247 @@ -1615,6 +1613,15 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
155249                 if (diff)
155250                         tcp_adjust_pcount(sk, skb, diff);
155252 +               /* Set buff tx.in_flight as if buff were sent by itself. */
155253 +               inflight_prev = TCP_SKB_CB(skb)->tx.in_flight - old_factor;
155254 +               if (WARN_ONCE(inflight_prev < 0,
155255 +                             "inconsistent: tx.in_flight: %u old_factor: %d",
155256 +                             TCP_SKB_CB(skb)->tx.in_flight, old_factor))
155257 +                       inflight_prev = 0;
155258 +               TCP_SKB_CB(buff)->tx.in_flight = inflight_prev +
155259 +                                                tcp_skb_pcount(buff);
155260         }
155262         /* Link BUFF into the send queue. */
155263 @@ -1982,13 +1989,12 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
155264  static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
155265  {
155266         const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
155267 -       u32 min_tso, tso_segs;
155268 -
155269 -       min_tso = ca_ops->min_tso_segs ?
155270 -                       ca_ops->min_tso_segs(sk) :
155271 -                       sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
155272 +       u32 tso_segs;
155273 
155274 -       tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
155275 +       tso_segs = ca_ops->tso_segs ?
155276 +               ca_ops->tso_segs(sk, mss_now) :
155277 +               tcp_tso_autosize(sk, mss_now,
155278 +                                sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
155279         return min_t(u32, tso_segs, sk->sk_gso_max_segs);
155280  }
155281 
155282 @@ -2628,6 +2634,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
155283                         skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
155284                         list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
155285                         tcp_init_tso_segs(skb, mss_now);
155286 +                       tcp_set_tx_in_flight(sk, skb);
155287                         goto repair; /* Skip network transmission */
155288                 }
155290 diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
155291 index 0de693565963..796fa6e5310c 100644
155292 --- a/net/ipv4/tcp_rate.c
155293 +++ b/net/ipv4/tcp_rate.c
155294 @@ -34,6 +34,24 @@
155295   * ready to send in the write queue.
155296   */
155297 
155298 +void tcp_set_tx_in_flight(struct sock *sk, struct sk_buff *skb)
155299 +{
155300 +       struct tcp_sock *tp = tcp_sk(sk);
155301 +       u32 in_flight;
155302 +
155303 +       /* Check, sanitize, and record packets in flight after skb was sent. */
155304 +       in_flight = tcp_packets_in_flight(tp) + tcp_skb_pcount(skb);
155305 +       if (WARN_ONCE(in_flight > TCPCB_IN_FLIGHT_MAX,
155306 +                     "insane in_flight %u cc %s mss %u "
155307 +                     "cwnd %u pif %u %u %u %u\n",
155308 +                     in_flight, inet_csk(sk)->icsk_ca_ops->name,
155309 +                     tp->mss_cache, tp->snd_cwnd,
155310 +                     tp->packets_out, tp->retrans_out,
155311 +                     tp->sacked_out, tp->lost_out))
155312 +               in_flight = TCPCB_IN_FLIGHT_MAX;
155313 +       TCP_SKB_CB(skb)->tx.in_flight = in_flight;
155314 +}
155315 +
155316 /* Snapshot the current delivery information in the skb, to generate
155317   * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
155318   */
155319 @@ -65,7 +83,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
155320         TCP_SKB_CB(skb)->tx.first_tx_mstamp     = tp->first_tx_mstamp;
155321         TCP_SKB_CB(skb)->tx.delivered_mstamp    = tp->delivered_mstamp;
155322         TCP_SKB_CB(skb)->tx.delivered           = tp->delivered;
155323 +       TCP_SKB_CB(skb)->tx.delivered_ce        = tp->delivered_ce;
155324 +       TCP_SKB_CB(skb)->tx.lost                = tp->lost;
155325         TCP_SKB_CB(skb)->tx.is_app_limited      = tp->app_limited ? 1 : 0;
155326 +       tcp_set_tx_in_flight(sk, skb);
155329  /* When an skb is sacked or acked, we fill in the rate sample with the (prior)
155330 @@ -86,16 +107,20 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
155332         if (!rs->prior_delivered ||
155333             after(scb->tx.delivered, rs->prior_delivered)) {
155334 +               rs->prior_lost       = scb->tx.lost;
155335 +               rs->prior_delivered_ce  = scb->tx.delivered_ce;
155336                 rs->prior_delivered  = scb->tx.delivered;
155337                 rs->prior_mstamp     = scb->tx.delivered_mstamp;
155338                 rs->is_app_limited   = scb->tx.is_app_limited;
155339                 rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
155340 +               rs->tx_in_flight     = scb->tx.in_flight;
155342                 /* Record send time of most recently ACKed packet: */
155343                 tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
155344                 /* Find the duration of the "send phase" of this window: */
155345 -               rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
155346 -                                                    scb->tx.first_tx_mstamp);
155347 +               rs->interval_us      = tcp_stamp32_us_delta(
155348 +                                               tp->first_tx_mstamp,
155349 +                                               scb->tx.first_tx_mstamp);
155351         }
155352         /* Mark off the skb delivered once it's sacked to avoid being
155353 @@ -137,6 +162,11 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
155354                 return;
155355         }
155356         rs->delivered   = tp->delivered - rs->prior_delivered;
155357 +       rs->lost        = tp->lost - rs->prior_lost;
155359 +       rs->delivered_ce = tp->delivered_ce - rs->prior_delivered_ce;
155360 +       /* delivered_ce occupies less than 32 bits in the skb control block */
155361 +       rs->delivered_ce &= TCPCB_DELIVERED_CE_MASK;
155363         /* Model sending data and receiving ACKs as separate pipeline phases
155364          * for a window. Usually the ACK phase is longer, but with ACK
155365 @@ -144,7 +174,7 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
155366          * longer phase.
155367          */
155368         snd_us = rs->interval_us;                               /* send phase */
155369 -       ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
155370 +       ack_us = tcp_stamp32_us_delta(tp->tcp_mstamp,
155371                                     rs->prior_mstamp); /* ack phase */
155372         rs->interval_us = max(snd_us, ack_us);
155374 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
155375 index 4ef08079ccfa..b5b24caa8ba0 100644
155376 --- a/net/ipv4/tcp_timer.c
155377 +++ b/net/ipv4/tcp_timer.c
155378 @@ -607,6 +607,7 @@ void tcp_write_timer_handler(struct sock *sk)
155379                 goto out;
155380         }
155382 +       tcp_rate_check_app_limited(sk);
155383         tcp_mstamp_refresh(tcp_sk(sk));
155384         event = icsk->icsk_pending;
155386 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
155387 index 99d743eb9dc4..c586a6bb8c6d 100644
155388 --- a/net/ipv4/udp.c
155389 +++ b/net/ipv4/udp.c
155390 @@ -2664,9 +2664,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
155392         case UDP_GRO:
155393                 lock_sock(sk);
155395 +               /* when enabling GRO, accept the related GSO packet type */
155396                 if (valbool)
155397                         udp_tunnel_encap_enable(sk->sk_socket);
155398                 up->gro_enabled = valbool;
155399 +               up->accept_udp_l4 = valbool;
155400                 release_sock(sk);
155401                 break;
155403 diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
155404 index c5b4b586570f..25134a3548e9 100644
155405 --- a/net/ipv4/udp_offload.c
155406 +++ b/net/ipv4/udp_offload.c
155407 @@ -515,21 +515,24 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
155408         unsigned int off = skb_gro_offset(skb);
155409         int flush = 1;
155411 +       /* we can do L4 aggregation only if the packet can't land in a tunnel
155412 +        * otherwise we could corrupt the inner stream
155413 +        */
155414         NAPI_GRO_CB(skb)->is_flist = 0;
155415 -       if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
155416 -               NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled: 1;
155417 +       if (!sk || !udp_sk(sk)->gro_receive) {
155418 +               if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
155419 +                       NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
155421 -       if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
155422 -           (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist) {
155423 -               pp = call_gro_receive(udp_gro_receive_segment, head, skb);
155424 +               if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
155425 +                   (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
155426 +                       pp = call_gro_receive(udp_gro_receive_segment, head, skb);
155427                 return pp;
155428         }
155430 -       if (!sk || NAPI_GRO_CB(skb)->encap_mark ||
155431 +       if (NAPI_GRO_CB(skb)->encap_mark ||
155432             (uh->check && skb->ip_summed != CHECKSUM_PARTIAL &&
155433              NAPI_GRO_CB(skb)->csum_cnt == 0 &&
155434 -            !NAPI_GRO_CB(skb)->csum_valid) ||
155435 -           !udp_sk(sk)->gro_receive)
155436 +            !NAPI_GRO_CB(skb)->csum_valid))
155437                 goto out;
155439         /* mark that this skb passed once through the tunnel gro layer */
155440 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
155441 index 1baf43aacb2e..bc224f917bbd 100644
155442 --- a/net/ipv6/ip6_gre.c
155443 +++ b/net/ipv6/ip6_gre.c
155444 @@ -387,7 +387,6 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
155445         if (!(nt->parms.o_flags & TUNNEL_SEQ))
155446                 dev->features |= NETIF_F_LLTX;
155448 -       dev_hold(dev);
155449         ip6gre_tunnel_link(ign, nt);
155450         return nt;
155452 @@ -1496,6 +1495,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
155453         }
155454         ip6gre_tnl_init_features(dev);
155456 +       dev_hold(dev);
155457         return 0;
155459  cleanup_dst_cache_init:
155460 @@ -1538,8 +1538,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
155461         strcpy(tunnel->parms.name, dev->name);
155463         tunnel->hlen            = sizeof(struct ipv6hdr) + 4;
155465 -       dev_hold(dev);
155468  static struct inet6_protocol ip6gre_protocol __read_mostly = {
155469 @@ -1889,6 +1887,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
155470         dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
155471         ip6erspan_tnl_link_config(tunnel, 1);
155473 +       dev_hold(dev);
155474         return 0;
155476  cleanup_dst_cache_init:
155477 @@ -1988,8 +1987,6 @@ static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
155478         if (tb[IFLA_MTU])
155479                 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
155481 -       dev_hold(dev);
155483  out:
155484         return err;
155486 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
155487 index 42fe7db6bbb3..d42f471b0d65 100644
155488 --- a/net/ipv6/ip6_tunnel.c
155489 +++ b/net/ipv6/ip6_tunnel.c
155490 @@ -266,7 +266,6 @@ static int ip6_tnl_create2(struct net_device *dev)
155492         strcpy(t->parms.name, dev->name);
155494 -       dev_hold(dev);
155495         ip6_tnl_link(ip6n, t);
155496         return 0;
155498 @@ -1882,6 +1881,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
155499         dev->min_mtu = ETH_MIN_MTU;
155500         dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
155502 +       dev_hold(dev);
155503         return 0;
155505  destroy_dst:
155506 @@ -1925,7 +1925,6 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
155507         struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
155509         t->parms.proto = IPPROTO_IPV6;
155510 -       dev_hold(dev);
155512         rcu_assign_pointer(ip6n->tnls_wc[0], t);
155513         return 0;
155514 diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
155515 index e0cc32e45880..2d048e21abbb 100644
155516 --- a/net/ipv6/ip6_vti.c
155517 +++ b/net/ipv6/ip6_vti.c
155518 @@ -193,7 +193,6 @@ static int vti6_tnl_create2(struct net_device *dev)
155520         strcpy(t->parms.name, dev->name);
155522 -       dev_hold(dev);
155523         vti6_tnl_link(ip6n, t);
155525         return 0;
155526 @@ -934,6 +933,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
155527         dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
155528         if (!dev->tstats)
155529                 return -ENOMEM;
155530 +       dev_hold(dev);
155531         return 0;
155534 @@ -965,7 +965,6 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
155535         struct vti6_net *ip6n = net_generic(net, vti6_net_id);
155537         t->parms.proto = IPPROTO_IPV6;
155538 -       dev_hold(dev);
155540         rcu_assign_pointer(ip6n->tnls_wc[0], t);
155541         return 0;
155542 diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
155543 index d3d6b6a66e5f..04d5fcdfa6e0 100644
155544 --- a/net/ipv6/mcast_snoop.c
155545 +++ b/net/ipv6/mcast_snoop.c
155546 @@ -109,7 +109,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
155547         struct mld_msg *mld;
155549         if (!ipv6_mc_may_pull(skb, len))
155550 -               return -EINVAL;
155551 +               return -ENODATA;
155553         mld = (struct mld_msg *)skb_transport_header(skb);
155555 @@ -122,7 +122,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
155556         case ICMPV6_MGM_QUERY:
155557                 return ipv6_mc_check_mld_query(skb);
155558         default:
155559 -               return -ENOMSG;
155560 +               return -ENODATA;
155561         }
155562  }
155563 
155564 @@ -131,7 +131,7 @@ static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
155565         return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
155566  }
155567 
155568 -int ipv6_mc_check_icmpv6(struct sk_buff *skb)
155569 +static int ipv6_mc_check_icmpv6(struct sk_buff *skb)
155570  {
155571         unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr);
155572         unsigned int transport_len = ipv6_transport_len(skb);
155573 @@ -150,7 +150,6 @@ int ipv6_mc_check_icmpv6(struct sk_buff *skb)
155575         return 0;
155577 -EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
155579  /**
155580   * ipv6_mc_check_mld - checks whether this is a sane MLD packet
155581 @@ -161,7 +160,10 @@ EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
155582   *
155583   * -EINVAL: A broken packet was detected, i.e. it violates some internet
155584   *  standard
155585 - * -ENOMSG: IP header validation succeeded but it is not an MLD packet.
155586 + * -ENOMSG: IP header validation succeeded but it is not an ICMPv6 packet
155587 + *  with a hop-by-hop option.
155588 + * -ENODATA: IP+ICMPv6 header with hop-by-hop option validation succeeded
155589 + *  but it is not an MLD packet.
155590   * -ENOMEM: A memory allocation failure happened.
155591   *
155592   * Caller needs to set the skb network header and free any returned skb if it
155593 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
155594 index 9fdccf0718b5..fcc9ba2c80e9 100644
155595 --- a/net/ipv6/sit.c
155596 +++ b/net/ipv6/sit.c
155597 @@ -218,8 +218,6 @@ static int ipip6_tunnel_create(struct net_device *dev)
155599         ipip6_tunnel_clone_6rd(dev, sitn);
155601 -       dev_hold(dev);
155603         ipip6_tunnel_link(sitn, t);
155604         return 0;
155606 @@ -1456,7 +1454,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
155607                 dev->tstats = NULL;
155608                 return err;
155609         }
155611 +       dev_hold(dev);
155612         return 0;
155615 @@ -1472,7 +1470,6 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
155616         iph->ihl                = 5;
155617         iph->ttl                = 64;
155619 -       dev_hold(dev);
155620         rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
155623 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
155624 index 1b9c82616606..0331f3a3c40e 100644
155625 --- a/net/mac80211/main.c
155626 +++ b/net/mac80211/main.c
155627 @@ -1141,8 +1141,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
155628         if (local->hw.wiphy->max_scan_ie_len)
155629                 local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
155631 -       WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
155632 -                                        local->hw.n_cipher_schemes));
155633 +       if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
155634 +                                            local->hw.n_cipher_schemes))) {
155635 +               result = -EINVAL;
155636 +               goto fail_workqueue;
155637 +       }
155639         result = ieee80211_init_cipher_suites(local);
155640         if (result < 0)
155641 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
155642 index 96f487fc0071..0fe91dc9817e 100644
155643 --- a/net/mac80211/mlme.c
155644 +++ b/net/mac80211/mlme.c
155645 @@ -1295,6 +1295,11 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
155647         sdata->vif.csa_active = false;
155648         ifmgd->csa_waiting_bcn = false;
155649 +       /*
155650 +        * If the CSA IE is still present on the beacon after the switch,
155651 +        * we need to consider it as a new CSA (possibly to self).
155652 +        */
155653 +       ifmgd->beacon_crc_valid = false;
155655         ret = drv_post_channel_switch(sdata);
155656         if (ret) {
155657 @@ -1400,11 +1405,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
155658                 ch_switch.delay = csa_ie.max_switch_time;
155659         }
155661 -       if (res < 0) {
155662 -               ieee80211_queue_work(&local->hw,
155663 -                                    &ifmgd->csa_connection_drop_work);
155664 -               return;
155665 -       }
155666 +       if (res < 0)
155667 +               goto lock_and_drop_connection;
155669         if (beacon && sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) {
155670                 if (res)
155671 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
155672 index 3b3bcefbf657..28422d687096 100644
155673 --- a/net/mac80211/tx.c
155674 +++ b/net/mac80211/tx.c
155675 @@ -2267,17 +2267,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
155676                                                     payload[7]);
155677         }
155679 -       /* Initialize skb->priority for QoS frames. If the DONT_REORDER flag
155680 -        * is set, stick to the default value for skb->priority to assure
155681 -        * frames injected with this flag are not reordered relative to each
155682 -        * other.
155683 -        */
155684 -       if (ieee80211_is_data_qos(hdr->frame_control) &&
155685 -           !(info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER)) {
155686 -               u8 *p = ieee80211_get_qos_ctl(hdr);
155687 -               skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
155688 -       }
155690         rcu_read_lock();
155692         /*
155693 @@ -2341,6 +2330,15 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
155695         info->band = chandef->chan->band;
155697 +       /* Initialize skb->priority according to frame type and TID class,
155698 +        * with respect to the sub interface that the frame will actually
155699 +        * be transmitted on. If the DONT_REORDER flag is set, the original
155700 +        * skb-priority is preserved to assure frames injected with this
155701 +        * flag are not reordered relative to each other.
155702 +        */
155703 +       ieee80211_select_queue_80211(sdata, skb, hdr);
155704 +       skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
155706         /* remove the injection radiotap header */
155707         skb_pull(skb, len_rthdr);
155709 diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
155710 index 4bde960e19dc..65e5d3eb1078 100644
155711 --- a/net/mptcp/protocol.c
155712 +++ b/net/mptcp/protocol.c
155713 @@ -399,6 +399,14 @@ static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
155714         return false;
155715  }
155716 
155717 +static void mptcp_set_datafin_timeout(const struct sock *sk)
155718 +{
155719 +       struct inet_connection_sock *icsk = inet_csk(sk);
155720 +
155721 +       mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX,
155722 +                                      TCP_RTO_MIN << icsk->icsk_retransmits);
155723 +}
155724 +
155725  static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
155726  {
155727         long tout = ssk && inet_csk(ssk)->icsk_pending ?
155728 @@ -1052,7 +1060,7 @@ static void __mptcp_clean_una(struct sock *sk)
155729         }
155731         if (snd_una == READ_ONCE(msk->snd_nxt)) {
155732 -               if (msk->timer_ival)
155733 +               if (msk->timer_ival && !mptcp_data_fin_enabled(msk))
155734                         mptcp_stop_timer(sk);
155735         } else {
155736                 mptcp_reset_timer(sk);
155737 @@ -1275,7 +1283,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
155738         int avail_size;
155739         size_t ret = 0;
155741 -       pr_debug("msk=%p ssk=%p sending dfrag at seq=%lld len=%d already sent=%d",
155742 +       pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
155743                  msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
155745         /* compute send limit */
155746 @@ -1693,7 +1701,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
155747                         if (!msk->first_pending)
155748                                 WRITE_ONCE(msk->first_pending, dfrag);
155749                 }
155750 -               pr_debug("msk=%p dfrag at seq=%lld len=%d sent=%d new=%d", msk,
155751 +               pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
155752                          dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
155753                          !dfrag_collapsed);
155755 @@ -2276,8 +2284,19 @@ static void __mptcp_retrans(struct sock *sk)
155757         __mptcp_clean_una_wakeup(sk);
155758         dfrag = mptcp_rtx_head(sk);
155759 -       if (!dfrag)
155760 +       if (!dfrag) {
155761 +               if (mptcp_data_fin_enabled(msk)) {
155762 +                       struct inet_connection_sock *icsk = inet_csk(sk);
155764 +                       icsk->icsk_retransmits++;
155765 +                       mptcp_set_datafin_timeout(sk);
155766 +                       mptcp_send_ack(msk);
155768 +                       goto reset_timer;
155769 +               }
155771                 return;
155772 +       }
155774         ssk = mptcp_subflow_get_retrans(msk);
155775         if (!ssk)
155776 @@ -2460,6 +2479,8 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
155777                         pr_debug("Sending DATA_FIN on subflow %p", ssk);
155778                         mptcp_set_timeout(sk, ssk);
155779                         tcp_send_ack(ssk);
155780 +                       if (!mptcp_timer_pending(sk))
155781 +                               mptcp_reset_timer(sk);
155782                 }
155783                 break;
155784         }
155785 diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
155786 index d17d39ccdf34..4fe7acaa472f 100644
155787 --- a/net/mptcp/subflow.c
155788 +++ b/net/mptcp/subflow.c
155789 @@ -524,8 +524,7 @@ static void mptcp_sock_destruct(struct sock *sk)
155790          * ESTABLISHED state and will not have the SOCK_DEAD flag.
155791          * Both result in warnings from inet_sock_destruct.
155792          */
155794 -       if (sk->sk_state == TCP_ESTABLISHED) {
155795 +       if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
155796                 sk->sk_state = TCP_CLOSE;
155797                 WARN_ON_ONCE(sk->sk_socket);
155798                 sock_orphan(sk);
155799 diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
155800 index c6c0cb465664..313d1c8ff066 100644
155801 --- a/net/netfilter/nf_conntrack_standalone.c
155802 +++ b/net/netfilter/nf_conntrack_standalone.c
155803 @@ -1060,16 +1060,10 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
155804         nf_conntrack_standalone_init_dccp_sysctl(net, table);
155805         nf_conntrack_standalone_init_gre_sysctl(net, table);
155807 -       /* Don't allow unprivileged users to alter certain sysctls */
155808 -       if (net->user_ns != &init_user_ns) {
155809 +       /* Don't allow non-init_net ns to alter global sysctls */
155810 +       if (!net_eq(&init_net, net)) {
155811                 table[NF_SYSCTL_CT_MAX].mode = 0444;
155812                 table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
155813 -               table[NF_SYSCTL_CT_HELPER].mode = 0444;
155814 -#ifdef CONFIG_NF_CONNTRACK_EVENTS
155815 -               table[NF_SYSCTL_CT_EVENTS].mode = 0444;
155816 -#endif
155817 -               table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
155818 -       } else if (!net_eq(&init_net, net)) {
155819                 table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
155820         }
155822 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
155823 index 589d2f6978d3..878ed49d0c56 100644
155824 --- a/net/netfilter/nf_tables_api.c
155825 +++ b/net/netfilter/nf_tables_api.c
155826 @@ -6246,9 +6246,9 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
155827         INIT_LIST_HEAD(&obj->list);
155828         return err;
155829  err_trans:
155830 -       kfree(obj->key.name);
155831 -err_userdata:
155832         kfree(obj->udata);
155833 +err_userdata:
155834 +       kfree(obj->key.name);
155835  err_strdup:
155836         if (obj->ops->destroy)
155837                 obj->ops->destroy(&ctx, obj);
155838 diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
155839 index 9ae14270c543..2b00f7f47693 100644
155840 --- a/net/netfilter/nf_tables_offload.c
155841 +++ b/net/netfilter/nf_tables_offload.c
155842 @@ -45,6 +45,48 @@ void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
155843                 offsetof(struct nft_flow_key, control);
155844  }
155845 
155846 +struct nft_offload_ethertype {
155847 +       __be16 value;
155848 +       __be16 mask;
155849 +};
155850 +
155851 +static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
155852 +                                       struct nft_flow_rule *flow)
155853 +{
155854 +       struct nft_flow_match *match = &flow->match;
155855 +       struct nft_offload_ethertype ethertype;
155856 +
155857 +       if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
155858 +           match->key.basic.n_proto != htons(ETH_P_8021Q) &&
155859 +           match->key.basic.n_proto != htons(ETH_P_8021AD))
155860 +               return;
155861 +
155862 +       ethertype.value = match->key.basic.n_proto;
155863 +       ethertype.mask = match->mask.basic.n_proto;
155864 +
155865 +       if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
155866 +           (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
155867 +            match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
155868 +               match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
155869 +               match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
155870 +               match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
155871 +               match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
155872 +               match->key.vlan.vlan_tpid = ethertype.value;
155873 +               match->mask.vlan.vlan_tpid = ethertype.mask;
155874 +               match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
155875 +                       offsetof(struct nft_flow_key, cvlan);
155876 +               match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
155877 +       } else {
155878 +               match->key.basic.n_proto = match->key.vlan.vlan_tpid;
155879 +               match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
155880 +               match->key.vlan.vlan_tpid = ethertype.value;
155881 +               match->mask.vlan.vlan_tpid = ethertype.mask;
155882 +               match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
155883 +                       offsetof(struct nft_flow_key, vlan);
155884 +               match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
155885 +       }
155886 +}
155887 +
155888 struct nft_flow_rule *nft_flow_rule_create(struct net *net,
155889                                            const struct nft_rule *rule)
155890  {
155891 @@ -89,6 +131,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
155893                 expr = nft_expr_next(expr);
155894         }
155895 +       nft_flow_rule_transfer_vlan(ctx, flow);
155897         flow->proto = ctx->dep.l3num;
155898         kfree(ctx);
155900 diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
155901 index 916a3c7f9eaf..79fbf37291f3 100644
155902 --- a/net/netfilter/nfnetlink_osf.c
155903 +++ b/net/netfilter/nfnetlink_osf.c
155904 @@ -186,6 +186,8 @@ static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
155906                 ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) +
155907                                 sizeof(struct tcphdr), ctx->optsize, opts);
155908 +               if (!ctx->optp)
155909 +                       return NULL;
155910         }
155912         return tcp;
155913 diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
155914 index eb6a43a180bb..47b6d05f1ae6 100644
155915 --- a/net/netfilter/nft_cmp.c
155916 +++ b/net/netfilter/nft_cmp.c
155917 @@ -114,19 +114,56 @@ static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
155918         return -1;
155919  }
155920 
155921 +union nft_cmp_offload_data {
155922 +       u16     val16;
155923 +       u32     val32;
155924 +       u64     val64;
155925 +};
155926 +
155927 +static void nft_payload_n2h(union nft_cmp_offload_data *data,
155928 +                           const u8 *val, u32 len)
155929 +{
155930 +       switch (len) {
155931 +       case 2:
155932 +               data->val16 = ntohs(*((u16 *)val));
155933 +               break;
155934 +       case 4:
155935 +               data->val32 = ntohl(*((u32 *)val));
155936 +               break;
155937 +       case 8:
155938 +               data->val64 = be64_to_cpu(*((u64 *)val));
155939 +               break;
155940 +       default:
155941 +               WARN_ON_ONCE(1);
155942 +               break;
155943 +       }
155944 +}
155945 +
155946  static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
155947                              struct nft_flow_rule *flow,
155948                              const struct nft_cmp_expr *priv)
155949  {
155950         struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
155951 +       union nft_cmp_offload_data _data, _datamask;
155952         u8 *mask = (u8 *)&flow->match.mask;
155953         u8 *key = (u8 *)&flow->match.key;
155954 +       u8 *data, *datamask;
155955 
155956         if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
155957                 return -EOPNOTSUPP;
155958 
155959 -       memcpy(key + reg->offset, &priv->data, reg->len);
155960 -       memcpy(mask + reg->offset, &reg->mask, reg->len);
155961 +       if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
155962 +               nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
155963 +               nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
155964 +               data = (u8 *)&_data;
155965 +               datamask = (u8 *)&_datamask;
155966 +       } else {
155967 +               data = (u8 *)&priv->data;
155968 +               datamask = (u8 *)&reg->mask;
155969 +       }
155970 +
155971 +       memcpy(key + reg->offset, data, reg->len);
155972 +       memcpy(mask + reg->offset, datamask, reg->len);
155973 
155974         flow->match.dissector.used_keys |= BIT(reg->key);
155975         flow->match.dissector.offset[reg->key] = reg->base_offset;
155976 diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
155977 index cb1c8c231880..501c5b24cc39 100644
155978 --- a/net/netfilter/nft_payload.c
155979 +++ b/net/netfilter/nft_payload.c
155980 @@ -226,8 +226,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
155981                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
155982                         return -EOPNOTSUPP;
155984 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
155985 -                                 vlan_tci, sizeof(__be16), reg);
155986 +               NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
155987 +                                       vlan_tci, sizeof(__be16), reg,
155988 +                                       NFT_OFFLOAD_F_NETWORK2HOST);
155989                 break;
155990         case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
155991                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
155992 @@ -241,16 +242,18 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
155993                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
155994                         return -EOPNOTSUPP;
155996 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
155997 -                                 vlan_tci, sizeof(__be16), reg);
155998 +               NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
155999 +                                       vlan_tci, sizeof(__be16), reg,
156000 +                                       NFT_OFFLOAD_F_NETWORK2HOST);
156001                 break;
156002         case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
156003                                                         sizeof(struct vlan_hdr):
156004                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
156005                         return -EOPNOTSUPP;
156007 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
156008 +               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
156009                                   vlan_tpid, sizeof(__be16), reg);
156010 +               nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
156011                 break;
156012         default:
156013                 return -EOPNOTSUPP;
156014 diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
156015 index bf618b7ec1ae..560c2cda52ee 100644
156016 --- a/net/netfilter/nft_set_hash.c
156017 +++ b/net/netfilter/nft_set_hash.c
156018 @@ -406,9 +406,17 @@ static void nft_rhash_destroy(const struct nft_set *set)
156019                                     (void *)set);
156020  }
156021 
156022 +/* Number of buckets is stored in u32, so cap our result to 1U<<31 */
156023 +#define NFT_MAX_BUCKETS (1U << 31)
156024 +
156025  static u32 nft_hash_buckets(u32 size)
156026  {
156027 -       return roundup_pow_of_two(size * 4 / 3);
156028 +       u64 val = div_u64((u64)size * 4, 3);
156029 +
156030 +       if (val >= NFT_MAX_BUCKETS)
156031 +               return NFT_MAX_BUCKETS;
156032 +
156033 +       return roundup_pow_of_two(val);
156034  }
156035 
156036  static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features,
156037 diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
156038 index 75625d13e976..498a0bf6f044 100644
156039 --- a/net/netfilter/xt_SECMARK.c
156040 +++ b/net/netfilter/xt_SECMARK.c
156041 @@ -24,10 +24,9 @@ MODULE_ALIAS("ip6t_SECMARK");
156042  static u8 mode;
156044  static unsigned int
156045 -secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
156046 +secmark_tg(struct sk_buff *skb, const struct xt_secmark_target_info_v1 *info)
156047  {
156048         u32 secmark = 0;
156049 -       const struct xt_secmark_target_info *info = par->targinfo;
156050 
156051         switch (mode) {
156052         case SECMARK_MODE_SEL:
156053 @@ -41,7 +40,7 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
156054         return XT_CONTINUE;
156057 -static int checkentry_lsm(struct xt_secmark_target_info *info)
156058 +static int checkentry_lsm(struct xt_secmark_target_info_v1 *info)
156060         int err;
156062 @@ -73,15 +72,15 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
156063         return 0;
156066 -static int secmark_tg_check(const struct xt_tgchk_param *par)
156067 +static int
156068 +secmark_tg_check(const char *table, struct xt_secmark_target_info_v1 *info)
156070 -       struct xt_secmark_target_info *info = par->targinfo;
156071         int err;
156073 -       if (strcmp(par->table, "mangle") != 0 &&
156074 -           strcmp(par->table, "security") != 0) {
156075 +       if (strcmp(table, "mangle") != 0 &&
156076 +           strcmp(table, "security") != 0) {
156077                 pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
156078 -                                   par->table);
156079 +                                   table);
156080                 return -EINVAL;
156081         }
156083 @@ -116,25 +115,76 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
156084         }
156085  }
156086 
156087 -static struct xt_target secmark_tg_reg __read_mostly = {
156088 -       .name       = "SECMARK",
156089 -       .revision   = 0,
156090 -       .family     = NFPROTO_UNSPEC,
156091 -       .checkentry = secmark_tg_check,
156092 -       .destroy    = secmark_tg_destroy,
156093 -       .target     = secmark_tg,
156094 -       .targetsize = sizeof(struct xt_secmark_target_info),
156095 -       .me         = THIS_MODULE,
156096 +static int secmark_tg_check_v0(const struct xt_tgchk_param *par)
156097 +{
156098 +       struct xt_secmark_target_info *info = par->targinfo;
156099 +       struct xt_secmark_target_info_v1 newinfo = {
156100 +               .mode   = info->mode,
156101 +       };
156102 +       int ret;
156103 +
156104 +       memcpy(newinfo.secctx, info->secctx, SECMARK_SECCTX_MAX);
156105 +
156106 +       ret = secmark_tg_check(par->table, &newinfo);
156107 +       info->secid = newinfo.secid;
156108 +
156109 +       return ret;
156110 +}
156111 +
156112 +static unsigned int
156113 +secmark_tg_v0(struct sk_buff *skb, const struct xt_action_param *par)
156114 +{
156115 +       const struct xt_secmark_target_info *info = par->targinfo;
156116 +       struct xt_secmark_target_info_v1 newinfo = {
156117 +               .secid  = info->secid,
156118 +       };
156119 +
156120 +       return secmark_tg(skb, &newinfo);
156121 +}
156122 +
156123 +static int secmark_tg_check_v1(const struct xt_tgchk_param *par)
156124 +{
156125 +       return secmark_tg_check(par->table, par->targinfo);
156126 +}
156127 +
156128 +static unsigned int
156129 +secmark_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
156130 +{
156131 +       return secmark_tg(skb, par->targinfo);
156132 +}
156133 +
156134 +static struct xt_target secmark_tg_reg[] __read_mostly = {
156135 +       {
156136 +               .name           = "SECMARK",
156137 +               .revision       = 0,
156138 +               .family         = NFPROTO_UNSPEC,
156139 +               .checkentry     = secmark_tg_check_v0,
156140 +               .destroy        = secmark_tg_destroy,
156141 +               .target         = secmark_tg_v0,
156142 +               .targetsize     = sizeof(struct xt_secmark_target_info),
156143 +               .me             = THIS_MODULE,
156144 +       },
156145 +       {
156146 +               .name           = "SECMARK",
156147 +               .revision       = 1,
156148 +               .family         = NFPROTO_UNSPEC,
156149 +               .checkentry     = secmark_tg_check_v1,
156150 +               .destroy        = secmark_tg_destroy,
156151 +               .target         = secmark_tg_v1,
156152 +               .targetsize     = sizeof(struct xt_secmark_target_info_v1),
156153 +               .usersize       = offsetof(struct xt_secmark_target_info_v1, secid),
156154 +               .me             = THIS_MODULE,
156155 +       },
156156  };
156157 
156158  static int __init secmark_tg_init(void)
156159  {
156160 -       return xt_register_target(&secmark_tg_reg);
156161 +       return xt_register_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
156162  }
156163 
156164  static void __exit secmark_tg_exit(void)
156165  {
156166 -       xt_unregister_target(&secmark_tg_reg);
156167 +       xt_unregister_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
156168  }
156169 
156170  module_init(secmark_tg_init);
156171 diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
156172 index 5971fb6f51cc..dc21b4141b0a 100644
156173 --- a/net/nfc/digital_dep.c
156174 +++ b/net/nfc/digital_dep.c
156175 @@ -1273,6 +1273,8 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
156176         }
156178         rc = nfc_tm_data_received(ddev->nfc_dev, resp);
156179 +       if (rc)
156180 +               resp = NULL;
156182  exit:
156183         kfree_skb(ddev->chaining_skb);
156184 diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
156185 index a3b46f888803..53dbe733f998 100644
156186 --- a/net/nfc/llcp_sock.c
156187 +++ b/net/nfc/llcp_sock.c
156188 @@ -109,12 +109,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
156189                                           GFP_KERNEL);
156190         if (!llcp_sock->service_name) {
156191                 nfc_llcp_local_put(llcp_sock->local);
156192 +               llcp_sock->local = NULL;
156193                 ret = -ENOMEM;
156194                 goto put_dev;
156195         }
156196         llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
156197         if (llcp_sock->ssap == LLCP_SAP_MAX) {
156198                 nfc_llcp_local_put(llcp_sock->local);
156199 +               llcp_sock->local = NULL;
156200                 kfree(llcp_sock->service_name);
156201                 llcp_sock->service_name = NULL;
156202                 ret = -EADDRINUSE;
156203 @@ -709,6 +711,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
156204         llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
156205         if (llcp_sock->ssap == LLCP_SAP_MAX) {
156206                 nfc_llcp_local_put(llcp_sock->local);
156207 +               llcp_sock->local = NULL;
156208                 ret = -ENOMEM;
156209                 goto put_dev;
156210         }
156211 @@ -756,6 +759,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
156212  sock_llcp_release:
156213         nfc_llcp_put_ssap(local, llcp_sock->ssap);
156214         nfc_llcp_local_put(llcp_sock->local);
156215 +       llcp_sock->local = NULL;
156217  put_dev:
156218         nfc_put_device(dev);
156219 diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
156220 index 59257400697d..142d71c8d652 100644
156221 --- a/net/nfc/nci/core.c
156222 +++ b/net/nfc/nci/core.c
156223 @@ -1191,6 +1191,7 @@ EXPORT_SYMBOL(nci_allocate_device);
156224  void nci_free_device(struct nci_dev *ndev)
156226         nfc_free_device(ndev->nfc_dev);
156227 +       nci_hci_deallocate(ndev);
156228         kfree(ndev);
156230  EXPORT_SYMBOL(nci_free_device);
156231 diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
156232 index 6b275a387a92..96865142104f 100644
156233 --- a/net/nfc/nci/hci.c
156234 +++ b/net/nfc/nci/hci.c
156235 @@ -792,3 +792,8 @@ struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev)
156236 
156237         return hdev;
156238  }
156239 +
156240 +void nci_hci_deallocate(struct nci_dev *ndev)
156241 +{
156242 +       kfree(ndev->hci_dev);
156243 +}
156244 diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
156245 index 92a0b67b2728..77d924ab8cdb 100644
156246 --- a/net/openvswitch/actions.c
156247 +++ b/net/openvswitch/actions.c
156248 @@ -827,17 +827,17 @@ static void ovs_fragment(struct net *net, struct vport *vport,
156249         }
156251         if (key->eth.type == htons(ETH_P_IP)) {
156252 -               struct dst_entry ovs_dst;
156253 +               struct rtable ovs_rt = { 0 };
156254                 unsigned long orig_dst;
156256                 prepare_frag(vport, skb, orig_network_offset,
156257                              ovs_key_mac_proto(key));
156258 -               dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
156259 +               dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
156260                          DST_OBSOLETE_NONE, DST_NOCOUNT);
156261 -               ovs_dst.dev = vport->dev;
156262 +               ovs_rt.dst.dev = vport->dev;
156264                 orig_dst = skb->_skb_refdst;
156265 -               skb_dst_set_noref(skb, &ovs_dst);
156266 +               skb_dst_set_noref(skb, &ovs_rt.dst);
156267                 IPCB(skb)->frag_max_size = mru;
156269                 ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
156270 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
156271 index e24b2841c643..9611e41c7b8b 100644
156272 --- a/net/packet/af_packet.c
156273 +++ b/net/packet/af_packet.c
156274 @@ -1359,7 +1359,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
156275         struct packet_sock *po, *po_next, *po_skip = NULL;
156276         unsigned int i, j, room = ROOM_NONE;
156278 -       po = pkt_sk(f->arr[idx]);
156279 +       po = pkt_sk(rcu_dereference(f->arr[idx]));
156281         if (try_self) {
156282                 room = packet_rcv_has_room(po, skb);
156283 @@ -1371,7 +1371,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
156285         i = j = min_t(int, po->rollover->sock, num - 1);
156286         do {
156287 -               po_next = pkt_sk(f->arr[i]);
156288 +               po_next = pkt_sk(rcu_dereference(f->arr[i]));
156289                 if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
156290                     packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
156291                         if (i != j)
156292 @@ -1466,7 +1466,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
156293         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
156294                 idx = fanout_demux_rollover(f, skb, idx, true, num);
156296 -       po = pkt_sk(f->arr[idx]);
156297 +       po = pkt_sk(rcu_dereference(f->arr[idx]));
156298         return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
156301 @@ -1480,7 +1480,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
156302         struct packet_fanout *f = po->fanout;
156304         spin_lock(&f->lock);
156305 -       f->arr[f->num_members] = sk;
156306 +       rcu_assign_pointer(f->arr[f->num_members], sk);
156307         smp_wmb();
156308         f->num_members++;
156309         if (f->num_members == 1)
156310 @@ -1495,11 +1495,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
156312         spin_lock(&f->lock);
156313         for (i = 0; i < f->num_members; i++) {
156314 -               if (f->arr[i] == sk)
156315 +               if (rcu_dereference_protected(f->arr[i],
156316 +                                             lockdep_is_held(&f->lock)) == sk)
156317                         break;
156318         }
156319         BUG_ON(i >= f->num_members);
156320 -       f->arr[i] = f->arr[f->num_members - 1];
156321 +       rcu_assign_pointer(f->arr[i],
156322 +                          rcu_dereference_protected(f->arr[f->num_members - 1],
156323 +                                                    lockdep_is_held(&f->lock)));
156324         f->num_members--;
156325         if (f->num_members == 0)
156326                 __dev_remove_pack(&f->prot_hook);
156327 diff --git a/net/packet/internal.h b/net/packet/internal.h
156328 index 5f61e59ebbff..48af35b1aed2 100644
156329 --- a/net/packet/internal.h
156330 +++ b/net/packet/internal.h
156331 @@ -94,7 +94,7 @@ struct packet_fanout {
156332         spinlock_t              lock;
156333         refcount_t              sk_ref;
156334         struct packet_type      prot_hook ____cacheline_aligned_in_smp;
156335 -       struct sock             *arr[];
156336 +       struct sock     __rcu   *arr[];
156339  struct packet_rollover {
156340 diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
156341 index 2bf2b1943e61..fa611678af05 100644
156342 --- a/net/qrtr/mhi.c
156343 +++ b/net/qrtr/mhi.c
156344 @@ -50,6 +50,9 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
156345         struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
156346         int rc;
156348 +       if (skb->sk)
156349 +               sock_hold(skb->sk);
156351         rc = skb_linearize(skb);
156352         if (rc)
156353                 goto free_skb;
156354 @@ -59,12 +62,11 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
156355         if (rc)
156356                 goto free_skb;
156358 -       if (skb->sk)
156359 -               sock_hold(skb->sk);
156361         return rc;
156363  free_skb:
156364 +       if (skb->sk)
156365 +               sock_put(skb->sk);
156366         kfree_skb(skb);
156368         return rc;
156369 diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
156370 index 16e888a9601d..48fdf7293dea 100644
156371 --- a/net/sched/act_ct.c
156372 +++ b/net/sched/act_ct.c
156373 @@ -732,7 +732,8 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
156374  #endif
156375         }
156377 -       *qdisc_skb_cb(skb) = cb;
156378 +       if (err != -EINPROGRESS)
156379 +               *qdisc_skb_cb(skb) = cb;
156380         skb_clear_hash(skb);
156381         skb->ignore_df = 1;
156382         return err;
156383 @@ -967,7 +968,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
156384         err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
156385         if (err == -EINPROGRESS) {
156386                 retval = TC_ACT_STOLEN;
156387 -               goto out;
156388 +               goto out_clear;
156389         }
156390         if (err)
156391                 goto drop;
156392 @@ -1030,7 +1031,6 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
156393  out_push:
156394         skb_push_rcsum(skb, nh_ofs);
156396 -out:
156397         qdisc_skb_cb(skb)->post_ct = true;
156398  out_clear:
156399         tcf_action_update_bstats(&c->common, skb);
156400 diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
156401 index c69a4ba9c33f..3035f96c6e6c 100644
156402 --- a/net/sched/cls_flower.c
156403 +++ b/net/sched/cls_flower.c
156404 @@ -209,16 +209,16 @@ static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
156405                                   struct fl_flow_key *key,
156406                                   struct fl_flow_key *mkey)
156408 -       __be16 min_mask, max_mask, min_val, max_val;
156409 +       u16 min_mask, max_mask, min_val, max_val;
156411 -       min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
156412 -       max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
156413 -       min_val = htons(filter->key.tp_range.tp_min.dst);
156414 -       max_val = htons(filter->key.tp_range.tp_max.dst);
156415 +       min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
156416 +       max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
156417 +       min_val = ntohs(filter->key.tp_range.tp_min.dst);
156418 +       max_val = ntohs(filter->key.tp_range.tp_max.dst);
156420         if (min_mask && max_mask) {
156421 -               if (htons(key->tp_range.tp.dst) < min_val ||
156422 -                   htons(key->tp_range.tp.dst) > max_val)
156423 +               if (ntohs(key->tp_range.tp.dst) < min_val ||
156424 +                   ntohs(key->tp_range.tp.dst) > max_val)
156425                         return false;
156427                 /* skb does not have min and max values */
156428 @@ -232,16 +232,16 @@ static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
156429                                   struct fl_flow_key *key,
156430                                   struct fl_flow_key *mkey)
156432 -       __be16 min_mask, max_mask, min_val, max_val;
156433 +       u16 min_mask, max_mask, min_val, max_val;
156435 -       min_mask = htons(filter->mask->key.tp_range.tp_min.src);
156436 -       max_mask = htons(filter->mask->key.tp_range.tp_max.src);
156437 -       min_val = htons(filter->key.tp_range.tp_min.src);
156438 -       max_val = htons(filter->key.tp_range.tp_max.src);
156439 +       min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
156440 +       max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
156441 +       min_val = ntohs(filter->key.tp_range.tp_min.src);
156442 +       max_val = ntohs(filter->key.tp_range.tp_max.src);
156444         if (min_mask && max_mask) {
156445 -               if (htons(key->tp_range.tp.src) < min_val ||
156446 -                   htons(key->tp_range.tp.src) > max_val)
156447 +               if (ntohs(key->tp_range.tp.src) < min_val ||
156448 +                   ntohs(key->tp_range.tp.src) > max_val)
156449                         return false;
156451                 /* skb does not have min and max values */
156452 @@ -783,16 +783,16 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
156453                        TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
156455         if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
156456 -           htons(key->tp_range.tp_max.dst) <=
156457 -           htons(key->tp_range.tp_min.dst)) {
156458 +           ntohs(key->tp_range.tp_max.dst) <=
156459 +           ntohs(key->tp_range.tp_min.dst)) {
156460                 NL_SET_ERR_MSG_ATTR(extack,
156461                                     tb[TCA_FLOWER_KEY_PORT_DST_MIN],
156462                                     "Invalid destination port range (min must be strictly smaller than max)");
156463                 return -EINVAL;
156464         }
156465         if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
156466 -           htons(key->tp_range.tp_max.src) <=
156467 -           htons(key->tp_range.tp_min.src)) {
156468 +           ntohs(key->tp_range.tp_max.src) <=
156469 +           ntohs(key->tp_range.tp_min.src)) {
156470                 NL_SET_ERR_MSG_ATTR(extack,
156471                                     tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
156472                                     "Invalid source port range (min must be strictly smaller than max)");
156473 diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
156474 index e1e77d3fb6c0..8c06381391d6 100644
156475 --- a/net/sched/sch_frag.c
156476 +++ b/net/sched/sch_frag.c
156477 @@ -90,16 +90,16 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
156478         }
156480         if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
156481 -               struct dst_entry sch_frag_dst;
156482 +               struct rtable sch_frag_rt = { 0 };
156483                 unsigned long orig_dst;
156485                 sch_frag_prepare_frag(skb, xmit);
156486 -               dst_init(&sch_frag_dst, &sch_frag_dst_ops, NULL, 1,
156487 +               dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
156488                          DST_OBSOLETE_NONE, DST_NOCOUNT);
156489 -               sch_frag_dst.dev = skb->dev;
156490 +               sch_frag_rt.dst.dev = skb->dev;
156492                 orig_dst = skb->_skb_refdst;
156493 -               skb_dst_set_noref(skb, &sch_frag_dst);
156494 +               skb_dst_set_noref(skb, &sch_frag_rt.dst);
156495                 IPCB(skb)->frag_max_size = mru;
156497                 ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
156498 diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
156499 index 8287894541e3..909c798b7403 100644
156500 --- a/net/sched/sch_taprio.c
156501 +++ b/net/sched/sch_taprio.c
156502 @@ -901,6 +901,12 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
156504                 list_for_each_entry(entry, &new->entries, list)
156505                         cycle = ktime_add_ns(cycle, entry->interval);
156507 +               if (!cycle) {
156508 +                       NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
156509 +                       return -EINVAL;
156510 +               }
156512                 new->cycle_time = cycle;
156513         }
156515 diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
156516 index f77484df097b..da4ce0947c3a 100644
156517 --- a/net/sctp/sm_make_chunk.c
156518 +++ b/net/sctp/sm_make_chunk.c
156519 @@ -3147,7 +3147,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
156520                  * primary.
156521                  */
156522                 if (af->is_any(&addr))
156523 -                       memcpy(&addr.v4, sctp_source(asconf), sizeof(addr));
156524 +                       memcpy(&addr, sctp_source(asconf), sizeof(addr));
156526                 if (security_sctp_bind_connect(asoc->ep->base.sk,
156527                                                SCTP_PARAM_SET_PRIMARY,
156528 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
156529 index af2b7041fa4e..73bb4c6e9201 100644
156530 --- a/net/sctp/sm_statefuns.c
156531 +++ b/net/sctp/sm_statefuns.c
156532 @@ -1852,20 +1852,35 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
156533                         SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
156534         sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
156536 -       repl = sctp_make_cookie_ack(new_asoc, chunk);
156537 +       /* Update the content of current association. */
156538 +       if (sctp_assoc_update((struct sctp_association *)asoc, new_asoc)) {
156539 +               struct sctp_chunk *abort;
156541 +               abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
156542 +               if (abort) {
156543 +                       sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
156544 +                       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
156545 +               }
156546 +               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
156547 +               sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
156548 +                               SCTP_PERR(SCTP_ERROR_RSRC_LOW));
156549 +               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
156550 +               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
156551 +               goto nomem;
156552 +       }
156554 +       repl = sctp_make_cookie_ack(asoc, chunk);
156555         if (!repl)
156556                 goto nomem;
156558         /* Report association restart to upper layer. */
156559         ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
156560 -                                            new_asoc->c.sinit_num_ostreams,
156561 -                                            new_asoc->c.sinit_max_instreams,
156562 +                                            asoc->c.sinit_num_ostreams,
156563 +                                            asoc->c.sinit_max_instreams,
156564                                              NULL, GFP_ATOMIC);
156565         if (!ev)
156566                 goto nomem_ev;
156568 -       /* Update the content of current association. */
156569 -       sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
156570         sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
156571         if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
156572              sctp_state(asoc, SHUTDOWN_SENT)) &&
156573 @@ -1929,7 +1944,8 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
156574         sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
156575         sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
156576                         SCTP_STATE(SCTP_STATE_ESTABLISHED));
156577 -       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
156578 +       if (asoc->state < SCTP_STATE_ESTABLISHED)
156579 +               SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
156580         sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
156582         repl = sctp_make_cookie_ack(new_asoc, chunk);
156583 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
156584 index b9b3d899a611..4ae428f2f2c5 100644
156585 --- a/net/sctp/socket.c
156586 +++ b/net/sctp/socket.c
156587 @@ -357,6 +357,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
156588         return af;
156591 +static void sctp_auto_asconf_init(struct sctp_sock *sp)
156593 +       struct net *net = sock_net(&sp->inet.sk);
156595 +       if (net->sctp.default_auto_asconf) {
156596 +               spin_lock(&net->sctp.addr_wq_lock);
156597 +               list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
156598 +               spin_unlock(&net->sctp.addr_wq_lock);
156599 +               sp->do_auto_asconf = 1;
156600 +       }
156603  /* Bind a local address either to an endpoint or to an association.  */
156604  static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
156606 @@ -418,8 +430,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
156607                 return -EADDRINUSE;
156609         /* Refresh ephemeral port.  */
156610 -       if (!bp->port)
156611 +       if (!bp->port) {
156612                 bp->port = inet_sk(sk)->inet_num;
156613 +               sctp_auto_asconf_init(sp);
156614 +       }
156616         /* Add the address to the bind address list.
156617          * Use GFP_ATOMIC since BHs will be disabled.
156618 @@ -1520,9 +1534,11 @@ static void sctp_close(struct sock *sk, long timeout)
156620         /* Supposedly, no process has access to the socket, but
156621          * the net layers still may.
156622 +        * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
156623 +        * held and that should be grabbed before socket lock.
156624          */
156625 -       local_bh_disable();
156626 -       bh_lock_sock(sk);
156627 +       spin_lock_bh(&net->sctp.addr_wq_lock);
156628 +       bh_lock_sock_nested(sk);
156630         /* Hold the sock, since sk_common_release() will put sock_put()
156631          * and we have just a little more cleanup.
156632 @@ -1531,7 +1547,7 @@ static void sctp_close(struct sock *sk, long timeout)
156633         sk_common_release(sk);
156635         bh_unlock_sock(sk);
156636 -       local_bh_enable();
156637 +       spin_unlock_bh(&net->sctp.addr_wq_lock);
156639         sock_put(sk);
156641 @@ -4991,16 +5007,6 @@ static int sctp_init_sock(struct sock *sk)
156642         sk_sockets_allocated_inc(sk);
156643         sock_prot_inuse_add(net, sk->sk_prot, 1);
156645 -       if (net->sctp.default_auto_asconf) {
156646 -               spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
156647 -               list_add_tail(&sp->auto_asconf_list,
156648 -                   &net->sctp.auto_asconf_splist);
156649 -               sp->do_auto_asconf = 1;
156650 -               spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
156651 -       } else {
156652 -               sp->do_auto_asconf = 0;
156653 -       }
156655         local_bh_enable();
156657         return 0;
156658 @@ -5025,9 +5031,7 @@ static void sctp_destroy_sock(struct sock *sk)
156660         if (sp->do_auto_asconf) {
156661                 sp->do_auto_asconf = 0;
156662 -               spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
156663                 list_del(&sp->auto_asconf_list);
156664 -               spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
156665         }
156666         sctp_endpoint_free(sp->ep);
156667         local_bh_disable();
156668 @@ -9398,6 +9402,8 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
156669                         return err;
156670         }
156672 +       sctp_auto_asconf_init(newsp);
156674         /* Move any messages in the old socket's receive queue that are for the
156675          * peeled off association to the new socket's receive queue.
156676          */
156677 diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
156678 index 47340b3b514f..cb23cca72c24 100644
156679 --- a/net/smc/af_smc.c
156680 +++ b/net/smc/af_smc.c
156681 @@ -2162,6 +2162,9 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
156682         struct smc_sock *smc;
156683         int val, rc;
156685 +       if (level == SOL_TCP && optname == TCP_ULP)
156686 +               return -EOPNOTSUPP;
156688         smc = smc_sk(sk);
156690         /* generic setsockopts reaching us here always apply to the
156691 @@ -2186,7 +2189,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
156692         if (rc || smc->use_fallback)
156693                 goto out;
156694         switch (optname) {
156695 -       case TCP_ULP:
156696         case TCP_FASTOPEN:
156697         case TCP_FASTOPEN_CONNECT:
156698         case TCP_FASTOPEN_KEY:
156699 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
156700 index 612f0a641f4c..f555d335e910 100644
156701 --- a/net/sunrpc/clnt.c
156702 +++ b/net/sunrpc/clnt.c
156703 @@ -1799,7 +1799,6 @@ call_allocate(struct rpc_task *task)
156705         status = xprt->ops->buf_alloc(task);
156706         trace_rpc_buf_alloc(task, status);
156707 -       xprt_inject_disconnect(xprt);
156708         if (status == 0)
156709                 return;
156710         if (status != -ENOMEM) {
156711 @@ -2457,12 +2456,6 @@ call_decode(struct rpc_task *task)
156712                 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
156713         }
156715 -       /*
156716 -        * Ensure that we see all writes made by xprt_complete_rqst()
156717 -        * before it changed req->rq_reply_bytes_recvd.
156718 -        */
156719 -       smp_rmb();
156721         /*
156722          * Did we ever call xprt_complete_rqst()? If not, we should assume
156723          * the message is incomplete.
156724 @@ -2471,6 +2464,11 @@ call_decode(struct rpc_task *task)
156725         if (!req->rq_reply_bytes_recvd)
156726                 goto out;
156728 +       /* Ensure that we see all writes made by xprt_complete_rqst()
156729 +        * before it changed req->rq_reply_bytes_recvd.
156730 +        */
156731 +       smp_rmb();
156733         req->rq_rcv_buf.len = req->rq_private_buf.len;
156734         trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);
156736 diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
156737 index d76dc9d95d16..0de918cb3d90 100644
156738 --- a/net/sunrpc/svc.c
156739 +++ b/net/sunrpc/svc.c
156740 @@ -846,7 +846,8 @@ void
156741  svc_rqst_free(struct svc_rqst *rqstp)
156743         svc_release_buffer(rqstp);
156744 -       put_page(rqstp->rq_scratch_page);
156745 +       if (rqstp->rq_scratch_page)
156746 +               put_page(rqstp->rq_scratch_page);
156747         kfree(rqstp->rq_resp);
156748         kfree(rqstp->rq_argp);
156749         kfree(rqstp->rq_auth_data);
156750 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
156751 index 2e2f007dfc9f..7cde41a936a4 100644
156752 --- a/net/sunrpc/svcsock.c
156753 +++ b/net/sunrpc/svcsock.c
156754 @@ -1171,7 +1171,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
156755         tcp_sock_set_cork(svsk->sk_sk, true);
156756         err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent);
156757         xdr_free_bvec(xdr);
156758 -       trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
156759 +       trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
156760         if (err < 0 || sent != (xdr->len + sizeof(marker)))
156761                 goto out_close;
156762         if (atomic_dec_and_test(&svsk->sk_sendqlen))
156763 diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
156764 index 691ccf8049a4..20fe31b1b776 100644
156765 --- a/net/sunrpc/xprt.c
156766 +++ b/net/sunrpc/xprt.c
156767 @@ -698,9 +698,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
156768         const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
156769         int status = 0;
156771 -       if (time_before(jiffies, req->rq_minortimeo))
156772 -               return status;
156773         if (time_before(jiffies, req->rq_majortimeo)) {
156774 +               if (time_before(jiffies, req->rq_minortimeo))
156775 +                       return status;
156776                 if (to->to_exponential)
156777                         req->rq_timeout <<= 1;
156778                 else
156779 @@ -1469,8 +1469,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
156780         struct rpc_xprt *xprt = req->rq_xprt;
156782         if (!xprt_lock_write(xprt, task)) {
156783 -               trace_xprt_transmit_queued(xprt, task);
156785                 /* Race breaker: someone may have transmitted us */
156786                 if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
156787                         rpc_wake_up_queued_task_set_status(&xprt->sending,
156788 @@ -1483,7 +1481,10 @@ bool xprt_prepare_transmit(struct rpc_task *task)
156790  void xprt_end_transmit(struct rpc_task *task)
156792 -       xprt_release_write(task->tk_rqstp->rq_xprt, task);
156793 +       struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
156795 +       xprt_inject_disconnect(xprt);
156796 +       xprt_release_write(xprt, task);
156799  /**
156800 @@ -1885,7 +1886,6 @@ void xprt_release(struct rpc_task *task)
156801         spin_unlock(&xprt->transport_lock);
156802         if (req->rq_buffer)
156803                 xprt->ops->buf_free(task);
156804 -       xprt_inject_disconnect(xprt);
156805         xdr_free_bvec(&req->rq_rcv_buf);
156806         xdr_free_bvec(&req->rq_snd_buf);
156807         if (req->rq_cred != NULL)
156808 diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
156809 index 766a1048a48a..aca2228095db 100644
156810 --- a/net/sunrpc/xprtrdma/frwr_ops.c
156811 +++ b/net/sunrpc/xprtrdma/frwr_ops.c
156812 @@ -257,6 +257,7 @@ int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
156813         ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
156814         ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
156815         ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
156816 +       ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
156817         ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
156819         ep->re_max_rdma_segs =
156820 @@ -575,7 +576,6 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
156821                 mr = container_of(frwr, struct rpcrdma_mr, frwr);
156822                 bad_wr = bad_wr->next;
156824 -               list_del_init(&mr->mr_list);
156825                 frwr_mr_recycle(mr);
156826         }
156828 diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
156829 index 292f066d006e..21ddd78a8c35 100644
156830 --- a/net/sunrpc/xprtrdma/rpc_rdma.c
156831 +++ b/net/sunrpc/xprtrdma/rpc_rdma.c
156832 @@ -1430,9 +1430,10 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
156833                 credits = 1;    /* don't deadlock */
156834         else if (credits > r_xprt->rx_ep->re_max_requests)
156835                 credits = r_xprt->rx_ep->re_max_requests;
156836 +       rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1),
156837 +                          false);
156838         if (buf->rb_credits != credits)
156839                 rpcrdma_update_cwnd(r_xprt, credits);
156840 -       rpcrdma_post_recvs(r_xprt, false);
156842         req = rpcr_to_rdmar(rqst);
156843         if (unlikely(req->rl_reply))
156844 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
156845 index 52c759a8543e..3669661457c1 100644
156846 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
156847 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
156848 @@ -958,7 +958,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
156849         p = xdr_reserve_space(&sctxt->sc_stream,
156850                               rpcrdma_fixed_maxsz * sizeof(*p));
156851         if (!p)
156852 -               goto err0;
156853 +               goto err1;
156855         ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
156856         if (ret < 0)
156857 @@ -970,11 +970,11 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
156858         *p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;
156860         if (svc_rdma_encode_read_list(sctxt) < 0)
156861 -               goto err0;
156862 +               goto err1;
156863         if (svc_rdma_encode_write_list(rctxt, sctxt) < 0)
156864 -               goto err0;
156865 +               goto err1;
156866         if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
156867 -               goto err0;
156868 +               goto err1;
156870         ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
156871         if (ret < 0)
156872 diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
156873 index 78d29d1bcc20..09953597d055 100644
156874 --- a/net/sunrpc/xprtrdma/transport.c
156875 +++ b/net/sunrpc/xprtrdma/transport.c
156876 @@ -262,8 +262,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
156877   * xprt_rdma_inject_disconnect - inject a connection fault
156878   * @xprt: transport context
156879   *
156880 - * If @xprt is connected, disconnect it to simulate spurious connection
156881 - * loss.
156882 + * If @xprt is connected, disconnect it to simulate spurious
156883 + * connection loss. Caller must hold @xprt's send lock to
156884 + * ensure that data structures and hardware resources are
156885 + * stable during the rdma_disconnect() call.
156886   */
156887  static void
156888  xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
156889 diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
156890 index ec912cf9c618..f3fffc74ab0f 100644
156891 --- a/net/sunrpc/xprtrdma/verbs.c
156892 +++ b/net/sunrpc/xprtrdma/verbs.c
156893 @@ -535,7 +535,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
156894          * outstanding Receives.
156895          */
156896         rpcrdma_ep_get(ep);
156897 -       rpcrdma_post_recvs(r_xprt, true);
156898 +       rpcrdma_post_recvs(r_xprt, 1, true);
156900         rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
156901         if (rc)
156902 @@ -1364,21 +1364,21 @@ int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
156903  /**
156904   * rpcrdma_post_recvs - Refill the Receive Queue
156905   * @r_xprt: controlling transport instance
156906 - * @temp: mark Receive buffers to be deleted after use
156907 + * @needed: current credit grant
156908 + * @temp: mark Receive buffers to be deleted after one use
156909   *
156910   */
156911 -void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
156912 +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
156914         struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
156915         struct rpcrdma_ep *ep = r_xprt->rx_ep;
156916         struct ib_recv_wr *wr, *bad_wr;
156917         struct rpcrdma_rep *rep;
156918 -       int needed, count, rc;
156919 +       int count, rc;
156921         rc = 0;
156922         count = 0;
156924 -       needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
156925         if (likely(ep->re_receive_count > needed))
156926                 goto out;
156927         needed -= ep->re_receive_count;
156928 diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
156929 index fe3be985e239..28af11fbe643 100644
156930 --- a/net/sunrpc/xprtrdma/xprt_rdma.h
156931 +++ b/net/sunrpc/xprtrdma/xprt_rdma.h
156932 @@ -461,7 +461,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
156933  void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
156935  int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
156936 -void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
156937 +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
156940   * Buffer calls - xprtrdma/verbs.c
156941 diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
156942 index 97710ce36047..c89ce47c56cf 100644
156943 --- a/net/tipc/crypto.c
156944 +++ b/net/tipc/crypto.c
156945 @@ -1492,6 +1492,8 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
156946         /* Allocate statistic structure */
156947         c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
156948         if (!c->stats) {
156949 +               if (c->wq)
156950 +                       destroy_workqueue(c->wq);
156951                 kfree_sensitive(c);
156952                 return -ENOMEM;
156953         }
156954 diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
156955 index 5a1ce64039f7..0749df80454d 100644
156956 --- a/net/tipc/netlink_compat.c
156957 +++ b/net/tipc/netlink_compat.c
156958 @@ -696,7 +696,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
156959         if (err)
156960                 return err;
156962 -       link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
156963 +       link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
156964         link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
156965         nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME],
156966                     TIPC_MAX_LINK_NAME);
156967 diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
156968 index e4370b1b7494..902cb6dd710b 100644
156969 --- a/net/vmw_vsock/virtio_transport_common.c
156970 +++ b/net/vmw_vsock/virtio_transport_common.c
156971 @@ -733,6 +733,23 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
156972         return t->send_pkt(reply);
156975 +/* This function should be called with sk_lock held and SOCK_DONE set */
156976 +static void virtio_transport_remove_sock(struct vsock_sock *vsk)
156978 +       struct virtio_vsock_sock *vvs = vsk->trans;
156979 +       struct virtio_vsock_pkt *pkt, *tmp;
156981 +       /* We don't need to take rx_lock, as the socket is closing and we are
156982 +        * removing it.
156983 +        */
156984 +       list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
156985 +               list_del(&pkt->list);
156986 +               virtio_transport_free_pkt(pkt);
156987 +       }
156989 +       vsock_remove_sock(vsk);
156992  static void virtio_transport_wait_close(struct sock *sk, long timeout)
156994         if (timeout) {
156995 @@ -765,7 +782,7 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
156996             (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
156997                 vsk->close_work_scheduled = false;
156999 -               vsock_remove_sock(vsk);
157000 +               virtio_transport_remove_sock(vsk);
157002                 /* Release refcnt obtained when we scheduled the timeout */
157003                 sock_put(sk);
157004 @@ -828,22 +845,15 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
157006  void virtio_transport_release(struct vsock_sock *vsk)
157008 -       struct virtio_vsock_sock *vvs = vsk->trans;
157009 -       struct virtio_vsock_pkt *pkt, *tmp;
157010         struct sock *sk = &vsk->sk;
157011         bool remove_sock = true;
157013         if (sk->sk_type == SOCK_STREAM)
157014                 remove_sock = virtio_transport_close(vsk);
157016 -       list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
157017 -               list_del(&pkt->list);
157018 -               virtio_transport_free_pkt(pkt);
157019 -       }
157021         if (remove_sock) {
157022                 sock_set_flag(sk, SOCK_DONE);
157023 -               vsock_remove_sock(vsk);
157024 +               virtio_transport_remove_sock(vsk);
157025         }
157027  EXPORT_SYMBOL_GPL(virtio_transport_release);
157028 diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
157029 index 8b65323207db..1c9ecb18b8e6 100644
157030 --- a/net/vmw_vsock/vmci_transport.c
157031 +++ b/net/vmw_vsock/vmci_transport.c
157032 @@ -568,8 +568,7 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
157033                                peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
157034  out:
157035         if (err < 0) {
157036 -               pr_err("Could not attach to queue pair with %d\n",
157037 -                      err);
157038 +               pr_err_once("Could not attach to queue pair with %d\n", err);
157039                 err = vmci_transport_error_to_vsock_error(err);
157040         }
157042 diff --git a/net/wireless/core.c b/net/wireless/core.c
157043 index a2785379df6e..589ee5a69a2e 100644
157044 --- a/net/wireless/core.c
157045 +++ b/net/wireless/core.c
157046 @@ -332,14 +332,29 @@ static void cfg80211_event_work(struct work_struct *work)
157047  void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
157049         struct wireless_dev *wdev, *tmp;
157050 +       bool found = false;
157052         ASSERT_RTNL();
157053 -       lockdep_assert_wiphy(&rdev->wiphy);
157055 +       list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
157056 +               if (wdev->nl_owner_dead) {
157057 +                       if (wdev->netdev)
157058 +                               dev_close(wdev->netdev);
157059 +                       found = true;
157060 +               }
157061 +       }
157063 +       if (!found)
157064 +               return;
157066 +       wiphy_lock(&rdev->wiphy);
157067         list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
157068 -               if (wdev->nl_owner_dead)
157069 +               if (wdev->nl_owner_dead) {
157070 +                       cfg80211_leave(rdev, wdev);
157071                         rdev_del_virtual_intf(rdev, wdev);
157072 +               }
157073         }
157074 +       wiphy_unlock(&rdev->wiphy);
157077  static void cfg80211_destroy_iface_wk(struct work_struct *work)
157078 @@ -350,9 +365,7 @@ static void cfg80211_destroy_iface_wk(struct work_struct *work)
157079                             destroy_work);
157081         rtnl_lock();
157082 -       wiphy_lock(&rdev->wiphy);
157083         cfg80211_destroy_ifaces(rdev);
157084 -       wiphy_unlock(&rdev->wiphy);
157085         rtnl_unlock();
157088 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
157089 index b1df42e4f1eb..a5224da63832 100644
157090 --- a/net/wireless/nl80211.c
157091 +++ b/net/wireless/nl80211.c
157092 @@ -3929,7 +3929,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
157093         return err;
157096 -static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
157097 +static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
157099         struct cfg80211_registered_device *rdev = info->user_ptr[0];
157100         struct vif_params params;
157101 @@ -3938,9 +3938,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
157102         int err;
157103         enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
157105 -       /* to avoid failing a new interface creation due to pending removal */
157106 -       cfg80211_destroy_ifaces(rdev);
157108         memset(&params, 0, sizeof(params));
157110         if (!info->attrs[NL80211_ATTR_IFNAME])
157111 @@ -4028,6 +4025,21 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
157112         return genlmsg_reply(msg, info);
157115 +static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
157117 +       struct cfg80211_registered_device *rdev = info->user_ptr[0];
157118 +       int ret;
157120 +       /* to avoid failing a new interface creation due to pending removal */
157121 +       cfg80211_destroy_ifaces(rdev);
157123 +       wiphy_lock(&rdev->wiphy);
157124 +       ret = _nl80211_new_interface(skb, info);
157125 +       wiphy_unlock(&rdev->wiphy);
157127 +       return ret;
157130  static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
157132         struct cfg80211_registered_device *rdev = info->user_ptr[0];
157133 @@ -15040,7 +15052,9 @@ static const struct genl_small_ops nl80211_small_ops[] = {
157134                 .doit = nl80211_new_interface,
157135                 .flags = GENL_UNS_ADMIN_PERM,
157136                 .internal_flags = NL80211_FLAG_NEED_WIPHY |
157137 -                                 NL80211_FLAG_NEED_RTNL,
157138 +                                 NL80211_FLAG_NEED_RTNL |
157139 +                                 /* we take the wiphy mutex later ourselves */
157140 +                                 NL80211_FLAG_NO_WIPHY_MTX,
157141         },
157142         {
157143                 .cmd = NL80211_CMD_DEL_INTERFACE,
157144 diff --git a/net/wireless/scan.c b/net/wireless/scan.c
157145 index 758eb7d2a706..caa8eafbd583 100644
157146 --- a/net/wireless/scan.c
157147 +++ b/net/wireless/scan.c
157148 @@ -1751,6 +1751,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
157150                 if (rdev->bss_entries >= bss_entries_limit &&
157151                     !cfg80211_bss_expire_oldest(rdev)) {
157152 +                       if (!list_empty(&new->hidden_list))
157153 +                               list_del(&new->hidden_list);
157154                         kfree(new);
157155                         goto drop;
157156                 }
157157 diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
157158 index 4faabd1ecfd1..143979ea4165 100644
157159 --- a/net/xdp/xsk.c
157160 +++ b/net/xdp/xsk.c
157161 @@ -454,12 +454,16 @@ static int xsk_generic_xmit(struct sock *sk)
157162         struct sk_buff *skb;
157163         unsigned long flags;
157164         int err = 0;
157165 +       u32 hr, tr;
157167         mutex_lock(&xs->mutex);
157169         if (xs->queue_id >= xs->dev->real_num_tx_queues)
157170                 goto out;
157172 +       hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
157173 +       tr = xs->dev->needed_tailroom;
157175         while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
157176                 char *buffer;
157177                 u64 addr;
157178 @@ -471,11 +475,13 @@ static int xsk_generic_xmit(struct sock *sk)
157179                 }
157181                 len = desc.len;
157182 -               skb = sock_alloc_send_skb(sk, len, 1, &err);
157183 +               skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
157184                 if (unlikely(!skb))
157185                         goto out;
157187 +               skb_reserve(skb, hr);
157188                 skb_put(skb, len);
157190                 addr = desc.addr;
157191                 buffer = xsk_buff_raw_get_data(xs->pool, addr);
157192                 err = skb_store_bits(skb, 0, buffer, len);
157193 diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
157194 index 2823b7c3302d..40f359bf2044 100644
157195 --- a/net/xdp/xsk_queue.h
157196 +++ b/net/xdp/xsk_queue.h
157197 @@ -128,13 +128,12 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
157198  static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
157199                                             struct xdp_desc *desc)
157201 -       u64 chunk, chunk_end;
157202 +       u64 chunk;
157204 -       chunk = xp_aligned_extract_addr(pool, desc->addr);
157205 -       chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
157206 -       if (chunk != chunk_end)
157207 +       if (desc->len > pool->chunk_size)
157208                 return false;
157210 +       chunk = xp_aligned_extract_addr(pool, desc->addr);
157211         if (chunk >= pool->addrs_cnt)
157212                 return false;
157214 diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
157215 index 3f4599c9a202..ef30d2b353b0 100644
157216 --- a/samples/bpf/tracex1_kern.c
157217 +++ b/samples/bpf/tracex1_kern.c
157218 @@ -26,7 +26,7 @@
157219  SEC("kprobe/__netif_receive_skb_core")
157220  int bpf_prog1(struct pt_regs *ctx)
157222 -       /* attaches to kprobe netif_receive_skb,
157223 +       /* attaches to kprobe __netif_receive_skb_core,
157224          * looks for packets on loobpack device and prints them
157225          */
157226         char devname[IFNAMSIZ];
157227 @@ -35,7 +35,7 @@ int bpf_prog1(struct pt_regs *ctx)
157228         int len;
157230         /* non-portable! works for the given kernel only */
157231 -       skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
157232 +       bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
157233         dev = _(skb->dev);
157234         len = _(skb->len);
157236 diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
157237 index c406f03ee551..5a90aa527877 100644
157238 --- a/samples/kfifo/bytestream-example.c
157239 +++ b/samples/kfifo/bytestream-example.c
157240 @@ -122,8 +122,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
157241         ret = kfifo_from_user(&test, buf, count, &copied);
157243         mutex_unlock(&write_lock);
157244 +       if (ret)
157245 +               return ret;
157247 -       return ret ? ret : copied;
157248 +       return copied;
157251  static ssize_t fifo_read(struct file *file, char __user *buf,
157252 @@ -138,8 +140,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
157253         ret = kfifo_to_user(&test, buf, count, &copied);
157255         mutex_unlock(&read_lock);
157256 +       if (ret)
157257 +               return ret;
157259 -       return ret ? ret : copied;
157260 +       return copied;
157263  static const struct proc_ops fifo_proc_ops = {
157264 diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
157265 index 78977fc4a23f..e5403d8c971a 100644
157266 --- a/samples/kfifo/inttype-example.c
157267 +++ b/samples/kfifo/inttype-example.c
157268 @@ -115,8 +115,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
157269         ret = kfifo_from_user(&test, buf, count, &copied);
157271         mutex_unlock(&write_lock);
157272 +       if (ret)
157273 +               return ret;
157275 -       return ret ? ret : copied;
157276 +       return copied;
157279  static ssize_t fifo_read(struct file *file, char __user *buf,
157280 @@ -131,8 +133,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
157281         ret = kfifo_to_user(&test, buf, count, &copied);
157283         mutex_unlock(&read_lock);
157284 +       if (ret)
157285 +               return ret;
157287 -       return ret ? ret : copied;
157288 +       return copied;
157291  static const struct proc_ops fifo_proc_ops = {
157292 diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
157293 index c507998a2617..f64f3d62d6c2 100644
157294 --- a/samples/kfifo/record-example.c
157295 +++ b/samples/kfifo/record-example.c
157296 @@ -129,8 +129,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
157297         ret = kfifo_from_user(&test, buf, count, &copied);
157299         mutex_unlock(&write_lock);
157300 +       if (ret)
157301 +               return ret;
157303 -       return ret ? ret : copied;
157304 +       return copied;
157307  static ssize_t fifo_read(struct file *file, char __user *buf,
157308 @@ -145,8 +147,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
157309         ret = kfifo_to_user(&test, buf, count, &copied);
157311         mutex_unlock(&read_lock);
157312 +       if (ret)
157313 +               return ret;
157315 -       return ret ? ret : copied;
157316 +       return copied;
157319  static const struct proc_ops fifo_proc_ops = {
157320 diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
157321 index 066beffca09a..4ca5579af4e4 100644
157322 --- a/scripts/Makefile.modpost
157323 +++ b/scripts/Makefile.modpost
157324 @@ -68,7 +68,20 @@ else
157325  ifeq ($(KBUILD_EXTMOD),)
157327  input-symdump := vmlinux.symvers
157328 -output-symdump := Module.symvers
157329 +output-symdump := modules-only.symvers
157331 +quiet_cmd_cat = GEN     $@
157332 +      cmd_cat = cat $(real-prereqs) > $@
157334 +ifneq ($(wildcard vmlinux.symvers),)
157336 +__modpost: Module.symvers
157337 +Module.symvers: vmlinux.symvers modules-only.symvers FORCE
157338 +       $(call if_changed,cat)
157340 +targets += Module.symvers
157342 +endif
157344  else
157346 diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
157347 index e0f965529166..af814b39b876 100644
157348 --- a/scripts/kconfig/nconf.c
157349 +++ b/scripts/kconfig/nconf.c
157350 @@ -504,8 +504,8 @@ static int get_mext_match(const char *match_str, match_f flag)
157351         else if (flag == FIND_NEXT_MATCH_UP)
157352                 --match_start;
157354 +       match_start = (match_start + items_num) % items_num;
157355         index = match_start;
157356 -       index = (index + items_num) % items_num;
157357         while (true) {
157358                 char *str = k_menu_items[index].str;
157359                 if (strcasestr(str, match_str) != NULL)
157360 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
157361 index 24725e50c7b4..10c3fba26f03 100644
157362 --- a/scripts/mod/modpost.c
157363 +++ b/scripts/mod/modpost.c
157364 @@ -2423,19 +2423,6 @@ static void read_dump(const char *fname)
157365         fatal("parse error in symbol dump file\n");
157368 -/* For normal builds always dump all symbols.
157369 - * For external modules only dump symbols
157370 - * that are not read from kernel Module.symvers.
157371 - **/
157372 -static int dump_sym(struct symbol *sym)
157374 -       if (!external_module)
157375 -               return 1;
157376 -       if (sym->module->from_dump)
157377 -               return 0;
157378 -       return 1;
157381  static void write_dump(const char *fname)
157383         struct buffer buf = { };
157384 @@ -2446,7 +2433,7 @@ static void write_dump(const char *fname)
157385         for (n = 0; n < SYMBOL_HASH_SIZE ; n++) {
157386                 symbol = symbolhash[n];
157387                 while (symbol) {
157388 -                       if (dump_sym(symbol)) {
157389 +                       if (!symbol->module->from_dump) {
157390                                 namespace = symbol->namespace;
157391                                 buf_printf(&buf, "0x%08x\t%s\t%s\t%s\t%s\n",
157392                                            symbol->crc, symbol->name,
157393 diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
157394 index 867860ea57da..7b83a1aaec98 100755
157395 --- a/scripts/recordmcount.pl
157396 +++ b/scripts/recordmcount.pl
157397 @@ -392,7 +392,7 @@ if ($arch eq "x86_64") {
157398      $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
157399  } elsif ($arch eq "riscv") {
157400      $function_regex = "^([0-9a-fA-F]+)\\s+<([^.0-9][0-9a-zA-Z_\\.]+)>:";
157401 -    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
157402 +    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL(_PLT)?\\s_?mcount\$";
157403      $type = ".quad";
157404      $alignment = 2;
157405  } elsif ($arch eq "nds32") {
157406 diff --git a/scripts/setlocalversion b/scripts/setlocalversion
157407 index bb709eda96cd..cf323fa660b6 100755
157408 --- a/scripts/setlocalversion
157409 +++ b/scripts/setlocalversion
157410 @@ -54,7 +54,7 @@ scm_version()
157411                         # If only the short version is requested, don't bother
157412                         # running further git commands
157413                         if $short; then
157414 -                               echo "+"
157415 +                       #       echo "+"
157416                                 return
157417                         fi
157418                         # If we are past a tagged commit (like
157419 diff --git a/security/commoncap.c b/security/commoncap.c
157420 index 1c519c875217..5cdeb73ca8fa 100644
157421 --- a/security/commoncap.c
157422 +++ b/security/commoncap.c
157423 @@ -400,7 +400,7 @@ int cap_inode_getsecurity(struct user_namespace *mnt_userns,
157424                                       &tmpbuf, size, GFP_NOFS);
157425         dput(dentry);
157427 -       if (ret < 0)
157428 +       if (ret < 0 || !tmpbuf)
157429                 return ret;
157431         fs_ns = inode->i_sb->s_user_ns;
157432 diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
157433 index e22e510ae92d..4e081e650047 100644
157434 --- a/security/integrity/ima/ima_template.c
157435 +++ b/security/integrity/ima/ima_template.c
157436 @@ -494,8 +494,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
157437                         }
157438                 }
157440 -               entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) :
157441 -                            le32_to_cpu(*(hdr[HDR_PCR].data));
157442 +               entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
157443 +                            le32_to_cpu(*(u32 *)(hdr[HDR_PCR].data));
157444                 ret = ima_restore_measurement_entry(entry);
157445                 if (ret < 0)
157446                         break;
157447 diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
157448 index 493eb91ed017..56c9b48460d9 100644
157449 --- a/security/keys/trusted-keys/trusted_tpm1.c
157450 +++ b/security/keys/trusted-keys/trusted_tpm1.c
157451 @@ -500,10 +500,12 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
157453         ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
157454         if (ret < 0)
157455 -               return ret;
157456 +               goto out;
157458 -       if (ret != TPM_NONCE_SIZE)
157459 -               return -EIO;
157460 +       if (ret != TPM_NONCE_SIZE) {
157461 +               ret = -EIO;
157462 +               goto out;
157463 +       }
157465         ordinal = htonl(TPM_ORD_SEAL);
157466         datsize = htonl(datalen);
157467 @@ -791,13 +793,33 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
157468                                 return -EINVAL;
157469                         break;
157470                 case Opt_blobauth:
157471 -                       if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
157472 -                               return -EINVAL;
157473 -                       res = hex2bin(opt->blobauth, args[0].from,
157474 -                                     SHA1_DIGEST_SIZE);
157475 -                       if (res < 0)
157476 -                               return -EINVAL;
157477 +                       /*
157478 +                        * TPM 1.2 authorizations are sha1 hashes passed in as
157479 +                        * hex strings.  TPM 2.0 authorizations are simple
157480 +                        * passwords (although it can take a hash as well)
157481 +                        */
157482 +                       opt->blobauth_len = strlen(args[0].from);
157484 +                       if (opt->blobauth_len == 2 * TPM_DIGEST_SIZE) {
157485 +                               res = hex2bin(opt->blobauth, args[0].from,
157486 +                                             TPM_DIGEST_SIZE);
157487 +                               if (res < 0)
157488 +                                       return -EINVAL;
157490 +                               opt->blobauth_len = TPM_DIGEST_SIZE;
157491 +                               break;
157492 +                       }
157494 +                       if (tpm2 && opt->blobauth_len <= sizeof(opt->blobauth)) {
157495 +                               memcpy(opt->blobauth, args[0].from,
157496 +                                      opt->blobauth_len);
157497 +                               break;
157498 +                       }
157500 +                       return -EINVAL;
157502                         break;
157504                 case Opt_migratable:
157505                         if (*args[0].from == '0')
157506                                 pay->migratable = 0;
157507 diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
157508 index c87c4df8703d..4c19d3abddbe 100644
157509 --- a/security/keys/trusted-keys/trusted_tpm2.c
157510 +++ b/security/keys/trusted-keys/trusted_tpm2.c
157511 @@ -97,10 +97,12 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
157512                              TPM_DIGEST_SIZE);
157514         /* sensitive */
157515 -       tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
157516 +       tpm_buf_append_u16(&buf, 4 + options->blobauth_len + payload->key_len + 1);
157518 +       tpm_buf_append_u16(&buf, options->blobauth_len);
157519 +       if (options->blobauth_len)
157520 +               tpm_buf_append(&buf, options->blobauth, options->blobauth_len);
157522 -       tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
157523 -       tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
157524         tpm_buf_append_u16(&buf, payload->key_len + 1);
157525         tpm_buf_append(&buf, payload->key, payload->key_len);
157526         tpm_buf_append_u8(&buf, payload->migratable);
157527 @@ -265,7 +267,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
157528                              NULL /* nonce */, 0,
157529                              TPM2_SA_CONTINUE_SESSION,
157530                              options->blobauth /* hmac */,
157531 -                            TPM_DIGEST_SIZE);
157532 +                            options->blobauth_len);
157534         rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
157535         if (rc > 0)
157536 diff --git a/security/security.c b/security/security.c
157537 index 5ac96b16f8fa..8ef0ce0faba7 100644
157538 --- a/security/security.c
157539 +++ b/security/security.c
157540 @@ -727,24 +727,28 @@ int security_binder_set_context_mgr(struct task_struct *mgr)
157542         return call_int_hook(binder_set_context_mgr, 0, mgr);
157544 +EXPORT_SYMBOL(security_binder_set_context_mgr);
157546  int security_binder_transaction(struct task_struct *from,
157547                                 struct task_struct *to)
157549         return call_int_hook(binder_transaction, 0, from, to);
157551 +EXPORT_SYMBOL(security_binder_transaction);
157553  int security_binder_transfer_binder(struct task_struct *from,
157554                                     struct task_struct *to)
157556         return call_int_hook(binder_transfer_binder, 0, from, to);
157558 +EXPORT_SYMBOL(security_binder_transfer_binder);
157560  int security_binder_transfer_file(struct task_struct *from,
157561                                   struct task_struct *to, struct file *file)
157563         return call_int_hook(binder_transfer_file, 0, from, to, file);
157565 +EXPORT_SYMBOL(security_binder_transfer_file);
157567  int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
157569 diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
157570 index ba2e01a6955c..62d19bccf3de 100644
157571 --- a/security/selinux/include/classmap.h
157572 +++ b/security/selinux/include/classmap.h
157573 @@ -242,11 +242,12 @@ struct security_class_mapping secclass_map[] = {
157574         { "infiniband_endport",
157575           { "manage_subnet", NULL } },
157576         { "bpf",
157577 -         {"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
157578 +         { "map_create", "map_read", "map_write", "prog_load", "prog_run",
157579 +           NULL } },
157580         { "xdp_socket",
157581           { COMMON_SOCK_PERMS, NULL } },
157582         { "perf_event",
157583 -         {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
157584 +         { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
157585         { "lockdown",
157586           { "integrity", "confidentiality", NULL } },
157587         { "anon_inode",
157588 diff --git a/sound/core/init.c b/sound/core/init.c
157589 index 45f4b01de23f..ef41f5b3a240 100644
157590 --- a/sound/core/init.c
157591 +++ b/sound/core/init.c
157592 @@ -398,10 +398,8 @@ int snd_card_disconnect(struct snd_card *card)
157593                 return 0;
157594         }
157595         card->shutdown = 1;
157596 -       spin_unlock(&card->files_lock);
157598         /* replace file->f_op with special dummy operations */
157599 -       spin_lock(&card->files_lock);
157600         list_for_each_entry(mfile, &card->files_list, list) {
157601                 /* it's critical part, use endless loop */
157602                 /* we have no room to fail */
157603 diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
157604 index 25778765cbfe..9897bd26a438 100644
157605 --- a/sound/firewire/Kconfig
157606 +++ b/sound/firewire/Kconfig
157607 @@ -38,7 +38,7 @@ config SND_OXFW
157608            * Mackie(Loud) Onyx 1640i (former model)
157609            * Mackie(Loud) Onyx Satellite
157610            * Mackie(Loud) Tapco Link.Firewire
157611 -          * Mackie(Loud) d.2 pro/d.4 pro
157612 +          * Mackie(Loud) d.4 pro
157613            * Mackie(Loud) U.420/U.420d
157614            * TASCAM FireOne
157615            * Stanton Controllers & Systems 1 Deck/Mixer
157616 @@ -84,7 +84,7 @@ config SND_BEBOB
157617           * PreSonus FIREBOX/FIREPOD/FP10/Inspire1394
157618           * BridgeCo RDAudio1/Audio5
157619           * Mackie Onyx 1220/1620/1640 (FireWire I/O Card)
157620 -         * Mackie d.2 (FireWire Option)
157621 +         * Mackie d.2 (FireWire Option) and d.2 Pro
157622           * Stanton FinalScratch 2 (ScratchAmp)
157623           * Tascam IF-FW/DM
157624           * Behringer XENIX UFX 1204/1604
157625 diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
157626 index 26e7cb555d3c..aa53c13b89d3 100644
157627 --- a/sound/firewire/amdtp-stream-trace.h
157628 +++ b/sound/firewire/amdtp-stream-trace.h
157629 @@ -14,8 +14,8 @@
157630  #include <linux/tracepoint.h>
157632  TRACE_EVENT(amdtp_packet,
157633 -       TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int index),
157634 -       TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, index),
157635 +       TP_PROTO(const struct amdtp_stream *s, u32 cycles, const __be32 *cip_header, unsigned int payload_length, unsigned int data_blocks, unsigned int data_block_counter, unsigned int packet_index, unsigned int index),
157636 +       TP_ARGS(s, cycles, cip_header, payload_length, data_blocks, data_block_counter, packet_index, index),
157637         TP_STRUCT__entry(
157638                 __field(unsigned int, second)
157639                 __field(unsigned int, cycle)
157640 @@ -48,7 +48,7 @@ TRACE_EVENT(amdtp_packet,
157641                 __entry->payload_quadlets = payload_length / sizeof(__be32);
157642                 __entry->data_blocks = data_blocks;
157643                 __entry->data_block_counter = data_block_counter,
157644 -               __entry->packet_index = s->packet_index;
157645 +               __entry->packet_index = packet_index;
157646                 __entry->irq = !!in_interrupt();
157647                 __entry->index = index;
157648         ),
157649 diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
157650 index 4e2f2bb7879f..e0faa6601966 100644
157651 --- a/sound/firewire/amdtp-stream.c
157652 +++ b/sound/firewire/amdtp-stream.c
157653 @@ -526,7 +526,7 @@ static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
157654         }
157655 
157656         trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
157657 -                          data_block_counter, index);
157658 +                          data_block_counter, s->packet_index, index);
157659  }
157660 
157661  static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
157662 @@ -630,21 +630,27 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
157663                                unsigned int *payload_length,
157664                                unsigned int *data_blocks,
157665                                unsigned int *data_block_counter,
157666 -                              unsigned int *syt, unsigned int index)
157667 +                              unsigned int *syt, unsigned int packet_index, unsigned int index)
157668  {
157669         const __be32 *cip_header;
157670 +       unsigned int cip_header_size;
157671         int err;
157672 
157673         *payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
157674 -       if (*payload_length > s->ctx_data.tx.ctx_header_size +
157675 -                                       s->ctx_data.tx.max_ctx_payload_length) {
157676 +
157677 +       if (!(s->flags & CIP_NO_HEADER))
157678 +               cip_header_size = 8;
157679 +       else
157680 +               cip_header_size = 0;
157681 +
157682 +       if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
157683                 dev_err(&s->unit->device,
157684                         "Detect jumbo payload: %04x %04x\n",
157685 -                       *payload_length, s->ctx_data.tx.max_ctx_payload_length);
157686 +                       *payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
157687                 return -EIO;
157688         }
157689 
157690 -       if (!(s->flags & CIP_NO_HEADER)) {
157691 +       if (cip_header_size > 0) {
157692                 cip_header = ctx_header + 2;
157693                 err = check_cip_header(s, cip_header, *payload_length,
157694                                        data_blocks, data_block_counter, syt);
157695 @@ -662,7 +668,7 @@ static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
157696         }
157697 
157698         trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
157699 -                          *data_block_counter, index);
157700 +                          *data_block_counter, packet_index, index);
157701 
157702         return err;
157703  }
157704 @@ -701,12 +707,13 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
157705                                      unsigned int packets)
157706  {
157707         unsigned int dbc = s->data_block_counter;
157708 +       unsigned int packet_index = s->packet_index;
157709 +       unsigned int queue_size = s->queue_size;
157710         int i;
157711         int err;
157712 
157713         for (i = 0; i < packets; ++i) {
157714                 struct pkt_desc *desc = descs + i;
157715 -               unsigned int index = (s->packet_index + i) % s->queue_size;
157716                 unsigned int cycle;
157717                 unsigned int payload_length;
157718                 unsigned int data_blocks;
157719 @@ -715,7 +722,7 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
157720                 cycle = compute_cycle_count(ctx_header[1]);
157721 
157722                 err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
157723 -                                         &data_blocks, &dbc, &syt, i);
157724 +                                         &data_blocks, &dbc, &syt, packet_index, i);
157725                 if (err < 0)
157726                         return err;
157727 
157728 @@ -723,13 +730,15 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
157729                 desc->syt = syt;
157730                 desc->data_blocks = data_blocks;
157731                 desc->data_block_counter = dbc;
157732 -               desc->ctx_payload = s->buffer.packets[index].buffer;
157733 +               desc->ctx_payload = s->buffer.packets[packet_index].buffer;
157734 
157735                 if (!(s->flags & CIP_DBC_IS_END_EVENT))
157736                         dbc = (dbc + desc->data_blocks) & 0xff;
157737 
157738                 ctx_header +=
157739                         s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
157740 +
157741 +               packet_index = (packet_index + 1) % queue_size;
157742         }
157743 
157744         s->data_block_counter = dbc;
157745 @@ -1065,23 +1074,22 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
157746                 s->data_block_counter = 0;
157747         }
157748 
157749 -       /* initialize packet buffer */
157750 +       // initialize packet buffer.
157751 +       max_ctx_payload_size = amdtp_stream_get_max_payload(s);
157752         if (s->direction == AMDTP_IN_STREAM) {
157753                 dir = DMA_FROM_DEVICE;
157754                 type = FW_ISO_CONTEXT_RECEIVE;
157755 -               if (!(s->flags & CIP_NO_HEADER))
157756 +               if (!(s->flags & CIP_NO_HEADER)) {
157757 +                       max_ctx_payload_size -= 8;
157758                         ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
157759 -               else
157760 +               } else {
157761                         ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
157762 -
157763 -               max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
157764 -                                      ctx_header_size;
157765 +               }
157766         } else {
157767                 dir = DMA_TO_DEVICE;
157768                 type = FW_ISO_CONTEXT_TRANSMIT;
157769                 ctx_header_size = 0;    // No effect for IT context.
157770 
157771 -               max_ctx_payload_size = amdtp_stream_get_max_payload(s);
157772                 if (!(s->flags & CIP_NO_HEADER))
157773                         max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
157774         }
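The amdtp-stream changes above pass the receive packet index into parse_ir_ctx_header() explicitly and let generate_device_pkt_descs() advance a local copy of s->packet_index around the ring, rather than recomputing (s->packet_index + i) % s->queue_size from shared state on every iteration. The local-cursor idiom, reduced to a standalone sketch (invented names, not the driver's API):

#include <stddef.h>

/* Visit `count` slots of a ring buffer starting at `start`.  The
 * cursor lives in a local; shared state would be read once before
 * the loop and written back once after it, as in the hunk above. */
static void walk_ring(const int *ring, size_t queue_size,
                      size_t start, size_t count)
{
        size_t idx = start;
        size_t i;

        (void)ring;                     /* a real consumer reads ring[idx] */
        for (i = 0; i < count; i++) {
                /* ... consume slot ring[idx] ... */
                idx = (idx + 1) % queue_size;   /* wrap at queue end */
        }
}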
157775 diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
157776 index 2c8e3392a490..daeecfa8b9aa 100644
157777 --- a/sound/firewire/bebob/bebob.c
157778 +++ b/sound/firewire/bebob/bebob.c
157779 @@ -387,7 +387,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
157780         SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal),
157781         /* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */
157782         SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal),
157783 -       /* Mackie, d.2 (Firewire Option) */
157784 +       // Mackie, d.2 (Firewire option card) and d.2 Pro (the card is built-in).
157785         SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal),
157786         /* Stanton, ScratchAmp */
157787         SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal),
157788 diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
157789 index bbae04793c50..c18017e0a3d9 100644
157790 --- a/sound/firewire/bebob/bebob_stream.c
157791 +++ b/sound/firewire/bebob/bebob_stream.c
157792 @@ -517,20 +517,22 @@ int snd_bebob_stream_init_duplex(struct snd_bebob *bebob)
157793  static int keep_resources(struct snd_bebob *bebob, struct amdtp_stream *stream,
157794                           unsigned int rate, unsigned int index)
157795  {
157796 -       struct snd_bebob_stream_formation *formation;
157797 +       unsigned int pcm_channels;
157798 +       unsigned int midi_ports;
157799         struct cmp_connection *conn;
157800         int err;
157801 
157802         if (stream == &bebob->tx_stream) {
157803 -               formation = bebob->tx_stream_formations + index;
157804 +               pcm_channels = bebob->tx_stream_formations[index].pcm;
157805 +               midi_ports = bebob->midi_input_ports;
157806                 conn = &bebob->out_conn;
157807         } else {
157808 -               formation = bebob->rx_stream_formations + index;
157809 +               pcm_channels = bebob->rx_stream_formations[index].pcm;
157810 +               midi_ports = bebob->midi_output_ports;
157811                 conn = &bebob->in_conn;
157812         }
157813 
157814 -       err = amdtp_am824_set_parameters(stream, rate, formation->pcm,
157815 -                                        formation->midi, false);
157816 +       err = amdtp_am824_set_parameters(stream, rate, pcm_channels, midi_ports, false);
157817         if (err < 0)
157818                 return err;
157819 
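In the bebob_stream.c hunk above, keep_resources() stops holding a pointer into the formation table and instead copies out the two scalars it needs, pairing the PCM channel count with the MIDI port count that matches the stream direction (input ports for the tx stream, output ports for rx). The selection logic as a standalone sketch (names invented):

struct formation { unsigned int pcm; };

static void pick_params(int is_tx,
                        const struct formation *tx_fmt,
                        const struct formation *rx_fmt,
                        unsigned int midi_in, unsigned int midi_out,
                        unsigned int *pcm_channels, unsigned int *midi_ports)
{
        if (is_tx) {                    /* device-to-host stream */
                *pcm_channels = tx_fmt->pcm;
                *midi_ports = midi_in;
        } else {                        /* host-to-device stream */
                *pcm_channels = rx_fmt->pcm;
                *midi_ports = midi_out;
        }
}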
157820 diff --git a/sound/firewire/dice/dice-alesis.c b/sound/firewire/dice/dice-alesis.c
157821 index 0916864511d5..27c13b9cc9ef 100644
157822 --- a/sound/firewire/dice/dice-alesis.c
157823 +++ b/sound/firewire/dice/dice-alesis.c
157824 @@ -16,7 +16,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
157825  static const unsigned int
157826  alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
157827         {10, 10, 4},    /* Tx0 = Analog + S/PDIF. */
157828 -       {16, 8, 0},     /* Tx1 = ADAT1 + ADAT2. */
157829 +       {16, 4, 0},     /* Tx1 = ADAT1 + ADAT2 (available at low rate). */
157830  };
157831 
157832  int snd_dice_detect_alesis_formats(struct snd_dice *dice)
157833 diff --git a/sound/firewire/dice/dice-tcelectronic.c b/sound/firewire/dice/dice-tcelectronic.c
157834 index a8875d24ba2a..43a3bcb15b3d 100644
157835 --- a/sound/firewire/dice/dice-tcelectronic.c
157836 +++ b/sound/firewire/dice/dice-tcelectronic.c
157837 @@ -38,8 +38,8 @@ static const struct dice_tc_spec konnekt_24d = {
157838  };
157839 
157840  static const struct dice_tc_spec konnekt_live = {
157841 -       .tx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
157842 -       .rx_pcm_chs = {{16, 16, 16}, {0, 0, 0} },
157843 +       .tx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
157844 +       .rx_pcm_chs = {{16, 16, 6}, {0, 0, 0} },
157845         .has_midi = true,
157846  };
157847 
157848 diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
157849 index 1f1e3236efb8..9eea25c46dc7 100644
157850 --- a/sound/firewire/oxfw/oxfw.c
157851 +++ b/sound/firewire/oxfw/oxfw.c
157852 @@ -355,7 +355,6 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
157853          *  Onyx-i series (former models):      0x081216
157854          *  Mackie Onyx Satellite:              0x00200f
157855          *  Tapco LINK.firewire 4x6:            0x000460
157856 -        *  d.2 pro:                            Unknown
157857          *  d.4 pro:                            Unknown
157858          *  U.420:                              Unknown
157859          *  U.420d:                             Unknown
157860 diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
157861 index 0aa545ac6e60..1c90421a88dc 100644
157862 --- a/sound/isa/sb/emu8000.c
157863 +++ b/sound/isa/sb/emu8000.c
157864 @@ -1029,8 +1029,10 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)
157865 
157866         memset(emu->controls, 0, sizeof(emu->controls));
157867         for (i = 0; i < EMU8000_NUM_CONTROLS; i++) {
157868 -               if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0)
157869 +               if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) {
157870 +                       emu->controls[i] = NULL;
157871                         goto __error;
157872 +               }
157873         }
157874         return 0;
157875 
157876 diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
157877 index 8635a2b6b36b..4789345a8fdd 100644
157878 --- a/sound/isa/sb/sb16_csp.c
157879 +++ b/sound/isa/sb/sb16_csp.c
157880 @@ -1045,10 +1045,14 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p)
157881 
157882         spin_lock_init(&p->q_lock);
157883 
157884 -       if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0)
157885 +       if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) {
157886 +               p->qsound_switch = NULL;
157887                 goto __error;
157888 -       if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0)
157889 +       }
157890 +       if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) {
157891 +               p->qsound_space = NULL;
157892                 goto __error;
157893 +       }
157894 
157895         return 0;
157896 
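The emu8000.c and sb16_csp.c hunks fix the same bug pattern: snd_ctl_add() already frees the control element when it fails, so the pointer cached next to it must be cleared before jumping to the shared error path, or the cleanup code would free the element a second time. The ownership rule in a self-contained sketch (make_ctl/add_ctl are invented stand-ins that consume their argument on failure, like snd_ctl_new1/snd_ctl_add):

#include <stdlib.h>

struct ctl { int id; };

static struct ctl *make_ctl(void)
{
        return calloc(1, sizeof(struct ctl));
}

/* Consumes c on failure, mirroring snd_ctl_add(). */
static int add_ctl(struct ctl *c)
{
        if (!c)
                return -1;
        free(c);                /* failure path takes ownership */
        return -1;              /* always fail, to exercise the path */
}

static int build(struct ctl **cached)
{
        int err;

        *cached = make_ctl();
        err = add_ctl(*cached);
        if (err < 0) {
                *cached = NULL; /* add_ctl() freed it; forget the pointer */
                goto error;
        }
        return 0;
error:
        free(*cached);          /* safe: NULL here, so no double free */
        return err;
}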
157897 diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
157898 index 6c9d534ce8b6..95290ffe5c6e 100644
157899 --- a/sound/isa/sb/sb8.c
157900 +++ b/sound/isa/sb/sb8.c
157901 @@ -95,10 +95,6 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
157902 
157903         /* block the 0x388 port to avoid PnP conflicts */
157904         acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
157905 -       if (!acard->fm_res) {
157906 -               err = -EBUSY;
157907 -               goto _err;
157908 -       }
157909 
157910         if (port[dev] != SNDRV_AUTO_PORT) {
157911                 if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
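The sb8.c hunk downgrades the 0x388 FM region from a hard requirement to best effort: the range is reserved only so PnP will not hand it to another device, so failing to get it is no longer a reason to abort the probe. The idiom in a standalone sketch (claim_range is an invented stand-in for request_region):

#include <stdio.h>

/* Invented stand-in: 0 on success, -1 if the range is already taken. */
static int claim_range(unsigned int base, unsigned int len)
{
        (void)base;
        (void)len;
        return -1;
}

int probe(void)
{
        /* Best effort only: the claim merely shields the port from
         * other users, so a failure is noted and otherwise ignored. */
        if (claim_range(0x388, 4) != 0)
                fprintf(stderr, "0x388 busy, continuing without it\n");

        /* ... the rest of the probe proceeds unconditionally ... */
        return 0;
}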
157912 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
157913 index f5cba7afd1c6..ff0fb2d16d82 100644
157914 --- a/sound/pci/hda/hda_generic.c
157915 +++ b/sound/pci/hda/hda_generic.c
157916 @@ -1202,11 +1202,17 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
157917                 *index = ch;
157918                 return "Headphone";
157919         case AUTO_PIN_LINE_OUT:
157920 -               /* This deals with the case where we have two DACs and
157921 -                * one LO, one HP and one Speaker */
157922 -               if (!ch && cfg->speaker_outs && cfg->hp_outs) {
157923 -                       bool hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
157924 -                       bool spk_lo_shared = !path_has_mixer(codec, spec->speaker_paths[0], ctl_type);
157925 +               /* This deals with the case where one HP or one Speaker or
157926 +                * one HP + one Speaker need to share the DAC with LO
157927 +                */
157928 +               if (!ch) {
157929 +                       bool hp_lo_shared = false, spk_lo_shared = false;
157930 +
157931 +                       if (cfg->speaker_outs)
157932 +                               spk_lo_shared = !path_has_mixer(codec,
157933 +                                                               spec->speaker_paths[0], ctl_type);
157934 +                       if (cfg->hp_outs)
157935 +                               hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
157936                         if (hp_lo_shared && spk_lo_shared)
157937                                 return spec->vmaster_mute.hook ? "PCM" : "Master";
157938                         if (hp_lo_shared)
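The get_line_out_pfx() hunk generalizes the shared-DAC naming: the old check only fired when both a speaker and a headphone output existed, while the new code evaluates each output independently, so a lone HP or a lone speaker sharing the line-out DAC is detected too. Condensed to its control flow (illustrative restatement, not the driver's API):

/* Each flag is computed only when that output exists, instead of
 * requiring both outputs before looking at either one. */
int shared_dac_naming(int have_speaker, int speaker_own_mixer,
                      int have_hp, int hp_own_mixer)
{
        int spk_lo_shared = 0, hp_lo_shared = 0;

        if (have_speaker)
                spk_lo_shared = !speaker_own_mixer;
        if (have_hp)
                hp_lo_shared = !hp_own_mixer;

        /* both shared -> "Master"/"PCM"; one shared -> its own label */
        return hp_lo_shared && spk_lo_shared;
}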
157939 diff --git a/sound/pci/hda/ideapad_s740_helper.c b/sound/pci/hda/ideapad_s740_helper.c
157940 new file mode 100644
157941 index 000000000000..564b9086e52d
157942 --- /dev/null
157943 +++ b/sound/pci/hda/ideapad_s740_helper.c
157944 @@ -0,0 +1,492 @@
157945 +// SPDX-License-Identifier: GPL-2.0
157946 +/* Fixes for Lenovo Ideapad S740, to be included from codec driver */
157947 +
157948 +static const struct hda_verb alc285_ideapad_s740_coefs[] = {
157949 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x10 },
157950 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0320 },
157951 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
157952 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
157953 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
157954 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
157955 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157956 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157957 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157958 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157959 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157960 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157961 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157962 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157963 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157964 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157965 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157966 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157967 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157968 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157969 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157970 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
157971 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157972 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157973 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157974 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157975 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
157976 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157977 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157978 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157979 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157980 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157981 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157982 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157983 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157984 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157985 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157986 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157987 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157988 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157989 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
157990 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157991 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157992 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
157993 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157994 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
157995 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
157996 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
157997 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
157998 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
157999 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
158000 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158001 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
158002 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158003 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158004 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158005 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158006 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
158007 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158008 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
158009 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158010 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158011 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
158012 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158013 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
158014 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158015 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158016 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158017 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158018 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
158019 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158020 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
158021 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158022 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158023 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
158024 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158025 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
158026 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158027 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158028 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158029 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158030 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
158031 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158032 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
158033 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158034 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158035 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
158036 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158037 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
158038 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158039 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158040 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158041 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158042 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
158043 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158044 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
158045 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158046 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158047 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
158048 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158049 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
158050 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158051 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158052 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158053 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158054 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
158055 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158056 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
158057 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158058 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158059 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
158060 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158061 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
158062 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158063 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158064 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158065 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158066 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
158067 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158068 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
158069 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158070 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158071 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
158072 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158073 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
158074 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158075 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158076 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158077 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158078 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
158079 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158080 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158081 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158082 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158083 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
158084 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158085 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158086 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158087 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158088 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158089 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158090 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
158091 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158092 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
158093 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158094 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158095 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
158096 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158097 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
158098 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158099 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158100 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158101 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158102 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
158103 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158104 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
158105 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158106 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158107 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
158108 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158109 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
158110 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158111 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158112 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158113 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158114 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
158115 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158116 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
158117 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158118 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158119 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
158120 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158121 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
158122 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158123 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158124 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158125 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158126 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
158127 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158128 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
158129 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158130 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158131 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
158132 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158133 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
158134 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158135 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158136 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158137 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158138 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
158139 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158140 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
158141 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158142 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158143 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
158144 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158145 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
158146 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158147 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158148 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158149 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158150 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
158151 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158152 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
158153 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158154 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158155 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
158156 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158157 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
158158 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158159 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158160 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158161 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158162 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
158163 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158164 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
158165 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158166 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158167 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
158168 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158169 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
158170 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158171 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158172 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158173 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158174 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
158175 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158176 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158177 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158178 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158179 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
158180 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158181 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158182 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158183 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158184 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158185 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158186 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
158187 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158188 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158189 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158190 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158191 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
158192 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158193 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158194 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158195 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
158196 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
158197 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
158198 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
158199 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158200 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158201 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158202 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158203 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158204 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158205 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158206 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158207 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158208 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158209 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158210 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158211 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158212 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158213 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158214 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
158215 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158216 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158217 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158218 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158219 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
158220 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158221 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158222 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158223 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158224 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158225 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158226 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158227 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158228 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158229 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158230 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158231 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158232 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158233 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158234 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158235 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158236 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158237 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158238 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
158239 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158240 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
158241 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158242 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158243 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
158244 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158245 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
158246 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158247 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158248 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158249 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158250 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
158251 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158252 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
158253 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158254 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158255 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
158256 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158257 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
158258 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158259 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158260 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158261 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158262 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
158263 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158264 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
158265 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158266 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158267 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
158268 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158269 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
158270 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158271 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158272 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158273 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158274 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
158275 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158276 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
158277 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158278 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158279 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
158280 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158281 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
158282 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158283 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158284 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158285 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158286 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
158287 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158288 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
158289 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158290 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158291 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
158292 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158293 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
158294 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158295 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158296 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158297 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158298 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
158299 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158300 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
158301 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158302 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158303 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
158304 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158305 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
158306 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158307 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158308 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158309 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158310 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158311 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158312 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
158313 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158314 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158315 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158316 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158317 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
158318 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158319 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158320 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158321 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158322 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158323 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158324 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
158325 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158326 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
158327 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158328 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158329 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
158330 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158331 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
158332 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158333 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158334 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158335 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158336 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
158337 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158338 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
158339 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158340 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158341 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
158342 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158343 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
158344 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158345 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158346 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158347 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158348 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
158349 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158350 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
158351 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158352 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158353 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
158354 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158355 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
158356 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158357 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158358 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158359 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158360 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
158361 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158362 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
158363 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158364 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158365 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
158366 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158367 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
158368 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158369 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158370 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158371 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158372 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
158373 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158374 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
158375 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158376 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158377 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
158378 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158379 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
158380 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158381 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158382 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158383 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158384 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
158385 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158386 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
158387 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158388 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158389 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
158390 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158391 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
158392 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158393 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158394 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158395 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158396 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
158397 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158398 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
158399 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158400 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158401 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
158402 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158403 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
158404 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158405 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158406 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158407 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158408 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
158409 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158410 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158411 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158412 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158413 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
158414 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158415 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
158416 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158417 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158418 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
158419 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
158420 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
158421 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158422 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
158423 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
158424 +{}
158425 +};
158426 +
158427 +static void alc285_fixup_ideapad_s740_coef(struct hda_codec *codec,
158428 +                                          const struct hda_fixup *fix,
158429 +                                          int action)
158430 +{
158431 +       switch (action) {
158432 +       case HDA_FIXUP_ACT_PRE_PROBE:
158433 +               snd_hda_add_verbs(codec, alc285_ideapad_s740_coefs);
158434 +               break;
158435 +       }
158436 +}
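The helper above is one long hda_verb table plus a fixup hook that registers it at HDA_FIXUP_ACT_PRE_PROBE time, so the COEF writes are replayed on every codec initialization. Each entry is a (codec NID, verb, parameter) triple, and the table ends with an empty terminator entry so the verb iterator knows where to stop. A hypothetical two-write miniature of the same shape (not part of the patch):

/* Select processing coefficient index 0x10 on NID 0x20, then write
 * 0x0320 to it.  The trailing {} terminates the table. */
static const struct hda_verb demo_coefs[] = {
        { 0x20, AC_VERB_SET_COEF_INDEX, 0x10 },
        { 0x20, AC_VERB_SET_PROC_COEF, 0x0320 },
        {}
};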
158437 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
158438 index dfef9c17e140..d111258c6f45 100644
158439 --- a/sound/pci/hda/patch_conexant.c
158440 +++ b/sound/pci/hda/patch_conexant.c
158441 @@ -930,18 +930,18 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
158442         SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
158443         SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
158444         SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
158445 -       SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
158446 -       SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
158447 -       SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
158448 -       SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
158449 -       SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
158450         SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
158451         SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
158452 +       SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
158453         SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
158454 -       SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
158455 -       SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
158456 +       SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
158457         SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
158458         SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
158459 +       SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
158460 +       SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
158461 +       SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
158462 +       SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
158463 +       SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
158464         SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
158465         SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
158466         SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
158467 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
158468 index 45ae845e82df..4b2cc8cb55c4 100644
158469 --- a/sound/pci/hda/patch_hdmi.c
158470 +++ b/sound/pci/hda/patch_hdmi.c
158471 @@ -1848,16 +1848,12 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
158472          */
158473         if (spec->intel_hsw_fixup) {
158474                 /*
158475 -                * On Intel platforms, device entries number is
158476 -                * changed dynamically. If there is a DP MST
158477 -                * hub connected, the device entries number is 3.
158478 -                * Otherwise, it is 1.
158479 -                * Here we manually set dev_num to 3, so that
158480 -                * we can initialize all the device entries when
158481 -                * bootup statically.
158482 +                * On Intel platforms, device entries count returned
158483 +                * by AC_PAR_DEVLIST_LEN is dynamic, and depends on
158484 +                * the type of receiver that is connected. Allocate pin
158485 +                * structures based on worst case.
158486                  */
158487 -               dev_num = 3;
158488 -               spec->dev_num = 3;
158489 +               dev_num = spec->dev_num;
158490         } else if (spec->dyn_pcm_assign && codec->dp_mst) {
158491                 dev_num = snd_hda_get_num_devices(codec, pin_nid) + 1;
158492                 /*
158493 @@ -2658,7 +2654,7 @@ static void generic_acomp_pin_eld_notify(void *audio_ptr, int port, int dev_id)
158494         /* skip notification during system suspend (but not in runtime PM);
158495          * the state will be updated at resume
158496          */
158497 -       if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
158498 +       if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
158499                 return;
158500         /* ditto during suspend/resume process itself */
158501         if (snd_hdac_is_in_pm(&codec->core))
158502 @@ -2844,7 +2840,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
158503         /* skip notification during system suspend (but not in runtime PM);
158504          * the state will be updated at resume
158505          */
158506 -       if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
158507 +       if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
158508                 return;
158509         /* ditto during suspend/resume process itself */
158510         if (snd_hdac_is_in_pm(&codec->core))
158511 @@ -2942,7 +2938,7 @@
158512 
158513  /* Intel Haswell and onwards; audio component with eld notifier */
158514  static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
158515 -                                const int *port_map, int port_num)
158516 +                                const int *port_map, int port_num, int dev_num)
158517  {
158518         struct hdmi_spec *spec;
158519         int err;
158520 @@ -2957,6 +2953,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
158521         spec->port_map = port_map;
158522         spec->port_num = port_num;
158523         spec->intel_hsw_fixup = true;
158524 +       spec->dev_num = dev_num;
158525 
158526         intel_haswell_enable_all_pins(codec, true);
158527         intel_haswell_fixup_enable_dp12(codec);
158528 @@ -2982,12 +2979,12 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
158529 
158530  static int patch_i915_hsw_hdmi(struct hda_codec *codec)
158531  {
158532 -       return intel_hsw_common_init(codec, 0x08, NULL, 0);
158533 +       return intel_hsw_common_init(codec, 0x08, NULL, 0, 3);
158534  }
158535 
158536  static int patch_i915_glk_hdmi(struct hda_codec *codec)
158537  {
158538 -       return intel_hsw_common_init(codec, 0x0b, NULL, 0);
158539 +       return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3);
158540  }
158541 
158542  static int patch_i915_icl_hdmi(struct hda_codec *codec)
158543 @@ -2998,7 +2995,7 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
158544          */
158545         static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
158546 
158547 -       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
158548 +       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3);
158549  }
158550 
158551  static int patch_i915_tgl_hdmi(struct hda_codec *codec)
158552 @@ -3010,7 +3007,7 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
158553         static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
158554         int ret;
158555 
158556 -       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
158557 +       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4);
158558         if (!ret) {
158559                 struct hdmi_spec *spec = codec->spec;
158560 
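The patch_hdmi.c changes replace the hard-coded dev_num = 3 with a per-platform value threaded through intel_hsw_common_init(): the device-entry count a pin reports can change at runtime (for example when a DP-MST hub is attached), so per-pin state is sized for the worst case up front, with Tiger Lake-class codecs passing 4 where older platforms keep 3. The preallocation idea in a standalone sketch (invented names):

#include <stdlib.h>

struct dev_entry_state { int pcm_idx; };

/* Allocate for the largest device-entry count the platform can ever
 * report, not for what the hardware happens to report right now. */
static struct dev_entry_state *alloc_entries(size_t worst_case_dev_num)
{
        return calloc(worst_case_dev_num, sizeof(struct dev_entry_state));
}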
158561 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
158562 index a7544b77d3f7..43a63db4ab6a 100644
158563 --- a/sound/pci/hda/patch_realtek.c
158564 +++ b/sound/pci/hda/patch_realtek.c
158565 @@ -395,7 +395,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
158566         case 0x10ec0282:
158567         case 0x10ec0283:
158568         case 0x10ec0286:
158569 -       case 0x10ec0287:
158570         case 0x10ec0288:
158571         case 0x10ec0285:
158572         case 0x10ec0298:
158573 @@ -406,6 +405,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
158574         case 0x10ec0275:
158575                 alc_update_coef_idx(codec, 0xe, 0, 1<<0);
158576                 break;
158577 +       case 0x10ec0287:
158578 +               alc_update_coef_idx(codec, 0x10, 1<<9, 0);
158579 +               alc_write_coef_idx(codec, 0x8, 0x4ab7);
158580 +               break;
158581         case 0x10ec0293:
158582                 alc_update_coef_idx(codec, 0xa, 1<<13, 0);
158583                 break;
158584 @@ -2470,13 +2473,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
158585                       ALC882_FIXUP_ACER_ASPIRE_8930G),
158586         SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
158587                       ALC882_FIXUP_ACER_ASPIRE_8930G),
158588 +       SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
158589 +                     ALC882_FIXUP_ACER_ASPIRE_4930G),
158590 +       SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
158591         SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
158592                       ALC882_FIXUP_ACER_ASPIRE_4930G),
158593         SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
158594                       ALC882_FIXUP_ACER_ASPIRE_4930G),
158595 -       SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
158596 -                     ALC882_FIXUP_ACER_ASPIRE_4930G),
158597 -       SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
158598         SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G",
158599                       ALC882_FIXUP_ACER_ASPIRE_4930G),
158600         SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
158601 @@ -2489,11 +2492,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
158602         SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
158603         SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
158604         SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
158605 +       SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
158606 +       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
158607         SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
158608         SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
158609         SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
158610 -       SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
158611 -       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
158613         /* All Apple entries are in codec SSIDs */
158614         SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
158615 @@ -2536,9 +2539,19 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
158616         SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
158617         SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
158618         SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
158619 +       SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158620 +       SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158621 +       SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158622 +       SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158623 +       SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158624 +       SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158625 +       SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158626 +       SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158627 +       SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158628 +       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158629         SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
158630         SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
158631 -       SND_PCI_QUIRK(0x1558, 0x950A, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
158632 +       SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
158633         SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
158634         SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
158635         SND_PCI_QUIRK(0x1558, 0x95e3, "Clevo P955[ER]T", ALC1220_FIXUP_CLEVO_P950),
158636 @@ -2548,14 +2561,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
158637         SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
158638         SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
158639         SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
158640 -       SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158641 -       SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158642 -       SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158643 -       SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158644 -       SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158645 -       SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158646 -       SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158647 -       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
158648         SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
158649         SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
158650         SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
158651 @@ -4329,6 +4334,35 @@ static void alc245_fixup_hp_x360_amp(struct hda_codec *codec,
158652         }
158653  }
158654 
158655 +/* toggle GPIO2 each time a stream is started; we use PREPARE state instead */
158656 +static void alc274_hp_envy_pcm_hook(struct hda_pcm_stream *hinfo,
158657 +                                   struct hda_codec *codec,
158658 +                                   struct snd_pcm_substream *substream,
158659 +                                   int action)
158660 +{
158661 +       switch (action) {
158662 +       case HDA_GEN_PCM_ACT_PREPARE:
158663 +               alc_update_gpio_data(codec, 0x04, true);
158664 +               break;
158665 +       case HDA_GEN_PCM_ACT_CLEANUP:
158666 +               alc_update_gpio_data(codec, 0x04, false);
158667 +               break;
158668 +       }
158669 +}
158670 +
158671 +static void alc274_fixup_hp_envy_gpio(struct hda_codec *codec,
158672 +                                     const struct hda_fixup *fix,
158673 +                                     int action)
158674 +{
158675 +       struct alc_spec *spec = codec->spec;
158676 +
158677 +       if (action == HDA_FIXUP_ACT_PROBE) {
158678 +               spec->gpio_mask |= 0x04;
158679 +               spec->gpio_dir |= 0x04;
158680 +               spec->gen.pcm_playback_hook = alc274_hp_envy_pcm_hook;
158681 +       }
158682 +}
158683 +
158684  static void alc_update_coef_led(struct hda_codec *codec,
158685                                 struct alc_coef_led *led,
158686                                 bool polarity, bool on)
158687 @@ -4438,6 +4472,25 @@ static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
158688         alc236_fixup_hp_coef_micmute_led(codec, fix, action);
158689  }
158690 
158691 +static void alc236_fixup_hp_micmute_led_vref(struct hda_codec *codec,
158692 +                               const struct hda_fixup *fix, int action)
158693 +{
158694 +       struct alc_spec *spec = codec->spec;
158695 +
158696 +       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
158697 +               spec->cap_mute_led_nid = 0x1a;
158698 +               snd_hda_gen_add_micmute_led_cdev(codec, vref_micmute_led_set);
158699 +               codec->power_filter = led_power_filter;
158700 +       }
158701 +}
158702 +
158703 +static void alc236_fixup_hp_mute_led_micmute_vref(struct hda_codec *codec,
158704 +                               const struct hda_fixup *fix, int action)
158705 +{
158706 +       alc236_fixup_hp_mute_led_coefbit(codec, fix, action);
158707 +       alc236_fixup_hp_micmute_led_vref(codec, fix, action);
158708 +}
158709 +
158710  #if IS_REACHABLE(CONFIG_INPUT)
158711  static void gpio2_mic_hotkey_event(struct hda_codec *codec,
158712                                    struct hda_jack_callback *event)
158713 @@ -5667,6 +5720,18 @@ static void alc_fixup_tpt470_dacs(struct hda_codec *codec,
158714                 spec->gen.preferred_dacs = preferred_pairs;
158715  }
158716 
158717 +static void alc295_fixup_asus_dacs(struct hda_codec *codec,
158718 +                                  const struct hda_fixup *fix, int action)
158719 +{
158720 +       static const hda_nid_t preferred_pairs[] = {
158721 +               0x17, 0x02, 0x21, 0x03, 0
158722 +       };
158723 +       struct alc_spec *spec = codec->spec;
158724 +
158725 +       if (action == HDA_FIXUP_ACT_PRE_PROBE)
158726 +               spec->gen.preferred_dacs = preferred_pairs;
158727 +}
158728 +
158729  static void alc_shutup_dell_xps13(struct hda_codec *codec)
158730  {
158731         struct alc_spec *spec = codec->spec;
158732 @@ -6182,6 +6247,35 @@ static void alc294_fixup_gx502_hp(struct hda_codec *codec,
158733         }
158736 +static void alc294_gu502_toggle_output(struct hda_codec *codec,
158737 +                                      struct hda_jack_callback *cb)
158739 +       /* Windows sets 0x10 to 0x8420 for Node 0x20 which is
158740 +        * responsible from changes between speakers and headphones
158741 +        */
158742 +       if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT)
158743 +               alc_write_coef_idx(codec, 0x10, 0x8420);
158744 +       else
158745 +               alc_write_coef_idx(codec, 0x10, 0x0a20);
158746 +}
158747 +
158748 +static void alc294_fixup_gu502_hp(struct hda_codec *codec,
158749 +                                 const struct hda_fixup *fix, int action)
158750 +{
158751 +       if (!is_jack_detectable(codec, 0x21))
158752 +               return;
158753 +
158754 +       switch (action) {
158755 +       case HDA_FIXUP_ACT_PRE_PROBE:
158756 +               snd_hda_jack_detect_enable_callback(codec, 0x21,
158757 +                               alc294_gu502_toggle_output);
158758 +               break;
158759 +       case HDA_FIXUP_ACT_INIT:
158760 +               alc294_gu502_toggle_output(codec, NULL);
158761 +               break;
158762 +       }
158763 +}
158764 +
158765  static void  alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
158766                               const struct hda_fixup *fix, int action)
158767  {
158768 @@ -6232,6 +6326,9 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
158769  /* for alc295_fixup_hp_top_speakers */
158770  #include "hp_x360_helper.c"
158771 
158772 +/* for alc285_fixup_ideapad_s740_coef() */
158773 +#include "ideapad_s740_helper.c"
158774 +
158775  enum {
158776         ALC269_FIXUP_GPIO2,
158777         ALC269_FIXUP_SONY_VAIO,
158778 @@ -6396,10 +6493,14 @@ enum {
158779         ALC294_FIXUP_ASUS_GX502_HP,
158780         ALC294_FIXUP_ASUS_GX502_PINS,
158781         ALC294_FIXUP_ASUS_GX502_VERBS,
158782 +       ALC294_FIXUP_ASUS_GU502_HP,
158783 +       ALC294_FIXUP_ASUS_GU502_PINS,
158784 +       ALC294_FIXUP_ASUS_GU502_VERBS,
158785         ALC285_FIXUP_HP_GPIO_LED,
158786         ALC285_FIXUP_HP_MUTE_LED,
158787         ALC236_FIXUP_HP_GPIO_LED,
158788         ALC236_FIXUP_HP_MUTE_LED,
158789 +       ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
158790         ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
158791         ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
158792         ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
158793 @@ -6415,10 +6516,13 @@ enum {
158794         ALC269_FIXUP_LEMOTE_A1802,
158795         ALC269_FIXUP_LEMOTE_A190X,
158796         ALC256_FIXUP_INTEL_NUC8_RUGGED,
158797 +       ALC233_FIXUP_INTEL_NUC8_DMIC,
158798 +       ALC233_FIXUP_INTEL_NUC8_BOOST,
158799         ALC256_FIXUP_INTEL_NUC10,
158800         ALC255_FIXUP_XIAOMI_HEADSET_MIC,
158801         ALC274_FIXUP_HP_MIC,
158802         ALC274_FIXUP_HP_HEADSET_MIC,
158803 +       ALC274_FIXUP_HP_ENVY_GPIO,
158804         ALC256_FIXUP_ASUS_HPE,
158805         ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
158806         ALC287_FIXUP_HP_GPIO_LED,
158807 @@ -6427,6 +6531,10 @@ enum {
158808         ALC282_FIXUP_ACER_DISABLE_LINEOUT,
158809         ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
158810         ALC256_FIXUP_ACER_HEADSET_MIC,
158811 +       ALC285_FIXUP_IDEAPAD_S740_COEF,
158812 +       ALC295_FIXUP_ASUS_DACS,
158813 +       ALC295_FIXUP_HP_OMEN,
158814 +       ALC285_FIXUP_HP_SPECTRE_X360,
158815  };
158816 
158817  static const struct hda_fixup alc269_fixups[] = {
158818 @@ -7136,6 +7244,16 @@ static const struct hda_fixup alc269_fixups[] = {
158819                 .type = HDA_FIXUP_FUNC,
158820                 .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
158821         },
158822 +       [ALC233_FIXUP_INTEL_NUC8_DMIC] = {
158823 +               .type = HDA_FIXUP_FUNC,
158824 +               .v.func = alc_fixup_inv_dmic,
158825 +               .chained = true,
158826 +               .chain_id = ALC233_FIXUP_INTEL_NUC8_BOOST,
158827 +       },
158828 +       [ALC233_FIXUP_INTEL_NUC8_BOOST] = {
158829 +               .type = HDA_FIXUP_FUNC,
158830 +               .v.func = alc269_fixup_limit_int_mic_boost
158831 +       },
158832         [ALC255_FIXUP_DELL_SPK_NOISE] = {
158833                 .type = HDA_FIXUP_FUNC,
158834                 .v.func = alc_fixup_disable_aamix,
158835 @@ -7619,6 +7737,35 @@ static const struct hda_fixup alc269_fixups[] = {
158836                 .type = HDA_FIXUP_FUNC,
158837                 .v.func = alc294_fixup_gx502_hp,
158838         },
158839 +       [ALC294_FIXUP_ASUS_GU502_PINS] = {
158840 +               .type = HDA_FIXUP_PINS,
158841 +               .v.pins = (const struct hda_pintbl[]) {
158842 +                       { 0x19, 0x01a11050 }, /* rear HP mic */
158843 +                       { 0x1a, 0x01a11830 }, /* rear external mic */
158844 +                       { 0x21, 0x012110f0 }, /* rear HP out */
158845 +                       { }
158846 +               },
158847 +               .chained = true,
158848 +               .chain_id = ALC294_FIXUP_ASUS_GU502_VERBS
158849 +       },
158850 +       [ALC294_FIXUP_ASUS_GU502_VERBS] = {
158851 +               .type = HDA_FIXUP_VERBS,
158852 +               .v.verbs = (const struct hda_verb[]) {
158853 +                       /* set 0x15 to HP-OUT ctrl */
158854 +                       { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
158855 +                       /* unmute the 0x15 amp */
158856 +                       { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
158857 +                       /* set 0x1b to HP-OUT */
158858 +                       { 0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
158859 +                       { }
158860 +               },
158861 +               .chained = true,
158862 +               .chain_id = ALC294_FIXUP_ASUS_GU502_HP
158863 +       },
158864 +       [ALC294_FIXUP_ASUS_GU502_HP] = {
158865 +               .type = HDA_FIXUP_FUNC,
158866 +               .v.func = alc294_fixup_gu502_hp,
158867 +       },
158868         [ALC294_FIXUP_ASUS_COEF_1B] = {
158869                 .type = HDA_FIXUP_VERBS,
158870                 .v.verbs = (const struct hda_verb[]) {
158871 @@ -7646,6 +7793,10 @@ static const struct hda_fixup alc269_fixups[] = {
158872                 .type = HDA_FIXUP_FUNC,
158873                 .v.func = alc236_fixup_hp_mute_led,
158874         },
158875 +       [ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF] = {
158876 +               .type = HDA_FIXUP_FUNC,
158877 +               .v.func = alc236_fixup_hp_mute_led_micmute_vref,
158878 +       },
158879         [ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
158880                 .type = HDA_FIXUP_VERBS,
158881                 .v.verbs = (const struct hda_verb[]) {
158882 @@ -7844,6 +7995,10 @@ static const struct hda_fixup alc269_fixups[] = {
158883                 .chained = true,
158884                 .chain_id = ALC274_FIXUP_HP_MIC
158885         },
158886 +       [ALC274_FIXUP_HP_ENVY_GPIO] = {
158887 +               .type = HDA_FIXUP_FUNC,
158888 +               .v.func = alc274_fixup_hp_envy_gpio,
158889 +       },
158890         [ALC256_FIXUP_ASUS_HPE] = {
158891                 .type = HDA_FIXUP_VERBS,
158892                 .v.verbs = (const struct hda_verb[]) {
158893 @@ -7901,6 +8056,45 @@ static const struct hda_fixup alc269_fixups[] = {
158894                 .chained = true,
158895                 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
158896         },
158897 +       [ALC285_FIXUP_IDEAPAD_S740_COEF] = {
158898 +               .type = HDA_FIXUP_FUNC,
158899 +               .v.func = alc285_fixup_ideapad_s740_coef,
158900 +               .chained = true,
158901 +               .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
158902 +       },
158903 +       [ALC295_FIXUP_ASUS_DACS] = {
158904 +               .type = HDA_FIXUP_FUNC,
158905 +               .v.func = alc295_fixup_asus_dacs,
158906 +       },
158907 +       [ALC295_FIXUP_HP_OMEN] = {
158908 +               .type = HDA_FIXUP_PINS,
158909 +               .v.pins = (const struct hda_pintbl[]) {
158910 +                       { 0x12, 0xb7a60130 },
158911 +                       { 0x13, 0x40000000 },
158912 +                       { 0x14, 0x411111f0 },
158913 +                       { 0x16, 0x411111f0 },
158914 +                       { 0x17, 0x90170110 },
158915 +                       { 0x18, 0x411111f0 },
158916 +                       { 0x19, 0x02a11030 },
158917 +                       { 0x1a, 0x411111f0 },
158918 +                       { 0x1b, 0x04a19030 },
158919 +                       { 0x1d, 0x40600001 },
158920 +                       { 0x1e, 0x411111f0 },
158921 +                       { 0x21, 0x03211020 },
158922 +                       {}
158923 +               },
158924 +               .chained = true,
158925 +               .chain_id = ALC269_FIXUP_HP_LINE1_MIC1_LED,
158926 +       },
158927 +       [ALC285_FIXUP_HP_SPECTRE_X360] = {
158928 +               .type = HDA_FIXUP_PINS,
158929 +               .v.pins = (const struct hda_pintbl[]) {
158930 +                       { 0x14, 0x90170110 }, /* enable top speaker */
158931 +                       {}
158932 +               },
158933 +               .chained = true,
158934 +               .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1,
158935 +       },
158936  };
158937 
158938  static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158939 @@ -7909,12 +8103,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158940         SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
158941         SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
158942         SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
158943 -       SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
158944         SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
158945         SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
158946         SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
158947         SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
158948         SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
158949 +       SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
158950         SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
158951         SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
158952         SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
158953 @@ -7970,8 +8164,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158954         SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
158955         SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
158956         SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
158957 -       SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
158958         SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
158959 +       SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
158960         SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
158961         SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
158962         SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
158963 @@ -7981,8 +8175,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158964         SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
158965         SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
158966         SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
158967 -       SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
158968         SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
158969 +       SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
158970         SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
158971         SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
158972         SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
158973 @@ -7993,35 +8187,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
158974         SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
158975         SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
158976         SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
158977 -       SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
158978 -       /* ALC282 */
158979         SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158980         SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158981         SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158982 +       SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
158983 +       SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
158984 +       SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
158985 +       SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
158986         SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
158987         SND_PCI_QUIRK(0x103c, 0x2237, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
158988         SND_PCI_QUIRK(0x103c, 0x2238, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
158989         SND_PCI_QUIRK(0x103c, 0x2239, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
158990         SND_PCI_QUIRK(0x103c, 0x224b, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
158991 -       SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158992 -       SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158993 -       SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158994 -       SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158995 -       SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
158996 -       SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
158997 -       SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
158998 -       SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
158999 -       SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159000 -       SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159001 -       SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159002 -       SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159003 -       SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
159004 -       SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159005 -       SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159006 -       /* ALC290 */
159007 -       SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159008 -       SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159009 -       SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159010         SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159011         SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159012         SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159013 @@ -8029,28 +8206,45 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
159014         SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159015         SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159016         SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
159017 +       SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
159018         SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159019         SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159020         SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159021         SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159022 +       SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159023 +       SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159024 +       SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159025 +       SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159026 +       SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
159027         SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159028 +       SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
159029         SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159030 +       SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
159031         SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159032         SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159033         SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159034         SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159035         SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159036 +       SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159037 +       SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159038 +       SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159039 +       SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159040 +       SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159041         SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159042         SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159043         SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159044 -       SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159045 +       SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159046 +       SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
159047 +       SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159048 +       SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
159049         SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159050         SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159051         SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159052         SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
159053 -       SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
159054         SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
159055         SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
159056 +       SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
159057 +       SND_PCI_QUIRK(0x103c, 0x8158, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
159058         SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
159059         SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
159060         SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
159061 @@ -8059,10 +8253,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
159062         SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
159063         SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
159064         SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
159065 +       SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
159066         SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
159067 +       SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
159068         SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
159069 +       SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
159070         SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
159071         SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
159072 +       SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
159073         SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
159074         SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
159075         SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
159076 @@ -8087,16 +8285,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
159077         SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
159078         SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
159079         SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
159080 +       SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
159081         SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
159082         SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
159083         SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
159084 -       SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
159085         SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
159086 +       SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
159087         SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
159088         SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
159089         SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
159090         SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
159091 +       SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
159092         SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
159093 +       SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
159094         SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
159095         SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
159096         SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
159097 @@ -8109,31 +8310,32 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
159098         SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
159099         SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
159100         SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
159101 -       SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
159102         SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
159103         SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
159104         SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
159105 +       SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
159106 +       SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
159107         SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
159108 -       SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
159109         SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
159110         SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
159111         SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
159112         SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
159113         SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
159114         SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
159115 -       SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
159116 -       SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
159117         SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
159118         SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
159119         SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
159120         SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
159121 +       SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
159122 +       SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
159123         SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
159124         SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
159125         SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
159126 -       SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
159127         SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
159128 +       SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
159129         SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
159130         SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
159131 +       SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
159132         SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
159133         SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
159134         SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
159135 @@ -8143,9 +8345,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
159136         SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
159137         SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
159138         SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
159139 -       SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
159140         SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
159141         SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
159142 +       SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
159143         SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
159144         SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
159145         SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
159146 @@ -8168,12 +8370,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
159147         SND_PCI_QUIRK(0x1558, 0x50b8, "Clevo NK50SZ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159148         SND_PCI_QUIRK(0x1558, 0x50d5, "Clevo NP50D5", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159149         SND_PCI_QUIRK(0x1558, 0x50f0, "Clevo NH50A[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159150 +       SND_PCI_QUIRK(0x1558, 0x50f2, "Clevo NH50E[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159151         SND_PCI_QUIRK(0x1558, 0x50f3, "Clevo NH58DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159152 +       SND_PCI_QUIRK(0x1558, 0x50f5, "Clevo NH55EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159153 +       SND_PCI_QUIRK(0x1558, 0x50f6, "Clevo NH55DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159154         SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159155         SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159156         SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159157         SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159158         SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159159 +       SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159160 +       SND_PCI_QUIRK(0x1558, 0x70f3, "Clevo NH77DPQ", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159161 +       SND_PCI_QUIRK(0x1558, 0x70f4, "Clevo NH77EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159162 +       SND_PCI_QUIRK(0x1558, 0x70f6, "Clevo NH77DPQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159163         SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159164         SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159165         SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159166 @@ -8191,9 +8400,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
159167         SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159168         SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159169         SND_PCI_QUIRK(0x1558, 0x951d, "Clevo N950T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159170 +       SND_PCI_QUIRK(0x1558, 0x9600, "Clevo N960K[PR]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159171         SND_PCI_QUIRK(0x1558, 0x961d, "Clevo N960S[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159172         SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159173         SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL53RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159174 +       SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL5XNU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159175 +       SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159176 +       SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159177 +       SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159178 +       SND_PCI_QUIRK(0x1558, 0xc018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159179 +       SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159180 +       SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
159181         SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
159182         SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
159183         SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
159184 @@ -8201,9 +8418,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
159185         SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
159186         SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
159187         SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
159188 +       SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
159189         SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
159190         SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
159191 -       SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
159192         SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
159193         SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
159194         SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
159195 @@ -8244,9 +8461,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
159196         SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
159197         SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
159198         SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
159199 +       SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
159200         SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
159201         SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
159202         SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
159203 +       SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
159204         SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
159205         SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
159206         SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
159207 @@ -8265,20 +8484,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
159208         SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
159209         SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
159210         SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
159211 -       SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
159212         SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
159213         SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
159214         SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
159215         SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
159216         SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
159217         SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
159218 +       SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
159219 +       SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
159220         SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
159221         SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
159222         SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
159223         SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
159224 -       SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
159225 -       SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
159226 -       SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
159227 +       SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
159228         SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
159229         SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
159230 
159231 @@ -8457,6 +8675,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
159232         {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
159233         {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
159234         {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"},
159235 +       {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
159236 +       {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
159237         {}
159238  };
159239  #define ALC225_STANDARD_PINS \
159240 @@ -8733,12 +8953,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
159241                 {0x12, 0x90a60130},
159242                 {0x19, 0x03a11020},
159243                 {0x21, 0x0321101f}),
159244 -       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
159245 +       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
159246 +               {0x12, 0x90a60130},
159247                 {0x14, 0x90170110},
159248                 {0x19, 0x04a11040},
159249                 {0x21, 0x04211020}),
159250         SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
159251 -               {0x12, 0x90a60130},
159252 +               {0x14, 0x90170110},
159253 +               {0x19, 0x04a11040},
159254 +               {0x1d, 0x40600001},
159255 +               {0x21, 0x04211020}),
159256 +       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
159257                 {0x14, 0x90170110},
159258                 {0x19, 0x04a11040},
159259                 {0x21, 0x04211020}),
159260 @@ -9224,8 +9449,7 @@ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
159261         SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
159262         SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
159263         SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
159264 -       SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F),
159265 -       SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F),
159266 +       SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F),
159267         SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505),
159268         {}
159269  };
159270 @@ -10020,6 +10244,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
159271         SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
159272         SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
159273         SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
159274 +       SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
159275         SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE),
159276         SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE),
159277         SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
159278 @@ -10036,9 +10261,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
159279         SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
159280         SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
159281         SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
159282 -       SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
159283         SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
159284         SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751),
159285 +       SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
159286         SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
159287         SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
159288         SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
159289 @@ -10058,7 +10283,6 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
159290         SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
159291         SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26),
159292         SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
159293 -       SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
159294  };
159295  #if 0
159296         /* Below is a quirk table taken from the old code.
159297 diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
159298 index 35903d1a1cbd..5b124c4ad572 100644
159299 --- a/sound/pci/intel8x0.c
159300 +++ b/sound/pci/intel8x0.c
159301 @@ -331,6 +331,7 @@ struct ichdev {
159302         unsigned int ali_slot;                  /* ALI DMA slot */
159303         struct ac97_pcm *pcm;
159304         int pcm_open_flag;
159305 +       unsigned int prepared:1;
159306         unsigned int suspended: 1;
159307  };
159308 
159309 @@ -691,6 +692,9 @@ static inline void snd_intel8x0_update(struct intel8x0 *chip, struct ichdev *ich
159310         int status, civ, i, step;
159311         int ack = 0;
159312 
159313 +       if (!ichdev->prepared || ichdev->suspended)
159314 +               return;
159315 +
159316         spin_lock_irqsave(&chip->reg_lock, flags);
159317         status = igetbyte(chip, port + ichdev->roff_sr);
159318         civ = igetbyte(chip, port + ICH_REG_OFF_CIV);
159319 @@ -881,6 +885,7 @@ static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
159320         if (ichdev->pcm_open_flag) {
159321                 snd_ac97_pcm_close(ichdev->pcm);
159322                 ichdev->pcm_open_flag = 0;
159323 +               ichdev->prepared = 0;
159324         }
159325         err = snd_ac97_pcm_open(ichdev->pcm, params_rate(hw_params),
159326                                 params_channels(hw_params),
159327 @@ -902,6 +907,7 @@ static int snd_intel8x0_hw_free(struct snd_pcm_substream *substream)
159328         if (ichdev->pcm_open_flag) {
159329                 snd_ac97_pcm_close(ichdev->pcm);
159330                 ichdev->pcm_open_flag = 0;
159331 +               ichdev->prepared = 0;
159332         }
159333         return 0;
159334  }
159335 @@ -976,6 +982,7 @@ static int snd_intel8x0_pcm_prepare(struct snd_pcm_substream *substream)
159336                         ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
159337         }
159338         snd_intel8x0_setup_periods(chip, ichdev);
159339 +       ichdev->prepared = 1;
159340         return 0;
159341  }
159342 
159343 diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
159344 index cdc4b6106252..159c40ec680d 100644
159345 --- a/sound/pci/maestro3.c
159346 +++ b/sound/pci/maestro3.c
159347 @@ -1990,7 +1990,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
159348                 outw(0, io + GPIO_DATA);
159349                 outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
159350 
159351 -               schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
159352 +               schedule_msec_hrtimeout_uninterruptible((delay1));
159353 
159354                 outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
159355                 udelay(5);
159356 @@ -1998,7 +1998,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
159357                 outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
159358                 outw(~0, io + GPIO_MASK);
159359 
159360 -               schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
159361 +               schedule_msec_hrtimeout_uninterruptible((delay2));
159362 
159363                 if (! snd_m3_try_read_vendor(chip))
159364                         break;
159365 diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
159366 index 4cf879c42dc4..720297cbdf87 100644
159367 --- a/sound/pci/rme9652/hdsp.c
159368 +++ b/sound/pci/rme9652/hdsp.c
159369 @@ -5390,7 +5390,8 @@ static int snd_hdsp_free(struct hdsp *hdsp)
159370         if (hdsp->port)
159371                 pci_release_regions(hdsp->pci);
159372 
159373 -       pci_disable_device(hdsp->pci);
159374 +       if (pci_is_enabled(hdsp->pci))
159375 +               pci_disable_device(hdsp->pci);
159376         return 0;
159377  }
159378 
159379 diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
159380 index 8d900c132f0f..97a0bff96b28 100644
159381 --- a/sound/pci/rme9652/hdspm.c
159382 +++ b/sound/pci/rme9652/hdspm.c
159383 @@ -6883,7 +6883,8 @@ static int snd_hdspm_free(struct hdspm * hdspm)
159384         if (hdspm->port)
159385                 pci_release_regions(hdspm->pci);
159386 
159387 -       pci_disable_device(hdspm->pci);
159388 +       if (pci_is_enabled(hdspm->pci))
159389 +               pci_disable_device(hdspm->pci);
159390         return 0;
159391  }
159392 
159393 diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
159394 index 4df992e846f2..7a4d395abcee 100644
159395 --- a/sound/pci/rme9652/rme9652.c
159396 +++ b/sound/pci/rme9652/rme9652.c
159397 @@ -1731,7 +1731,8 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652)
159398         if (rme9652->port)
159399                 pci_release_regions(rme9652->pci);
159400 
159401 -       pci_disable_device(rme9652->pci);
159402 +       if (pci_is_enabled(rme9652->pci))
159403 +               pci_disable_device(rme9652->pci);
159404         return 0;
159405  }
159406 
159407 diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
159408 index 85bdd0534180..80b3b162ca5b 100644
159409 --- a/sound/soc/codecs/ak5558.c
159410 +++ b/sound/soc/codecs/ak5558.c
159411 @@ -272,7 +272,7 @@ static void ak5558_power_off(struct ak5558_priv *ak5558)
159412         if (!ak5558->reset_gpiod)
159413                 return;
159414 
159415 -       gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
159416 +       gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
159417         usleep_range(1000, 2000);
159418  }
159419 
159420 @@ -281,7 +281,7 @@ static void ak5558_power_on(struct ak5558_priv *ak5558)
159421         if (!ak5558->reset_gpiod)
159422                 return;
159423 
159424 -       gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
159425 +       gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
159426         usleep_range(1000, 2000);
159427  }
159428 
159429 diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
159430 index 8abe232ca4a4..ff23a7d4d2ac 100644
159431 --- a/sound/soc/codecs/rt286.c
159432 +++ b/sound/soc/codecs/rt286.c
159433 @@ -171,6 +171,9 @@ static bool rt286_readable_register(struct device *dev, unsigned int reg)
159434         case RT286_PROC_COEF:
159435         case RT286_SET_AMP_GAIN_ADC_IN1:
159436         case RT286_SET_AMP_GAIN_ADC_IN2:
159437 +       case RT286_SET_GPIO_MASK:
159438 +       case RT286_SET_GPIO_DIRECTION:
159439 +       case RT286_SET_GPIO_DATA:
159440         case RT286_SET_POWER(RT286_DAC_OUT1):
159441         case RT286_SET_POWER(RT286_DAC_OUT2):
159442         case RT286_SET_POWER(RT286_ADC_IN1):
159443 @@ -1117,12 +1120,11 @@ static const struct dmi_system_id force_combo_jack_table[] = {
159444         { }
159445  };
159446 
159447 -static const struct dmi_system_id dmi_dell_dino[] = {
159448 +static const struct dmi_system_id dmi_dell[] = {
159449         {
159450 -               .ident = "Dell Dino",
159451 +               .ident = "Dell",
159452                 .matches = {
159453                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
159454 -                       DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
159455                 }
159456         },
159457         { }
159458 @@ -1133,7 +1135,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
159459  {
159460         struct rt286_platform_data *pdata = dev_get_platdata(&i2c->dev);
159461         struct rt286_priv *rt286;
159462 -       int i, ret, val;
159463 +       int i, ret, vendor_id;
159464 
159465         rt286 = devm_kzalloc(&i2c->dev, sizeof(*rt286),
159466                                 GFP_KERNEL);
159467 @@ -1149,14 +1151,15 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
159468         }
159469 
159470         ret = regmap_read(rt286->regmap,
159471 -               RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
159472 +               RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &vendor_id);
159473         if (ret != 0) {
159474                 dev_err(&i2c->dev, "I2C error %d\n", ret);
159475                 return ret;
159476         }
159477 -       if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
159478 +       if (vendor_id != RT286_VENDOR_ID && vendor_id != RT288_VENDOR_ID) {
159479                 dev_err(&i2c->dev,
159480 -                       "Device with ID register %#x is not rt286\n", val);
159481 +                       "Device with ID register %#x is not rt286\n",
159482 +                       vendor_id);
159483                 return -ENODEV;
159484         }
159485 
159486 @@ -1180,8 +1183,8 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
159487         if (pdata)
159488                 rt286->pdata = *pdata;
159489 
159490 -       if (dmi_check_system(force_combo_jack_table) ||
159491 -               dmi_check_system(dmi_dell_dino))
159492 +       if ((vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) ||
159493 +               dmi_check_system(force_combo_jack_table))
159494                 rt286->pdata.cbj_en = true;
159495 
159496         regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3);
159497 @@ -1220,7 +1223,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
159498         regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737);
159499         regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f);
159500 
159501 -       if (dmi_check_system(dmi_dell_dino)) {
159502 +       if (vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) {
159503                 regmap_update_bits(rt286->regmap,
159504                         RT286_SET_GPIO_MASK, 0x40, 0x40);
159505                 regmap_update_bits(rt286->regmap,
159506 diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
159507 index 653da3eaf355..d77d12902594 100644
159508 --- a/sound/soc/codecs/rt5631.c
159509 +++ b/sound/soc/codecs/rt5631.c
159510 @@ -417,7 +417,7 @@ static void onebit_depop_mute_stage(struct snd_soc_component *component, int ena
159511         hp_zc = snd_soc_component_read(component, RT5631_INT_ST_IRQ_CTRL_2);
159512         snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
159513         if (enable) {
159514 -               schedule_timeout_uninterruptible(msecs_to_jiffies(10));
159515 +               schedule_msec_hrtimeout_uninterruptible((10));
159516                 /* config one-bit depop parameter */
159517                 rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x307f);
159518                 snd_soc_component_update_bits(component, RT5631_HP_OUT_VOL,
159519 @@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_component *component, int enable
159520         hp_zc = snd_soc_component_read(component, RT5631_INT_ST_IRQ_CTRL_2);
159521         snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
159522         if (enable) {
159523 -               schedule_timeout_uninterruptible(msecs_to_jiffies(10));
159524 +               schedule_msec_hrtimeout_uninterruptible((10));
159525 
159526                 /* config depop sequence parameter */
159527                 rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x302f);
159528 diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
159529 index 4063aac2a443..dd69d874bad2 100644
159530 --- a/sound/soc/codecs/rt5670.c
159531 +++ b/sound/soc/codecs/rt5670.c
159532 @@ -2980,6 +2980,18 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
159533                                                  RT5670_GPIO1_IS_IRQ |
159534                                                  RT5670_JD_MODE3),
159535         },
159536 +       {
159537 +               .callback = rt5670_quirk_cb,
159538 +               .ident = "Dell Venue 10 Pro 5055",
159539 +               .matches = {
159540 +                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
159541 +                       DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
159542 +               },
159543 +               .driver_data = (unsigned long *)(RT5670_DMIC_EN |
159544 +                                                RT5670_DMIC2_INR |
159545 +                                                RT5670_GPIO1_IS_IRQ |
159546 +                                                RT5670_JD_MODE1),
159547 +       },
159548         {
159549                 .callback = rt5670_quirk_cb,
159550                 .ident = "Aegex 10 tablet (RU2)",
159551 diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
159552 index f04f88c8d425..b689f26fc4be 100644
159553 --- a/sound/soc/codecs/tlv320aic32x4.c
159554 +++ b/sound/soc/codecs/tlv320aic32x4.c
159555 @@ -577,12 +577,12 @@ static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
159556                 .window_start = 0,
159557                 .window_len = 128,
159558                 .range_min = 0,
159559 -               .range_max = AIC32X4_RMICPGAVOL,
159560 +               .range_max = AIC32X4_REFPOWERUP,
159561         },
159562  };
159563 
159564  const struct regmap_config aic32x4_regmap_config = {
159565 -       .max_register = AIC32X4_RMICPGAVOL,
159566 +       .max_register = AIC32X4_REFPOWERUP,
159567         .ranges = aic32x4_regmap_pages,
159568         .num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
159569  };
159570 @@ -1243,6 +1243,10 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
159571         if (ret)
159572                 goto err_disable_regulators;
159573 
159574 +       ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
159575 +       if (ret)
159576 +               goto err_disable_regulators;
159577 +
159578         ret = devm_snd_soc_register_component(dev,
159579                         &soc_component_dev_aic32x4, &aic32x4_dai, 1);
159580         if (ret) {
159581 @@ -1250,10 +1254,6 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
159582                 goto err_disable_regulators;
159583         }
159584 
159585 -       ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
159586 -       if (ret)
159587 -               goto err_disable_regulators;
159588 -
159589         return 0;
159590 
159591  err_disable_regulators:
159592 diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
159593 index 15d42ce3b21d..897fced9589b 100644
159594 --- a/sound/soc/codecs/wm8350.c
159595 +++ b/sound/soc/codecs/wm8350.c
159596 @@ -234,10 +234,10 @@ static void wm8350_pga_work(struct work_struct *work)
159597                     out2->ramp == WM8350_RAMP_UP) {
159598                         /* delay is longer over 0dB as increases are larger */
159599                         if (i >= WM8350_OUTn_0dB)
159600 -                               schedule_timeout_interruptible(msecs_to_jiffies
159601 +                               schedule_msec_hrtimeout_interruptible(
159602                                                                (2));
159603                         else
159604 -                               schedule_timeout_interruptible(msecs_to_jiffies
159605 +                               schedule_msec_hrtimeout_interruptible(
159606                                                                (1));
159607                 } else
159608                         udelay(50);     /* doesn't matter if we delay longer */
159609 @@ -1121,7 +1121,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
159610                                          (platform->dis_out4 << 6));
159611 
159612                         /* wait for discharge */
159613 -                       schedule_timeout_interruptible(msecs_to_jiffies
159614 +                       schedule_msec_hrtimeout_interruptible(
159615                                                        (platform->
159616                                                         cap_discharge_msecs));
159617 
159618 @@ -1137,7 +1137,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
159619                                          WM8350_VBUFEN);
159620 
159621                         /* wait for vmid */
159622 -                       schedule_timeout_interruptible(msecs_to_jiffies
159623 +                       schedule_msec_hrtimeout_interruptible(
159624                                                        (platform->
159625                                                         vmid_charge_msecs));
159626 
159627 @@ -1188,7 +1188,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
159628                 wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
159629 
159630                 /* wait */
159631 -               schedule_timeout_interruptible(msecs_to_jiffies
159632 +               schedule_msec_hrtimeout_interruptible(
159633                                                (platform->
159634                                                 vmid_discharge_msecs));
159635 
159636 @@ -1206,7 +1206,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
159637                                  pm1 | WM8350_OUTPUT_DRAIN_EN);
159638 
159639                 /* wait */
159640 -               schedule_timeout_interruptible(msecs_to_jiffies
159641 +               schedule_msec_hrtimeout_interruptible(
159642                                                (platform->drain_msecs));
159643 
159644                 pm1 &= ~WM8350_BIASEN;
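
The wm8350 hunks above (and the matching wm8900, wm9713 and soc-dapm hunks below) all make the same substitution: schedule_timeout_interruptible(msecs_to_jiffies(n)) becomes schedule_msec_hrtimeout_interruptible(n), so short ramp and discharge waits sleep on a high-resolution timer instead of being rounded up to whole jiffies (a 1 ms request becomes 10 ms at HZ=100). The helper itself is added elsewhere in this patch; the sketch below only illustrates the idea on top of the mainline schedule_hrtimeout() API and is not the patch's actual implementation:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/* Illustrative only: sleep for roughly @msecs on an hrtimer instead of
 * rounding the delay up to the next jiffy boundary. */
static int example_msec_hrtimeout_interruptible(unsigned int msecs)
{
	ktime_t timeout = ms_to_ktime(msecs);

	set_current_state(TASK_INTERRUPTIBLE);
	/* 0 on expiry, -EINTR if a signal woke us early */
	return schedule_hrtimeout(&timeout, HRTIMER_MODE_REL);
}
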
159645 diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
159646 index a9a6d766a176..45bf31de6282 100644
159647 --- a/sound/soc/codecs/wm8900.c
159648 +++ b/sound/soc/codecs/wm8900.c
159649 @@ -1104,7 +1104,7 @@ static int wm8900_set_bias_level(struct snd_soc_component *component,
159650                 /* Need to let things settle before stopping the clock
159651                  * to ensure that restart works, see "Stopping the
159652                  * master clock" in the datasheet. */
159653 -               schedule_timeout_interruptible(msecs_to_jiffies(1));
159654 +               schedule_msec_hrtimeout_interruptible(1);
159655                 snd_soc_component_write(component, WM8900_REG_POWER2,
159656                              WM8900_REG_POWER2_SYSCLK_ENA);
159657                 break;
159658 diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
159659 index cda9cd935d4f..9e621a254392 100644
159660 --- a/sound/soc/codecs/wm8960.c
159661 +++ b/sound/soc/codecs/wm8960.c
159662 @@ -608,10 +608,6 @@ static const int bclk_divs[] = {
159663   *             - lrclk      = sysclk / dac_divs
159664   *             - 10 * bclk  = sysclk / bclk_divs
159665   *
159666 - *     If we cannot find an exact match for (sysclk, lrclk, bclk)
159667 - *     triplet, we relax the bclk such that bclk is chosen as the
159668 - *     closest available frequency greater than expected bclk.
159670   * @wm8960: codec private data
159671   * @mclk: MCLK used to derive sysclk
159672   * @sysclk_idx: sysclk_divs index for found sysclk
159673 @@ -629,7 +625,7 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
159675         int sysclk, bclk, lrclk;
159676         int i, j, k;
159677 -       int diff, closest = mclk;
159678 +       int diff;
159680         /* marker for no match */
159681         *bclk_idx = -1;
159682 @@ -653,12 +649,6 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
159683                                         *bclk_idx = k;
159684                                         break;
159685                                 }
159686 -                               if (diff > 0 && closest > diff) {
159687 -                                       *sysclk_idx = i;
159688 -                                       *dac_idx = j;
159689 -                                       *bclk_idx = k;
159690 -                                       closest = diff;
159691 -                               }
159692                         }
159693                         if (k != ARRAY_SIZE(bclk_divs))
159694                                 break;
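
For context on the docstring trimmed above: wm8960_configure_sysclk() now reports only exact (sysclk, lrclk, bclk) matches, and the "closest available frequency" fallback is gone because a relaxed bclk could silently come out faster than requested. A worked example of an exact match, assuming MCLK = 12.288 MHz and a 48 kHz, 64fs stream: a sysclk_divs entry of 1 gives sysclk = 12288000; a dac_divs entry of 256 gives lrclk = 12288000 / 256 = 48000; the bclk_divs table stores divisors scaled by 10, so an entry of 40 means divide by 4 and gives bclk = 12288000 / 4 = 3072000 = 64 * lrclk, with all three indices reported through *sysclk_idx, *dac_idx and *bclk_idx.
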
159695 diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
159696 index e0ce32dd4a81..eb91c0282aad 100644
159697 --- a/sound/soc/codecs/wm9713.c
159698 +++ b/sound/soc/codecs/wm9713.c
159699 @@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
159701         /* Gracefully shut down the voice interface. */
159702         snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0200);
159703 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
159704 +       schedule_msec_hrtimeout_interruptible(1);
159705         snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
159706         snd_soc_component_update_bits(component, AC97_EXTENDED_MID, 0x1000, 0x1000);
159708 @@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_component *component,
159709         wm9713->pll_in = freq_in;
159711         /* wait 10ms AC97 link frames for the link to stabilise */
159712 -       schedule_timeout_interruptible(msecs_to_jiffies(10));
159713 +       schedule_msec_hrtimeout_interruptible((10));
159714         return 0;
159717 diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
159718 index 8c5cdcdc8713..e81b5cf0d37a 100644
159719 --- a/sound/soc/generic/audio-graph-card.c
159720 +++ b/sound/soc/generic/audio-graph-card.c
159721 @@ -380,7 +380,7 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
159722         struct device_node *top = dev->of_node;
159723         struct asoc_simple_dai *cpu_dai;
159724         struct asoc_simple_dai *codec_dai;
159725 -       int ret, single_cpu;
159726 +       int ret, single_cpu = 0;
159728         /* Do this only on the CPU turn */
159729         if (!li->cpu)
159730 diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
159731 index 75365c7bb393..d916ec69c24f 100644
159732 --- a/sound/soc/generic/simple-card.c
159733 +++ b/sound/soc/generic/simple-card.c
159734 @@ -258,7 +258,7 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
159735         struct device_node *plat = NULL;
159736         char prop[128];
159737         char *prefix = "";
159738 -       int ret, single_cpu;
159739 +       int ret, single_cpu = 0;
159741         /*
159742          *       |CPU   |Codec   : turn
159743 diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
159744 index 4e0248d2accc..7c5038803be7 100644
159745 --- a/sound/soc/intel/Makefile
159746 +++ b/sound/soc/intel/Makefile
159747 @@ -5,7 +5,7 @@ obj-$(CONFIG_SND_SOC) += common/
159748  # Platform Support
159749  obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += atom/
159750  obj-$(CONFIG_SND_SOC_INTEL_CATPT) += catpt/
159751 -obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
159752 +obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += skylake/
159753  obj-$(CONFIG_SND_SOC_INTEL_KEEMBAY) += keembay/
159755  # Machine support
159756 diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
159757 index 5d48cc359c3d..22912cab5e63 100644
159758 --- a/sound/soc/intel/boards/bytcr_rt5640.c
159759 +++ b/sound/soc/intel/boards/bytcr_rt5640.c
159760 @@ -482,6 +482,9 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
159761                         DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
159762                 },
159763                 .driver_data = (void *)(BYT_RT5640_IN1_MAP |
159764 +                                       BYT_RT5640_JD_SRC_JD2_IN4N |
159765 +                                       BYT_RT5640_OVCD_TH_2000UA |
159766 +                                       BYT_RT5640_OVCD_SF_0P75 |
159767                                         BYT_RT5640_MONO_SPEAKER |
159768                                         BYT_RT5640_DIFF_MIC |
159769                                         BYT_RT5640_SSP0_AIF2 |
159770 @@ -515,6 +518,23 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
159771                                         BYT_RT5640_SSP0_AIF1 |
159772                                         BYT_RT5640_MCLK_EN),
159773         },
159774 +       {
159775 +               /* Chuwi Hi8 (CWI509) */
159776 +               .matches = {
159777 +                       DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
159778 +                       DMI_MATCH(DMI_BOARD_NAME, "BYT-PA03C"),
159779 +                       DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
159780 +                       DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
159781 +               },
159782 +               .driver_data = (void *)(BYT_RT5640_IN1_MAP |
159783 +                                       BYT_RT5640_JD_SRC_JD2_IN4N |
159784 +                                       BYT_RT5640_OVCD_TH_2000UA |
159785 +                                       BYT_RT5640_OVCD_SF_0P75 |
159786 +                                       BYT_RT5640_MONO_SPEAKER |
159787 +                                       BYT_RT5640_DIFF_MIC |
159788 +                                       BYT_RT5640_SSP0_AIF1 |
159789 +                                       BYT_RT5640_MCLK_EN),
159790 +       },
159791         {
159792                 .matches = {
159793                         DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
159794 diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
159795 index cc9a2509ace2..e0149cf6127d 100644
159796 --- a/sound/soc/intel/boards/kbl_da7219_max98927.c
159797 +++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
159798 @@ -282,11 +282,33 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
159799         struct snd_interval *chan = hw_param_interval(params,
159800                         SNDRV_PCM_HW_PARAM_CHANNELS);
159801         struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
159802 -       struct snd_soc_dpcm *dpcm = container_of(
159803 -                       params, struct snd_soc_dpcm, hw_params);
159804 -       struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
159805 -       struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
159806 +       struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL;
159808 +       /*
159809 +        * The following loop runs only for the playback stream.
159810 +        * On this platform, there is only one playback device on every SSP.
159811 +        */
159812 +       for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
159813 +               rtd_dpcm = dpcm;
159814 +               break;
159815 +       }
159817 +       /*
159818 +        * The following loop runs only for the capture stream.
159819 +        * On this platform, there is only one capture device on every SSP.
159820 +        */
159821 +       for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) {
159822 +               rtd_dpcm = dpcm;
159823 +               break;
159824 +       }
159826 +       if (!rtd_dpcm)
159827 +               return -EINVAL;
159829 +       /*
159830 +        * The above two loops are mutually exclusive based on the stream
159831 +        * direction, so the rtd_dpcm variable is never overwritten.
159832 +        */
159833         /*
159834          * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE,
159835          * whereas kblda7219m98927 & kblmax98927 support S16_LE by default.
159836 @@ -309,9 +331,9 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
159837         /*
159838          * The ADSP will convert the FE rate to 48k, stereo, 24 bit
159839          */
159840 -       if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
159841 -           !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
159842 -           !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
159843 +       if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") ||
159844 +           !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") ||
159845 +           !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) {
159846                 rate->min = rate->max = 48000;
159847                 chan->min = chan->max = 2;
159848                 snd_mask_none(fmt);
159849 @@ -322,7 +344,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
159850          * The speaker on the SSP0 supports S16_LE and not S24_LE.
159851          * thus changing the mask here
159852          */
159853 -       if (!strcmp(be_dai_link->name, "SSP0-Codec"))
159854 +       if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec"))
159855                 snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
159857         return 0;
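
The kbl_da7219_max98927 hunk above replaces a container_of() on the hw_params pointer, which only worked while struct snd_soc_dpcm happened to embed hw_params, with a lookup through the BE runtime's FE client lists. A minimal sketch of that lookup shape, assuming at most one front end per direction as the comments above state (the function name is illustrative):

#include <sound/soc.h>
#include <sound/soc-dpcm.h>

/* Return the single DPCM link connected to @be, trying playback first;
 * for a given stream setup the two lists are disjoint. */
static struct snd_soc_dpcm *example_first_fe(struct snd_soc_pcm_runtime *be)
{
	struct snd_soc_dpcm *dpcm;

	for_each_dpcm_fe(be, SNDRV_PCM_STREAM_PLAYBACK, dpcm)
		return dpcm;
	for_each_dpcm_fe(be, SNDRV_PCM_STREAM_CAPTURE, dpcm)
		return dpcm;
	return NULL;	/* caller treats this as -EINVAL */
}
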
159858 diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
159859 index 8adce6417b02..ecd3f90f4bbe 100644
159860 --- a/sound/soc/intel/boards/sof_sdw.c
159861 +++ b/sound/soc/intel/boards/sof_sdw.c
159862 @@ -187,6 +187,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
159863                                         SOF_RT715_DAI_ID_FIX |
159864                                         SOF_SDW_FOUR_SPK),
159865         },
159866 +       /* AlderLake devices */
159867 +       {
159868 +               .callback = sof_sdw_quirk_cb,
159869 +               .matches = {
159870 +                       DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
159871 +                       DMI_MATCH(DMI_PRODUCT_NAME, "Alder Lake Client Platform"),
159872 +               },
159873 +               .driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
159874 +                                       SOF_SDW_TGL_HDMI |
159875 +                                       SOF_SDW_PCH_DMIC),
159876 +       },
159877         {}
159880 diff --git a/sound/soc/intel/boards/sof_wm8804.c b/sound/soc/intel/boards/sof_wm8804.c
159881 index a46ba13e8eb0..6a181e45143d 100644
159882 --- a/sound/soc/intel/boards/sof_wm8804.c
159883 +++ b/sound/soc/intel/boards/sof_wm8804.c
159884 @@ -124,7 +124,11 @@ static int sof_wm8804_hw_params(struct snd_pcm_substream *substream,
159885         }
159887         snd_soc_dai_set_clkdiv(codec_dai, WM8804_MCLK_DIV, mclk_div);
159888 -       snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
159889 +       ret = snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
159890 +       if (ret < 0) {
159891 +               dev_err(rtd->card->dev, "Failed to set WM8804 PLL\n");
159892 +               return ret;
159893 +       }
159895         ret = snd_soc_dai_set_sysclk(codec_dai, WM8804_TX_CLKSRC_PLL,
159896                                      sysclk, SND_SOC_CLOCK_OUT);
159897 diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
159898 index dd39149b89b1..1c4649bccec5 100644
159899 --- a/sound/soc/intel/skylake/Makefile
159900 +++ b/sound/soc/intel/skylake/Makefile
159901 @@ -7,7 +7,7 @@ ifdef CONFIG_DEBUG_FS
159902    snd-soc-skl-objs += skl-debug.o
159903  endif
159905 -obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
159906 +obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += snd-soc-skl.o
159908  #Skylake Clock device support
159909  snd-soc-skl-ssp-clk-objs := skl-ssp-clk.o
159910 diff --git a/sound/soc/qcom/qdsp6/q6afe-clocks.c b/sound/soc/qcom/qdsp6/q6afe-clocks.c
159911 index f0362f061652..9431656283cd 100644
159912 --- a/sound/soc/qcom/qdsp6/q6afe-clocks.c
159913 +++ b/sound/soc/qcom/qdsp6/q6afe-clocks.c
159914 @@ -11,33 +11,29 @@
159915  #include <linux/slab.h>
159916  #include "q6afe.h"
159918 -#define Q6AFE_CLK(id) &(struct q6afe_clk) {            \
159919 +#define Q6AFE_CLK(id) {                                        \
159920                 .clk_id = id,                           \
159921                 .afe_clk_id     = Q6AFE_##id,           \
159922                 .name = #id,                            \
159923 -               .attributes = LPASS_CLK_ATTRIBUTE_COUPLE_NO, \
159924                 .rate = 19200000,                       \
159925 -               .hw.init = &(struct clk_init_data) {    \
159926 -                       .ops = &clk_q6afe_ops,          \
159927 -                       .name = #id,                    \
159928 -               },                                      \
159929         }
159931 -#define Q6AFE_VOTE_CLK(id, blkid, n) &(struct q6afe_clk) { \
159932 +#define Q6AFE_VOTE_CLK(id, blkid, n) {                 \
159933                 .clk_id = id,                           \
159934                 .afe_clk_id = blkid,                    \
159935 -               .name = #n,                             \
159936 -               .hw.init = &(struct clk_init_data) {    \
159937 -                       .ops = &clk_vote_q6afe_ops,     \
159938 -                       .name = #id,                    \
159939 -               },                                      \
159940 +               .name = n,                              \
159941         }
159943 -struct q6afe_clk {
159944 -       struct device *dev;
159945 +struct q6afe_clk_init {
159946         int clk_id;
159947         int afe_clk_id;
159948         char *name;
159949 +       int rate;
159952 +struct q6afe_clk {
159953 +       struct device *dev;
159954 +       int afe_clk_id;
159955         int attributes;
159956         int rate;
159957         uint32_t handle;
159958 @@ -48,8 +44,7 @@ struct q6afe_clk {
159960  struct q6afe_cc {
159961         struct device *dev;
159962 -       struct q6afe_clk **clks;
159963 -       int num_clks;
159964 +       struct q6afe_clk *clks[Q6AFE_MAX_CLK_ID];
159967  static int clk_q6afe_prepare(struct clk_hw *hw)
159968 @@ -105,7 +100,7 @@ static int clk_vote_q6afe_block(struct clk_hw *hw)
159969         struct q6afe_clk *clk = to_q6afe_clk(hw);
159971         return q6afe_vote_lpass_core_hw(clk->dev, clk->afe_clk_id,
159972 -                                       clk->name, &clk->handle);
159973 +                                       clk_hw_get_name(&clk->hw), &clk->handle);
159976  static void clk_unvote_q6afe_block(struct clk_hw *hw)
159977 @@ -120,84 +115,76 @@ static const struct clk_ops clk_vote_q6afe_ops = {
159978         .unprepare      = clk_unvote_q6afe_block,
159981 -static struct q6afe_clk *q6afe_clks[Q6AFE_MAX_CLK_ID] = {
159982 -       [LPASS_CLK_ID_PRI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
159983 -       [LPASS_CLK_ID_PRI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
159984 -       [LPASS_CLK_ID_SEC_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
159985 -       [LPASS_CLK_ID_SEC_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
159986 -       [LPASS_CLK_ID_TER_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
159987 -       [LPASS_CLK_ID_TER_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
159988 -       [LPASS_CLK_ID_QUAD_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
159989 -       [LPASS_CLK_ID_QUAD_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
159990 -       [LPASS_CLK_ID_SPEAKER_I2S_IBIT] =
159991 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
159992 -       [LPASS_CLK_ID_SPEAKER_I2S_EBIT] =
159993 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
159994 -       [LPASS_CLK_ID_SPEAKER_I2S_OSR] =
159995 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
159996 -       [LPASS_CLK_ID_QUI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
159997 -       [LPASS_CLK_ID_QUI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
159998 -       [LPASS_CLK_ID_SEN_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
159999 -       [LPASS_CLK_ID_SEN_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
160000 -       [LPASS_CLK_ID_INT0_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
160001 -       [LPASS_CLK_ID_INT1_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
160002 -       [LPASS_CLK_ID_INT2_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
160003 -       [LPASS_CLK_ID_INT3_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
160004 -       [LPASS_CLK_ID_INT4_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
160005 -       [LPASS_CLK_ID_INT5_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
160006 -       [LPASS_CLK_ID_INT6_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
160007 -       [LPASS_CLK_ID_QUI_MI2S_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
160008 -       [LPASS_CLK_ID_PRI_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
160009 -       [LPASS_CLK_ID_PRI_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
160010 -       [LPASS_CLK_ID_SEC_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
160011 -       [LPASS_CLK_ID_SEC_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
160012 -       [LPASS_CLK_ID_TER_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
160013 -       [LPASS_CLK_ID_TER_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
160014 -       [LPASS_CLK_ID_QUAD_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
160015 -       [LPASS_CLK_ID_QUAD_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
160016 -       [LPASS_CLK_ID_QUIN_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
160017 -       [LPASS_CLK_ID_QUIN_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
160018 -       [LPASS_CLK_ID_QUI_PCM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
160019 -       [LPASS_CLK_ID_PRI_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
160020 -       [LPASS_CLK_ID_PRI_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
160021 -       [LPASS_CLK_ID_SEC_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
160022 -       [LPASS_CLK_ID_SEC_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
160023 -       [LPASS_CLK_ID_TER_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
160024 -       [LPASS_CLK_ID_TER_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
160025 -       [LPASS_CLK_ID_QUAD_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
160026 -       [LPASS_CLK_ID_QUAD_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
160027 -       [LPASS_CLK_ID_QUIN_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
160028 -       [LPASS_CLK_ID_QUIN_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
160029 -       [LPASS_CLK_ID_QUIN_TDM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
160030 -       [LPASS_CLK_ID_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
160031 -       [LPASS_CLK_ID_MCLK_2] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
160032 -       [LPASS_CLK_ID_MCLK_3] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
160033 -       [LPASS_CLK_ID_MCLK_4] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
160034 -       [LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE] =
160035 -               Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
160036 -       [LPASS_CLK_ID_INT_MCLK_0] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
160037 -       [LPASS_CLK_ID_INT_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
160038 -       [LPASS_CLK_ID_WSA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
160039 -       [LPASS_CLK_ID_WSA_CORE_NPL_MCLK] =
160040 -                               Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
160041 -       [LPASS_CLK_ID_VA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
160042 -       [LPASS_CLK_ID_TX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
160043 -       [LPASS_CLK_ID_TX_CORE_NPL_MCLK] =
160044 -                       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
160045 -       [LPASS_CLK_ID_RX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
160046 -       [LPASS_CLK_ID_RX_CORE_NPL_MCLK] =
160047 -                               Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
160048 -       [LPASS_CLK_ID_VA_CORE_2X_MCLK] =
160049 -                               Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
160050 -       [LPASS_HW_AVTIMER_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
160051 -                                                Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
160052 -                                                "LPASS_AVTIMER_MACRO"),
160053 -       [LPASS_HW_MACRO_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
160054 -                                               Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
160055 -                                               "LPASS_HW_MACRO"),
160056 -       [LPASS_HW_DCODEC_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
160057 -                                       Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
160058 -                                       "LPASS_HW_DCODEC"),
160059 +static const struct q6afe_clk_init q6afe_clks[] = {
160060 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
160061 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
160062 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
160063 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
160064 +       Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
160065 +       Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
160066 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
160067 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
160068 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
160069 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
160070 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
160071 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
160072 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
160073 +       Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
160074 +       Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
160075 +       Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
160076 +       Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
160077 +       Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
160078 +       Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
160079 +       Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
160080 +       Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
160081 +       Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
160082 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
160083 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
160084 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
160085 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
160086 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
160087 +       Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
160088 +       Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
160089 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
160090 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
160091 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
160092 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
160093 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
160094 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
160095 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
160096 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
160097 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
160098 +       Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
160099 +       Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
160100 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
160101 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
160102 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
160103 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
160104 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
160105 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
160106 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
160107 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
160108 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
160109 +       Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
160110 +       Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
160111 +       Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
160112 +       Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
160113 +       Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
160114 +       Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
160115 +       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
160116 +       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
160117 +       Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
160118 +       Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
160119 +       Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
160120 +       Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
160121 +                      Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
160122 +                      "LPASS_AVTIMER_MACRO"),
160123 +       Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
160124 +                      Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
160125 +                      "LPASS_HW_MACRO"),
160126 +       Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
160127 +                      Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
160128 +                      "LPASS_HW_DCODEC"),
160131  static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
160132 @@ -207,7 +194,7 @@ static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
160133         unsigned int idx = clkspec->args[0];
160134         unsigned int attr = clkspec->args[1];
160136 -       if (idx >= cc->num_clks || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
160137 +       if (idx >= Q6AFE_MAX_CLK_ID || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
160138                 dev_err(cc->dev, "Invalid clk specifier (%d, %d)\n", idx, attr);
160139                 return ERR_PTR(-EINVAL);
160140         }
160141 @@ -230,20 +217,36 @@ static int q6afe_clock_dev_probe(struct platform_device *pdev)
160142         if (!cc)
160143                 return -ENOMEM;
160145 -       cc->clks = &q6afe_clks[0];
160146 -       cc->num_clks = ARRAY_SIZE(q6afe_clks);
160147 +       cc->dev = dev;
160148         for (i = 0; i < ARRAY_SIZE(q6afe_clks); i++) {
160149 -               if (!q6afe_clks[i])
160150 -                       continue;
160151 +               unsigned int id = q6afe_clks[i].clk_id;
160152 +               struct clk_init_data init = {
160153 +                       .name =  q6afe_clks[i].name,
160154 +               };
160155 +               struct q6afe_clk *clk;
160157 +               clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
160158 +               if (!clk)
160159 +                       return -ENOMEM;
160161 +               clk->dev = dev;
160162 +               clk->afe_clk_id = q6afe_clks[i].afe_clk_id;
160163 +               clk->rate = q6afe_clks[i].rate;
160164 +               clk->hw.init = &init;
160166 +               if (clk->rate)
160167 +                       init.ops = &clk_q6afe_ops;
160168 +               else
160169 +                       init.ops = &clk_vote_q6afe_ops;
160171 -               q6afe_clks[i]->dev = dev;
160172 +               cc->clks[id] = clk;
160174 -               ret = devm_clk_hw_register(dev, &q6afe_clks[i]->hw);
160175 +               ret = devm_clk_hw_register(dev, &clk->hw);
160176                 if (ret)
160177                         return ret;
160178         }
160180 -       ret = of_clk_add_hw_provider(dev->of_node, q6afe_of_clk_hw_get, cc);
160181 +       ret = devm_of_clk_add_hw_provider(dev, q6afe_of_clk_hw_get, cc);
160182         if (ret)
160183                 return ret;
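
The q6afe-clocks rework above drops the file-scope array of pointers to compound-literal q6afe_clk objects, which were mutable singletons shared across any number of device binds, in favour of a const init table plus per-device devm allocation in probe. A reduced sketch of that static-table-to-devm pattern, with illustrative types and names:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct ex_clk_init {			/* immutable, file scope */
	unsigned int id;
	const char *name;
};

struct ex_clk {				/* mutable, one per bind */
	struct device *dev;
	unsigned int id;
};

static const struct ex_clk_init ex_clk_inits[] = {
	{ .id = 0, .name = "pri_ibit" },
	{ .id = 1, .name = "pri_ebit" },
};

static int ex_register_clks(struct device *dev, struct ex_clk *clks[])
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ex_clk_inits); i++) {
		struct ex_clk *clk = devm_kzalloc(dev, sizeof(*clk),
						  GFP_KERNEL);

		if (!clk)
			return -ENOMEM;
		clk->dev = dev;
		clk->id = ex_clk_inits[i].id;
		/* index by the stable clock id, as q6afe_of_clk_hw_get()
		 * above indexes cc->clks[] by the DT specifier */
		clks[ex_clk_inits[i].id] = clk;
	}
	return 0;
}
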
160185 diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
160186 index cad1cd1bfdf0..4327b72162ec 100644
160187 --- a/sound/soc/qcom/qdsp6/q6afe.c
160188 +++ b/sound/soc/qcom/qdsp6/q6afe.c
160189 @@ -1681,7 +1681,7 @@ int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
160190  EXPORT_SYMBOL(q6afe_unvote_lpass_core_hw);
160192  int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
160193 -                            char *client_name, uint32_t *client_handle)
160194 +                            const char *client_name, uint32_t *client_handle)
160196         struct q6afe *afe = dev_get_drvdata(dev->parent);
160197         struct afe_cmd_remote_lpass_core_hw_vote_request *vote_cfg;
160198 diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h
160199 index 22e10269aa10..3845b56c0ed3 100644
160200 --- a/sound/soc/qcom/qdsp6/q6afe.h
160201 +++ b/sound/soc/qcom/qdsp6/q6afe.h
160202 @@ -236,7 +236,7 @@ int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
160203  int q6afe_set_lpass_clock(struct device *dev, int clk_id, int clk_src,
160204                           int clk_root, unsigned int freq);
160205  int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
160206 -                            char *client_name, uint32_t *client_handle);
160207 +                            const char *client_name, uint32_t *client_handle);
160208  int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
160209                                uint32_t client_handle);
160210  #endif /* __Q6AFE_H__ */
160211 diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
160212 index 9300fef9bf26..125e07f65d2b 100644
160213 --- a/sound/soc/samsung/tm2_wm5110.c
160214 +++ b/sound/soc/samsung/tm2_wm5110.c
160215 @@ -553,7 +553,7 @@ static int tm2_probe(struct platform_device *pdev)
160217                 ret = of_parse_phandle_with_args(dev->of_node, "i2s-controller",
160218                                                  cells_name, i, &args);
160219 -               if (!args.np) {
160220 +               if (ret) {
160221                         dev_err(dev, "i2s-controller property parse error: %d\n", i);
160222                         ret = -EINVAL;
160223                         goto dai_node_put;
160224 diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
160225 index 1029d8d9d800..d2b4632d9c2a 100644
160226 --- a/sound/soc/sh/rcar/core.c
160227 +++ b/sound/soc/sh/rcar/core.c
160228 @@ -1428,8 +1428,75 @@ static int rsnd_hw_params(struct snd_soc_component *component,
160229                 }
160230                 if (io->converted_chan)
160231                         dev_dbg(dev, "convert channels = %d\n", io->converted_chan);
160232 -               if (io->converted_rate)
160233 +               if (io->converted_rate) {
160234 +                       /*
160235 +                        * SRC supports converting rates from params_rate(hw_params)/k_down
160236 +                        * to params_rate(hw_params)*k_up, where k_up is always 6 and
160237 +                        * k_down depends on the number of channels and the SRC unit.
160238 +                        * So all SRC units can upsample audio up to 6 times regardless
160239 +                        * of the number of channels, and all SRC units can downsample
160240 +                        * 2 channel audio up to 6 times too.
160241 +                        */
160242 +                       int k_up = 6;
160243 +                       int k_down = 6;
160244 +                       int channel;
160245 +                       struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
160247                         dev_dbg(dev, "convert rate     = %d\n", io->converted_rate);
160249 +                       channel = io->converted_chan ? io->converted_chan :
160250 +                                 params_channels(hw_params);
160252 +                       switch (rsnd_mod_id(src_mod)) {
160253 +                       /*
160254 +                        * SRC0 can downsample 4, 6 and 8 channel audio up to 4 times.
160255 +                        * SRC1, SRC3 and SRC4 can downsample 4 channel audio
160256 +                        * up to 4 times.
160257 +                        * SRC1, SRC3 and SRC4 can downsample 6 and 8 channel audio
160258 +                        * no more than twice.
160259 +                        */
160260 +                       case 1:
160261 +                       case 3:
160262 +                       case 4:
160263 +                               if (channel > 4) {
160264 +                                       k_down = 2;
160265 +                                       break;
160266 +                               }
160267 +                               fallthrough;
160268 +                       case 0:
160269 +                               if (channel > 2)
160270 +                                       k_down = 4;
160271 +                               break;
160273 +                       /* Other SRC units do not support more than 2 channels */
160274 +                       default:
160275 +                               if (channel > 2)
160276 +                                       return -EINVAL;
160277 +                       }
160279 +                       if (params_rate(hw_params) > io->converted_rate * k_down) {
160280 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
160281 +                                       io->converted_rate * k_down;
160282 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
160283 +                                       io->converted_rate * k_down;
160284 +                               hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
160285 +                       } else if (params_rate(hw_params) * k_up < io->converted_rate) {
160286 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
160287 +                                       (io->converted_rate + k_up - 1) / k_up;
160288 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
160289 +                                       (io->converted_rate + k_up - 1) / k_up;
160290 +                               hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
160291 +                       }
160293 +                       /*
160294 +                        * TBD: Max SRC input and output rates also depend on number
160295 +                        * of channels and SRC unit:
160296 +                        * SRC1, SRC3 and SRC4 do not support more than 128kHz
160297 +                        * for 6 channel and 96kHz for 8 channel audio.
160298 +                        * Perhaps this function should return EINVAL if the input or
160299 +                        * the output rate exceeds the limitation.
160300 +                        */
160301 +               }
160302         }
160304         return rsnd_dai_call(hw_params, io, substream, hw_params);
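
To make the clamp above concrete with the limits the comment gives: take io->converted_rate = 48000 and an 8-channel stream on SRC1, so k_down = 2 and k_up = 6. A requested 192 kHz exceeds 48000 * 2 = 96000 and is pinned down to 96 kHz; a requested 4 kHz fails 4000 * 6 >= 48000 and is raised to DIV_ROUND_UP(48000, 6) = 8000. A sketch of the same clamp as a pure helper (the name is illustrative):

/* Clamp @req into the rate window an SRC can convert to @conv. */
static unsigned int example_clamp_src_rate(unsigned int req,
					   unsigned int conv,
					   unsigned int k_up,
					   unsigned int k_down)
{
	if (req > conv * k_down)
		return conv * k_down;		  /* too fast to downsample */
	if (req * k_up < conv)
		return (conv + k_up - 1) / k_up;  /* too slow to upsample */
	return req;				  /* already convertible */
}
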
160305 diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
160306 index d0ded427a836..042207c11651 100644
160307 --- a/sound/soc/sh/rcar/ssi.c
160308 +++ b/sound/soc/sh/rcar/ssi.c
160309 @@ -507,10 +507,15 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
160310                          struct rsnd_priv *priv)
160312         struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
160313 +       int ret;
160315         if (!rsnd_ssi_is_run_mods(mod, io))
160316                 return 0;
160318 +       ret = rsnd_ssi_master_clk_start(mod, io);
160319 +       if (ret < 0)
160320 +               return ret;
160322         ssi->usrcnt++;
160324         rsnd_mod_power_on(mod);
160325 @@ -792,7 +797,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
160326                                                        SSI_SYS_STATUS(i * 2),
160327                                                        0xf << (id * 4));
160328                                         stop = true;
160329 -                                       break;
160330                                 }
160331                         }
160332                         break;
160333 @@ -810,7 +814,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
160334                                                 SSI_SYS_STATUS((i * 2) + 1),
160335                                                 0xf << 4);
160336                                         stop = true;
160337 -                                       break;
160338                                 }
160339                         }
160340                         break;
160341 @@ -1060,13 +1063,6 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
160342         return 0;
160345 -static int rsnd_ssi_prepare(struct rsnd_mod *mod,
160346 -                           struct rsnd_dai_stream *io,
160347 -                           struct rsnd_priv *priv)
160349 -       return rsnd_ssi_master_clk_start(mod, io);
160352  static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
160353         .name           = SSI_NAME,
160354         .probe          = rsnd_ssi_common_probe,
160355 @@ -1079,7 +1075,6 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
160356         .pointer        = rsnd_ssi_pio_pointer,
160357         .pcm_new        = rsnd_ssi_pcm_new,
160358         .hw_params      = rsnd_ssi_hw_params,
160359 -       .prepare        = rsnd_ssi_prepare,
160360         .get_status     = rsnd_ssi_get_status,
160363 @@ -1166,7 +1161,6 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
160364         .pcm_new        = rsnd_ssi_pcm_new,
160365         .fallback       = rsnd_ssi_fallback,
160366         .hw_params      = rsnd_ssi_hw_params,
160367 -       .prepare        = rsnd_ssi_prepare,
160368         .get_status     = rsnd_ssi_get_status,
160371 diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
160372 index 246a5e32e22a..b4810266f5e5 100644
160373 --- a/sound/soc/soc-compress.c
160374 +++ b/sound/soc/soc-compress.c
160375 @@ -153,7 +153,9 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
160376         fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
160377         fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
160379 +       mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
160380         snd_soc_runtime_activate(fe, stream);
160381 +       mutex_unlock(&fe->card->pcm_mutex);
160383         mutex_unlock(&fe->card->mutex);
160385 @@ -181,7 +183,9 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
160387         mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
160389 +       mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
160390         snd_soc_runtime_deactivate(fe, stream);
160391 +       mutex_unlock(&fe->card->pcm_mutex);
160393         fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
160395 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
160396 index b005f9eadd71..2f75a449c45c 100644
160397 --- a/sound/soc/soc-dapm.c
160398 +++ b/sound/soc/soc-dapm.c
160399 @@ -154,7 +154,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
160400  static void pop_wait(u32 pop_time)
160402         if (pop_time)
160403 -               schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
160404 +               schedule_msec_hrtimeout_uninterruptible((pop_time));
160407  __printf(3, 4)
160408 diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
160409 index 6740df541508..3d22c1be6f3d 100644
160410 --- a/sound/soc/tegra/tegra30_i2s.c
160411 +++ b/sound/soc/tegra/tegra30_i2s.c
160412 @@ -58,8 +58,18 @@ static int tegra30_i2s_runtime_resume(struct device *dev)
160413         }
160415         regcache_cache_only(i2s->regmap, false);
160416 +       regcache_mark_dirty(i2s->regmap);
160418 +       ret = regcache_sync(i2s->regmap);
160419 +       if (ret)
160420 +               goto disable_clocks;
160422         return 0;
160424 +disable_clocks:
160425 +       clk_disable_unprepare(i2s->clk_i2s);
160427 +       return ret;
160430  static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
160431 @@ -551,37 +561,11 @@ static int tegra30_i2s_platform_remove(struct platform_device *pdev)
160432         return 0;
160435 -#ifdef CONFIG_PM_SLEEP
160436 -static int tegra30_i2s_suspend(struct device *dev)
160438 -       struct tegra30_i2s *i2s = dev_get_drvdata(dev);
160440 -       regcache_mark_dirty(i2s->regmap);
160442 -       return 0;
160445 -static int tegra30_i2s_resume(struct device *dev)
160447 -       struct tegra30_i2s *i2s = dev_get_drvdata(dev);
160448 -       int ret;
160450 -       ret = pm_runtime_get_sync(dev);
160451 -       if (ret < 0) {
160452 -               pm_runtime_put(dev);
160453 -               return ret;
160454 -       }
160455 -       ret = regcache_sync(i2s->regmap);
160456 -       pm_runtime_put(dev);
160458 -       return ret;
160460 -#endif
160462  static const struct dev_pm_ops tegra30_i2s_pm_ops = {
160463         SET_RUNTIME_PM_OPS(tegra30_i2s_runtime_suspend,
160464                            tegra30_i2s_runtime_resume, NULL)
160465 -       SET_SYSTEM_SLEEP_PM_OPS(tegra30_i2s_suspend, tegra30_i2s_resume)
160466 +       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
160467 +                               pm_runtime_force_resume)
160470  static struct platform_driver tegra30_i2s_driver = {
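
The tegra30_i2s hunks above fold the regcache handling into runtime resume (mark dirty, then sync, with the clock disabled again on failure), which is what lets the bespoke system-sleep callbacks collapse into pm_runtime_force_suspend()/pm_runtime_force_resume(). A reduced sketch of that resume shape, with illustrative names:

#include <linux/clk.h>
#include <linux/regmap.h>

struct ex_i2s {
	struct clk *clk;
	struct regmap *regmap;
};

static int example_runtime_resume(struct ex_i2s *i2s)
{
	int ret;

	ret = clk_prepare_enable(i2s->clk);
	if (ret)
		return ret;

	regcache_cache_only(i2s->regmap, false);
	regcache_mark_dirty(i2s->regmap);

	/* replay every cached register; power may have been cut */
	ret = regcache_sync(i2s->regmap);
	if (ret)
		clk_disable_unprepare(i2s->clk);
	return ret;
}
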
160471 diff --git a/sound/usb/card.c b/sound/usb/card.c
160472 index 0826a437f8fc..7b7526d3a56e 100644
160473 --- a/sound/usb/card.c
160474 +++ b/sound/usb/card.c
160475 @@ -181,9 +181,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
160476                                 ctrlif, interface);
160477                         return -EINVAL;
160478                 }
160479 -               usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
160481 -               return 0;
160482 +               return usb_driver_claim_interface(&usb_audio_driver, iface,
160483 +                                                 USB_AUDIO_IFACE_UNUSED);
160484         }
160486         if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
160487 @@ -203,7 +202,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
160489         if (! snd_usb_parse_audio_interface(chip, interface)) {
160490                 usb_set_interface(dev, interface, 0); /* reset the current interface */
160491 -               usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
160492 +               return usb_driver_claim_interface(&usb_audio_driver, iface,
160493 +                                                 USB_AUDIO_IFACE_UNUSED);
160494         }
160496         return 0;
160497 @@ -862,7 +862,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
160498         struct snd_card *card;
160499         struct list_head *p;
160501 -       if (chip == (void *)-1L)
160502 +       if (chip == USB_AUDIO_IFACE_UNUSED)
160503                 return;
160505         card = chip->card;
160506 @@ -992,7 +992,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
160507         struct usb_mixer_interface *mixer;
160508         struct list_head *p;
160510 -       if (chip == (void *)-1L)
160511 +       if (chip == USB_AUDIO_IFACE_UNUSED)
160512                 return 0;
160514         if (!chip->num_suspended_intf++) {
160515 @@ -1022,7 +1022,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
160516         struct list_head *p;
160517         int err = 0;
160519 -       if (chip == (void *)-1L)
160520 +       if (chip == USB_AUDIO_IFACE_UNUSED)
160521                 return 0;
160523         atomic_inc(&chip->active); /* avoid autopm */
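
All of the card.c changes above (and the quirks.c ones near the end of this patch) revolve around the same cleanup: the magic (void *)-1L that marked an interface as claimed-but-unused gets a name, USB_AUDIO_IFACE_UNUSED, and the return value of usb_driver_claim_interface() is propagated instead of being dropped. A sketch of the pattern, assuming the macro expands to the old sentinel value (its real definition lives in the usb-audio headers, not here):

#include <linux/usb.h>

/* Illustrative stand-in for USB_AUDIO_IFACE_UNUSED. */
#define EXAMPLE_IFACE_UNUSED	((void *)-1L)

static int example_claim_unused(struct usb_driver *driver,
				struct usb_interface *iface)
{
	/* propagate failures; the pre-patch code ignored them */
	return usb_driver_claim_interface(driver, iface,
					  EXAMPLE_IFACE_UNUSED);
}
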
160524 diff --git a/sound/usb/clock.c b/sound/usb/clock.c
160525 index a746802d0ac3..17bbde73d4d1 100644
160526 --- a/sound/usb/clock.c
160527 +++ b/sound/usb/clock.c
160528 @@ -296,7 +296,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
160530         selector = snd_usb_find_clock_selector(chip->ctrl_intf, entity_id);
160531         if (selector) {
160532 -               int ret, i, cur;
160533 +               int ret, i, cur, err;
160535                 if (selector->bNrInPins == 1) {
160536                         ret = 1;
160537 @@ -324,13 +324,17 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
160538                 ret = __uac_clock_find_source(chip, fmt,
160539                                               selector->baCSourceID[ret - 1],
160540                                               visited, validate);
160541 +               if (ret > 0) {
160542 +                       err = uac_clock_selector_set_val(chip, entity_id, cur);
160543 +                       if (err < 0)
160544 +                               return err;
160545 +               }
160547                 if (!validate || ret > 0 || !chip->autoclock)
160548                         return ret;
160550                 /* The current clock source is invalid, try others. */
160551                 for (i = 1; i <= selector->bNrInPins; i++) {
160552 -                       int err;
160554                         if (i == cur)
160555                                 continue;
160557 @@ -396,7 +400,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
160559         selector = snd_usb_find_clock_selector_v3(chip->ctrl_intf, entity_id);
160560         if (selector) {
160561 -               int ret, i, cur;
160562 +               int ret, i, cur, err;
160564                 /* the entity ID we are looking for is a selector.
160565                  * find out what it currently selects */
160566 @@ -418,6 +422,12 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
160567                 ret = __uac3_clock_find_source(chip, fmt,
160568                                                selector->baCSourceID[ret - 1],
160569                                                visited, validate);
160570 +               if (ret > 0) {
160571 +                       err = uac_clock_selector_set_val(chip, entity_id, cur);
160572 +                       if (err < 0)
160573 +                               return err;
160574 +               }
160576                 if (!validate || ret > 0 || !chip->autoclock)
160577                         return ret;
160579 diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
160580 index 102d53515a76..933586a895e7 100644
160581 --- a/sound/usb/endpoint.c
160582 +++ b/sound/usb/endpoint.c
160583 @@ -1442,11 +1442,11 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
160584         if (snd_BUG_ON(!atomic_read(&ep->running)))
160585                 return;
160587 -       if (ep->sync_source)
160588 -               WRITE_ONCE(ep->sync_source->sync_sink, NULL);
160590 -       if (!atomic_dec_return(&ep->running))
160591 +       if (!atomic_dec_return(&ep->running)) {
160592 +               if (ep->sync_source)
160593 +                       WRITE_ONCE(ep->sync_source->sync_sink, NULL);
160594                 stop_urbs(ep, false);
160595 +       }
160598  /**
160599 diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
160600 index a030dd65eb28..9602929b7de9 100644
160601 --- a/sound/usb/line6/driver.c
160602 +++ b/sound/usb/line6/driver.c
160603 @@ -699,6 +699,10 @@ static int line6_init_cap_control(struct usb_line6 *line6)
160604                 line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL);
160605                 if (!line6->buffer_message)
160606                         return -ENOMEM;
160608 +               ret = line6_init_midi(line6);
160609 +               if (ret < 0)
160610 +                       return ret;
160611         } else {
160612                 ret = line6_hwdep_init(line6);
160613                 if (ret < 0)
160614 diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
160615 index fdbdfb7bce92..fa8e8faf3eb3 100644
160616 --- a/sound/usb/line6/pcm.c
160617 +++ b/sound/usb/line6/pcm.c
160618 @@ -127,7 +127,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
160619                 if (!alive)
160620                         break;
160621                 set_current_state(TASK_UNINTERRUPTIBLE);
160622 -               schedule_timeout(1);
160623 +               schedule_min_hrtimeout();
160624         } while (--timeout > 0);
160625         if (alive)
160626                 dev_err(line6pcm->line6->ifcdev,
160627 diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
160628 index cd44cb5f1310..16e644330c4d 100644
160629 --- a/sound/usb/line6/pod.c
160630 +++ b/sound/usb/line6/pod.c
160631 @@ -376,11 +376,6 @@ static int pod_init(struct usb_line6 *line6,
160632         if (err < 0)
160633                 return err;
160635 -       /* initialize MIDI subsystem: */
160636 -       err = line6_init_midi(line6);
160637 -       if (err < 0)
160638 -               return err;
160640         /* initialize PCM subsystem: */
160641         err = line6_init_pcm(line6, &pod_pcm_properties);
160642         if (err < 0)
160643 diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c
160644 index ed158f04de80..c2245aa93b08 100644
160645 --- a/sound/usb/line6/variax.c
160646 +++ b/sound/usb/line6/variax.c
160647 @@ -159,7 +159,6 @@ static int variax_init(struct usb_line6 *line6,
160648                        const struct usb_device_id *id)
160650         struct usb_line6_variax *variax = line6_to_variax(line6);
160651 -       int err;
160653         line6->process_message = line6_variax_process_message;
160654         line6->disconnect = line6_variax_disconnect;
160655 @@ -172,11 +171,6 @@ static int variax_init(struct usb_line6 *line6,
160656         if (variax->buffer_activate == NULL)
160657                 return -ENOMEM;
160659 -       /* initialize MIDI subsystem: */
160660 -       err = line6_init_midi(&variax->line6);
160661 -       if (err < 0)
160662 -               return err;
160664         /* initiate startup procedure: */
160665         schedule_delayed_work(&line6->startup_work,
160666                               msecs_to_jiffies(VARIAX_STARTUP_DELAY1));
160667 diff --git a/sound/usb/midi.c b/sound/usb/midi.c
160668 index 0c23fa6d8525..fa91290ad89d 100644
160669 --- a/sound/usb/midi.c
160670 +++ b/sound/usb/midi.c
160671 @@ -1332,7 +1332,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
160673   error:
160674         snd_usbmidi_in_endpoint_delete(ep);
160675 -       return -ENOMEM;
160676 +       return err;
160680 @@ -1889,8 +1889,12 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
160681                 ms_ep = find_usb_ms_endpoint_descriptor(hostep);
160682                 if (!ms_ep)
160683                         continue;
160684 +               if (ms_ep->bLength <= sizeof(*ms_ep))
160685 +                       continue;
160686                 if (ms_ep->bNumEmbMIDIJack > 0x10)
160687                         continue;
160688 +               if (ms_ep->bLength < sizeof(*ms_ep) + ms_ep->bNumEmbMIDIJack)
160689 +                       continue;
160690                 if (usb_endpoint_dir_out(ep)) {
160691                         if (endpoints[epidx].out_ep) {
160692                                 if (++epidx >= MIDI_MAX_ENDPOINTS) {
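
The midi.c hunk above adds two descriptor sanity checks before the jack array is trusted: bLength must cover more than the fixed header, and must also cover the bNumEmbMIDIJack trailing jack IDs. The generic shape of that check, with an illustrative reduced descriptor type:

#include <linux/types.h>

struct ex_ms_ep {
	__u8 bLength;
	__u8 bNumEmbMIDIJack;
	__u8 baAssocJackID[];	/* bNumEmbMIDIJack entries follow */
} __packed;

static bool example_ms_ep_valid(const struct ex_ms_ep *ep)
{
	/* reject a bare header, then make sure the jack array fits */
	return ep->bLength > sizeof(*ep) &&
	       ep->bLength >= sizeof(*ep) + ep->bNumEmbMIDIJack;
}
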
160693 diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
160694 index 646deb6244b1..c5794e83fd80 100644
160695 --- a/sound/usb/mixer_maps.c
160696 +++ b/sound/usb/mixer_maps.c
160697 @@ -337,6 +337,13 @@ static const struct usbmix_name_map bose_companion5_map[] = {
160698         { 0 }   /* terminator */
160701 +/* Sennheiser Communications Headset [PC 8]: the maximum dB value is mis-reported as -6 */
160702 +static const struct usbmix_dB_map sennheiser_pc8_dB = {-9500, 0};
160703 +static const struct usbmix_name_map sennheiser_pc8_map[] = {
160704 +       { 9, NULL, .dB = &sennheiser_pc8_dB },
160705 +       { 0 }   /* terminator */
160709   * Dell usb dock with ALC4020 codec had a firmware problem where it got
160710   * screwed up when zero volume is passed; just skip it as a workaround
160711 @@ -593,6 +600,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
160712                 .id = USB_ID(0x17aa, 0x1046),
160713                 .map = lenovo_p620_rear_map,
160714         },
160715 +       {
160716 +               /* Sennheiser Communications Headset [PC 8] */
160717 +               .id = USB_ID(0x1395, 0x0025),
160718 +               .map = sennheiser_pc8_map,
160719 +       },
160720         { 0 } /* terminator */
160723 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
160724 index 1165a5ac60f2..8a8fe2b980a1 100644
160725 --- a/sound/usb/quirks-table.h
160726 +++ b/sound/usb/quirks-table.h
160727 @@ -2376,6 +2376,16 @@ YAMAHA_DEVICE(0x7010, "UB99"),
160728         }
160732 +       USB_DEVICE_VENDOR_SPEC(0x0944, 0x0204),
160733 +       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
160734 +               .vendor_name = "KORG, Inc.",
160735 +               /* .product_name = "ToneLab EX", */
160736 +               .ifnum = 3,
160737 +               .type = QUIRK_MIDI_STANDARD_INTERFACE,
160738 +       }
160741  /* AKAI devices */
160743         USB_DEVICE(0x09e8, 0x0062),
160744 @@ -3817,6 +3827,69 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
160745                 }
160746         }
160749 +       /*
160750 +        * Pioneer DJ DJM-850
160751 +        * 8 channels playback and 8 channels capture @ 44.1/48/96kHz S24LE
160752 +        * Playback on EP 0x05
160753 +        * Capture on EP 0x86
160754 +        */
160755 +       USB_DEVICE_VENDOR_SPEC(0x08e4, 0x0163),
160756 +       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
160757 +               .ifnum = QUIRK_ANY_INTERFACE,
160758 +               .type = QUIRK_COMPOSITE,
160759 +               .data = (const struct snd_usb_audio_quirk[]) {
160760 +                       {
160761 +                               .ifnum = 0,
160762 +                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
160763 +                               .data = &(const struct audioformat) {
160764 +                                       .formats = SNDRV_PCM_FMTBIT_S24_3LE,
160765 +                                       .channels = 8,
160766 +                                       .iface = 0,
160767 +                                       .altsetting = 1,
160768 +                                       .altset_idx = 1,
160769 +                                       .endpoint = 0x05,
160770 +                                       .ep_attr = USB_ENDPOINT_XFER_ISOC|
160771 +                                           USB_ENDPOINT_SYNC_ASYNC|
160772 +                                               USB_ENDPOINT_USAGE_DATA,
160773 +                                       .rates = SNDRV_PCM_RATE_44100|
160774 +                                               SNDRV_PCM_RATE_48000|
160775 +                                               SNDRV_PCM_RATE_96000,
160776 +                                       .rate_min = 44100,
160777 +                                       .rate_max = 96000,
160778 +                                       .nr_rates = 3,
160779 +                                       .rate_table = (unsigned int[]) { 44100, 48000, 96000 }
160780 +                               }
160781 +                       },
160782 +                       {
160783 +                               .ifnum = 0,
160784 +                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
160785 +                               .data = &(const struct audioformat) {
160786 +                                       .formats = SNDRV_PCM_FMTBIT_S24_3LE,
160787 +                                       .channels = 8,
160788 +                                       .iface = 0,
160789 +                                       .altsetting = 1,
160790 +                                       .altset_idx = 1,
160791 +                                       .endpoint = 0x86,
160792 +                                       .ep_idx = 1,
160793 +                                       .ep_attr = USB_ENDPOINT_XFER_ISOC|
160794 +                                               USB_ENDPOINT_SYNC_ASYNC|
160795 +                                               USB_ENDPOINT_USAGE_DATA,
160796 +                                       .rates = SNDRV_PCM_RATE_44100|
160797 +                                               SNDRV_PCM_RATE_48000|
160798 +                                               SNDRV_PCM_RATE_96000,
160799 +                                       .rate_min = 44100,
160800 +                                       .rate_max = 96000,
160801 +                                       .nr_rates = 3,
160802 +                                       .rate_table = (unsigned int[]) { 44100, 48000, 96000 }
160803 +                               }
160804 +                       },
160805 +                       {
160806 +                               .ifnum = -1
160807 +                       }
160808 +               }
160809 +       }
160812         /*
160813          * Pioneer DJ DJM-450
160814 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
160815 index 176437a441e6..8b8bee3c3dd6 100644
160816 --- a/sound/usb/quirks.c
160817 +++ b/sound/usb/quirks.c
160818 @@ -55,8 +55,12 @@ static int create_composite_quirk(struct snd_usb_audio *chip,
160819                 if (!iface)
160820                         continue;
160821                 if (quirk->ifnum != probed_ifnum &&
160822 -                   !usb_interface_claimed(iface))
160823 -                       usb_driver_claim_interface(driver, iface, (void *)-1L);
160824 +                   !usb_interface_claimed(iface)) {
160825 +                       err = usb_driver_claim_interface(driver, iface,
160826 +                                                        USB_AUDIO_IFACE_UNUSED);
160827 +                       if (err < 0)
160828 +                               return err;
160829 +               }
160830         }
160832         return 0;
160833 @@ -426,8 +430,12 @@ static int create_autodetect_quirks(struct snd_usb_audio *chip,
160834                         continue;
160836                 err = create_autodetect_quirk(chip, iface, driver);
160837 -               if (err >= 0)
160838 -                       usb_driver_claim_interface(driver, iface, (void *)-1L);
160839 +               if (err >= 0) {
160840 +                       err = usb_driver_claim_interface(driver, iface,
160841 +                                                        USB_AUDIO_IFACE_UNUSED);
160842 +                       if (err < 0)
160843 +                               return err;
160844 +               }
160845         }
160847         return 0;
160848 @@ -1503,6 +1511,10 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
160849         case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */
160850                 pioneer_djm_set_format_quirk(subs, 0x0082);
160851                 break;
160852 +       case USB_ID(0x08e4, 0x017f): /* Pioneer DJM-750 */
160853 +       case USB_ID(0x08e4, 0x0163): /* Pioneer DJM-850 */
160854 +               pioneer_djm_set_format_quirk(subs, 0x0086);
160855 +               break;
160856         }
160859 diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
160860 index 60b9dd7df6bb..8794c8658ab9 100644
160861 --- a/sound/usb/usbaudio.h
160862 +++ b/sound/usb/usbaudio.h
160863 @@ -61,6 +61,8 @@ struct snd_usb_audio {
160864         struct media_intf_devnode *ctl_intf_media_devnode;
160867 +#define USB_AUDIO_IFACE_UNUSED ((void *)-1L)
160869  #define usb_audio_err(chip, fmt, args...) \
160870         dev_err(&(chip)->dev->dev, fmt, ##args)
160871  #define usb_audio_warn(chip, fmt, args...) \
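
The two quirks.c hunks above stop ignoring the return value of usb_driver_claim_interface() and replace the bare (void *)-1L cookie with the named USB_AUDIO_IFACE_UNUSED sentinel that usbaudio.h now defines. A minimal sketch of the resulting pattern follows; the helper and its shape are illustrative, not part of the patch:

#include <linux/usb.h>

/* Sentinel meaning "claimed but unused", as defined in usbaudio.h above. */
#define USB_AUDIO_IFACE_UNUSED ((void *)-1L)

/* Illustrative helper: claim every sibling interface we do not stream on,
 * and propagate claim failures instead of silently ignoring them. */
static int claim_unused_interfaces(struct usb_driver *driver,
                                   struct usb_device *dev, int probed_ifnum)
{
        int i, err;

        for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
                struct usb_interface *iface = usb_ifnum_to_if(dev, i);

                if (!iface || i == probed_ifnum || usb_interface_claimed(iface))
                        continue;
                err = usb_driver_claim_interface(driver, iface,
                                                 USB_AUDIO_IFACE_UNUSED);
                if (err < 0)
                        return err;
        }
        return 0;
}

A named sentinel also lets disconnect paths distinguish interfaces parked this way from interfaces that carry real per-interface driver data.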
160872 diff --git a/tools/arch/x86/include/asm/unistd_64.h b/tools/arch/x86/include/asm/unistd_64.h
160873 index 4205ed4158bf..b65c51e8d675 100644
160874 --- a/tools/arch/x86/include/asm/unistd_64.h
160875 +++ b/tools/arch/x86/include/asm/unistd_64.h
160876 @@ -17,3 +17,15 @@
160877  #ifndef __NR_setns
160878  #define __NR_setns 308
160879  #endif
160881 +#ifndef __NR_futex_wait
160882 +# define __NR_futex_wait 443
160883 +#endif
160885 +#ifndef __NR_futex_wake
160886 +# define __NR_futex_wake 444
160887 +#endif
160889 +#ifndef __NR_futex_requeue
160890 +# define __NR_futex_requeue 446
160891 +#endif
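
These fallback definitions let the perf tooling build against older installed headers; 443, 444 and 446 are the numbers the futex2 patch set assigns to futex_wait, futex_wake and futex_requeue. Whether the running kernel actually implements them is a separate question, so userspace typically probes once and falls back. A sketch of such a probe (ours, not from the patch):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_futex_wake
# define __NR_futex_wake 444
#endif

int main(void)
{
        unsigned int futex_word = 0;

        /* Waking zero waiters is a harmless capability probe: ENOSYS
         * means the kernel predates the futex2 interface; any other
         * result means the syscall at least exists. */
        if (syscall(__NR_futex_wake, &futex_word, 0, 0) < 0 &&
            errno == ENOSYS)
                puts("futex2 not supported, falling back to futex()");
        else
                puts("futex2 syscalls available");
        return 0;
}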
160892 diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
160893 index fe9e7b3a4b50..1326fff3629b 100644
160894 --- a/tools/bpf/bpftool/btf.c
160895 +++ b/tools/bpf/bpftool/btf.c
160896 @@ -538,6 +538,7 @@ static int do_dump(int argc, char **argv)
160897                         NEXT_ARG();
160898                         if (argc < 1) {
160899                                 p_err("expecting value for 'format' option\n");
160900 +                               err = -EINVAL;
160901                                 goto done;
160902                         }
160903                         if (strcmp(*argv, "c") == 0) {
160904 @@ -547,11 +548,13 @@ static int do_dump(int argc, char **argv)
160905                         } else {
160906                                 p_err("unrecognized format specifier: '%s', possible values: raw, c",
160907                                       *argv);
160908 +                               err = -EINVAL;
160909                                 goto done;
160910                         }
160911                         NEXT_ARG();
160912                 } else {
160913                         p_err("unrecognized option: '%s'", *argv);
160914 +                       err = -EINVAL;
160915                         goto done;
160916                 }
160917         }
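
The btf.c hunks above, and the main.c and map.c hunks that follow, are all the same class of fix: an early exit jumped to the cleanup label (or fell through to the return) with err/ret never assigned on that path, so the function reported a stale or indeterminate status. A minimal sketch of the convention being restored (hypothetical function, names ours):

#include <errno.h>
#include <stdlib.h>

static int parse_format(const char *arg)
{
        int err = 0;                   /* default: success */
        char *buf = malloc(64);

        if (!buf)
                return -ENOMEM;

        if (!arg) {
                err = -EINVAL;         /* assign before every goto, or the
                                        * cleanup path returns a stale value */
                goto done;
        }
        /* ... parse arg into buf ... */
done:
        free(buf);
        return err;
}

int main(void)
{
        return parse_format(NULL) == -EINVAL ? 0 : 1;
}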
160918 diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
160919 index b86f450e6fce..d9afb730136a 100644
160920 --- a/tools/bpf/bpftool/main.c
160921 +++ b/tools/bpf/bpftool/main.c
160922 @@ -276,7 +276,7 @@ static int do_batch(int argc, char **argv)
160923         int n_argc;
160924         FILE *fp;
160925         char *cp;
160926 -       int err;
160927 +       int err = 0;
160928         int i;
160930         if (argc < 2) {
160931 @@ -370,7 +370,6 @@ static int do_batch(int argc, char **argv)
160932         } else {
160933                 if (!json_output)
160934                         printf("processed %d commands\n", lines);
160935 -               err = 0;
160936         }
160937  err_close:
160938         if (fp != stdin)
160939 diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
160940 index b400364ee054..09ae0381205b 100644
160941 --- a/tools/bpf/bpftool/map.c
160942 +++ b/tools/bpf/bpftool/map.c
160943 @@ -100,7 +100,7 @@ static int do_dump_btf(const struct btf_dumper *d,
160944                        void *value)
160946         __u32 value_id;
160947 -       int ret;
160948 +       int ret = 0;
160950         /* start of key-value pair */
160951         jsonw_start_object(d->jw);
160952 diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
160953 index ce58cff99b66..2a6adca37fe9 100644
160954 --- a/tools/include/uapi/asm-generic/unistd.h
160955 +++ b/tools/include/uapi/asm-generic/unistd.h
160956 @@ -864,8 +864,17 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
160957  #define __NR_mount_setattr 442
160958  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
160960 +#define __NR_futex_wait 443
160961 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
160963 +#define __NR_futex_wake 444
160964 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
160966 +#define __NR_futex_waitv 445
160967 +__SC_COMP(__NR_futex_waitv, sys_futex_waitv, compat_sys_futex_waitv)
160969  #undef __NR_syscalls
160970 -#define __NR_syscalls 443
160971 +#define __NR_syscalls 446
160974   * 32 bit systems traditionally used different
160975 diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
160976 index 53b3e199fb25..09ebe3db5f2f 100644
160977 --- a/tools/lib/bpf/bpf_core_read.h
160978 +++ b/tools/lib/bpf/bpf_core_read.h
160979 @@ -88,11 +88,19 @@ enum bpf_enum_value_kind {
160980         const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
160981         unsigned long long val;                                               \
160982                                                                               \
160983 +       /* This is a so-called barrier_var() operation that makes the          \
160984 +        * specified variable "a black box" for the optimizing compiler.      \
160985 +        * It forces the compiler to perform the BYTE_OFFSET relocation on p  \
160986 +        * and to use its calculated value in the switch below, instead of    \
160987 +        * applying the same relocation 4 times for each individual load.     \
160988 +        */                                                                   \
160989 +       asm volatile("" : "=r"(p) : "0"(p));                                  \
160990 +                                                                             \
160991         switch (__CORE_RELO(s, field, BYTE_SIZE)) {                           \
160992 -       case 1: val = *(const unsigned char *)p;                              \
160993 -       case 2: val = *(const unsigned short *)p;                             \
160994 -       case 4: val = *(const unsigned int *)p;                               \
160995 -       case 8: val = *(const unsigned long long *)p;                         \
160996 +       case 1: val = *(const unsigned char *)p; break;                       \
160997 +       case 2: val = *(const unsigned short *)p; break;                      \
160998 +       case 4: val = *(const unsigned int *)p; break;                        \
160999 +       case 8: val = *(const unsigned long long *)p; break;                  \
161000         }                                                                     \
161001         val <<= __CORE_RELO(s, field, LSHIFT_U64);                            \
161002         if (__CORE_RELO(s, field, SIGNED))                                    \
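
Two separate fixes land in this hunk. The added break statements matter most: previously every case fell through, so val always ended up holding the 8-byte load regardless of the field's real size. The empty asm is libbpf's barrier_var() idiom: it emits no instruction, but the "0" constraint ties the input to the output register, so the compiler must materialize p once and can no longer fold the CO-RE byte-offset relocation into each load. A standalone illustration of the idiom (ours, not the macro from the header):

/* barrier_var() as used above: the empty asm emits no instruction, but
 * forces `var` through a register, making it opaque to the optimizer. */
#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))

static unsigned long long load_sized(const void *base, unsigned long off,
                                     unsigned int size)
{
        const unsigned char *p = (const unsigned char *)base + off;

        barrier_var(p);         /* compute p once; don't refold per load */

        switch (size) {
        case 1: return *p;
        case 2: return *(const unsigned short *)p;
        case 4: return *(const unsigned int *)p;
        case 8: return *(const unsigned long long *)p;
        }
        return 0;
}

int main(void)
{
        unsigned int x = 42;
        return load_sized(&x, 0, 4) == 42 ? 0 : 1;
}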
161003 diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
161004 index f9ef37707888..1c2e91ee041d 100644
161005 --- a/tools/lib/bpf/bpf_tracing.h
161006 +++ b/tools/lib/bpf/bpf_tracing.h
161007 @@ -413,20 +413,38 @@ typeof(name(0)) name(struct pt_regs *ctx)                             \
161008  }                                                                          \
161009  static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
161011 +#define ___bpf_fill0(arr, p, x) do {} while (0)
161012 +#define ___bpf_fill1(arr, p, x) arr[p] = x
161013 +#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
161014 +#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
161015 +#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
161016 +#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
161017 +#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
161018 +#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
161019 +#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
161020 +#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
161021 +#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
161022 +#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
161023 +#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
161024 +#define ___bpf_fill(arr, args...) \
161025 +       ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
161028   * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
161029   * in a structure.
161030   */
161031 -#define BPF_SEQ_PRINTF(seq, fmt, args...)                                  \
161032 -       ({                                                                  \
161033 -               _Pragma("GCC diagnostic push")                              \
161034 -               _Pragma("GCC diagnostic ignored \"-Wint-conversion\"")      \
161035 -               static const char ___fmt[] = fmt;                           \
161036 -               unsigned long long ___param[] = { args };                   \
161037 -               _Pragma("GCC diagnostic pop")                               \
161038 -               int ___ret = bpf_seq_printf(seq, ___fmt, sizeof(___fmt),    \
161039 -                                           ___param, sizeof(___param));    \
161040 -               ___ret;                                                     \
161041 -       })
161042 +#define BPF_SEQ_PRINTF(seq, fmt, args...)                      \
161043 +({                                                             \
161044 +       static const char ___fmt[] = fmt;                       \
161045 +       unsigned long long ___param[___bpf_narg(args)];         \
161046 +                                                               \
161047 +       _Pragma("GCC diagnostic push")                          \
161048 +       _Pragma("GCC diagnostic ignored \"-Wint-conversion\"")  \
161049 +       ___bpf_fill(___param, args);                            \
161050 +       _Pragma("GCC diagnostic pop")                           \
161051 +                                                               \
161052 +       bpf_seq_printf(seq, ___fmt, sizeof(___fmt),             \
161053 +                      ___param, sizeof(___param));             \
161056  #endif
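
The rewrite sizes ___param with ___bpf_narg(args) and fills it element by element through the ___bpf_fill chain, rather than via a brace initializer. In plain C, ___bpf_fill(param, a, b, c) unrolls roughly as below, assuming ___bpf_narg() and ___bpf_apply() (defined earlier in bpf_tracing.h) count the arguments and paste the arity onto the macro name:

static void fill_example(unsigned long long a, unsigned long long b,
                         unsigned long long c)
{
        unsigned long long param[3];    /* ___bpf_narg(a, b, c) == 3 */

        param[0] = a;   /* ___bpf_fill3(param, 0, a, b, c) */
        param[1] = b;   /*  -> ___bpf_fill2(param, 1, b, c) */
        param[2] = c;   /*   -> ___bpf_fill1(param, 2, c)   */

        (void)param;    /* bpf_seq_printf() consumes param at this point */
}

int main(void)
{
        fill_example(1, 2, 3);
        return 0;
}

One visible benefit is scoping: the -Wint-conversion pragma now wraps only the assignments, and with zero arguments the array degenerates to a zero-length array while ___bpf_fill0 expands to an empty statement.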
161057 diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
161058 index 1237bcd1dd17..5b8a6ea44b38 100644
161059 --- a/tools/lib/bpf/btf.h
161060 +++ b/tools/lib/bpf/btf.h
161061 @@ -173,6 +173,7 @@ struct btf_dump_emit_type_decl_opts {
161062         int indent_level;
161063         /* strip all the const/volatile/restrict mods */
161064         bool strip_mods;
161065 +       size_t :0;
161067  #define btf_dump_emit_type_decl_opts__last_field strip_mods
161069 diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
161070 index 3c35eb401931..3d690d4e785c 100644
161071 --- a/tools/lib/bpf/libbpf.h
161072 +++ b/tools/lib/bpf/libbpf.h
161073 @@ -507,6 +507,7 @@ struct xdp_link_info {
161074  struct bpf_xdp_set_link_opts {
161075         size_t sz;
161076         int old_fd;
161077 +       size_t :0;
161079  #define bpf_xdp_set_link_opts__last_field old_fd
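
Both headers gain a trailing size_t :0. The unnamed zero-width bitfield makes the struct's tail padding explicit, which matters for libbpf's opts ABI: callers set .sz = sizeof(opts), and the library treats any nonzero byte between the last field it knows about and sz as an unsupported option. Designated initializers are not guaranteed to zero padding on the stack, so garbage there could spuriously trip that check. A rough stand-in for the scheme (our code, loosely modeled on libbpf's opts validation):

#include <stddef.h>
#include <stdio.h>

struct demo_opts {
        size_t sz;              /* set by the caller to sizeof(struct) */
        int old_fd;
        size_t :0;              /* explicit padding out to size_t alignment */
};

/* Loosely modeled on libbpf's check: every byte past the last field
 * this library version knows about must be zero. */
static int opts_tail_is_zero(const void *opts, size_t known_end, size_t sz)
{
        const unsigned char *p = opts;
        size_t i;

        for (i = known_end; i < sz; i++)
                if (p[i])
                        return 0;
        return 1;
}

int main(void)
{
        struct demo_opts opts = { .sz = sizeof(opts), .old_fd = 3 };
        size_t known_end = offsetof(struct demo_opts, old_fd) +
                           sizeof(opts.old_fd);

        printf("tail zeroed: %d\n",
               opts_tail_is_zero(&opts, known_end, opts.sz));
        return 0;
}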
161081 diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
161082 index e7a8d847161f..1d80ad4e0de8 100644
161083 --- a/tools/lib/bpf/ringbuf.c
161084 +++ b/tools/lib/bpf/ringbuf.c
161085 @@ -202,9 +202,11 @@ static inline int roundup_len(__u32 len)
161086         return (len + 7) / 8 * 8;
161089 -static int ringbuf_process_ring(struct ring* r)
161090 +static int64_t ringbuf_process_ring(struct ring* r)
161092 -       int *len_ptr, len, err, cnt = 0;
161093 +       int *len_ptr, len, err;
161094 +       /* 64-bit to avoid overflow in case of extreme application behavior */
161095 +       int64_t cnt = 0;
161096         unsigned long cons_pos, prod_pos;
161097         bool got_new_data;
161098         void *sample;
161099 @@ -244,12 +246,14 @@ static int ringbuf_process_ring(struct ring* r)
161102  /* Consume available ring buffer(s) data without event polling.
161103 - * Returns number of records consumed across all registered ring buffers, or
161104 - * negative number if any of the callbacks return error.
161105 + * Returns number of records consumed across all registered ring buffers (or
161106 + * INT_MAX, whichever is less), or negative number if any of the callbacks
161107 + * return error.
161108   */
161109  int ring_buffer__consume(struct ring_buffer *rb)
161111 -       int i, err, res = 0;
161112 +       int64_t err, res = 0;
161113 +       int i;
161115         for (i = 0; i < rb->ring_cnt; i++) {
161116                 struct ring *ring = &rb->rings[i];
161117 @@ -259,18 +263,24 @@ int ring_buffer__consume(struct ring_buffer *rb)
161118                         return err;
161119                 res += err;
161120         }
161121 +       if (res > INT_MAX)
161122 +               return INT_MAX;
161123         return res;
161126  /* Poll for available data and consume records, if any are available.
161127 - * Returns number of records consumed, or negative number, if any of the
161128 - * registered callbacks returned error.
161129 + * Returns number of records consumed (or INT_MAX, whichever is less), or
161130 + * negative number, if any of the registered callbacks returned error.
161131   */
161132  int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
161134 -       int i, cnt, err, res = 0;
161135 +       int i, cnt;
161136 +       int64_t err, res = 0;
161138         cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
161139 +       if (cnt < 0)
161140 +               return -errno;
161142         for (i = 0; i < cnt; i++) {
161143                 __u32 ring_id = rb->events[i].data.fd;
161144                 struct ring *ring = &rb->rings[ring_id];
161145 @@ -280,7 +290,9 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
161146                         return err;
161147                 res += err;
161148         }
161149 -       return cnt < 0 ? -errno : res;
161150 +       if (res > INT_MAX)
161151 +               return INT_MAX;
161152 +       return res;
161155  /* Get an fd that can be used to sleep until data is available in the ring(s) */
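
The ringbuf changes keep the public int return type of ring_buffer__consume()/ring_buffer__poll() but do all accumulation in int64_t and clamp to INT_MAX on the way out, since a fast producer can deliver more than INT_MAX records in one call across rings; the epoll_wait() error is now also checked before any ring is touched. The accumulate-wide, clamp-on-return shape in isolation (our sketch):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for per-ring processing; a real ring would report how many
 * records its callbacks consumed, or a negative error. */
static int64_t consume_one(int ring_id)
{
        (void)ring_id;
        return 1;
}

int consume_all(int nr_rings)
{
        int64_t res = 0;
        int i;

        for (i = 0; i < nr_rings; i++) {
                int64_t cnt = consume_one(i);

                if (cnt < 0)
                        return (int)cnt;        /* propagate the error */
                res += cnt;                     /* cannot overflow int64_t */
        }
        /* Clamp so the legacy int ABI never wraps to a bogus negative. */
        return res > INT_MAX ? INT_MAX : (int)res;
}

int main(void)
{
        printf("%d\n", consume_all(4));
        return 0;
}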
161156 diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
161157 index d82054225fcc..4d0c02ba3f7d 100644
161158 --- a/tools/lib/perf/include/perf/event.h
161159 +++ b/tools/lib/perf/include/perf/event.h
161160 @@ -8,6 +8,8 @@
161161  #include <linux/bpf.h>
161162  #include <sys/types.h> /* pid_t */
161164 +#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
161166  struct perf_record_mmap {
161167         struct perf_event_header header;
161168         __u32                    pid, tid;
161169 @@ -346,8 +348,9 @@ struct perf_record_time_conv {
161170         __u64                    time_zero;
161171         __u64                    time_cycles;
161172         __u64                    time_mask;
161173 -       bool                     cap_user_time_zero;
161174 -       bool                     cap_user_time_short;
161175 +       __u8                     cap_user_time_zero;
161176 +       __u8                     cap_user_time_short;
161177 +       __u8                     reserved[6];   /* For alignment */
161180  struct perf_record_header_feature {
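
The bool members become fixed-width __u8 plus explicit reserved padding because this struct describes an on-disk record, and event_contains() is the companion trick: perf records carry their own header.size, so a reader can tell whether the producer was new enough to have written a given trailing field. A simplified stand-in (our struct, not the real perf_record_time_conv):

#include <stddef.h>
#include <stdio.h>

struct header { unsigned short size; };

struct record_v2 {
        struct header header;
        unsigned long long time_zero;     /* original field */
        unsigned long long time_cycles;   /* appended later */
};

#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))

int main(void)
{
        /* Simulate a record written by an old producer: size covers the
         * header and time_zero only. */
        struct record_v2 rec = { .header.size = sizeof(struct header) +
                                                sizeof(unsigned long long) };

        if (event_contains(rec, time_cycles))
                printf("time_cycles = %llu\n", rec.time_cycles);
        else
                puts("record predates time_cycles; skipping");
        return 0;
}

The jitdump.c and session.c hunks later in this patch use exactly this gate before touching time_cycles and the fields after it.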
161181 diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
161182 index d8e59d31399a..c955cd683e22 100644
161183 --- a/tools/perf/Makefile.config
161184 +++ b/tools/perf/Makefile.config
161185 @@ -530,6 +530,7 @@ ifndef NO_LIBELF
161186        ifdef LIBBPF_DYNAMIC
161187          ifeq ($(feature-libbpf), 1)
161188            EXTLIBS += -lbpf
161189 +          $(call detected,CONFIG_LIBBPF_DYNAMIC)
161190          else
161191            dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
161192          endif
161193 diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
161194 index 7bf01cbe582f..86d1b0fae558 100644
161195 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
161196 +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
161197 @@ -364,6 +364,10 @@
161198  440    common  process_madvise         sys_process_madvise
161199  441    common  epoll_pwait2            sys_epoll_pwait2
161200  442    common  mount_setattr           sys_mount_setattr
161201 +443    common  futex_wait              sys_futex_wait
161202 +444    common  futex_wake              sys_futex_wake
161203 +445    common  futex_waitv             sys_futex_waitv
161204 +446    common  futex_requeue           sys_futex_requeue
161207  # Due to a historical design error, certain syscalls are numbered differently
161208 diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
161209 index eac36afab2b3..12346844b354 100644
161210 --- a/tools/perf/bench/bench.h
161211 +++ b/tools/perf/bench/bench.h
161212 @@ -38,9 +38,13 @@ int bench_mem_memcpy(int argc, const char **argv);
161213  int bench_mem_memset(int argc, const char **argv);
161214  int bench_mem_find_bit(int argc, const char **argv);
161215  int bench_futex_hash(int argc, const char **argv);
161216 +int bench_futex2_hash(int argc, const char **argv);
161217  int bench_futex_wake(int argc, const char **argv);
161218 +int bench_futex2_wake(int argc, const char **argv);
161219  int bench_futex_wake_parallel(int argc, const char **argv);
161220 +int bench_futex2_wake_parallel(int argc, const char **argv);
161221  int bench_futex_requeue(int argc, const char **argv);
161222 +int bench_futex2_requeue(int argc, const char **argv);
161223  /* pi futexes */
161224  int bench_futex_lock_pi(int argc, const char **argv);
161225  int bench_epoll_wait(int argc, const char **argv);
161226 diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
161227 index b65373ce5c4f..1068749af40c 100644
161228 --- a/tools/perf/bench/futex-hash.c
161229 +++ b/tools/perf/bench/futex-hash.c
161230 @@ -33,7 +33,7 @@ static unsigned int nthreads = 0;
161231  static unsigned int nsecs    = 10;
161232  /* amount of futexes per thread */
161233  static unsigned int nfutexes = 1024;
161234 -static bool fshared = false, done = false, silent = false;
161235 +static bool fshared = false, done = false, silent = false, futex2 = false;
161236  static int futex_flag = 0;
161238  struct timeval bench__start, bench__end, bench__runtime;
161239 @@ -85,7 +85,10 @@ static void *workerfn(void *arg)
161240                          * such as internal waitqueue handling, thus enlarging
161241                          * the critical region protected by hb->lock.
161242                          */
161243 -                       ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
161244 +                       if (!futex2)
161245 +                               ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
161246 +                       else
161247 +                               ret = futex2_wait(&w->futex[i], 1234, futex_flag, NULL);
161248                         if (!silent &&
161249                             (!ret || errno != EAGAIN || errno != EWOULDBLOCK))
161250                                 warn("Non-expected futex return call");
161251 @@ -116,7 +119,7 @@ static void print_summary(void)
161252                (int)bench__runtime.tv_sec);
161255 -int bench_futex_hash(int argc, const char **argv)
161256 +static int __bench_futex_hash(int argc, const char **argv)
161258         int ret = 0;
161259         cpu_set_t cpuset;
161260 @@ -148,7 +151,9 @@ int bench_futex_hash(int argc, const char **argv)
161261         if (!worker)
161262                 goto errmem;
161264 -       if (!fshared)
161265 +       if (futex2)
161266 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
161267 +       else if (!fshared)
161268                 futex_flag = FUTEX_PRIVATE_FLAG;
161270         printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
161271 @@ -228,3 +233,14 @@ int bench_futex_hash(int argc, const char **argv)
161272  errmem:
161273         err(EXIT_FAILURE, "calloc");
161276 +int bench_futex_hash(int argc, const char **argv)
161278 +       return __bench_futex_hash(argc, argv);
161281 +int bench_futex2_hash(int argc, const char **argv)
161283 +       futex2 = true;
161284 +       return __bench_futex_hash(argc, argv);
161286 diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
161287 index 5fa23295ee5f..6cdd649b54f4 100644
161288 --- a/tools/perf/bench/futex-requeue.c
161289 +++ b/tools/perf/bench/futex-requeue.c
161290 @@ -2,8 +2,8 @@
161292   * Copyright (C) 2013  Davidlohr Bueso <davidlohr@hp.com>
161293   *
161294 - * futex-requeue: Block a bunch of threads on futex1 and requeue them
161295 - *                on futex2, N at a time.
161296 + * futex-requeue: Block a bunch of threads on addr1 and requeue them
161297 + *                on addr2, N at a time.
161298   *
161299   * This program is particularly useful to measure the latency of nthread
161300   * requeues without waking up any tasks -- thus mimicking a regular futex_wait.
161301 @@ -28,7 +28,10 @@
161302  #include <stdlib.h>
161303  #include <sys/time.h>
161305 -static u_int32_t futex1 = 0, futex2 = 0;
161306 +static u_int32_t addr1 = 0, addr2 = 0;
161308 +static struct futex_requeue rq1 = { .uaddr = &addr1, .flags = FUTEX_32 };
161309 +static struct futex_requeue rq2 = { .uaddr = &addr2, .flags = FUTEX_32 };
161312   * How many tasks to requeue at a time.
161313 @@ -37,7 +40,7 @@ static u_int32_t futex1 = 0, futex2 = 0;
161314  static unsigned int nrequeue = 1;
161316  static pthread_t *worker;
161317 -static bool done = false, silent = false, fshared = false;
161318 +static bool done = false, silent = false, fshared = false, futex2 = false;
161319  static pthread_mutex_t thread_lock;
161320  static pthread_cond_t thread_parent, thread_worker;
161321  static struct stats requeuetime_stats, requeued_stats;
161322 @@ -79,7 +82,11 @@ static void *workerfn(void *arg __maybe_unused)
161323         pthread_cond_wait(&thread_worker, &thread_lock);
161324         pthread_mutex_unlock(&thread_lock);
161326 -       futex_wait(&futex1, 0, NULL, futex_flag);
161327 +       if (!futex2)
161328 +               futex_wait(&addr1, 0, NULL, futex_flag);
161329 +       else
161330 +               futex2_wait(&addr1, 0, futex_flag, NULL);
161332         return NULL;
161335 @@ -111,7 +118,7 @@ static void toggle_done(int sig __maybe_unused,
161336         done = true;
161339 -int bench_futex_requeue(int argc, const char **argv)
161340 +static int __bench_futex_requeue(int argc, const char **argv)
161342         int ret = 0;
161343         unsigned int i, j;
161344 @@ -139,15 +146,20 @@ int bench_futex_requeue(int argc, const char **argv)
161345         if (!worker)
161346                 err(EXIT_FAILURE, "calloc");
161348 -       if (!fshared)
161349 +       if (futex2) {
161350 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
161351 +               rq1.flags |= FUTEX_SHARED_FLAG * fshared;
161352 +               rq2.flags |= FUTEX_SHARED_FLAG * fshared;
161353 +       } else if (!fshared) {
161354                 futex_flag = FUTEX_PRIVATE_FLAG;
161355 +       }
161357         if (nrequeue > nthreads)
161358                 nrequeue = nthreads;
161360         printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), "
161361                "%d at a time.\n\n",  getpid(), nthreads,
161362 -              fshared ? "shared":"private", &futex1, &futex2, nrequeue);
161363 +              fshared ? "shared":"private", &addr1, &addr2, nrequeue);
161365         init_stats(&requeued_stats);
161366         init_stats(&requeuetime_stats);
161367 @@ -176,11 +188,15 @@ int bench_futex_requeue(int argc, const char **argv)
161368                 gettimeofday(&start, NULL);
161369                 while (nrequeued < nthreads) {
161370                         /*
161371 -                        * Do not wakeup any tasks blocked on futex1, allowing
161372 +                        * Do not wakeup any tasks blocked on addr1, allowing
161373                          * us to really measure futex_wait functionality.
161374                          */
161375 -                       nrequeued += futex_cmp_requeue(&futex1, 0, &futex2, 0,
161376 -                                                      nrequeue, futex_flag);
161377 +                       if (!futex2)
161378 +                               nrequeued += futex_cmp_requeue(&addr1, 0, &addr2,
161379 +                                                       0, nrequeue, futex_flag);
161380 +                       else
161381 +                               nrequeued += futex2_requeue(&rq1, &rq2,
161382 +                                                       0, nrequeue, 0, 0);
161383                 }
161385                 gettimeofday(&end, NULL);
161386 @@ -194,8 +210,12 @@ int bench_futex_requeue(int argc, const char **argv)
161387                                j + 1, nrequeued, nthreads, runtime.tv_usec / (double)USEC_PER_MSEC);
161388                 }
161390 -               /* everybody should be blocked on futex2, wake'em up */
161391 -               nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
161392 +               /* everybody should be blocked on addr2, wake'em up */
161393 +               if (!futex2)
161394 +                       nrequeued = futex_wake(&addr2, nrequeued, futex_flag);
161395 +               else
161396 +                       nrequeued = futex2_wake(&addr2, nrequeued, futex_flag);
161398                 if (nthreads != nrequeued)
161399                         warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
161401 @@ -220,3 +240,14 @@ int bench_futex_requeue(int argc, const char **argv)
161402         usage_with_options(bench_futex_requeue_usage, options);
161403         exit(EXIT_FAILURE);
161406 +int bench_futex_requeue(int argc, const char **argv)
161408 +       return __bench_futex_requeue(argc, argv);
161411 +int bench_futex2_requeue(int argc, const char **argv)
161413 +       futex2 = true;
161414 +       return __bench_futex_requeue(argc, argv);
161416 diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
161417 index 6e6f5247e1fe..cac90fc0bfb3 100644
161418 --- a/tools/perf/bench/futex-wake-parallel.c
161419 +++ b/tools/perf/bench/futex-wake-parallel.c
161420 @@ -17,6 +17,12 @@ int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe
161421         pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
161422         return 0;
161425 +int bench_futex2_wake_parallel(int argc __maybe_unused, const char **argv __maybe_unused)
161427 +       pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
161428 +       return 0;
161430  #else /* HAVE_PTHREAD_BARRIER */
161431  /* For the CLR_() macros */
161432  #include <string.h>
161433 @@ -47,7 +53,7 @@ static unsigned int nwakes = 1;
161434  static u_int32_t futex = 0;
161436  static pthread_t *blocked_worker;
161437 -static bool done = false, silent = false, fshared = false;
161438 +static bool done = false, silent = false, fshared = false, futex2 = false;
161439  static unsigned int nblocked_threads = 0, nwaking_threads = 0;
161440  static pthread_mutex_t thread_lock;
161441  static pthread_cond_t thread_parent, thread_worker;
161442 @@ -78,7 +84,11 @@ static void *waking_workerfn(void *arg)
161444         gettimeofday(&start, NULL);
161446 -       waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
161447 +       if (!futex2)
161448 +               waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
161449 +       else
161450 +               waker->nwoken = futex2_wake(&futex, nwakes, futex_flag);
161452         if (waker->nwoken != nwakes)
161453                 warnx("couldn't wakeup all tasks (%d/%d)",
161454                       waker->nwoken, nwakes);
161455 @@ -129,8 +139,13 @@ static void *blocked_workerfn(void *arg __maybe_unused)
161456         pthread_mutex_unlock(&thread_lock);
161458         while (1) { /* handle spurious wakeups */
161459 -               if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
161460 -                       break;
161461 +               if (!futex2) {
161462 +                       if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
161463 +                               break;
161464 +               } else {
161465 +                       if (futex2_wait(&futex, 0, futex_flag, NULL) != EINTR)
161466 +                               break;
161467 +               }
161468         }
161470         pthread_exit(NULL);
161471 @@ -217,7 +232,7 @@ static void toggle_done(int sig __maybe_unused,
161472         done = true;
161475 -int bench_futex_wake_parallel(int argc, const char **argv)
161476 +static int __bench_futex_wake_parallel(int argc, const char **argv)
161478         int ret = 0;
161479         unsigned int i, j;
161480 @@ -261,7 +276,9 @@ int bench_futex_wake_parallel(int argc, const char **argv)
161481         if (!blocked_worker)
161482                 err(EXIT_FAILURE, "calloc");
161484 -       if (!fshared)
161485 +       if (futex2)
161486 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
161487 +       else if (!fshared)
161488                 futex_flag = FUTEX_PRIVATE_FLAG;
161490         printf("Run summary [PID %d]: blocking on %d threads (at [%s] "
161491 @@ -321,4 +338,16 @@ int bench_futex_wake_parallel(int argc, const char **argv)
161492         free(blocked_worker);
161493         return ret;
161496 +int bench_futex_wake_parallel(int argc, const char **argv)
161498 +       return __bench_futex_wake_parallel(argc, argv);
161501 +int bench_futex2_wake_parallel(int argc, const char **argv)
161503 +       futex2 = true;
161504 +       return __bench_futex_wake_parallel(argc, argv);
161507  #endif /* HAVE_PTHREAD_BARRIER */
161508 diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
161509 index 6d217868f53c..546d2818eed8 100644
161510 --- a/tools/perf/bench/futex-wake.c
161511 +++ b/tools/perf/bench/futex-wake.c
161512 @@ -38,7 +38,7 @@ static u_int32_t futex1 = 0;
161513  static unsigned int nwakes = 1;
161515  pthread_t *worker;
161516 -static bool done = false, silent = false, fshared = false;
161517 +static bool done = false, silent = false, fshared = false, futex2 = false;
161518  static pthread_mutex_t thread_lock;
161519  static pthread_cond_t thread_parent, thread_worker;
161520  static struct stats waketime_stats, wakeup_stats;
161521 @@ -68,8 +68,13 @@ static void *workerfn(void *arg __maybe_unused)
161522         pthread_mutex_unlock(&thread_lock);
161524         while (1) {
161525 -               if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
161526 -                       break;
161527 +               if (!futex2) {
161528 +                       if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
161529 +                               break;
161530 +               } else {
161531 +                       if (futex2_wait(&futex1, 0, futex_flag, NULL) != EINTR)
161532 +                               break;
161533 +               }
161534         }
161536         pthread_exit(NULL);
161537 @@ -117,7 +122,7 @@ static void toggle_done(int sig __maybe_unused,
161538         done = true;
161541 -int bench_futex_wake(int argc, const char **argv)
161542 +static int __bench_futex_wake(int argc, const char **argv)
161544         int ret = 0;
161545         unsigned int i, j;
161546 @@ -147,7 +152,9 @@ int bench_futex_wake(int argc, const char **argv)
161547         if (!worker)
161548                 err(EXIT_FAILURE, "calloc");
161550 -       if (!fshared)
161551 +       if (futex2)
161552 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
161553 +       else if (!fshared)
161554                 futex_flag = FUTEX_PRIVATE_FLAG;
161556         printf("Run summary [PID %d]: blocking on %d threads (at [%s] futex %p), "
161557 @@ -179,9 +186,14 @@ int bench_futex_wake(int argc, const char **argv)
161559                 /* Ok, all threads are patiently blocked, start waking folks up */
161560                 gettimeofday(&start, NULL);
161561 -               while (nwoken != nthreads)
161562 -                       nwoken += futex_wake(&futex1, nwakes, futex_flag);
161563 +               while (nwoken != nthreads) {
161564 +                       if (!futex2)
161565 +                               nwoken += futex_wake(&futex1, nwakes, futex_flag);
161566 +                       else
161567 +                               nwoken += futex2_wake(&futex1, nwakes, futex_flag);
161568 +               }
161569                 gettimeofday(&end, NULL);
161571                 timersub(&end, &start, &runtime);
161573                 update_stats(&wakeup_stats, nwoken);
161574 @@ -211,3 +223,14 @@ int bench_futex_wake(int argc, const char **argv)
161575         free(worker);
161576         return ret;
161579 +int bench_futex_wake(int argc, const char **argv)
161581 +       return __bench_futex_wake(argc, argv);
161584 +int bench_futex2_wake(int argc, const char **argv)
161586 +       futex2 = true;
161587 +       return __bench_futex_wake(argc, argv);
161589 diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
161590 index 31b53cc7d5bc..6b2213cf3f64 100644
161591 --- a/tools/perf/bench/futex.h
161592 +++ b/tools/perf/bench/futex.h
161593 @@ -86,4 +86,51 @@ futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wak
161594         return futex(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2,
161595                  val, opflags);
161599 + * futex2_wait - Wait at uaddr if *uaddr == val, until timo.
161600 + * @uaddr: User address to wait for
161601 + * @val:   Expected value at uaddr
161602 + * @flags: Operation options
161603 + * @timo:  Optional timeout
161605 + * Return: 0 on success, error code otherwise
161606 + */
161607 +static inline int futex2_wait(volatile void *uaddr, unsigned long val,
161608 +                             unsigned long flags, struct timespec *timo)
161610 +       return syscall(__NR_futex_wait, uaddr, val, flags, timo);
161614 + * futex2_wake - Wake a number of waiters waiting at uaddr
161615 + * @uaddr: Address to wake
161616 + * @nr:    Number of waiters to wake
161617 + * @flags: Operation options
161619 + * Return: number of woken waiters
161620 + */
161621 +static inline int futex2_wake(volatile void *uaddr, unsigned int nr, unsigned long flags)
161623 +       return syscall(__NR_futex_wake, uaddr, nr, flags);
161627 + * futex2_requeue - Requeue waiters from an address to another one
161628 + * @uaddr1:     Address where waiters are currently waiting
161629 + * @uaddr2:     New address to wait
161630 + * @nr_wake:    Number of waiters at uaddr1 to be woken
161631 + * @nr_requeue: After waking nr_wake waiters, number of waiters to be requeued
161632 + * @cmpval:     Expected value at uaddr1
161633 + * @flags:      Operation options
161635 + * Return: number of woken waiters plus waiters requeued from uaddr1
161636 + */
161637 +static inline int futex2_requeue(volatile struct futex_requeue *uaddr1,
161638 +                                volatile struct futex_requeue *uaddr2,
161639 +                                unsigned int nr_wake, unsigned int nr_requeue,
161640 +                                unsigned int cmpval, unsigned long flags)
161642 +       return syscall(__NR_futex_requeue, uaddr1, uaddr2, nr_wake, nr_requeue, cmpval, flags);
161644  #endif /* _FUTEX_H */
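
Putting the wrappers to work, a minimal wait/wake handshake looks like the sketch below. This is our usage example, not part of the patch: it assumes a kernel carrying the futex2 syscalls and builds against the bench futex.h above (which also supplies FUTEX_32 via the futex2 uapi headers).

#include <pthread.h>
#include <stdio.h>
#include "futex.h"      /* the bench header patched above */

static unsigned int futex_word;

static void *waiter(void *arg)
{
        (void)arg;
        /* Sleeps only while the word is still 0; a racing store makes
         * futex2_wait() return with EAGAIN and the loop re-checks. */
        while (futex_word == 0)
                futex2_wait(&futex_word, 0, FUTEX_32, NULL);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        __atomic_store_n(&futex_word, 1, __ATOMIC_SEQ_CST);
        futex2_wake(&futex_word, 1, FUTEX_32);
        pthread_join(&t, NULL);
        puts("waiter woken");
        return 0;
}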
161645 diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
161646 index 62a7b7420a44..e41a95ad2db6 100644
161647 --- a/tools/perf/builtin-bench.c
161648 +++ b/tools/perf/builtin-bench.c
161649 @@ -12,10 +12,11 @@
161650   *
161651   *  sched ... scheduler and IPC performance
161652   *  syscall ... System call performance
161653 - *  mem   ... memory access performance
161654 - *  numa  ... NUMA scheduling and MM performance
161655 - *  futex ... Futex performance
161656 - *  epoll ... Event poll performance
161657 + *  mem    ... memory access performance
161658 + *  numa   ... NUMA scheduling and MM performance
161659 + *  futex  ... Futex performance
161660 + *  futex2 ... Futex2 performance
161661 + *  epoll  ... Event poll performance
161662   */
161663  #include <subcmd/parse-options.h>
161664  #include "builtin.h"
161665 @@ -75,6 +76,14 @@ static struct bench futex_benchmarks[] = {
161666         { NULL,         NULL,                                           NULL                    }
161669 +static struct bench futex2_benchmarks[] = {
161670 +       { "hash",          "Benchmark for futex2 hash table",            bench_futex2_hash      },
161671 +       { "wake",          "Benchmark for futex2 wake calls",            bench_futex2_wake      },
161672 +       { "wake-parallel", "Benchmark for parallel futex2 wake calls",   bench_futex2_wake_parallel },
161673 +       { "requeue",       "Benchmark for futex2 requeue calls",         bench_futex2_requeue   },
161674 +       { NULL,         NULL,                                           NULL                    }
161677  #ifdef HAVE_EVENTFD_SUPPORT
161678  static struct bench epoll_benchmarks[] = {
161679         { "wait",       "Benchmark epoll concurrent epoll_waits",       bench_epoll_wait        },
161680 @@ -105,6 +114,7 @@ static struct collection collections[] = {
161681         { "numa",       "NUMA scheduling and MM benchmarks",            numa_benchmarks         },
161682  #endif
161683         {"futex",       "Futex stressing benchmarks",                   futex_benchmarks        },
161684 +       {"futex2",      "Futex2 stressing benchmarks",                  futex2_benchmarks        },
161685  #ifdef HAVE_EVENTFD_SUPPORT
161686         {"epoll",       "Epoll stressing benchmarks",                   epoll_benchmarks        },
161687  #endif
161688 diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
161689 index 4ea7ec4f496e..008f1683e540 100644
161690 --- a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
161691 +++ b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
161692 @@ -275,7 +275,7 @@
161693    {
161694      "EventName": "l2_pf_hit_l2",
161695      "EventCode": "0x70",
161696 -    "BriefDescription": "L2 prefetch hit in L2.",
161697 +    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
161698      "UMask": "0xff"
161699    },
161700    {
161701 diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
161702 index 2cfe2d2f3bfd..3c954543d1ae 100644
161703 --- a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
161704 +++ b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
161705 @@ -79,10 +79,10 @@
161706      "UMask": "0x70"
161707    },
161708    {
161709 -    "MetricName": "l2_cache_hits_from_l2_hwpf",
161710 +    "EventName": "l2_cache_hits_from_l2_hwpf",
161711 +    "EventCode": "0x70",
161712      "BriefDescription": "L2 Cache Hits from L2 HWPF",
161713 -    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
161714 -    "MetricGroup": "l2_cache"
161715 +    "UMask": "0xff"
161716    },
161717    {
161718      "EventName": "l3_accesses",
161719 diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
161720 index f61b982f83ca..8ba84a48188d 100644
161721 --- a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
161722 +++ b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
161723 @@ -205,7 +205,7 @@
161724    {
161725      "EventName": "l2_pf_hit_l2",
161726      "EventCode": "0x70",
161727 -    "BriefDescription": "L2 prefetch hit in L2.",
161728 +    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
161729      "UMask": "0xff"
161730    },
161731    {
161732 diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
161733 index 2ef91e25e661..1c624cee9ef4 100644
161734 --- a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
161735 +++ b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
161736 @@ -79,10 +79,10 @@
161737      "UMask": "0x70"
161738    },
161739    {
161740 -    "MetricName": "l2_cache_hits_from_l2_hwpf",
161741 +    "EventName": "l2_cache_hits_from_l2_hwpf",
161742 +    "EventCode": "0x70",
161743      "BriefDescription": "L2 Cache Hits from L2 HWPF",
161744 -    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
161745 -    "MetricGroup": "l2_cache"
161746 +    "UMask": "0xff"
161747    },
161748    {
161749      "EventName": "l3_accesses",
161750 diff --git a/tools/perf/trace/beauty/fsconfig.sh b/tools/perf/trace/beauty/fsconfig.sh
161751 index 83fb24df05c9..bc6ef7bb7a5f 100755
161752 --- a/tools/perf/trace/beauty/fsconfig.sh
161753 +++ b/tools/perf/trace/beauty/fsconfig.sh
161754 @@ -10,8 +10,7 @@ fi
161755  linux_mount=${linux_header_dir}/mount.h
161757  printf "static const char *fsconfig_cmds[] = {\n"
161758 -regex='^[[:space:]]*+FSCONFIG_([[:alnum:]_]+)[[:space:]]*=[[:space:]]*([[:digit:]]+)[[:space:]]*,[[:space:]]*.*'
161759 -egrep $regex ${linux_mount} | \
161760 -       sed -r "s/$regex/\2 \1/g"       | \
161761 -       xargs printf "\t[%s] = \"%s\",\n"
161762 +ms='[[:space:]]*'
161763 +sed -nr "s/^${ms}FSCONFIG_([[:alnum:]_]+)${ms}=${ms}([[:digit:]]+)${ms},.*/\t[\2] = \"\1\",/p" \
161764 +       ${linux_mount}
161765  printf "};\n"
161766 diff --git a/tools/perf/util/Build b/tools/perf/util/Build
161767 index e3e12f9d4733..5a296ac69415 100644
161768 --- a/tools/perf/util/Build
161769 +++ b/tools/perf/util/Build
161770 @@ -141,7 +141,14 @@ perf-$(CONFIG_LIBELF) += symbol-elf.o
161771  perf-$(CONFIG_LIBELF) += probe-file.o
161772  perf-$(CONFIG_LIBELF) += probe-event.o
161774 +ifdef CONFIG_LIBBPF_DYNAMIC
161775 +  hashmap := 1
161776 +endif
161777  ifndef CONFIG_LIBBPF
161778 +  hashmap := 1
161779 +endif
161781 +ifdef hashmap
161782  perf-y += hashmap.o
161783  endif
161785 diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
161786 index 9760d8e7b386..917a9c707371 100644
161787 --- a/tools/perf/util/jitdump.c
161788 +++ b/tools/perf/util/jitdump.c
161789 @@ -396,21 +396,31 @@ static pid_t jr_entry_tid(struct jit_buf_desc *jd, union jr_entry *jr)
161791  static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
161793 -       struct perf_tsc_conversion tc;
161794 +       struct perf_tsc_conversion tc = { .time_shift = 0, };
161795 +       struct perf_record_time_conv *time_conv = &jd->session->time_conv;
161797         if (!jd->use_arch_timestamp)
161798                 return timestamp;
161800 -       tc.time_shift          = jd->session->time_conv.time_shift;
161801 -       tc.time_mult           = jd->session->time_conv.time_mult;
161802 -       tc.time_zero           = jd->session->time_conv.time_zero;
161803 -       tc.time_cycles         = jd->session->time_conv.time_cycles;
161804 -       tc.time_mask           = jd->session->time_conv.time_mask;
161805 -       tc.cap_user_time_zero  = jd->session->time_conv.cap_user_time_zero;
161806 -       tc.cap_user_time_short = jd->session->time_conv.cap_user_time_short;
161807 +       tc.time_shift = time_conv->time_shift;
161808 +       tc.time_mult  = time_conv->time_mult;
161809 +       tc.time_zero  = time_conv->time_zero;
161811 -       if (!tc.cap_user_time_zero)
161812 -               return 0;
161813 +       /*
161814 +        * The TIME_CONV event was extended with the fields starting at
161815 +        * "time_cycles" when cap_user_time_short support was added.  For
161816 +        * backward compatibility, check the event size and assign these
161817 +        * extended fields only if the event actually contains them.
161818 +        */
161819 +       if (event_contains(*time_conv, time_cycles)) {
161820 +               tc.time_cycles         = time_conv->time_cycles;
161821 +               tc.time_mask           = time_conv->time_mask;
161822 +               tc.cap_user_time_zero  = time_conv->cap_user_time_zero;
161823 +               tc.cap_user_time_short = time_conv->cap_user_time_short;
161825 +               if (!tc.cap_user_time_zero)
161826 +                       return 0;
161827 +       }
161829         return tsc_to_perf_time(timestamp, &tc);
161831 diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
161832 index 859832a82496..e9d4e6f4bdf3 100644
161833 --- a/tools/perf/util/session.c
161834 +++ b/tools/perf/util/session.c
161835 @@ -949,6 +949,19 @@ static void perf_event__stat_round_swap(union perf_event *event,
161836         event->stat_round.time = bswap_64(event->stat_round.time);
161839 +static void perf_event__time_conv_swap(union perf_event *event,
161840 +                                      bool sample_id_all __maybe_unused)
161842 +       event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
161843 +       event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
161844 +       event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);
161846 +       if (event_contains(event->time_conv, time_cycles)) {
161847 +               event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
161848 +               event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
161849 +       }
161852  typedef void (*perf_event__swap_op)(union perf_event *event,
161853                                     bool sample_id_all);
161855 @@ -985,7 +998,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
161856         [PERF_RECORD_STAT]                = perf_event__stat_swap,
161857         [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
161858         [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
161859 -       [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
161860 +       [PERF_RECORD_TIME_CONV]           = perf_event__time_conv_swap,
161861         [PERF_RECORD_HEADER_MAX]          = NULL,
161864 diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
161865 index 35c936ce33ef..2664fb65e47a 100644
161866 --- a/tools/perf/util/symbol_fprintf.c
161867 +++ b/tools/perf/util/symbol_fprintf.c
161868 @@ -68,7 +68,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
161870         for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
161871                 pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
161872 -               fprintf(fp, "%s\n", pos->sym.name);
161873 +               ret += fprintf(fp, "%s\n", pos->sym.name);
161874         }
161876         return ret;
161877 diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
161878 index 8e54ce47648e..3bf1820c0da1 100644
161879 --- a/tools/power/x86/intel-speed-select/isst-display.c
161880 +++ b/tools/power/x86/intel-speed-select/isst-display.c
161881 @@ -25,10 +25,14 @@ static void printcpulist(int str_len, char *str, int mask_size,
161882                         index = snprintf(&str[curr_index],
161883                                          str_len - curr_index, ",");
161884                         curr_index += index;
161885 +                       if (curr_index >= str_len)
161886 +                               break;
161887                 }
161888                 index = snprintf(&str[curr_index], str_len - curr_index, "%d",
161889                                  i);
161890                 curr_index += index;
161891 +               if (curr_index >= str_len)
161892 +                       break;
161893                 first = 0;
161894         }
161896 @@ -64,10 +68,14 @@ static void printcpumask(int str_len, char *str, int mask_size,
161897                 index = snprintf(&str[curr_index], str_len - curr_index, "%08x",
161898                                  mask[i]);
161899                 curr_index += index;
161900 +               if (curr_index >= str_len)
161901 +                       break;
161902                 if (i) {
161903                         strncat(&str[curr_index], ",", str_len - curr_index);
161904                         curr_index++;
161905                 }
161906 +               if (curr_index >= str_len)
161907 +                       break;
161908         }
161910         free(mask);
161911 @@ -185,7 +193,7 @@ static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
161912                                           int disp_level)
161914         char header[256];
161915 -       char value[256];
161916 +       char value[512];
161918         snprintf(header, sizeof(header), "speed-select-base-freq-properties");
161919         format_and_print(outf, disp_level, header, NULL);
161920 @@ -349,7 +357,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
161921                                    struct isst_pkg_ctdp *pkg_dev)
161923         char header[256];
161924 -       char value[256];
161925 +       char value[512];
161926         static int level;
161927         int i;
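
The isst-display.c fixes are snprintf truncation guards: snprintf() returns the length it would have written, so adding that to curr_index unchecked can push the next write's base pointer past the end of str (the value buffers are also doubled to 512 for headroom). The safe accumulation pattern on its own (our sketch):

#include <stdio.h>

static void format_list(char *str, int str_len, const int *vals, int n)
{
        int i, curr_index = 0;

        for (i = 0; i < n; i++) {
                int index = snprintf(&str[curr_index], str_len - curr_index,
                                     i ? ",%d" : "%d", vals[i]);
                curr_index += index;
                if (curr_index >= str_len)
                        break;  /* buffer full: another write would overflow */
        }
}

int main(void)
{
        char buf[8];
        int vals[] = { 1, 22, 333, 4444 };

        format_list(buf, sizeof(buf), vals, 4);
        printf("%s\n", buf);    /* truncated but in-bounds and terminated */
        return 0;
}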
161929 diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
161930 index a7c4f0772e53..002697021474 100644
161931 --- a/tools/power/x86/turbostat/turbostat.c
161932 +++ b/tools/power/x86/turbostat/turbostat.c
161933 @@ -291,13 +291,16 @@ struct msr_sum_array {
161934  /* The percpu MSR sum array.*/
161935  struct msr_sum_array *per_cpu_msr_sum;
161937 -int idx_to_offset(int idx)
161938 +off_t idx_to_offset(int idx)
161940 -       int offset;
161941 +       off_t offset;
161943         switch (idx) {
161944         case IDX_PKG_ENERGY:
161945 -               offset = MSR_PKG_ENERGY_STATUS;
161946 +               if (do_rapl & RAPL_AMD_F17H)
161947 +                       offset = MSR_PKG_ENERGY_STAT;
161948 +               else
161949 +                       offset = MSR_PKG_ENERGY_STATUS;
161950                 break;
161951         case IDX_DRAM_ENERGY:
161952                 offset = MSR_DRAM_ENERGY_STATUS;
161953 @@ -320,12 +323,13 @@ int idx_to_offset(int idx)
161954         return offset;
161957 -int offset_to_idx(int offset)
161958 +int offset_to_idx(off_t offset)
161960         int idx;
161962         switch (offset) {
161963         case MSR_PKG_ENERGY_STATUS:
161964 +       case MSR_PKG_ENERGY_STAT:
161965                 idx = IDX_PKG_ENERGY;
161966                 break;
161967         case MSR_DRAM_ENERGY_STATUS:
161968 @@ -353,7 +357,7 @@ int idx_valid(int idx)
161970         switch (idx) {
161971         case IDX_PKG_ENERGY:
161972 -               return do_rapl & RAPL_PKG;
161973 +               return do_rapl & (RAPL_PKG | RAPL_AMD_F17H);
161974         case IDX_DRAM_ENERGY:
161975                 return do_rapl & RAPL_DRAM;
161976         case IDX_PP0_ENERGY:
161977 @@ -3272,7 +3276,7 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
161979         for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) {
161980                 unsigned long long msr_cur, msr_last;
161981 -               int offset;
161982 +               off_t offset;
161984                 if (!idx_valid(i))
161985                         continue;
161986 @@ -3281,7 +3285,8 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
161987                         continue;
161988                 ret = get_msr(cpu, offset, &msr_cur);
161989                 if (ret) {
161990 -                       fprintf(outf, "Can not update msr(0x%x)\n", offset);
161991 +                       fprintf(outf, "Can not update msr(0x%llx)\n",
161992 +                               (unsigned long long)offset);
161993                         continue;
161994                 }
161996 @@ -4817,33 +4822,12 @@ double discover_bclk(unsigned int family, unsigned int model)
161997   * below this value, including the Digital Thermal Sensor (DTS),
161998   * Package Thermal Management Sensor (PTM), and thermal event thresholds.
161999   */
162000 -int read_tcc_activation_temp()
162001 +int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
162003         unsigned long long msr;
162004 -       unsigned int tcc, target_c, offset_c;
162006 -       /* Temperature Target MSR is Nehalem and newer only */
162007 -       if (!do_nhm_platform_info)
162008 -               return 0;
162010 -       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
162011 -               return 0;
162013 -       target_c = (msr >> 16) & 0xFF;
162015 -       offset_c = (msr >> 24) & 0xF;
162017 -       tcc = target_c - offset_c;
162019 -       if (!quiet)
162020 -               fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n",
162021 -                       base_cpu, msr, tcc, target_c, offset_c);
162023 -       return tcc;
162025 +       unsigned int target_c_local;
162026 +       int cpu;
162028 -int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
162030         /* tcc_activation_temp is used only for dts or ptm */
162031         if (!(do_dts || do_ptm))
162032                 return 0;
162033 @@ -4852,18 +4836,43 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
162034         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
162035                 return 0;
162037 +       cpu = t->cpu_id;
162038 +       if (cpu_migrate(cpu)) {
162039 +               fprintf(outf, "Could not migrate to CPU %d\n", cpu);
162040 +               return -1;
162041 +       }
162043         if (tcc_activation_temp_override != 0) {
162044                 tcc_activation_temp = tcc_activation_temp_override;
162045 -               fprintf(outf, "Using cmdline TCC Target (%d C)\n", tcc_activation_temp);
162046 +               fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
162047 +                       cpu, tcc_activation_temp);
162048                 return 0;
162049         }
162051 -       tcc_activation_temp = read_tcc_activation_temp();
162052 -       if (tcc_activation_temp)
162053 -               return 0;
162054 +       /* Temperature Target MSR is Nehalem and newer only */
162055 +       if (!do_nhm_platform_info)
162056 +               goto guess;
162058 +       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
162059 +               goto guess;
162061 +       target_c_local = (msr >> 16) & 0xFF;
162063 +       if (!quiet)
162064 +               fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
162065 +                       cpu, msr, target_c_local);
162067 +       if (!target_c_local)
162068 +               goto guess;
162070 +       tcc_activation_temp = target_c_local;
162072 +       return 0;
162074 +guess:
162075         tcc_activation_temp = TJMAX_DEFAULT;
162076 -       fprintf(outf, "Guessing tjMax %d C, Please use -T to specify\n", tcc_activation_temp);
162077 +       fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
162078 +               cpu, tcc_activation_temp);
162080         return 0;
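
The turbostat hunks above widen MSR offsets from int to off_t because turbostat reads MSRs by pread()ing /dev/cpu/<n>/msr at the register's address, and AMD's package energy register (MSR_PKG_ENERGY_STAT, 0xc001029b) no longer fits a signed 32-bit int. A minimal sketch of that read path, assuming the msr driver is loaded and the caller has the required privileges (read_msr is an illustrative name, not the patch's get_msr()):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int read_msr(int cpu, off_t offset, uint64_t *val)
{
        char path[64];
        int fd;

        snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        /* AMD RAPL MSRs such as 0xc001029b overflow a 32-bit int,
         * hence the off_t offset in the hunk above. */
        if (pread(fd, val, sizeof(*val), offset) != sizeof(*val)) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}
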
162082 diff --git a/tools/spi/Makefile b/tools/spi/Makefile
162083 index ada881afb489..0aa6dbd31fb8 100644
162084 --- a/tools/spi/Makefile
162085 +++ b/tools/spi/Makefile
162086 @@ -25,11 +25,12 @@ include $(srctree)/tools/build/Makefile.include
162088  # We need the following to be outside of kernel tree
162090 -$(OUTPUT)include/linux/spi/spidev.h: ../../include/uapi/linux/spi/spidev.h
162091 +$(OUTPUT)include/linux/spi: ../../include/uapi/linux/spi
162092         mkdir -p $(OUTPUT)include/linux/spi 2>&1 || true
162093         ln -sf $(CURDIR)/../../include/uapi/linux/spi/spidev.h $@
162094 +       ln -sf $(CURDIR)/../../include/uapi/linux/spi/spi.h $@
162096 -prepare: $(OUTPUT)include/linux/spi/spidev.h
162097 +prepare: $(OUTPUT)include/linux/spi
162100  # spidev_test
162101 diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile
162102 index 0b3af552632a..df15d44aeb8d 100644
162103 --- a/tools/testing/selftests/arm64/mte/Makefile
162104 +++ b/tools/testing/selftests/arm64/mte/Makefile
162105 @@ -6,9 +6,7 @@ SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
162106  PROGS := $(patsubst %.c,%,$(SRCS))
162108  #Add mte compiler option
162109 -ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep gcc),)
162110  CFLAGS += -march=armv8.5-a+memtag
162111 -endif
162113  #check if the compiler works well
162114  mte_cc_support := $(shell if ($(CC) $(CFLAGS) -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
162115 diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
162116 index 39f8908988ea..70665ba88cbb 100644
162117 --- a/tools/testing/selftests/arm64/mte/mte_common_util.c
162118 +++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
162119 @@ -278,22 +278,13 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
162120         return 0;
162123 -#define ID_AA64PFR1_MTE_SHIFT          8
162124 -#define ID_AA64PFR1_MTE                        2
162126  int mte_default_setup(void)
162128 -       unsigned long hwcaps = getauxval(AT_HWCAP);
162129 +       unsigned long hwcaps2 = getauxval(AT_HWCAP2);
162130         unsigned long en = 0;
162131         int ret;
162133 -       if (!(hwcaps & HWCAP_CPUID)) {
162134 -               ksft_print_msg("FAIL: CPUID registers unavailable\n");
162135 -               return KSFT_FAIL;
162136 -       }
162137 -       /* Read ID_AA64PFR1_EL1 register */
162138 -       asm volatile("mrs %0, id_aa64pfr1_el1" : "=r"(hwcaps) : : "memory");
162139 -       if (((hwcaps >> ID_AA64PFR1_MTE_SHIFT) & MT_TAG_MASK) != ID_AA64PFR1_MTE) {
162140 +       if (!(hwcaps2 & HWCAP2_MTE)) {
162141                 ksft_print_msg("FAIL: MTE features unavailable\n");
162142                 return KSFT_SKIP;
162143         }
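
The mte_common_util.c hunk drops the raw mrs read of ID_AA64PFR1_EL1 (which only works when HWCAP_CPUID emulation is available) in favor of the HWCAP2_MTE auxv bit, which userspace can always query. A minimal sketch of that detection, assuming an aarch64 target (the fallback define matches the arm64 uapi value):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_MTE
#define HWCAP2_MTE (1UL << 18)  /* arm64 uapi bit; defined for older headers */
#endif

int main(void)
{
        /* AT_HWCAP2 carries the second hwcap word on arm64 */
        unsigned long hwcaps2 = getauxval(AT_HWCAP2);

        printf("MTE %s\n", (hwcaps2 & HWCAP2_MTE) ? "supported" : "unavailable");
        return 0;
}
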
162144 diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
162145 index 044bfdcf5b74..76a325862119 100644
162146 --- a/tools/testing/selftests/bpf/Makefile
162147 +++ b/tools/testing/selftests/bpf/Makefile
162148 @@ -221,7 +221,7 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile)                \
162149                     DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
162150  endif
162152 -$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
162153 +$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
162154  ifeq ($(VMLINUX_H),)
162155         $(call msg,GEN,,$@)
162156         $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
162157 @@ -346,7 +346,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o:                         \
162159  $(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h:                      \
162160                       $(TRUNNER_OUTPUT)/%.o                             \
162161 -                     | $(BPFTOOL) $(TRUNNER_OUTPUT)
162162 +                     $(BPFTOOL)                                        \
162163 +                     | $(TRUNNER_OUTPUT)
162164         $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
162165         $(Q)$$(BPFTOOL) gen skeleton $$< > $$@
162166  endif
162167 diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
162168 index 06eb956ff7bb..4b517d76257d 100644
162169 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
162170 +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
162171 @@ -210,11 +210,6 @@ static int duration = 0;
162172         .bpf_obj_file = "test_core_reloc_existence.o",                  \
162173         .btf_src_file = "btf__core_reloc_" #name ".o"                   \
162175 -#define FIELD_EXISTS_ERR_CASE(name) {                                  \
162176 -       FIELD_EXISTS_CASE_COMMON(name),                                 \
162177 -       .fails = true,                                                  \
162180  #define BITFIELDS_CASE_COMMON(objfile, test_name_prefix,  name)                \
162181         .case_name = test_name_prefix#name,                             \
162182         .bpf_obj_file = objfile,                                        \
162183 @@ -222,7 +217,7 @@ static int duration = 0;
162185  #define BITFIELDS_CASE(name, ...) {                                    \
162186         BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o",     \
162187 -                             "direct:", name),                         \
162188 +                             "probed:", name),                         \
162189         .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,     \
162190         .input_len = sizeof(struct core_reloc_##name),                  \
162191         .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)       \
162192 @@ -230,7 +225,7 @@ static int duration = 0;
162193         .output_len = sizeof(struct core_reloc_bitfields_output),       \
162194  }, {                                                                   \
162195         BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o",     \
162196 -                             "probed:", name),                         \
162197 +                             "direct:", name),                         \
162198         .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,     \
162199         .input_len = sizeof(struct core_reloc_##name),                  \
162200         .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)       \
162201 @@ -550,8 +545,7 @@ static struct core_reloc_test_case test_cases[] = {
162202         ARRAYS_ERR_CASE(arrays___err_too_small),
162203         ARRAYS_ERR_CASE(arrays___err_too_shallow),
162204         ARRAYS_ERR_CASE(arrays___err_non_array),
162205 -       ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
162206 -       ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
162207 +       ARRAYS_ERR_CASE(arrays___err_wrong_val_type),
162208         ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
162210         /* enum/ptr/int handling scenarios */
162211 @@ -642,13 +636,25 @@ static struct core_reloc_test_case test_cases[] = {
162212                 },
162213                 .output_len = sizeof(struct core_reloc_existence_output),
162214         },
162216 -       FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
162217 -       FIELD_EXISTS_ERR_CASE(existence__err_int_type),
162218 -       FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
162219 -       FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
162220 -       FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
162221 -       FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
162222 +       {
162223 +               FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs),
162224 +               .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) {
162225 +               },
162226 +               .input_len = sizeof(struct core_reloc_existence___wrong_field_defs),
162227 +               .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
162228 +                       .a_exists = 0,
162229 +                       .b_exists = 0,
162230 +                       .c_exists = 0,
162231 +                       .arr_exists = 0,
162232 +                       .s_exists = 0,
162233 +                       .a_value = 0xff000001u,
162234 +                       .b_value = 0xff000002u,
162235 +                       .c_value = 0xff000003u,
162236 +                       .arr_value = 0xff000004u,
162237 +                       .s_value = 0xff000005u,
162238 +               },
162239 +               .output_len = sizeof(struct core_reloc_existence_output),
162240 +       },
162242         /* bitfield relocation checks */
162243         BITFIELDS_CASE(bitfields, {
162244 @@ -857,13 +863,20 @@ void test_core_reloc(void)
162245                           "prog '%s' not found\n", probe_name))
162246                         goto cleanup;
162249 +               if (test_case->btf_src_file) {
162250 +                       err = access(test_case->btf_src_file, R_OK);
162251 +                       if (!ASSERT_OK(err, "btf_src_file"))
162252 +                               goto cleanup;
162253 +               }
162255                 load_attr.obj = obj;
162256                 load_attr.log_level = 0;
162257                 load_attr.target_btf_path = test_case->btf_src_file;
162258                 err = bpf_object__load_xattr(&load_attr);
162259                 if (err) {
162260                         if (!test_case->fails)
162261 -                               CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
162262 +                               ASSERT_OK(err, "obj_load");
162263                         goto cleanup;
162264                 }
162266 @@ -902,10 +915,8 @@ void test_core_reloc(void)
162267                         goto cleanup;
162268                 }
162270 -               if (test_case->fails) {
162271 -                       CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
162272 +               if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail"))
162273                         goto cleanup;
162274 -               }
162276                 equal = memcmp(data->out, test_case->output,
162277                                test_case->output_len) == 0;
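
The core_reloc.c hunks above replace open-coded CHECK(false, ...) failures with the ASSERT_*() helpers, which log and return a boolean so callers can short-circuit with goto cleanup. A simplified sketch of their shape (the real macros in the selftests' test_progs.h also maintain pass/fail counters; these stand-ins only illustrate the calling pattern):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the real ASSERT_OK()/ASSERT_FALSE() macros */
#define ASSERT_OK(res, name) ({                                        \
        long long __res = (long long)(res);                            \
        bool __ok = (__res == 0);                                      \
        if (!__ok)                                                     \
                fprintf(stderr, "%s: unexpected error: %lld\n",        \
                        (name), __res);                                \
        __ok;                                                          \
})

#define ASSERT_FALSE(cond, name) ({                                    \
        bool __ok = !(cond);                                           \
        if (!__ok)                                                     \
                fprintf(stderr, "%s: unexpectedly true\n", (name));    \
        __ok;                                                          \
})
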
162278 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
162279 deleted file mode 100644
162280 index dd0ffa518f36..000000000000
162281 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
162282 +++ /dev/null
162283 @@ -1,3 +0,0 @@
162284 -#include "core_reloc_types.h"
162286 -void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
162287 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
162288 deleted file mode 100644
162289 index bc83372088ad..000000000000
162290 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
162291 +++ /dev/null
162292 @@ -1,3 +0,0 @@
162293 -#include "core_reloc_types.h"
162295 -void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
162296 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
162297 deleted file mode 100644
162298 index 917bec41be08..000000000000
162299 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
162300 +++ /dev/null
162301 @@ -1,3 +0,0 @@
162302 -#include "core_reloc_types.h"
162304 -void f(struct core_reloc_existence___err_wrong_int_kind x) {}
162305 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
162306 deleted file mode 100644
162307 index 6ec7e6ec1c91..000000000000
162308 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
162309 +++ /dev/null
162310 @@ -1,3 +0,0 @@
162311 -#include "core_reloc_types.h"
162313 -void f(struct core_reloc_existence___err_wrong_int_sz x) {}
162314 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
162315 deleted file mode 100644
162316 index 7bbcacf2b0d1..000000000000
162317 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
162318 +++ /dev/null
162319 @@ -1,3 +0,0 @@
162320 -#include "core_reloc_types.h"
162322 -void f(struct core_reloc_existence___err_wrong_int_type x) {}
162323 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
162324 deleted file mode 100644
162325 index f384dd38ec70..000000000000
162326 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
162327 +++ /dev/null
162328 @@ -1,3 +0,0 @@
162329 -#include "core_reloc_types.h"
162331 -void f(struct core_reloc_existence___err_wrong_struct_type x) {}
162332 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
162333 new file mode 100644
162334 index 000000000000..d14b496190c3
162335 --- /dev/null
162336 +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
162337 @@ -0,0 +1,3 @@
162338 +#include "core_reloc_types.h"
162340 +void f(struct core_reloc_existence___wrong_field_defs x) {}
162341 diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
162342 index 9a2850850121..664eea1013aa 100644
162343 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
162344 +++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
162345 @@ -700,27 +700,11 @@ struct core_reloc_existence___minimal {
162346         int a;
162349 -struct core_reloc_existence___err_wrong_int_sz {
162350 -       short a;
162353 -struct core_reloc_existence___err_wrong_int_type {
162354 +struct core_reloc_existence___wrong_field_defs {
162355 +       void *a;
162356         int b[1];
162359 -struct core_reloc_existence___err_wrong_int_kind {
162360         struct{ int x; } c;
162363 -struct core_reloc_existence___err_wrong_arr_kind {
162364         int arr;
162367 -struct core_reloc_existence___err_wrong_arr_value_type {
162368 -       short arr[1];
162371 -struct core_reloc_existence___err_wrong_struct_type {
162372         int s;
162375 diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
162376 index 1b138cd2b187..1b1c798e9248 100644
162377 --- a/tools/testing/selftests/bpf/verifier/array_access.c
162378 +++ b/tools/testing/selftests/bpf/verifier/array_access.c
162379 @@ -186,7 +186,7 @@
162380         },
162381         .fixup_map_hash_48b = { 3 },
162382         .errstr_unpriv = "R0 leaks addr",
162383 -       .errstr = "invalid access to map value, value_size=48 off=44 size=8",
162384 +       .errstr = "R0 unbounded memory access",
162385         .result_unpriv = REJECT,
162386         .result = REJECT,
162387         .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
162388 diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
162389 index 6f3a70df63bc..e00435753008 100644
162390 --- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
162391 +++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
162392 @@ -120,12 +120,13 @@ __mirror_gre_test()
162393         sleep 5
162395         for ((i = 0; i < count; ++i)); do
162396 +               local sip=$(mirror_gre_ipv6_addr 1 $i)::1
162397                 local dip=$(mirror_gre_ipv6_addr 1 $i)::2
162398                 local htun=h3-gt6-$i
162399                 local message
162401                 icmp6_capture_install $htun
162402 -               mirror_test v$h1 "" $dip $htun 100 10
162403 +               mirror_test v$h1 $sip $dip $htun 100 10
162404                 icmp6_capture_uninstall $htun
162405         done
162407 diff --git a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
162408 index f813ffefc07e..65f43a7ce9c9 100644
162409 --- a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
162410 +++ b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
162411 @@ -55,10 +55,6 @@ port_test()
162412               | jq '.[][][] | select(.name=="physical_ports") |.["occ"]')
162414         [[ $occ -eq $max_ports ]]
162415 -       if [[ $should_fail -eq 0 ]]; then
162416 -               check_err $? "Mismatch ports number: Expected $max_ports, got $occ."
162417 -       else
162418 -               check_err_fail $should_fail $? "Reached more ports than expected"
162419 -       fi
162420 +       check_err_fail $should_fail $? "Attempt to create $max_ports ports (actual result $occ)"
162423 diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
162424 index b0cb1aaffdda..33ddd01689be 100644
162425 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
162426 +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
162427 @@ -507,8 +507,8 @@ do_red_test()
162428         check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
162429         local diff=$((limit - backlog))
162430         pct=$((100 * diff / limit))
162431 -       ((0 <= pct && pct <= 5))
162432 -       check_err $? "backlog $backlog / $limit expected <= 5% distance"
162433 +       ((0 <= pct && pct <= 10))
162434 +       check_err $? "backlog $backlog / $limit expected <= 10% distance"
162435         log_test "TC $((vlan - 10)): RED backlog > limit"
162437         stop_traffic
162438 diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
162439 index cc0f07e72cf2..aa74be9f47c8 100644
162440 --- a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
162441 +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
162442 @@ -98,11 +98,7 @@ __tc_flower_test()
162443                         jq -r '[ .[] | select(.kind == "flower") |
162444                         .options | .in_hw ]' | jq .[] | wc -l)
162445         [[ $((offload_count - 1)) -eq $count ]]
162446 -       if [[ $should_fail -eq 0 ]]; then
162447 -               check_err $? "Offload mismatch"
162448 -       else
162449 -               check_err_fail $should_fail $? "Offload more than expacted"
162450 -       fi
162451 +       check_err_fail $should_fail $? "Attempt to offload $count rules (actual result $((offload_count - 1)))"
162454  tc_flower_test()
162455 diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
162456 index cf69b2fcce59..dd61118df66e 100644
162457 --- a/tools/testing/selftests/exec/Makefile
162458 +++ b/tools/testing/selftests/exec/Makefile
162459 @@ -28,8 +28,8 @@ $(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat
162460         cp $< $@
162461         chmod -x $@
162462  $(OUTPUT)/load_address_4096: load_address.c
162463 -       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie $< -o $@
162464 +       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@
162465  $(OUTPUT)/load_address_2097152: load_address.c
162466 -       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie $< -o $@
162467 +       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@
162468  $(OUTPUT)/load_address_16777216: load_address.c
162469 -       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie $< -o $@
162470 +       $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@
162471 diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore
162472 index 0efcd494daab..af7557e821da 100644
162473 --- a/tools/testing/selftests/futex/functional/.gitignore
162474 +++ b/tools/testing/selftests/futex/functional/.gitignore
162475 @@ -6,3 +6,6 @@ futex_wait_private_mapped_file
162476  futex_wait_timeout
162477  futex_wait_uninitialized_heap
162478  futex_wait_wouldblock
162479 +futex2_wait
162480 +futex2_waitv
162481 +futex2_requeue
162482 diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
162483 index 23207829ec75..3ccb9ea58ddd 100644
162484 --- a/tools/testing/selftests/futex/functional/Makefile
162485 +++ b/tools/testing/selftests/futex/functional/Makefile
162486 @@ -1,10 +1,11 @@
162487  # SPDX-License-Identifier: GPL-2.0
162488 -INCLUDES := -I../include -I../../
162489 +INCLUDES := -I../include -I../../ -I../../../../../usr/include/
162490  CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES)
162491  LDLIBS := -lpthread -lrt
162493  HEADERS := \
162494         ../include/futextest.h \
162495 +       ../include/futex2test.h \
162496         ../include/atomic.h \
162497         ../include/logging.h
162498  TEST_GEN_FILES := \
162499 @@ -14,7 +15,10 @@ TEST_GEN_FILES := \
162500         futex_requeue_pi_signal_restart \
162501         futex_requeue_pi_mismatched_ops \
162502         futex_wait_uninitialized_heap \
162503 -       futex_wait_private_mapped_file
162504 +       futex_wait_private_mapped_file \
162505 +       futex2_wait \
162506 +       futex2_waitv \
162507 +       futex2_requeue
162509  TEST_PROGS := run.sh
162511 diff --git a/tools/testing/selftests/futex/functional/futex2_requeue.c b/tools/testing/selftests/futex/functional/futex2_requeue.c
162512 new file mode 100644
162513 index 000000000000..1bc3704dc8c2
162514 --- /dev/null
162515 +++ b/tools/testing/selftests/futex/functional/futex2_requeue.c
162516 @@ -0,0 +1,164 @@
162517 +// SPDX-License-Identifier: GPL-2.0-or-later
162518 +/******************************************************************************
162520 + *   Copyright Collabora Ltd., 2021
162522 + * DESCRIPTION
162523 + *     Test requeue mechanism of futex2, using 32bit sized futexes.
162525 + * AUTHOR
162526 + *     André Almeida <andrealmeid@collabora.com>
162528 + * HISTORY
162529 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
162531 + *****************************************************************************/
162533 +#include <errno.h>
162534 +#include <error.h>
162535 +#include <getopt.h>
162536 +#include <stdio.h>
162537 +#include <stdlib.h>
162538 +#include <string.h>
162539 +#include <time.h>
162540 +#include <pthread.h>
162541 +#include <sys/shm.h>
162542 +#include <limits.h>
162543 +#include "futex2test.h"
162544 +#include "logging.h"
162546 +#define TEST_NAME "futex2-requeue"
162547 +#define timeout_ns  30000000
162548 +#define WAKE_WAIT_US 10000
162549 +volatile futex_t *f1;
162551 +void usage(char *prog)
162553 +       printf("Usage: %s\n", prog);
162554 +       printf("  -c    Use color\n");
162555 +       printf("  -h    Display this help message\n");
162556 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
162557 +              VQUIET, VCRITICAL, VINFO);
162560 +void *waiterfn(void *arg)
162562 +       struct timespec64 to64;
162564 +       /* setting absolute timeout for futex2 */
162565 +       if (gettime64(CLOCK_MONOTONIC, &to64))
162566 +               error("gettime64 failed\n", errno);
162568 +       to64.tv_nsec += timeout_ns;
162570 +       if (to64.tv_nsec >= 1000000000) {
162571 +               to64.tv_sec++;
162572 +               to64.tv_nsec -= 1000000000;
162573 +       }
162575 +       if (futex2_wait(f1, *f1, FUTEX_32, &to64))
162576 +               printf("waiter failed errno %d\n", errno);
162578 +       return NULL;
162581 +int main(int argc, char *argv[])
162583 +       pthread_t waiter[10];
162584 +       int res, ret = RET_PASS;
162585 +       int c, i;
162586 +       volatile futex_t _f1 = 0;
162587 +       volatile futex_t f2 = 0;
162588 +       struct futex_requeue r1, r2;
162590 +       f1 = &_f1;
162592 +       r1.flags = FUTEX_32;
162593 +       r2.flags = FUTEX_32;
162595 +       r1.uaddr = f1;
162596 +       r2.uaddr = &f2;
162598 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
162599 +               switch (c) {
162600 +               case 'c':
162601 +                       log_color(1);
162602 +                       break;
162603 +               case 'h':
162604 +                       usage(basename(argv[0]));
162605 +                       exit(0);
162606 +               case 'v':
162607 +                       log_verbosity(atoi(optarg));
162608 +                       break;
162609 +               default:
162610 +                       usage(basename(argv[0]));
162611 +                       exit(1);
162612 +               }
162613 +       }
162615 +       ksft_print_header();
162616 +       ksft_set_plan(2);
162617 +       ksft_print_msg("%s: Test FUTEX2_REQUEUE\n",
162618 +                      basename(argv[0]));
162620 +       /*
162621 +        * Requeue a waiter from f1 to f2, and wake f2.
162622 +        */
162623 +       if (pthread_create(&waiter[0], NULL, waiterfn, NULL))
162624 +               error("pthread_create failed\n", errno);
162626 +       usleep(WAKE_WAIT_US);
162628 +       res = futex2_requeue(&r1, &r2, 0, 1, 0, 0);
162629 +       if (res != 1) {
162630 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
162631 +                                     res ? errno : res,
162632 +                                     res ? strerror(errno) : "");
162633 +               ret = RET_FAIL;
162634 +       }
162637 +       info("Calling private futex2_wake on f2: %u @ %p with val=%u\n", f2, &f2, f2);
162638 +       res = futex2_wake(&f2, 1, FUTEX_32);
162639 +       if (res != 1) {
162640 +               ksft_test_result_fail("futex2_wake private returned: %d %s\n",
162641 +                                     res ? errno : res,
162642 +                                     res ? strerror(errno) : "");
162643 +               ret = RET_FAIL;
162644 +       } else {
162645 +               ksft_test_result_pass("futex2_requeue simple succeeds\n");
162646 +       }
162649 +       /*
162650 +        * Create 10 waiters at f1. At futex_requeue, wake 3 and requeue 7.
162651 +        * At futex_wake, wake INT_MAX (should be exactly 7).
162652 +        */
162653 +       for (i = 0; i < 10; i++) {
162654 +               if (pthread_create(&waiter[i], NULL, waiterfn, NULL))
162655 +                       error("pthread_create failed\n", errno);
162656 +       }
162658 +       usleep(WAKE_WAIT_US);
162660 +       res = futex2_requeue(&r1, &r2, 3, 7, 0, 0);
162661 +       if (res != 10) {
162662 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
162663 +                                     res ? errno : res,
162664 +                                     res ? strerror(errno) : "");
162665 +               ret = RET_FAIL;
162666 +       }
162668 +       res = futex2_wake(&f2, INT_MAX, FUTEX_32);
162669 +       if (res != 7) {
162670 +               ksft_test_result_fail("futex2_wake returned: %d %s\n",
162671 +                                     res ? errno : res,
162672 +                                     res ? strerror(errno) : "");
162673 +               ret = RET_FAIL;
162674 +       } else {
162675 +               ksft_test_result_pass("futex2_requeue succeeds\n");
162676 +       }
162678 +       ksft_print_cnts();
162679 +       return ret;
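
futex2_requeue.c relies on struct futex_requeue, which this patchset adds to the kernel uapi headers rather than to futex2test.h below. Inferred from the fields the test touches (r1.uaddr, r1.flags), it has at least the following shape; this is an assumption shown for readability, not a copy of the uapi definition:

/* Assumed shape of the uapi struct behind r1/r2 above */
struct futex_requeue {
        void *uaddr;            /* futex this entry refers to */
        unsigned int flags;     /* size/shared flags, e.g. FUTEX_32 */
};
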
162681 diff --git a/tools/testing/selftests/futex/functional/futex2_wait.c b/tools/testing/selftests/futex/functional/futex2_wait.c
162682 new file mode 100644
162683 index 000000000000..4b5416585c79
162684 --- /dev/null
162685 +++ b/tools/testing/selftests/futex/functional/futex2_wait.c
162686 @@ -0,0 +1,209 @@
162687 +// SPDX-License-Identifier: GPL-2.0-or-later
162688 +/******************************************************************************
162690 + *   Copyright Collabora Ltd., 2021
162692 + * DESCRIPTION
162693 + *     Test wait/wake mechanism of futex2, using 32bit sized futexes.
162695 + * AUTHOR
162696 + *     André Almeida <andrealmeid@collabora.com>
162698 + * HISTORY
162699 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
162701 + *****************************************************************************/
162703 +#include <errno.h>
162704 +#include <error.h>
162705 +#include <getopt.h>
162706 +#include <stdio.h>
162707 +#include <stdlib.h>
162708 +#include <string.h>
162709 +#include <time.h>
162710 +#include <pthread.h>
162711 +#include <sys/shm.h>
162712 +#include <sys/mman.h>
162713 +#include <fcntl.h>
162715 +#include "futex2test.h"
162716 +#include "logging.h"
162718 +#define TEST_NAME "futex2-wait"
162719 +#define timeout_ns  30000000
162720 +#define WAKE_WAIT_US 10000
162721 +#define SHM_PATH "futex2_shm_file"
162722 +futex_t *f1;
162724 +void usage(char *prog)
162726 +       printf("Usage: %s\n", prog);
162727 +       printf("  -c    Use color\n");
162728 +       printf("  -h    Display this help message\n");
162729 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
162730 +              VQUIET, VCRITICAL, VINFO);
162733 +void *waiterfn(void *arg)
162735 +       struct timespec64 to64;
162736 +       unsigned int flags = 0;
162738 +       if (arg)
162739 +               flags = *((unsigned int *) arg);
162741 +       /* setting absolute timeout for futex2 */
162742 +       if (gettime64(CLOCK_MONOTONIC, &to64))
162743 +               error("gettime64 failed\n", errno);
162745 +       to64.tv_nsec += timeout_ns;
162747 +       if (to64.tv_nsec >= 1000000000) {
162748 +               to64.tv_sec++;
162749 +               to64.tv_nsec -= 1000000000;
162750 +       }
162752 +       if (futex2_wait(f1, *f1, FUTEX_32 | flags, &to64))
162753 +               printf("waiter failed errno %d\n", errno);
162755 +       return NULL;
162758 +void *waitershm(void *arg)
162760 +       futex2_wait(arg, 0, FUTEX_32 | FUTEX_SHARED_FLAG, NULL);
162762 +       return NULL;
162765 +int main(int argc, char *argv[])
162767 +       pthread_t waiter;
162768 +       unsigned int flags = FUTEX_SHARED_FLAG;
162769 +       int res, ret = RET_PASS;
162770 +       int c;
162771 +       futex_t f_private = 0;
162773 +       f1 = &f_private;
162775 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
162776 +               switch (c) {
162777 +               case 'c':
162778 +                       log_color(1);
162779 +                       break;
162780 +               case 'h':
162781 +                       usage(basename(argv[0]));
162782 +                       exit(0);
162783 +               case 'v':
162784 +                       log_verbosity(atoi(optarg));
162785 +                       break;
162786 +               default:
162787 +                       usage(basename(argv[0]));
162788 +                       exit(1);
162789 +               }
162790 +       }
162792 +       ksft_print_header();
162793 +       ksft_set_plan(3);
162794 +       ksft_print_msg("%s: Test FUTEX2_WAIT\n",
162795 +                      basename(argv[0]));
162797 +       /* Testing a private futex */
162798 +       info("Calling private futex2_wait on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
162800 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
162801 +               error("pthread_create failed\n", errno);
162803 +       usleep(WAKE_WAIT_US);
162805 +       info("Calling private futex2_wake on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
162806 +       res = futex2_wake(f1, 1, FUTEX_32);
162807 +       if (res != 1) {
162808 +               ksft_test_result_fail("futex2_wake private returned: %d %s\n",
162809 +                                     res ? errno : res,
162810 +                                     res ? strerror(errno) : "");
162811 +               ret = RET_FAIL;
162812 +       } else {
162813 +               ksft_test_result_pass("futex2_wake private succeeds\n");
162814 +       }
162816 +       int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
162818 +       if (shm_id < 0) {
162819 +               perror("shmget");
162820 +               exit(1);
162821 +       }
162823 +       /* Testing an anon page shared memory */
162824 +       unsigned int *shared_data = shmat(shm_id, NULL, 0);
162826 +       *shared_data = 0;
162827 +       f1 = shared_data;
162829 +       info("Calling shared futex2_wait on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
162831 +       if (pthread_create(&waiter, NULL, waiterfn, &flags))
162832 +               error("pthread_create failed\n", errno);
162834 +       usleep(WAKE_WAIT_US);
162836 +       info("Calling shared futex2_wake on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
162837 +       res = futex2_wake(f1, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
162838 +       if (res != 1) {
162839 +               ksft_test_result_fail("futex2_wake shared (shmget) returned: %d %s\n",
162840 +                                     res ? errno : res,
162841 +                                     res ? strerror(errno) : "");
162842 +               ret = RET_FAIL;
162843 +       } else {
162844 +               ksft_test_result_pass("futex2_wake shared (shmget) succeeds\n");
162845 +       }
162847 +       shmdt(shared_data);
162849 +       /* Testing a file backed shared memory */
162850 +       void *shm;
162851 +       int fd, pid;
162853 +       f_private = 0;
162855 +       fd = open(SHM_PATH, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
162856 +       if (fd < 0) {
162857 +               perror("open");
162858 +               exit(1);
162859 +       }
162861 +       res = ftruncate(fd, sizeof(f_private));
162862 +       if (res) {
162863 +               perror("ftruncate");
162864 +               exit(1);
162865 +       }
162867 +       shm = mmap(NULL, sizeof(f_private), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
162868 +       if (shm == MAP_FAILED) {
162869 +               perror("mmap");
162870 +               exit(1);
162871 +       }
162873 +       memcpy(shm, &f_private, sizeof(f_private));
162875 +       pthread_create(&waiter, NULL, waitershm, shm);
162877 +       usleep(WAKE_WAIT_US);
162879 +       res = futex2_wake(shm, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
162880 +       if (res != 1) {
162881 +               ksft_test_result_fail("futex2_wake shared (mmap) returned: %d %s\n",
162882 +                                     res ? errno : res,
162883 +                                     res ? strerror(errno) : "");
162884 +               ret = RET_FAIL;
162885 +       } else {
162886 +               ksft_test_result_pass("futex2_wake shared (mmap) succeeds\n");
162887 +       }
162889 +       munmap(shm, sizeof(f_private));
162891 +       remove(SHM_PATH);
162893 +       ksft_print_cnts();
162894 +       return ret;
162896 diff --git a/tools/testing/selftests/futex/functional/futex2_waitv.c b/tools/testing/selftests/futex/functional/futex2_waitv.c
162897 new file mode 100644
162898 index 000000000000..2f81d296d95d
162899 --- /dev/null
162900 +++ b/tools/testing/selftests/futex/functional/futex2_waitv.c
162901 @@ -0,0 +1,157 @@
162902 +// SPDX-License-Identifier: GPL-2.0-or-later
162903 +/******************************************************************************
162905 + *   Copyright Collabora Ltd., 2021
162907 + * DESCRIPTION
162908 + *     Test waitv/wake mechanism of futex2, using 32bit sized futexes.
162910 + * AUTHOR
162911 + *     André Almeida <andrealmeid@collabora.com>
162913 + * HISTORY
162914 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
162916 + *****************************************************************************/
162918 +#include <errno.h>
162919 +#include <error.h>
162920 +#include <getopt.h>
162921 +#include <stdio.h>
162922 +#include <stdlib.h>
162923 +#include <string.h>
162924 +#include <time.h>
162925 +#include <pthread.h>
162926 +#include <sys/shm.h>
162927 +#include "futex2test.h"
162928 +#include "logging.h"
162930 +#define TEST_NAME "futex2-waitv"
162931 +#define timeout_ns  1000000000
162932 +#define WAKE_WAIT_US 10000
162933 +#define NR_FUTEXES 30
162934 +struct futex_waitv waitv[NR_FUTEXES];
162935 +u_int32_t futexes[NR_FUTEXES] = {0};
162937 +void usage(char *prog)
162939 +       printf("Usage: %s\n", prog);
162940 +       printf("  -c    Use color\n");
162941 +       printf("  -h    Display this help message\n");
162942 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
162943 +              VQUIET, VCRITICAL, VINFO);
162946 +void *waiterfn(void *arg)
162948 +       struct timespec64 to64;
162949 +       int res;
162951 +       /* setting absolute timeout for futex2 */
162952 +       if (gettime64(CLOCK_MONOTONIC, &to64))
162953 +               error("gettime64 failed\n", errno);
162955 +       to64.tv_sec++;
162957 +       res = futex2_waitv(waitv, NR_FUTEXES, 0, &to64);
162958 +       if (res < 0) {
162959 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
162960 +                                     res ? errno : res,
162961 +                                     res ? strerror(errno) : "");
162962 +       } else if (res != NR_FUTEXES - 1) {
162963 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
162964 +                                     res ? errno : res,
162965 +                                     res ? strerror(errno) : "");
162966 +       }
162968 +       return NULL;
162971 +int main(int argc, char *argv[])
162973 +       pthread_t waiter;
162974 +       int res, ret = RET_PASS;
162975 +       int c, i;
162977 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
162978 +               switch (c) {
162979 +               case 'c':
162980 +                       log_color(1);
162981 +                       break;
162982 +               case 'h':
162983 +                       usage(basename(argv[0]));
162984 +                       exit(0);
162985 +               case 'v':
162986 +                       log_verbosity(atoi(optarg));
162987 +                       break;
162988 +               default:
162989 +                       usage(basename(argv[0]));
162990 +                       exit(1);
162991 +               }
162992 +       }
162994 +       ksft_print_header();
162995 +       ksft_set_plan(2);
162996 +       ksft_print_msg("%s: Test FUTEX2_WAITV\n",
162997 +                      basename(argv[0]));
162999 +       for (i = 0; i < NR_FUTEXES; i++) {
163000 +               waitv[i].uaddr = &futexes[i];
163001 +               waitv[i].flags = FUTEX_32;
163002 +               waitv[i].val = 0;
163003 +       }
163005 +       /* Private waitv */
163006 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
163007 +               error("pthread_create failed\n", errno);
163009 +       usleep(WAKE_WAIT_US);
163011 +       res = futex2_wake(waitv[NR_FUTEXES - 1].uaddr, 1, FUTEX_32);
163012 +       if (res != 1) {
163013 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
163014 +                                     res ? errno : res,
163015 +                                     res ? strerror(errno) : "");
163016 +               ret = RET_FAIL;
163017 +       } else {
163018 +               ksft_test_result_pass("futex2_waitv private succeeds\n");
163019 +       }
163021 +       /* Shared waitv */
163022 +       for (i = 0; i < NR_FUTEXES; i++) {
163023 +               int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
163025 +               if (shm_id < 0) {
163026 +                       perror("shmget");
163027 +                       exit(1);
163028 +               }
163030 +               unsigned int *shared_data = shmat(shm_id, NULL, 0);
163032 +               *shared_data = 0;
163033 +               waitv[i].uaddr = shared_data;
163034 +               waitv[i].flags = FUTEX_32 | FUTEX_SHARED_FLAG;
163035 +               waitv[i].val = 0;
163036 +       }
163038 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
163039 +               error("pthread_create failed\n", errno);
163041 +       usleep(WAKE_WAIT_US);
163043 +       res = futex2_wake(waitv[NR_FUTEXES - 1].uaddr, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
163044 +       if (res != 1) {
163045 +               ksft_test_result_fail("futex2_waitv shared returned: %d %s\n",
163046 +                                     res ? errno : res,
163047 +                                     res ? strerror(errno) : "");
163048 +               ret = RET_FAIL;
163049 +       } else {
163050 +               ksft_test_result_pass("futex2_waitv shared succeeds\n");
163051 +       }
163053 +       for (i = 0; i < NR_FUTEXES; i++)
163054 +               shmdt(waitv[i].uaddr);
163056 +       ksft_print_cnts();
163057 +       return ret;
163059 diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
163060 index ee55e6d389a3..b4dffe9e3b44 100644
163061 --- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
163062 +++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
163063 @@ -11,6 +11,7 @@
163064   *
163065   * HISTORY
163066   *      2009-Nov-6: Initial version by Darren Hart <dvhart@linux.intel.com>
163067 + *      2021-Feb-5: Add futex2 test by André <andrealmeid@collabora.com>
163068   *
163069   *****************************************************************************/
163071 @@ -20,7 +21,7 @@
163072  #include <stdlib.h>
163073  #include <string.h>
163074  #include <time.h>
163075 -#include "futextest.h"
163076 +#include "futex2test.h"
163077  #include "logging.h"
163079  #define TEST_NAME "futex-wait-timeout"
163080 @@ -40,7 +41,8 @@ void usage(char *prog)
163081  int main(int argc, char *argv[])
163083         futex_t f1 = FUTEX_INITIALIZER;
163084 -       struct timespec to;
163085 +       struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
163086 +       struct timespec64 to64;
163087         int res, ret = RET_PASS;
163088         int c;
163090 @@ -65,22 +67,60 @@ int main(int argc, char *argv[])
163091         }
163093         ksft_print_header();
163094 -       ksft_set_plan(1);
163095 +       ksft_set_plan(3);
163096         ksft_print_msg("%s: Block on a futex and wait for timeout\n",
163097                basename(argv[0]));
163098         ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
163100 -       /* initialize timeout */
163101 -       to.tv_sec = 0;
163102 -       to.tv_nsec = timeout_ns;
163104         info("Calling futex_wait on f1: %u @ %p\n", f1, &f1);
163105         res = futex_wait(&f1, f1, &to, FUTEX_PRIVATE_FLAG);
163106         if (!res || errno != ETIMEDOUT) {
163107 -               fail("futex_wait returned %d\n", ret < 0 ? errno : ret);
163108 +               ksft_test_result_fail("futex_wait returned %d\n", ret < 0 ? errno : ret);
163109 +               ret = RET_FAIL;
163110 +       } else {
163111 +               ksft_test_result_pass("futex_wait timeout succeeds\n");
163112 +       }
163114 +       /* setting absolute monotonic timeout for futex2 */
163115 +       if (gettime64(CLOCK_MONOTONIC, &to64))
163116 +               error("gettime64 failed\n", errno);
163118 +       to64.tv_nsec += timeout_ns;
163120 +       if (to64.tv_nsec >= 1000000000) {
163121 +               to64.tv_sec++;
163122 +               to64.tv_nsec -= 1000000000;
163123 +       }
163125 +       info("Calling futex2_wait on f1: %u @ %p\n", f1, &f1);
163126 +       res = futex2_wait(&f1, f1, FUTEX_32, &to64);
163127 +       if (!res || errno != ETIMEDOUT) {
163128 +               ksft_test_result_fail("futex2_wait monotonic returned %d\n", ret < 0 ? errno : ret);
163129 +               ret = RET_FAIL;
163130 +       } else {
163131 +               ksft_test_result_pass("futex2_wait monotonic timeout succeeds\n");
163132 +       }
163134 +       /* setting absolute realtime timeout for futex2 */
163135 +       if (gettime64(CLOCK_REALTIME, &to64))
163136 +               error("gettime64 failed\n", errno);
163138 +       to64.tv_nsec += timeout_ns;
163140 +       if (to64.tv_nsec >= 1000000000) {
163141 +               to64.tv_sec++;
163142 +               to64.tv_nsec -= 1000000000;
163143 +       }
163145 +       info("Calling futex2_wait on f1: %u @ %p\n", f1, &f1);
163146 +       res = futex2_wait(&f1, f1, FUTEX_32 | FUTEX_CLOCK_REALTIME, &to64);
163147 +       if (!res || errno != ETIMEDOUT) {
163148 +               ksft_test_result_fail("futex2_wait realtime returned %d\n", ret < 0 ? errno : ret);
163149                 ret = RET_FAIL;
163150 +       } else {
163151 +               ksft_test_result_pass("futex2_wait realtime timeout succeeds\n");
163152         }
163154 -       print_result(TEST_NAME, ret);
163155 +       ksft_print_cnts();
163156         return ret;
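
The absolute-timeout setup above (fetch the clock with gettime64(), add timeout_ns, then carry tv_nsec into tv_sec) recurs verbatim in every futex2 test in this patch. A small helper capturing the pattern (hypothetical name, mirroring struct timespec64 from futex2test.h):

/* Hypothetical helper; normalizes tv_nsec after adding a delta */
struct timespec64 {
        long long tv_sec;       /* seconds */
        long long tv_nsec;      /* nanoseconds */
};

static void timespec64_add_ns(struct timespec64 *ts, long long ns)
{
        ts->tv_nsec += ns;
        while (ts->tv_nsec >= 1000000000LL) {   /* carry into tv_sec */
                ts->tv_sec++;
                ts->tv_nsec -= 1000000000LL;
        }
}
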
163158 diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
163159 index 0ae390ff8164..ed3660090907 100644
163160 --- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
163161 +++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
163162 @@ -12,6 +12,7 @@
163163   *
163164   * HISTORY
163165   *      2009-Nov-14: Initial version by Gowrishankar <gowrishankar.m@in.ibm.com>
163166 + *      2021-Feb-5: Add futex2 test by André <andrealmeid@collabora.com>
163167   *
163168   *****************************************************************************/
163170 @@ -21,7 +22,7 @@
163171  #include <stdlib.h>
163172  #include <string.h>
163173  #include <time.h>
163174 -#include "futextest.h"
163175 +#include "futex2test.h"
163176  #include "logging.h"
163178  #define TEST_NAME "futex-wait-wouldblock"
163179 @@ -39,6 +40,7 @@ void usage(char *prog)
163180  int main(int argc, char *argv[])
163182         struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
163183 +       struct timespec64 to64;
163184         futex_t f1 = FUTEX_INITIALIZER;
163185         int res, ret = RET_PASS;
163186         int c;
163187 @@ -61,18 +63,41 @@ int main(int argc, char *argv[])
163188         }
163190         ksft_print_header();
163191 -       ksft_set_plan(1);
163192 +       ksft_set_plan(2);
163193         ksft_print_msg("%s: Test the unexpected futex value in FUTEX_WAIT\n",
163194                basename(argv[0]));
163196         info("Calling futex_wait on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
163197         res = futex_wait(&f1, f1+1, &to, FUTEX_PRIVATE_FLAG);
163198         if (!res || errno != EWOULDBLOCK) {
163199 -               fail("futex_wait returned: %d %s\n",
163200 +               ksft_test_result_fail("futex_wait returned: %d %s\n",
163201                      res ? errno : res, res ? strerror(errno) : "");
163202                 ret = RET_FAIL;
163203 +       } else {
163204 +               ksft_test_result_pass("futex_wait wouldblock succeeds\n");
163205         }
163207 -       print_result(TEST_NAME, ret);
163208 +       /* setting absolute timeout for futex2 */
163209 +       if (gettime64(CLOCK_MONOTONIC, &to64))
163210 +               error("gettime64 failed\n", errno);
163212 +       to64.tv_nsec += timeout_ns;
163214 +       if (to64.tv_nsec >= 1000000000) {
163215 +               to64.tv_sec++;
163216 +               to64.tv_nsec -= 1000000000;
163217 +       }
163219 +       info("Calling futex2_wait on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
163220 +       res = futex2_wait(&f1, f1+1, FUTEX_32, &to64);
163221 +       if (!res || errno != EWOULDBLOCK) {
163222 +               ksft_test_result_fail("futex2_wait returned: %d %s\n",
163223 +                    res ? errno : res, res ? strerror(errno) : "");
163224 +               ret = RET_FAIL;
163225 +       } else {
163226 +               ksft_test_result_pass("futex2_wait wouldblock succeeds\n");
163227 +       }
163229 +       ksft_print_cnts();
163230         return ret;
163232 diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
163233 index 1acb6ace1680..18b3883d7236 100755
163234 --- a/tools/testing/selftests/futex/functional/run.sh
163235 +++ b/tools/testing/selftests/futex/functional/run.sh
163236 @@ -73,3 +73,9 @@ echo
163237  echo
163238  ./futex_wait_uninitialized_heap $COLOR
163239  ./futex_wait_private_mapped_file $COLOR
163241 +echo
163242 +./futex2_wait $COLOR
163244 +echo
163245 +./futex2_waitv $COLOR
163246 diff --git a/tools/testing/selftests/futex/include/futex2test.h b/tools/testing/selftests/futex/include/futex2test.h
163247 new file mode 100644
163248 index 000000000000..e2635006b1a9
163249 --- /dev/null
163250 +++ b/tools/testing/selftests/futex/include/futex2test.h
163251 @@ -0,0 +1,121 @@
163252 +/* SPDX-License-Identifier: GPL-2.0-or-later */
163253 +/******************************************************************************
163255 + *   Copyright Collabora Ltd., 2021
163257 + * DESCRIPTION
163258 + *     Futex2 library addons for old futex library
163260 + * AUTHOR
163261 + *     André Almeida <andrealmeid@collabora.com>
163263 + * HISTORY
163264 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
163266 + *****************************************************************************/
163267 +#include "futextest.h"
163268 +#include <stdio.h>
163270 +#define NSEC_PER_SEC   1000000000L
163272 +#ifndef FUTEX_8
163273 +# define FUTEX_8       0
163274 +#endif
163275 +#ifndef FUTEX_16
163276 +# define FUTEX_16      1
163277 +#endif
163278 +#ifndef FUTEX_32
163279 +# define FUTEX_32      2
163280 +#endif
163282 +#ifndef FUTEX_SHARED_FLAG
163283 +#define FUTEX_SHARED_FLAG 8
163284 +#endif
163286 +#ifndef FUTEX_WAITV_MAX
163287 +#define FUTEX_WAITV_MAX 128
163288 +struct futex_waitv {
163289 +       void *uaddr;
163290 +       unsigned int val;
163291 +       unsigned int flags;
163293 +#endif
163296 + * - Y2038 section for 32-bit applications -
163298 + * Remove this when glibc is ready for y2038. Then, always compile with
163299 + * `-D_TIME_BITS=64` or `-D__USE_TIME_BITS64`. glibc will provide both
163300 + * timespec64 and clock_gettime64, so we won't need to define them here.
163301 + */
163302 +#if defined(__i386__) || __TIMESIZE == 32
163303 +# define NR_gettime __NR_clock_gettime64
163304 +#else
163305 +# define NR_gettime __NR_clock_gettime
163306 +#endif
163308 +struct timespec64 {
163309 +       long long tv_sec;       /* seconds */
163310 +       long long tv_nsec;      /* nanoseconds */
163313 +int gettime64(clock_t clockid, struct timespec64 *tv)
163315 +       return syscall(NR_gettime, clockid, tv);
163318 + * - End of Y2038 section -
163319 + */
163322 + * futex2_wait - If (*uaddr == val), wait at uaddr until timo
163323 + * @uaddr: User address to wait on
163324 + * @val:   Expected value at uaddr, return if is not equal
163325 + * @flags: Operation flags
163326 + * @timo:  Optional timeout for operation
163327 + */
163328 +static inline int futex2_wait(volatile void *uaddr, unsigned long val,
163329 +                             unsigned long flags, struct timespec64 *timo)
163331 +       return syscall(__NR_futex_wait, uaddr, val, flags, timo);
163335 + * futex2_wake - Wake a number of waiters at uaddr
163336 + * @uaddr: Address to wake
163337 + * @nr:    Number of waiters to wake
163338 + * @flags: Operation flags
163339 + */
163340 +static inline int futex2_wake(volatile void *uaddr, unsigned int nr, unsigned long flags)
163342 +       return syscall(__NR_futex_wake, uaddr, nr, flags);
163346 + * futex2_waitv - Wait at multiple futexes, wake on any
163347 + * @waiters:    Array of waiters
163348 + * @nr_waiters: Length of waiters array
163349 + * @flags: Operation flags
163350 + * @timo:  Optional timeout for operation
163351 + */
163352 +static inline int futex2_waitv(volatile struct futex_waitv *waiters, unsigned long nr_waiters,
163353 +                             unsigned long flags, struct timespec64 *timo)
163355 +       return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, timo);
163359 + * futex2_requeue - Wake futexes at uaddr1 and requeue from uaddr1 to uaddr2
163360 + * @uaddr1:     Original address to wake and requeue from
163361 + * @uaddr2:     Address to requeue to
163362 + * @nr_wake:    Number of futexes to wake at uaddr1 before requeuing
163363 + * @nr_requeue: Number of futexes to requeue from uaddr1 to uaddr2
163364 + * @cmpval:     If (*uaddr1->uaddr != cmpval), return immediately
163365 + * @flags:      Operation flags
163366 + */
163367 +static inline int futex2_requeue(struct futex_requeue *uaddr1, struct futex_requeue *uaddr2,
163368 +                                unsigned int nr_wake, unsigned int nr_requeue,
163369 +                                unsigned int cmpval, unsigned long flags)
163371 +       return syscall(__NR_futex_requeue, uaddr1, uaddr2, nr_wake, nr_requeue, cmpval, flags);
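
A minimal sketch of using the wrappers above outside the ksft harness. This assumes a kernel built with this patch: __NR_futex_wait and __NR_futex_wake come from the futex2 patchset and are not in mainline 5.12.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include "futex2test.h"         /* pulls in futextest.h and the wrappers */

static futex_t f = 0;

static void *waiter(void *arg)
{
        /* Sleeps while f is still 0; returns at once if f already changed */
        if (futex2_wait(&f, 0, FUTEX_32, NULL))
                printf("wait returned early, errno %d\n", errno);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        usleep(10000);                  /* give the waiter time to queue */
        f = 1;                          /* change the expected value... */
        futex2_wake(&f, 1, FUTEX_32);   /* ...then wake one waiter */
        pthread_join(t, NULL);
        puts("waiter woken");
        return 0;
}
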
163373 diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
163374 index bb2752d78fe3..81edbd23d371 100644
163375 --- a/tools/testing/selftests/kvm/dirty_log_test.c
163376 +++ b/tools/testing/selftests/kvm/dirty_log_test.c
163377 @@ -17,6 +17,7 @@
163378  #include <linux/bitmap.h>
163379  #include <linux/bitops.h>
163380  #include <asm/barrier.h>
163381 +#include <linux/atomic.h>
163383  #include "kvm_util.h"
163384  #include "test_util.h"
163385 @@ -137,12 +138,20 @@ static uint64_t host_clear_count;
163386  static uint64_t host_track_next_count;
163388  /* Whether dirty ring reset is requested, or finished */
163389 -static sem_t dirty_ring_vcpu_stop;
163390 -static sem_t dirty_ring_vcpu_cont;
163391 +static sem_t sem_vcpu_stop;
163392 +static sem_t sem_vcpu_cont;
163394 + * This is only set by the main thread, and only cleared by the vcpu thread.
163395 + * It is used to request the vcpu thread to stop at the next GUEST_SYNC, since
163396 + * GUEST_SYNC is the only place where both the "dirty bit" and "dirty data"
163397 + * are guaranteed to match.  E.g., SIG_IPI won't guarantee that if the vcpu is
163398 + * interrupted after setting the dirty bit but before the data is written.
163399 + */
163400 +static atomic_t vcpu_sync_stop_requested;
163402   * This is updated by the vcpu thread to tell the host whether it's a
163403   * ring-full event.  It should only be read until a sem_wait() of
163404 - * dirty_ring_vcpu_stop and before vcpu continues to run.
163405 + * sem_vcpu_stop and before vcpu continues to run.
163406   */
163407  static bool dirty_ring_vcpu_ring_full;
163409 @@ -234,6 +243,17 @@ static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
163410         kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
163413 +/* Should only be called after a GUEST_SYNC */
163414 +static void vcpu_handle_sync_stop(void)
163416 +       if (atomic_read(&vcpu_sync_stop_requested)) {
163417 +               /* The main thread is sleeping, waiting for us to stop */
163418 +               atomic_set(&vcpu_sync_stop_requested, false);
163419 +               sem_post(&sem_vcpu_stop);
163420 +               sem_wait_until(&sem_vcpu_cont);
163421 +       }
163424  static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
163426         struct kvm_run *run = vcpu_state(vm, VCPU_ID);
163427 @@ -244,6 +264,8 @@ static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
163428         TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
163429                     "Invalid guest sync status: exit_reason=%s\n",
163430                     exit_reason_str(run->exit_reason));
163432 +       vcpu_handle_sync_stop();
163435  static bool dirty_ring_supported(void)
163436 @@ -301,13 +323,13 @@ static void dirty_ring_wait_vcpu(void)
163438         /* This makes sure that the hardware PML cache is flushed */
163439         vcpu_kick();
163440 -       sem_wait_until(&dirty_ring_vcpu_stop);
163441 +       sem_wait_until(&sem_vcpu_stop);
163444  static void dirty_ring_continue_vcpu(void)
163446         pr_info("Notifying vcpu to continue\n");
163447 -       sem_post(&dirty_ring_vcpu_cont);
163448 +       sem_post(&sem_vcpu_cont);
163451  static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
163452 @@ -361,11 +383,11 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
163453                 /* Update the flag first before pause */
163454                 WRITE_ONCE(dirty_ring_vcpu_ring_full,
163455                            run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
163456 -               sem_post(&dirty_ring_vcpu_stop);
163457 +               sem_post(&sem_vcpu_stop);
163458                 pr_info("vcpu stops because %s...\n",
163459                         dirty_ring_vcpu_ring_full ?
163460                         "dirty ring is full" : "vcpu is kicked out");
163461 -               sem_wait_until(&dirty_ring_vcpu_cont);
163462 +               sem_wait_until(&sem_vcpu_cont);
163463                 pr_info("vcpu continues now.\n");
163464         } else {
163465                 TEST_ASSERT(false, "Invalid guest sync status: "
163466 @@ -377,7 +399,7 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
163467  static void dirty_ring_before_vcpu_join(void)
163469         /* Kick another round of vcpu just to make sure it will quit */
163470 -       sem_post(&dirty_ring_vcpu_cont);
163471 +       sem_post(&sem_vcpu_cont);
163474  struct log_mode {
163475 @@ -505,9 +527,8 @@ static void *vcpu_worker(void *data)
163476          */
163477         sigmask->len = 8;
163478         pthread_sigmask(0, NULL, sigset);
163479 +       sigdelset(sigset, SIG_IPI);
163480         vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
163481 -       sigaddset(sigset, SIG_IPI);
163482 -       pthread_sigmask(SIG_BLOCK, sigset, NULL);
163484         sigemptyset(sigset);
163485         sigaddset(sigset, SIG_IPI);
163486 @@ -768,7 +789,25 @@ static void run_test(enum vm_guest_mode mode, void *arg)
163487                 usleep(p->interval * 1000);
163488                 log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
163489                                              bmap, host_num_pages);
163491 +               /*
163492 +                * See vcpu_sync_stop_requested definition for details on why
163493 +                * we need to stop the vcpu while verifying data.
163494 +                */
163495 +               atomic_set(&vcpu_sync_stop_requested, true);
163496 +               sem_wait_until(&sem_vcpu_stop);
163497 +               /*
163498 +                * NOTE: for dirty ring, it's possible that we didn't stop at
163499 +                * GUEST_SYNC but instead stopped because the ring is full;
163500 +                * that's okay too, because a full ring means we're only
163501 +                * missing the flush of the last page, and since we handle
163502 +                * the last page specially, verification will succeed anyway.
163503 +                */
163504 +               assert(host_log_mode == LOG_MODE_DIRTY_RING ||
163505 +                      atomic_read(&vcpu_sync_stop_requested) == false);
163506                 vm_dirty_log_verify(mode, bmap);
163507 +               sem_post(&sem_vcpu_cont);
163509                 iteration++;
163510                 sync_global_to_guest(vm, iteration);
163511         }
163512 @@ -818,9 +857,10 @@ int main(int argc, char *argv[])
163513                 .interval = TEST_HOST_LOOP_INTERVAL,
163514         };
163515         int opt, i;
163516 +       sigset_t sigset;
163518 -       sem_init(&dirty_ring_vcpu_stop, 0, 0);
163519 -       sem_init(&dirty_ring_vcpu_cont, 0, 0);
163520 +       sem_init(&sem_vcpu_stop, 0, 0);
163521 +       sem_init(&sem_vcpu_cont, 0, 0);
163523         guest_modes_append_default();
163525 @@ -876,6 +916,11 @@ int main(int argc, char *argv[])
163527         srandom(time(0));
163529 +       /* Ensure that vCPU threads start with SIG_IPI blocked.  */
163530 +       sigemptyset(&sigset);
163531 +       sigaddset(&sigset, SIG_IPI);
163532 +       pthread_sigmask(SIG_BLOCK, &sigset, NULL);
163534         if (host_log_mode_option == LOG_MODE_ALL) {
163535                 /* Run each log mode */
163536                 for (i = 0; i < LOG_MODE_NUM; i++) {
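Distilled from the hunks above, the stop/continue handshake between the main thread and the vcpu thread reduces to the sketch below (sem_wait_until in the test is just a retrying sem_wait; plain sem_wait is used here for brevity):

/* Main thread: park the vcpu at its next GUEST_SYNC, then verify. */
atomic_set(&vcpu_sync_stop_requested, true);
sem_wait(&sem_vcpu_stop);		/* vcpu is now quiescent */
vm_dirty_log_verify(mode, bmap);	/* safe: no concurrent dirtying */
sem_post(&sem_vcpu_cont);		/* let the vcpu run again */

/* vCPU thread, right after every GUEST_SYNC: */
if (atomic_read(&vcpu_sync_stop_requested)) {
	atomic_set(&vcpu_sync_stop_requested, false);
	sem_post(&sem_vcpu_stop);	/* tell main we are parked */
	sem_wait(&sem_vcpu_cont);	/* block until verification is done */
}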
163537 diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
163538 index a5ce26d548e4..0af84ad48aa7 100644
163539 --- a/tools/testing/selftests/lib.mk
163540 +++ b/tools/testing/selftests/lib.mk
163541 @@ -1,6 +1,10 @@
163542  # This mimics the top-level Makefile. We do it explicitly here so that this
163543  # Makefile can operate with or without the kbuild infrastructure.
163544 +ifneq ($(LLVM),)
163545 +CC := clang
163546 +else
163547  CC := $(CROSS_COMPILE)gcc
163548 +endif
163550  ifeq (0,$(MAKELEVEL))
163551      ifeq ($(OUTPUT),)
163552 @@ -74,7 +78,8 @@ ifdef building_out_of_srctree
163553                 rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
163554         fi
163555         @if [ "X$(TEST_PROGS)" != "X" ]; then \
163556 -               $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(OUTPUT)/$(TEST_PROGS)) ; \
163557 +               $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
163558 +                                 $(addprefix $(OUTPUT)/,$(TEST_PROGS))) ; \
163559         else \
163560                 $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS)); \
163561         fi
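With these hunks the selftests follow the top-level kbuild convention: invoking make with LLVM=1 selects clang, while the default remains $(CROSS_COMPILE)gcc. The second hunk wraps $(TEST_PROGS) in $(addprefix $(OUTPUT)/,...) because the old $(OUTPUT)/$(TEST_PROGS) form only prefixed the first program when TEST_PROGS listed several entries, leaving the rest resolved relative to the wrong directory.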
163562 diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
163563 index c02291e9841e..880e3ab9d088 100755
163564 --- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
163565 +++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
163566 @@ -271,7 +271,7 @@ test_span_gre_fdb_roaming()
163568         while ((RET == 0)); do
163569                 bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
163570 -               bridge fdb add dev $swp2 $h3mac vlan 555 master
163571 +               bridge fdb add dev $swp2 $h3mac vlan 555 master static
163572                 sleep 1
163573                 fail_test_span_gre_dir $tundev ingress
163575 diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
163576 index 13db1cb50e57..6406cd76a19d 100644
163577 --- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
163578 +++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
163579 @@ -20,6 +20,13 @@ mirror_uninstall()
163580         tc filter del dev $swp1 $direction pref 1000
163583 +is_ipv6()
163585 +       local addr=$1; shift
163587 +       [[ -z ${addr//[0-9a-fA-F:]/} ]]
163590  mirror_test()
163592         local vrf_name=$1; shift
163593 @@ -29,9 +36,17 @@ mirror_test()
163594         local pref=$1; shift
163595         local expect=$1; shift
163597 +       if is_ipv6 $dip; then
163598 +               local proto=-6
163599 +               local type="icmp6 type=128" # Echo request.
163600 +       else
163601 +               local proto=
163602 +               local type="icmp echoreq"
163603 +       fi
163605         local t0=$(tc_rule_stats_get $dev $pref)
163606 -       $MZ $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
163607 -           -c 10 -d 100msec -t icmp type=8
163608 +       $MZ $proto $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
163609 +           -c 10 -d 100msec -t $type
163610         sleep 0.5
163611         local t1=$(tc_rule_stats_get $dev $pref)
163612         local delta=$((t1 - t0))
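The is_ipv6 helper relies on a shell parameter-expansion trick: ${addr//[0-9a-fA-F:]/} deletes every hex digit and colon from the address, so the [[ -z ... ]] test succeeds exactly when nothing else was present, i.e. when the string contains only characters legal in a numeric IPv6 literal. mirror_test then switches $MZ to IPv6 mode (-6) and sends ICMPv6 echo requests (type 128) instead of ICMP echoreq.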
163613 diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
163614 index 39edce4f541c..2674ba20d524 100755
163615 --- a/tools/testing/selftests/net/mptcp/diag.sh
163616 +++ b/tools/testing/selftests/net/mptcp/diag.sh
163617 @@ -5,8 +5,9 @@ rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
163618  ns="ns1-$rndh"
163619  ksft_skip=4
163620  test_cnt=1
163621 +timeout_poll=100
163622 +timeout_test=$((timeout_poll * 2 + 1))
163623  ret=0
163624 -pids=()
163626  flush_pids()
163628 @@ -14,18 +15,14 @@ flush_pids()
163629         # give it some time
163630         sleep 1.1
163632 -       for pid in ${pids[@]}; do
163633 -               [ -d /proc/$pid ] && kill -SIGUSR1 $pid >/dev/null 2>&1
163634 -       done
163635 -       pids=()
163636 +       ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGUSR1 &>/dev/null
163639  cleanup()
163641 +       ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGKILL &>/dev/null
163643         ip netns del $ns
163644 -       for pid in ${pids[@]}; do
163645 -               [ -d /proc/$pid ] && kill -9 $pid >/dev/null 2>&1
163646 -       done
163649  ip -Version > /dev/null 2>&1
163650 @@ -79,39 +76,57 @@ trap cleanup EXIT
163651  ip netns add $ns
163652  ip -n $ns link set dev lo up
163654 -echo "a" | ip netns exec $ns ./mptcp_connect -p 10000 -l 0.0.0.0 -t 100 >/dev/null &
163655 +echo "a" | \
163656 +       timeout ${timeout_test} \
163657 +               ip netns exec $ns \
163658 +                       ./mptcp_connect -p 10000 -l -t ${timeout_poll} \
163659 +                               0.0.0.0 >/dev/null &
163660  sleep 0.1
163661 -pids[0]=$!
163662  chk_msk_nr 0 "no msk on netns creation"
163664 -echo "b" | ip netns exec $ns ./mptcp_connect -p 10000 127.0.0.1 -j -t 100 >/dev/null &
163665 +echo "b" | \
163666 +       timeout ${timeout_test} \
163667 +               ip netns exec $ns \
163668 +                       ./mptcp_connect -p 10000 -j -t ${timeout_poll} \
163669 +                               127.0.0.1 >/dev/null &
163670  sleep 0.1
163671 -pids[1]=$!
163672  chk_msk_nr 2 "after MPC handshake "
163673  chk_msk_remote_key_nr 2 "....chk remote_key"
163674  chk_msk_fallback_nr 0 "....chk no fallback"
163675  flush_pids
163678 -echo "a" | ip netns exec $ns ./mptcp_connect -p 10001 -s TCP -l 0.0.0.0 -t 100 >/dev/null &
163679 -pids[0]=$!
163680 +echo "a" | \
163681 +       timeout ${timeout_test} \
163682 +               ip netns exec $ns \
163683 +                       ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
163684 +                               0.0.0.0 >/dev/null &
163685  sleep 0.1
163686 -echo "b" | ip netns exec $ns ./mptcp_connect -p 10001 127.0.0.1 -j -t 100 >/dev/null &
163687 -pids[1]=$!
163688 +echo "b" | \
163689 +       timeout ${timeout_test} \
163690 +               ip netns exec $ns \
163691 +                       ./mptcp_connect -p 10001 -j -t ${timeout_poll} \
163692 +                               127.0.0.1 >/dev/null &
163693  sleep 0.1
163694  chk_msk_fallback_nr 1 "check fallback"
163695  flush_pids
163697  NR_CLIENTS=100
163698  for I in `seq 1 $NR_CLIENTS`; do
163699 -       echo "a" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) -l 0.0.0.0 -t 100 -w 10 >/dev/null  &
163700 -       pids[$((I*2))]=$!
163701 +       echo "a" | \
163702 +               timeout ${timeout_test} \
163703 +                       ip netns exec $ns \
163704 +                               ./mptcp_connect -p $((I+10001)) -l -w 10 \
163705 +                                       -t ${timeout_poll} 0.0.0.0 >/dev/null &
163706  done
163707  sleep 0.1
163709  for I in `seq 1 $NR_CLIENTS`; do
163710 -       echo "b" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) 127.0.0.1 -t 100 -w 10 >/dev/null &
163711 -       pids[$((I*2 + 1))]=$!
163712 +       echo "b" | \
163713 +               timeout ${timeout_test} \
163714 +                       ip netns exec $ns \
163715 +                               ./mptcp_connect -p $((I+10001)) -w 10 \
163716 +                                       -t ${timeout_poll} 127.0.0.1 >/dev/null &
163717  done
163718  sleep 1.5
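The new timeout_test=$((timeout_poll * 2 + 1)) acts as a watchdog: the outer timeout(1) is sized to strictly exceed the poll timeout that -t passes to mptcp_connect, so a test hanging past its own polling deadline is killed instead of stalling the whole suite. Dropping the pids array in favour of "ip netns pids" also lets flush_pids() and cleanup() signal every process in the namespace without any manual bookkeeping.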
163720 diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
163721 index 10a030b53b23..65b3b983efc2 100755
163722 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
163723 +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
163724 @@ -11,7 +11,8 @@ cin=""
163725  cout=""
163726  ksft_skip=4
163727  capture=false
163728 -timeout=30
163729 +timeout_poll=30
163730 +timeout_test=$((timeout_poll * 2 + 1))
163731  ipv6=true
163732  ethtool_random_on=true
163733  tc_delay="$((RANDOM%50))"
163734 @@ -273,7 +274,7 @@ check_mptcp_disabled()
163735         ip netns exec ${disabled_ns} sysctl -q net.mptcp.enabled=0
163737         local err=0
163738 -       LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -t $timeout -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
163739 +       LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
163740                 grep -q "^socket: Protocol not available$" && err=1
163741         ip netns delete ${disabled_ns}
163743 @@ -430,14 +431,20 @@ do_transfer()
163744         local stat_cookietx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
163745         local stat_cookierx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
163747 -       ip netns exec ${listener_ns} ./mptcp_connect -t $timeout -l -p $port -s ${srv_proto} $extra_args $local_addr < "$sin" > "$sout" &
163748 +       timeout ${timeout_test} \
163749 +               ip netns exec ${listener_ns} \
163750 +                       ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
163751 +                               $extra_args $local_addr < "$sin" > "$sout" &
163752         local spid=$!
163754         wait_local_port_listen "${listener_ns}" "${port}"
163756         local start
163757         start=$(date +%s%3N)
163758 -       ip netns exec ${connector_ns} ./mptcp_connect -t $timeout -p $port -s ${cl_proto} $extra_args $connect_addr < "$cin" > "$cout" &
163759 +       timeout ${timeout_test} \
163760 +               ip netns exec ${connector_ns} \
163761 +                       ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
163762 +                               $extra_args $connect_addr < "$cin" > "$cout" &
163763         local cpid=$!
163765         wait $cpid
163766 diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
163767 index ad32240fbfda..43ed99de7734 100755
163768 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
163769 +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
163770 @@ -8,7 +8,8 @@ cin=""
163771  cinsent=""
163772  cout=""
163773  ksft_skip=4
163774 -timeout=30
163775 +timeout_poll=30
163776 +timeout_test=$((timeout_poll * 2 + 1))
163777  mptcp_connect=""
163778  capture=0
163779  do_all_tests=1
163780 @@ -245,17 +246,26 @@ do_transfer()
163781                 local_addr="0.0.0.0"
163782         fi
163784 -       ip netns exec ${listener_ns} $mptcp_connect -t $timeout -l -p $port \
163785 -               -s ${srv_proto} ${local_addr} < "$sin" > "$sout" &
163786 +       timeout ${timeout_test} \
163787 +               ip netns exec ${listener_ns} \
163788 +                       $mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
163789 +                               ${local_addr} < "$sin" > "$sout" &
163790         spid=$!
163792         sleep 1
163794         if [ "$test_link_fail" -eq 0 ];then
163795 -               ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
163796 +               timeout ${timeout_test} \
163797 +                       ip netns exec ${connector_ns} \
163798 +                               $mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
163799 +                                       $connect_addr < "$cin" > "$cout" &
163800         else
163801 -               ( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | tee "$cinsent" | \
163802 -               ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr > "$cout" &
163803 +               ( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | \
163804 +                       tee "$cinsent" | \
163805 +                       timeout ${timeout_test} \
163806 +                               ip netns exec ${connector_ns} \
163807 +                                       $mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
163808 +                                               $connect_addr > "$cout" &
163809         fi
163810         cpid=$!
163812 diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
163813 index f039ee57eb3c..3aeef3bcb101 100755
163814 --- a/tools/testing/selftests/net/mptcp/simult_flows.sh
163815 +++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
163816 @@ -7,7 +7,8 @@ ns2="ns2-$rndh"
163817  ns3="ns3-$rndh"
163818  capture=false
163819  ksft_skip=4
163820 -timeout=30
163821 +timeout_poll=30
163822 +timeout_test=$((timeout_poll * 2 + 1))
163823  test_cnt=1
163824  ret=0
163825  bail=0
163826 @@ -157,14 +158,20 @@ do_transfer()
163827                 sleep 1
163828         fi
163830 -       ip netns exec ${ns3} ./mptcp_connect -jt $timeout -l -p $port 0.0.0.0 < "$sin" > "$sout" &
163831 +       timeout ${timeout_test} \
163832 +               ip netns exec ${ns3} \
163833 +                       ./mptcp_connect -jt ${timeout_poll} -l -p $port \
163834 +                               0.0.0.0 < "$sin" > "$sout" &
163835         local spid=$!
163837         wait_local_port_listen "${ns3}" "${port}"
163839         local start
163840         start=$(date +%s%3N)
163841 -       ip netns exec ${ns1} ./mptcp_connect -jt $timeout -p $port 10.0.3.3 < "$cin" > "$cout" &
163842 +       timeout ${timeout_test} \
163843 +               ip netns exec ${ns1} \
163844 +                       ./mptcp_connect -jt ${timeout_poll} -p $port \
163845 +                               10.0.3.3 < "$cin" > "$cout" &
163846         local cpid=$!
163848         wait $cpid
163849 diff --git a/tools/testing/selftests/powerpc/security/entry_flush.c b/tools/testing/selftests/powerpc/security/entry_flush.c
163850 index 78cf914fa321..68ce377b205e 100644
163851 --- a/tools/testing/selftests/powerpc/security/entry_flush.c
163852 +++ b/tools/testing/selftests/powerpc/security/entry_flush.c
163853 @@ -53,7 +53,7 @@ int entry_flush_test(void)
163855         entry_flush = entry_flush_orig;
163857 -       fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
163858 +       fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
163859         FAIL_IF(fd < 0);
163861         p = (char *)memalign(zero_size, CACHELINE_SIZE);
163862 diff --git a/tools/testing/selftests/powerpc/security/flush_utils.h b/tools/testing/selftests/powerpc/security/flush_utils.h
163863 index 07a5eb301466..7a3d60292916 100644
163864 --- a/tools/testing/selftests/powerpc/security/flush_utils.h
163865 +++ b/tools/testing/selftests/powerpc/security/flush_utils.h
163866 @@ -9,6 +9,10 @@
163868  #define CACHELINE_SIZE 128
163870 +#define PERF_L1D_READ_MISS_CONFIG      ((PERF_COUNT_HW_CACHE_L1D) |            \
163871 +                                       (PERF_COUNT_HW_CACHE_OP_READ << 8) |    \
163872 +                                       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16))
163874  void syscall_loop(char *p, unsigned long iterations,
163875                   unsigned long zero_size);
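PERF_L1D_READ_MISS_CONFIG uses the standard perf_event_open(2) encoding for PERF_TYPE_HW_CACHE events: cache id in bits 0-7, operation in bits 8-15, result in bits 16-23. As a standalone illustration (a sketch, not part of the selftest), the same counter could be opened directly like this:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_l1d_read_miss(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HW_CACHE;
	attr.config = PERF_COUNT_HW_CACHE_L1D |
		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* perf_event_open has no glibc wrapper; call it via syscall(2). */
	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}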
163877 diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c
163878 index 7565fd786640..f73484a6470f 100644
163879 --- a/tools/testing/selftests/powerpc/security/rfi_flush.c
163880 +++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
163881 @@ -54,7 +54,7 @@ int rfi_flush_test(void)
163883         rfi_flush = rfi_flush_orig;
163885 -       fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
163886 +       fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
163887         FAIL_IF(fd < 0);
163889         p = (char *)memalign(zero_size, CACHELINE_SIZE);
163890 diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
163891 index d585cc1948cc..6bcee2ec91a9 100644
163892 --- a/tools/testing/selftests/resctrl/Makefile
163893 +++ b/tools/testing/selftests/resctrl/Makefile
163894 @@ -1,5 +1,5 @@
163895  CC = $(CROSS_COMPILE)gcc
163896 -CFLAGS = -g -Wall
163897 +CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
163898  SRCS=$(wildcard *.c)
163899  OBJS=$(SRCS:.c=.o)
163901 diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
163902 index 38dbf4962e33..5922cc1b0386 100644
163903 --- a/tools/testing/selftests/resctrl/cache.c
163904 +++ b/tools/testing/selftests/resctrl/cache.c
163905 @@ -182,7 +182,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
163906         /*
163907          * Measure cache miss from perf.
163908          */
163909 -       if (!strcmp(param->resctrl_val, "cat")) {
163910 +       if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) {
163911                 ret = get_llc_perf(&llc_perf_miss);
163912                 if (ret < 0)
163913                         return ret;
163914 @@ -192,7 +192,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
163915         /*
163916          * Measure llc occupancy from resctrl.
163917          */
163918 -       if (!strcmp(param->resctrl_val, "cqm")) {
163919 +       if (!strncmp(param->resctrl_val, CQM_STR, sizeof(CQM_STR))) {
163920                 ret = get_llc_occu_resctrl(&llc_occu_resc);
163921                 if (ret < 0)
163922                         return ret;
163923 @@ -234,7 +234,7 @@ int cat_val(struct resctrl_val_param *param)
163924         if (ret)
163925                 return ret;
163927 -       if ((strcmp(resctrl_val, "cat") == 0)) {
163928 +       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
163929                 ret = initialize_llc_perf();
163930                 if (ret)
163931                         return ret;
163932 @@ -242,7 +242,7 @@ int cat_val(struct resctrl_val_param *param)
163934         /* Test runs until the callback setup() tells the test to stop. */
163935         while (1) {
163936 -               if (strcmp(resctrl_val, "cat") == 0) {
163937 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
163938                         ret = param->setup(1, param);
163939                         if (ret) {
163940                                 ret = 0;
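The bounded comparisons above are exact matches: sizeof(CAT_STR) counts the terminating NUL, so strncmp compares the terminator too, and a longer token can no longer slip through the way a length-limited prefix compare would:

/* With #define CAT_STR "cat", sizeof(CAT_STR) == 4 */
strncmp("cat",      CAT_STR, sizeof(CAT_STR));	/* == 0, full match   */
strncmp("category", CAT_STR, sizeof(CAT_STR));	/* != 0, 'e' vs '\0'  */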
163941 diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
163942 index 5da43767b973..20823725daca 100644
163943 --- a/tools/testing/selftests/resctrl/cat_test.c
163944 +++ b/tools/testing/selftests/resctrl/cat_test.c
163945 @@ -17,10 +17,10 @@
163946  #define MAX_DIFF_PERCENT       4
163947  #define MAX_DIFF               1000000
163949 -int count_of_bits;
163950 -char cbm_mask[256];
163951 -unsigned long long_mask;
163952 -unsigned long cache_size;
163953 +static int count_of_bits;
163954 +static char cbm_mask[256];
163955 +static unsigned long long_mask;
163956 +static unsigned long cache_size;
163959   * Change schemata. Write schemata to specified
163960 @@ -136,7 +136,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
163961                 return -1;
163963         /* Get default cbm mask for L3/L2 cache */
163964 -       ret = get_cbm_mask(cache_type);
163965 +       ret = get_cbm_mask(cache_type, cbm_mask);
163966         if (ret)
163967                 return ret;
163969 @@ -164,7 +164,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
163970                 return -1;
163972         struct resctrl_val_param param = {
163973 -               .resctrl_val    = "cat",
163974 +               .resctrl_val    = CAT_STR,
163975                 .cpu_no         = cpu_no,
163976                 .mum_resctrlfs  = 0,
163977                 .setup          = cat_setup,
163978 diff --git a/tools/testing/selftests/resctrl/cqm_test.c b/tools/testing/selftests/resctrl/cqm_test.c
163979 index c8756152bd61..271752e9ef5b 100644
163980 --- a/tools/testing/selftests/resctrl/cqm_test.c
163981 +++ b/tools/testing/selftests/resctrl/cqm_test.c
163982 @@ -16,10 +16,10 @@
163983  #define MAX_DIFF               2000000
163984  #define MAX_DIFF_PERCENT       15
163986 -int count_of_bits;
163987 -char cbm_mask[256];
163988 -unsigned long long_mask;
163989 -unsigned long cache_size;
163990 +static int count_of_bits;
163991 +static char cbm_mask[256];
163992 +static unsigned long long_mask;
163993 +static unsigned long cache_size;
163995  static int cqm_setup(int num, ...)
163997 @@ -86,7 +86,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
163998                 return errno;
163999         }
164001 -       while (fgets(temp, 1024, fp)) {
164002 +       while (fgets(temp, sizeof(temp), fp)) {
164003                 char *token = strtok(temp, ":\t");
164004                 int fields = 0;
164006 @@ -125,7 +125,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
164007         if (!validate_resctrl_feature_request("cqm"))
164008                 return -1;
164010 -       ret = get_cbm_mask("L3");
164011 +       ret = get_cbm_mask("L3", cbm_mask);
164012         if (ret)
164013                 return ret;
164015 @@ -145,7 +145,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
164016         }
164018         struct resctrl_val_param param = {
164019 -               .resctrl_val    = "cqm",
164020 +               .resctrl_val    = CQM_STR,
164021                 .ctrlgrp        = "c1",
164022                 .mongrp         = "m1",
164023                 .cpu_no         = cpu_no,
164024 diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
164025 index 79c611c99a3d..51e5cf22632f 100644
164026 --- a/tools/testing/selftests/resctrl/fill_buf.c
164027 +++ b/tools/testing/selftests/resctrl/fill_buf.c
164028 @@ -115,7 +115,7 @@ static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
164030         while (1) {
164031                 ret = fill_one_span_read(start_ptr, end_ptr);
164032 -               if (!strcmp(resctrl_val, "cat"))
164033 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
164034                         break;
164035         }
164037 @@ -134,7 +134,7 @@ static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
164039         while (1) {
164040                 fill_one_span_write(start_ptr, end_ptr);
164041 -               if (!strcmp(resctrl_val, "cat"))
164042 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
164043                         break;
164044         }
164046 diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
164047 index 7bf8eaa6204b..6449fbd96096 100644
164048 --- a/tools/testing/selftests/resctrl/mba_test.c
164049 +++ b/tools/testing/selftests/resctrl/mba_test.c
164050 @@ -141,7 +141,7 @@ void mba_test_cleanup(void)
164051  int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
164053         struct resctrl_val_param param = {
164054 -               .resctrl_val    = "mba",
164055 +               .resctrl_val    = MBA_STR,
164056                 .ctrlgrp        = "c1",
164057                 .mongrp         = "m1",
164058                 .cpu_no         = cpu_no,
164059 diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
164060 index 4700f7453f81..ec6cfe01c9c2 100644
164061 --- a/tools/testing/selftests/resctrl/mbm_test.c
164062 +++ b/tools/testing/selftests/resctrl/mbm_test.c
164063 @@ -114,7 +114,7 @@ void mbm_test_cleanup(void)
164064  int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
164066         struct resctrl_val_param param = {
164067 -               .resctrl_val    = "mbm",
164068 +               .resctrl_val    = MBM_STR,
164069                 .ctrlgrp        = "c1",
164070                 .mongrp         = "m1",
164071                 .span           = span,
164072 diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
164073 index 39bf59c6b9c5..9dcc96e1ad3d 100644
164074 --- a/tools/testing/selftests/resctrl/resctrl.h
164075 +++ b/tools/testing/selftests/resctrl/resctrl.h
164076 @@ -28,6 +28,10 @@
164077  #define RESCTRL_PATH           "/sys/fs/resctrl"
164078  #define PHYS_ID_PATH           "/sys/devices/system/cpu/cpu"
164079  #define CBM_MASK_PATH          "/sys/fs/resctrl/info"
164080 +#define L3_PATH                        "/sys/fs/resctrl/info/L3"
164081 +#define MB_PATH                        "/sys/fs/resctrl/info/MB"
164082 +#define L3_MON_PATH            "/sys/fs/resctrl/info/L3_MON"
164083 +#define L3_MON_FEATURES_PATH   "/sys/fs/resctrl/info/L3_MON/mon_features"
164085  #define PARENT_EXIT(err_msg)                   \
164086         do {                                    \
164087 @@ -62,11 +66,16 @@ struct resctrl_val_param {
164088         int             (*setup)(int num, ...);
164091 -pid_t bm_pid, ppid;
164092 -int tests_run;
164093 +#define MBM_STR                        "mbm"
164094 +#define MBA_STR                        "mba"
164095 +#define CQM_STR                        "cqm"
164096 +#define CAT_STR                        "cat"
164098 -char llc_occup_path[1024];
164099 -bool is_amd;
164100 +extern pid_t bm_pid, ppid;
164101 +extern int tests_run;
164103 +extern char llc_occup_path[1024];
164104 +extern bool is_amd;
164106  bool check_resctrlfs_support(void);
164107  int filter_dmesg(void);
164108 @@ -74,7 +83,7 @@ int remount_resctrlfs(bool mum_resctrlfs);
164109  int get_resource_id(int cpu_no, int *resource_id);
164110  int umount_resctrlfs(void);
164111  int validate_bw_report_request(char *bw_report);
164112 -bool validate_resctrl_feature_request(char *resctrl_val);
164113 +bool validate_resctrl_feature_request(const char *resctrl_val);
164114  char *fgrep(FILE *inf, const char *str);
164115  int taskset_benchmark(pid_t bm_pid, int cpu_no);
164116  void run_benchmark(int signum, siginfo_t *info, void *ucontext);
164117 @@ -92,7 +101,7 @@ void tests_cleanup(void);
164118  void mbm_test_cleanup(void);
164119  int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
164120  void mba_test_cleanup(void);
164121 -int get_cbm_mask(char *cache_type);
164122 +int get_cbm_mask(char *cache_type, char *cbm_mask);
164123  int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
164124  void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
164125  int cat_val(struct resctrl_val_param *param);
164126 diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
164127 index 425cc85ac883..ac2269610aa9 100644
164128 --- a/tools/testing/selftests/resctrl/resctrl_tests.c
164129 +++ b/tools/testing/selftests/resctrl/resctrl_tests.c
164130 @@ -73,7 +73,7 @@ int main(int argc, char **argv)
164131                 }
164132         }
164134 -       while ((c = getopt(argc_new, argv, "ht:b:")) != -1) {
164135 +       while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
164136                 char *token;
164138                 switch (c) {
164139 @@ -85,13 +85,13 @@ int main(int argc, char **argv)
164140                         cqm_test = false;
164141                         cat_test = false;
164142                         while (token) {
164143 -                               if (!strcmp(token, "mbm")) {
164144 +                               if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) {
164145                                         mbm_test = true;
164146 -                               } else if (!strcmp(token, "mba")) {
164147 +                               } else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) {
164148                                         mba_test = true;
164149 -                               } else if (!strcmp(token, "cqm")) {
164150 +                               } else if (!strncmp(token, CQM_STR, sizeof(CQM_STR))) {
164151                                         cqm_test = true;
164152 -                               } else if (!strcmp(token, "cat")) {
164153 +                               } else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) {
164154                                         cat_test = true;
164155                                 } else {
164156                                         printf("invalid argument\n");
164157 @@ -161,7 +161,7 @@ int main(int argc, char **argv)
164158         if (!is_amd && mbm_test) {
164159                 printf("# Starting MBM BW change ...\n");
164160                 if (!has_ben)
164161 -                       sprintf(benchmark_cmd[5], "%s", "mba");
164162 +                       sprintf(benchmark_cmd[5], "%s", MBA_STR);
164163                 res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
164164                 printf("%sok MBM: bw change\n", res ? "not " : "");
164165                 mbm_test_cleanup();
164166 @@ -181,7 +181,7 @@ int main(int argc, char **argv)
164167         if (cqm_test) {
164168                 printf("# Starting CQM test ...\n");
164169                 if (!has_ben)
164170 -                       sprintf(benchmark_cmd[5], "%s", "cqm");
164171 +                       sprintf(benchmark_cmd[5], "%s", CQM_STR);
164172                 res = cqm_resctrl_val(cpu_no, no_of_bits, benchmark_cmd);
164173                 printf("%sok CQM: test\n", res ? "not " : "");
164174                 cqm_test_cleanup();
164175 diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
164176 index 520fea3606d1..8df557894059 100644
164177 --- a/tools/testing/selftests/resctrl/resctrl_val.c
164178 +++ b/tools/testing/selftests/resctrl/resctrl_val.c
164179 @@ -221,8 +221,8 @@ static int read_from_imc_dir(char *imc_dir, int count)
164180   */
164181  static int num_of_imcs(void)
164183 +       char imc_dir[512], *temp;
164184         unsigned int count = 0;
164185 -       char imc_dir[512];
164186         struct dirent *ep;
164187         int ret;
164188         DIR *dp;
164189 @@ -230,7 +230,25 @@ static int num_of_imcs(void)
164190         dp = opendir(DYN_PMU_PATH);
164191         if (dp) {
164192                 while ((ep = readdir(dp))) {
164193 -                       if (strstr(ep->d_name, UNCORE_IMC)) {
164194 +                       temp = strstr(ep->d_name, UNCORE_IMC);
164195 +                       if (!temp)
164196 +                               continue;
164198 +                       /*
164199 +                        * imc counters are named "uncore_imc_<n>", so
164200 +                        * advance the pointer to point at <n>. Note that
164201 +                        * sizeof(UNCORE_IMC) counts the terminating null
164202 +                        * character as well, which conveniently also covers
164203 +                        * the trailing underscore in "uncore_imc_".
164204 +                        */
164205 +                       temp = temp + sizeof(UNCORE_IMC);
164207 +                       /*
164208 +                        * Some directories under "DYN_PMU_PATH" could have
164209 +                        * names like "uncore_imc_free_running", hence, check if
164210 +                        * first character is a numerical digit or not.
164211 +                        */
164212 +                       if (temp[0] >= '0' && temp[0] <= '9') {
164213                                 sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
164214                                         ep->d_name);
164215                                 ret = read_from_imc_dir(imc_dir, count);
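A worked example of the pointer arithmetic, assuming UNCORE_IMC is defined as "uncore_imc" in the test's headers: for a directory named "uncore_imc_0", strstr() returns a pointer to the leading 'u'; sizeof("uncore_imc") is 11 (ten characters plus the NUL), so temp + sizeof(UNCORE_IMC) lands on the '0', skipping the name and the separating underscore in one step.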
164216 @@ -282,9 +300,9 @@ static int initialize_mem_bw_imc(void)
164217   * Memory B/W utilized by a process on a socket can be calculated using
164218   * iMC counters. Perf events are used to read these counters.
164219   *
164220 - * Return: >= 0 on success. < 0 on failure.
164221 + * Return: = 0 on success. < 0 on failure.
164222   */
164223 -static float get_mem_bw_imc(int cpu_no, char *bw_report)
164224 +static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
164226         float reads, writes, of_mul_read, of_mul_write;
164227         int imc, j, ret;
164228 @@ -355,13 +373,18 @@ static float get_mem_bw_imc(int cpu_no, char *bw_report)
164229                 close(imc_counters_config[imc][WRITE].fd);
164230         }
164232 -       if (strcmp(bw_report, "reads") == 0)
164233 -               return reads;
164234 +       if (strcmp(bw_report, "reads") == 0) {
164235 +               *bw_imc = reads;
164236 +               return 0;
164237 +       }
164239 -       if (strcmp(bw_report, "writes") == 0)
164240 -               return writes;
164241 +       if (strcmp(bw_report, "writes") == 0) {
164242 +               *bw_imc = writes;
164243 +               return 0;
164244 +       }
164246 -       return (reads + writes);
164247 +       *bw_imc = reads + writes;
164248 +       return 0;
164251  void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
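The get_mem_bw_imc() refactor separates status from data: the bandwidth now comes back through the *bw_imc out-parameter, and the return value carries only 0 or a negative error code. Under the old signature, a small but valid float bandwidth reading could be mistaken for an error by the caller's "if (bw_imc <= 0)" check; callers (see measure_vals() below) now test "ret < 0" instead.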
164252 @@ -397,10 +420,10 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
164253                 return;
164254         }
164256 -       if (strcmp(resctrl_val, "mbm") == 0)
164257 +       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
164258                 set_mbm_path(ctrlgrp, mongrp, resource_id);
164260 -       if ((strcmp(resctrl_val, "mba") == 0)) {
164261 +       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
164262                 if (ctrlgrp)
164263                         sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
164264                                 RESCTRL_PATH, ctrlgrp, resource_id);
164265 @@ -420,9 +443,8 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
164266   * 1. If con_mon grp is given, then read from it
164267   * 2. If con_mon grp is not given, then read from root con_mon grp
164268   */
164269 -static unsigned long get_mem_bw_resctrl(void)
164270 +static int get_mem_bw_resctrl(unsigned long *mbm_total)
164272 -       unsigned long mbm_total = 0;
164273         FILE *fp;
164275         fp = fopen(mbm_total_path, "r");
164276 @@ -431,7 +453,7 @@ static unsigned long get_mem_bw_resctrl(void)
164278                 return -1;
164279         }
164280 -       if (fscanf(fp, "%lu", &mbm_total) <= 0) {
164281 +       if (fscanf(fp, "%lu", mbm_total) <= 0) {
164282                 perror("Could not get mbm local bytes");
164283                 fclose(fp);
164285 @@ -439,7 +461,7 @@ static unsigned long get_mem_bw_resctrl(void)
164286         }
164287         fclose(fp);
164289 -       return mbm_total;
164290 +       return 0;
164293  pid_t bm_pid, ppid;
164294 @@ -524,14 +546,15 @@ static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
164295                 return;
164296         }
164298 -       if (strcmp(resctrl_val, "cqm") == 0)
164299 +       if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
164300                 set_cqm_path(ctrlgrp, mongrp, resource_id);
164303  static int
164304  measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
164306 -       unsigned long bw_imc, bw_resc, bw_resc_end;
164307 +       unsigned long bw_resc, bw_resc_end;
164308 +       float bw_imc;
164309         int ret;
164311         /*
164312 @@ -541,13 +564,13 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
164313          * Compare the two values to validate resctrl value.
164314          * It takes 1sec to measure the data.
164315          */
164316 -       bw_imc = get_mem_bw_imc(param->cpu_no, param->bw_report);
164317 -       if (bw_imc <= 0)
164318 -               return bw_imc;
164319 +       ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
164320 +       if (ret < 0)
164321 +               return ret;
164323 -       bw_resc_end = get_mem_bw_resctrl();
164324 -       if (bw_resc_end <= 0)
164325 -               return bw_resc_end;
164326 +       ret = get_mem_bw_resctrl(&bw_resc_end);
164327 +       if (ret < 0)
164328 +               return ret;
164330         bw_resc = (bw_resc_end - *bw_resc_start) / MB;
164331         ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
164332 @@ -579,8 +602,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
164333         if (strcmp(param->filename, "") == 0)
164334                 sprintf(param->filename, "stdio");
164336 -       if ((strcmp(resctrl_val, "mba")) == 0 ||
164337 -           (strcmp(resctrl_val, "mbm")) == 0) {
164338 +       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
164339 +           !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
164340                 ret = validate_bw_report_request(param->bw_report);
164341                 if (ret)
164342                         return ret;
164343 @@ -674,15 +697,15 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
164344         if (ret)
164345                 goto out;
164347 -       if ((strcmp(resctrl_val, "mbm") == 0) ||
164348 -           (strcmp(resctrl_val, "mba") == 0)) {
164349 +       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
164350 +           !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
164351                 ret = initialize_mem_bw_imc();
164352                 if (ret)
164353                         goto out;
164355                 initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
164356                                           param->cpu_no, resctrl_val);
164357 -       } else if (strcmp(resctrl_val, "cqm") == 0)
164358 +       } else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
164359                 initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
164360                                             param->cpu_no, resctrl_val);
164362 @@ -710,8 +733,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
164364         /* Test runs until the callback setup() tells the test to stop. */
164365         while (1) {
164366 -               if ((strcmp(resctrl_val, "mbm") == 0) ||
164367 -                   (strcmp(resctrl_val, "mba") == 0)) {
164368 +               if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
164369 +                   !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
164370                         ret = param->setup(1, param);
164371                         if (ret) {
164372                                 ret = 0;
164373 @@ -721,7 +744,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
164374                         ret = measure_vals(param, &bw_resc_start);
164375                         if (ret)
164376                                 break;
164377 -               } else if (strcmp(resctrl_val, "cqm") == 0) {
164378 +               } else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR))) {
164379                         ret = param->setup(1, param);
164380                         if (ret) {
164381                                 ret = 0;
164382 diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
164383 index 19c0ec4045a4..b57170f53861 100644
164384 --- a/tools/testing/selftests/resctrl/resctrlfs.c
164385 +++ b/tools/testing/selftests/resctrl/resctrlfs.c
164386 @@ -49,8 +49,6 @@ static int find_resctrl_mount(char *buffer)
164387         return -ENOENT;
164390 -char cbm_mask[256];
164393   * remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
164394   * @mum_resctrlfs:     Should the resctrl FS be remounted?
164395 @@ -205,16 +203,18 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
164397   * get_cbm_mask - Get cbm mask for given cache
164398   * @cache_type:        Cache level L2/L3
164400 - * Mask is stored in cbm_mask which is global variable.
164401 + * @cbm_mask:  cbm_mask returned as a string
164402   *
164403   * Return: = 0 on success, < 0 on failure.
164404   */
164405 -int get_cbm_mask(char *cache_type)
164406 +int get_cbm_mask(char *cache_type, char *cbm_mask)
164408         char cbm_mask_path[1024];
164409         FILE *fp;
164411 +       if (!cbm_mask)
164412 +               return -1;
164414         sprintf(cbm_mask_path, "%s/%s/cbm_mask", CBM_MASK_PATH, cache_type);
164416         fp = fopen(cbm_mask_path, "r");
164417 @@ -334,7 +334,7 @@ void run_benchmark(int signum, siginfo_t *info, void *ucontext)
164418                 operation = atoi(benchmark_cmd[4]);
164419                 sprintf(resctrl_val, "%s", benchmark_cmd[5]);
164421 -               if (strcmp(resctrl_val, "cqm") != 0)
164422 +               if (strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
164423                         buffer_span = span * MB;
164424                 else
164425                         buffer_span = span;
164426 @@ -459,8 +459,8 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
164427                 goto out;
164429         /* Create mon grp and write pid into it for "mbm" and "cqm" test */
164430 -       if ((strcmp(resctrl_val, "cqm") == 0) ||
164431 -           (strcmp(resctrl_val, "mbm") == 0)) {
164432 +       if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)) ||
164433 +           !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
164434                 if (strlen(mongrp)) {
164435                         sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
164436                         sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
164437 @@ -505,9 +505,9 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
164438         int resource_id, ret = 0;
164439         FILE *fp;
164441 -       if ((strcmp(resctrl_val, "mba") != 0) &&
164442 -           (strcmp(resctrl_val, "cat") != 0) &&
164443 -           (strcmp(resctrl_val, "cqm") != 0))
164444 +       if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
164445 +           strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) &&
164446 +           strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
164447                 return -ENOENT;
164449         if (!schemata) {
164450 @@ -528,9 +528,10 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
164451         else
164452                 sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
164454 -       if (!strcmp(resctrl_val, "cat") || !strcmp(resctrl_val, "cqm"))
164455 +       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
164456 +           !strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
164457                 sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
164458 -       if (strcmp(resctrl_val, "mba") == 0)
164459 +       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)))
164460                 sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);
164462         fp = fopen(controlgroup, "w");
164463 @@ -615,26 +616,56 @@ char *fgrep(FILE *inf, const char *str)
164464   * validate_resctrl_feature_request - Check if requested feature is valid.
164465   * @resctrl_val:       Requested feature
164466   *
164467 - * Return: 0 on success, non-zero on failure
164468 + * Return: True if the feature is supported, else false
164469   */
164470 -bool validate_resctrl_feature_request(char *resctrl_val)
164471 +bool validate_resctrl_feature_request(const char *resctrl_val)
164473 -       FILE *inf = fopen("/proc/cpuinfo", "r");
164474 +       struct stat statbuf;
164475         bool found = false;
164476         char *res;
164477 +       FILE *inf;
164479 -       if (!inf)
164480 +       if (!resctrl_val)
164481                 return false;
164483 -       res = fgrep(inf, "flags");
164485 -       if (res) {
164486 -               char *s = strchr(res, ':');
164487 +       if (remount_resctrlfs(false))
164488 +               return false;
164490 -               found = s && !strstr(s, resctrl_val);
164491 -               free(res);
164492 +       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
164493 +               if (!stat(L3_PATH, &statbuf))
164494 +                       return true;
164495 +       } else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
164496 +               if (!stat(MB_PATH, &statbuf))
164497 +                       return true;
164498 +       } else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
164499 +                  !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
164500 +               if (!stat(L3_MON_PATH, &statbuf)) {
164501 +                       inf = fopen(L3_MON_FEATURES_PATH, "r");
164502 +                       if (!inf)
164503 +                               return false;
164505 +                       if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
164506 +                               res = fgrep(inf, "llc_occupancy");
164507 +                               if (res) {
164508 +                                       found = true;
164509 +                                       free(res);
164510 +                               }
164511 +                       }
164513 +                       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
164514 +                               res = fgrep(inf, "mbm_total_bytes");
164515 +                               if (res) {
164516 +                                       free(res);
164517 +                                       res = fgrep(inf, "mbm_local_bytes");
164518 +                                       if (res) {
164519 +                                               found = true;
164520 +                                               free(res);
164521 +                                       }
164522 +                               }
164523 +                       }
164524 +                       fclose(inf);
164525 +               }
164526         }
164527 -       fclose(inf);
164529         return found;
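Feature detection now queries the resctrl filesystem itself rather than grepping CPU flags out of /proc/cpuinfo: CAT and MBA are considered present if /sys/fs/resctrl/info/L3 or /sys/fs/resctrl/info/MB exists, while the monitoring tests additionally look for the llc_occupancy or mbm_*_bytes strings in mon_features under L3_MON. This also fixes the old check, whose "!strstr(s, resctrl_val)" returned true precisely when the flag was absent.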
164531 diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
164532 index 98c3b647f54d..e3d5c77a8612 100644
164533 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c
164534 +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
164535 @@ -1753,16 +1753,25 @@ TEST_F(TRACE_poke, getpid_runs_normally)
164536  # define SYSCALL_RET_SET(_regs, _val)                          \
164537         do {                                                    \
164538                 typeof(_val) _result = (_val);                  \
164539 -               /*                                              \
164540 -                * A syscall error is signaled by CR0 SO bit    \
164541 -                * and the code is stored as a positive value.  \
164542 -                */                                             \
164543 -               if (_result < 0) {                              \
164544 -                       SYSCALL_RET(_regs) = -_result;          \
164545 -                       (_regs).ccr |= 0x10000000;              \
164546 -               } else {                                        \
164547 +               if ((_regs.trap & 0xfff0) == 0x3000) {          \
164548 +                       /*                                      \
164549 +                        * scv 0 system call uses -ve result    \
164550 +                        * for error, so no need to adjust.     \
164551 +                        */                                     \
164552                         SYSCALL_RET(_regs) = _result;           \
164553 -                       (_regs).ccr &= ~0x10000000;             \
164554 +               } else {                                        \
164555 +                       /*                                      \
164556 +                        * A syscall error is signaled by the   \
164557 +                        * CR0 SO bit and the code is stored as \
164558 +                        * a positive value.                    \
164559 +                        */                                     \
164560 +                       if (_result < 0) {                      \
164561 +                               SYSCALL_RET(_regs) = -_result;  \
164562 +                               (_regs).ccr |= 0x10000000;      \
164563 +                       } else {                                \
164564 +                               SYSCALL_RET(_regs) = _result;   \
164565 +                               (_regs).ccr &= ~0x10000000;     \
164566 +                       }                                       \
164567                 }                                               \
164568         } while (0)
164569  # define SYSCALL_RET_SET_ON_PTRACE_EXIT
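The new branch distinguishes powerpc's two system-call entry points: regs.trap in the 0x3000 range identifies an 'scv' entry, which returns negative error codes directly like most architectures, whereas the legacy 'sc' entry signals errors via the CR0 SO bit with a positive error code, as handled in the else branch.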
164570 diff --git a/tools/testing/selftests/x86/thunks_32.S b/tools/testing/selftests/x86/thunks_32.S
164571 index a71d92da8f46..f3f56e681e9f 100644
164572 --- a/tools/testing/selftests/x86/thunks_32.S
164573 +++ b/tools/testing/selftests/x86/thunks_32.S
164574 @@ -45,3 +45,5 @@ call64_from_32:
164575         ret
164577  .size call64_from_32, .-call64_from_32
164579 +.section .note.GNU-stack,"",%progbits
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 62bd908ecd58..f08f5e82460b 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -174,21 +174,36 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
 					   struct kvm_coalesced_mmio_zone *zone)
 {
 	struct kvm_coalesced_mmio_dev *dev, *tmp;
+	int r;
 
 	if (zone->pio != 1 && zone->pio != 0)
 		return -EINVAL;
 
 	mutex_lock(&kvm->slots_lock);
 
-	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
 		if (zone->pio == dev->zone.pio &&
 		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
-			kvm_io_bus_unregister_dev(kvm,
+			r = kvm_io_bus_unregister_dev(kvm,
 				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
 			kvm_iodevice_destructor(&dev->dev);
+
+			/*
+			 * On failure, unregister destroys all devices on the
+			 * bus _except_ the target device, i.e. coalesced_zones
+			 * has been modified.  No need to restart the walk as
+			 * there aren't any zones left.
+			 */
+			if (r)
+				break;
 		}
+	}
 
 	mutex_unlock(&kvm->slots_lock);
 
+	/*
+	 * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's
+	 * perspective, the coalesced MMIO is most definitely unregistered.
+	 */
 	return 0;
 }
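
As the comment in the hunk above says, userspace always sees the zone as unregistered, even when the kernel had to tear down the whole I/O bus on allocation failure. A hedged userspace sketch (not part of the patch) of the ioctl this function backs; vm_fd and the zone values are placeholders:

#include <err.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void drop_coalesced_zone(int vm_fd)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = 0xd0000000,	/* placeholder guest-physical address */
		.size = 0x1000,
		.pio  = 0,		/* 0 = MMIO zone, 1 = port I/O zone */
	};

	/* Succeeds from userspace's perspective; see the hunk above. */
	if (ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone) < 0)
		err(1, "KVM_UNREGISTER_COALESCED_MMIO");
}
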
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 383df23514b9..5cabc6c748db 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2758,8 +2758,8 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 	if (val < grow_start)
 		val = grow_start;
 
-	if (val > halt_poll_ns)
-		val = halt_poll_ns;
+	if (val > vcpu->kvm->max_halt_poll_ns)
+		val = vcpu->kvm->max_halt_poll_ns;
 
 	vcpu->halt_poll_ns = val;
 out:
@@ -2838,7 +2838,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 				goto out;
 			}
 			poll_end = cur = ktime_get();
-		} while (single_task_running() && ktime_before(cur, stop));
+		} while (single_task_running() && !need_resched() &&
+			 ktime_before(cur, stop));
 	}
 
 	prepare_to_rcuwait(&vcpu->wait);
@@ -4486,15 +4487,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 }
 
 /* Caller must hold slots_lock. */
-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-			       struct kvm_io_device *dev)
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			      struct kvm_io_device *dev)
 {
 	int i, j;
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm_get_bus(kvm, bus_idx);
 	if (!bus)
-		return;
+		return 0;
 
 	for (i = 0; i < bus->dev_count; i++)
 		if (bus->range[i].dev == dev) {
@@ -4502,7 +4503,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 		}
 
 	if (i == bus->dev_count)
-		return;
+		return 0;
 
 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
 			  GFP_KERNEL_ACCOUNT);
@@ -4511,7 +4512,13 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 		new_bus->dev_count--;
 		memcpy(new_bus->range + i, bus->range + i + 1,
 				flex_array_size(new_bus, range, new_bus->dev_count - i));
-	} else {
+	}
+
+	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+	synchronize_srcu_expedited(&kvm->srcu);
+
+	/* Destroy the old bus _after_ installing the (null) bus. */
+	if (!new_bus) {
 		pr_err("kvm: failed to shrink bus, removing it completely\n");
 		for (j = 0; j < bus->dev_count; j++) {
 			if (j == i)
@@ -4520,10 +4527,8 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 		}
 	}
 
-	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
-	synchronize_srcu_expedited(&kvm->srcu);
 	kfree(bus);
-	return;
+	return new_bus ? 0 : -ENOMEM;
 }
 
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
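
The last two hunks reorder the teardown so the replacement bus is published and an SRCU grace period elapses before the old bus is freed. A schematic of that publish-then-free ordering, with an illustrative function name (the logic mirrors the patched kvm_io_bus_unregister_dev, but this is a sketch, not the kernel's code):

/* Sketch: readers under kvm->srcu must never see old_bus after kfree(). */
static int replace_and_free_bus(struct kvm *kvm, enum kvm_bus bus_idx,
				struct kvm_io_bus *old_bus,
				struct kvm_io_bus *new_bus)
{
	/* Publish the replacement first; new_bus may be NULL on -ENOMEM. */
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);

	/* Wait out every SRCU reader that may still hold old_bus. */
	synchronize_srcu_expedited(&kvm->srcu);

	/* Only now is old_bus unreachable and safe to free. */
	kfree(old_bus);

	return new_bus ? 0 : -ENOMEM;
}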