kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>

#include "smpboot.h"
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
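/*
 * Illustrative sketch (not part of this file): cpu_notifier_register_begin/
 * done() are aliases of the two helpers above, which is why the
 * EXPORT_SYMBOL() lines name them.  A caller that must register a callback
 * *and* initialize state for already-online CPUs without racing against
 * hotplug would do something like the following (my_nb, my_cpu_callback and
 * my_setup are hypothetical):
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		my_setup(cpu);
 *	__register_cpu_notifier(&my_nb);	// chain already protected here
 *	cpu_notifier_register_done();
 */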
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
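/*
 * Illustrative sketch (not part of this file): code that may sleep and needs
 * cpu_online_mask to stay stable brackets the region with the reader pair
 * above (do_work_on() is a hypothetical helper):
 *
 *	int cpu;
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_work_on(cpu);	// no CPU can be unplugged in here
 *	put_online_cpus();
 */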
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}
void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
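/*
 * Illustrative sketch (not part of this file): the writer side always nests
 * inside the map-update lock, exactly as the hotplug entry points below do:
 *
 *	cpu_maps_update_begin();	// one writer at a time
 *	cpu_hotplug_begin();		// wait for the reader refcount to drain
 *	... bring the CPU up or take it down ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */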
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}
#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
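/*
 * Illustrative sketch (not part of this file): a typical callback passed to
 * register_cpu_notifier() switches on the action, masking off
 * CPU_TASKS_FROZEN when the frozen (suspend/resume) and normal paths are
 * handled identically.  my_cpu_callback, my_prepare and my_teardown are
 * hypothetical:
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return notifier_from_errno(my_prepare(cpu));
 *		case CPU_DEAD:
 *			my_teardown(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */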
void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
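/*
 * Illustrative sketch (not part of this file): an architecture's CPU
 * teardown path calls this once the dying CPU has been marked offline,
 * roughly:
 *
 *	set_cpu_online(cpu, false);	// CPU no longer counts as online
 *	...
 *	clear_tasks_mm_cpumask(cpu);	// drop the CPU from every mm_cpumask
 */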
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
		       __func__, cpu);
		goto out_release;
	}
	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);
	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
		       __func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}
int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
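/*
 * Illustrative sketch (not part of this file): cpu_down()/cpu_up() are the
 * kernel-side equivalents of writing 0/1 to
 * /sys/devices/system/cpu/cpuN/online, e.g.
 *
 *	err = cpu_down(2);	// offline CPU 2; -EBUSY if hotplug disabled
 *	if (!err)
 *		err = cpu_up(2);	// and bring it back
 */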
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
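/*
 * Illustrative sketch (not part of this file): the suspend core brackets the
 * single-CPU window with this pair, roughly:
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		... enter the sleep state on the boot CPU ...
 *		enable_nonboot_cpus();
 *	}
 */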
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
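/*
 * Illustrative sketch (not part of this file): a secondary-CPU entry point
 * in arch code typically ends with something like
 *
 *	notify_cpu_starting(cpu);	// still with interrupts disabled
 *	set_cpu_online(cpu, true);	// now visible to the rest of the kernel
 *	local_irq_enable();
 *	cpu_startup_entry(CPUHP_ONLINE);	// enter the idle loop
 */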
#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
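/*
 * Illustrative worked example (not part of this file): cpumask_of() resolves
 * to get_cpu_mask() in <linux/cpumask.h>, which picks row 1 + cpu %
 * BITS_PER_LONG and then steps the pointer *back* by cpu / BITS_PER_LONG
 * words; stepping back is safe because every word not explicitly
 * initialized above (including all of row 0) is zero.  On a 64-bit kernel:
 *
 *	cpumask_of(70)
 *	  row = 1 + 70 % 64 = 7			// row 7 has word[0] = 1UL << 6
 *	  p   = cpu_bit_bitmap[7] - 70 / 64	// back up by one word
 *
 * so word 1 of the returned mask is 1UL << 6, i.e. only bit 70 is set.
 */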
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
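/*
 * Illustrative sketch (not part of this file): early arch setup seeds these
 * masks before SMP bringup, along the lines of (nr_detected is hypothetical):
 *
 *	init_cpu_possible(cpumask_of(0));	// start with the boot CPU
 *	for (i = 1; i < nr_detected; i++)
 *		set_cpu_possible(i, true);	// grow to what firmware found
 */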