arch/powerpc/kernel/rtas.c (Linux 4.19-rc7)
/*
 * Procedures for interfacing to the RTAS on CHRP machines.
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>

#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/delay.h>
#include <linux/uaccess.h>
#include <asm/udbg.h>
#include <asm/syscalls.h>
#include <asm/smp.h>
#include <linux/atomic.h>
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>

/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);

struct rtas_t rtas = {
	.lock = __ARCH_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);

DEFINE_SPINLOCK(rtas_data_buf_lock);
EXPORT_SYMBOL(rtas_data_buf_lock);

char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
EXPORT_SYMBOL(rtas_data_buf);

unsigned long rtas_rmo_buf;

/*
 * If non-NULL, this gets called when the kernel terminates.
 * This is done like this so rtas_flash can be a module.
 */
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);

/*
 * RTAS uses home-made raw locking instead of spin_lock_irqsave()
 * because it can be called from really nasty contexts, such as with
 * the timebase stopped, which would lock up with normal locks and
 * spinlock debugging enabled.
 */
static unsigned long lock_rtas(void)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	arch_spin_lock(&rtas.lock);
	return flags;
}

static void unlock_rtas(unsigned long flags)
{
	arch_spin_unlock(&rtas.lock);
	local_irq_restore(flags);
	preempt_enable();
}

/*
 * call_rtas_display_status and call_rtas_display_status_delay
 * are designed only for very early low-level debugging, which
 * is why the token is hard-coded to 10.
 */
static void call_rtas_display_status(unsigned char c)
{
	unsigned long s;

	if (!rtas.base)
		return;

	s = lock_rtas();
	rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
	unlock_rtas(s);
}

static void call_rtas_display_status_delay(char c)
{
	static int pending_newline = 0;  /* did last write end with unprinted newline? */
	static int width = 16;

	if (c == '\n') {
		while (width-- > 0)
			call_rtas_display_status(' ');
		width = 16;
		mdelay(500);
		pending_newline = 1;
	} else {
		if (pending_newline) {
			call_rtas_display_status('\r');
			call_rtas_display_status('\n');
		}
		pending_newline = 0;
		if (width--) {
			call_rtas_display_status(c);
			udelay(10000);
		}
	}
}

void __init udbg_init_rtas_panel(void)
{
	udbg_putc = call_rtas_display_status_delay;
}

#ifdef CONFIG_UDBG_RTAS_CONSOLE

/* If you think you're dying before early_init_dt_scan_rtas() does its
 * work, you can hard code the token values for your firmware here and
 * hardcode rtas.base/entry etc.
 */
static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;

static void udbg_rtascon_putc(char c)
{
	int tries;

	if (!rtas.base)
		return;

	/* Add CRs before LFs */
	if (c == '\n')
		udbg_rtascon_putc('\r');

	/* if there is more than one character to be displayed, wait a bit */
	for (tries = 0; tries < 16; tries++) {
		if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
			break;
		udelay(1000);
	}
}

static int udbg_rtascon_getc_poll(void)
{
	int c;

	if (!rtas.base)
		return -1;

	if (rtas_call(rtas_getchar_token, 0, 2, &c))
		return -1;

	return c;
}

static int udbg_rtascon_getc(void)
{
	int c;

	while ((c = udbg_rtascon_getc_poll()) == -1)
		;

	return c;
}

void __init udbg_init_rtas_console(void)
{
	udbg_putc = udbg_rtascon_putc;
	udbg_getc = udbg_rtascon_getc;
	udbg_getc_poll = udbg_rtascon_getc_poll;
}
#endif /* CONFIG_UDBG_RTAS_CONSOLE */

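/*
 * rtas_progress() - write a progress/status string to the system's
 * operator panel or LCD via the "display-character" RTAS call, falling
 * back to the hex display through "set-indicator" when no character
 * display service is available.
 */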
void rtas_progress(char *s, unsigned short hex)
{
	struct device_node *root;
	int width;
	const __be32 *p;
	char *os;
	static int display_character, set_indicator;
	static int display_width, display_lines, form_feed;
	static const int *row_width;
	static DEFINE_SPINLOCK(progress_lock);
	static int current_line;
	static int pending_newline = 0; /* did last write end with unprinted newline? */

	if (!rtas.base)
		return;

	if (display_width == 0) {
		display_width = 0x10;
		if ((root = of_find_node_by_path("/rtas"))) {
			if ((p = of_get_property(root,
					"ibm,display-line-length", NULL)))
				display_width = be32_to_cpu(*p);
			if ((p = of_get_property(root,
					"ibm,form-feed", NULL)))
				form_feed = be32_to_cpu(*p);
			if ((p = of_get_property(root,
					"ibm,display-number-of-lines", NULL)))
				display_lines = be32_to_cpu(*p);
			row_width = of_get_property(root,
					"ibm,display-truncation-length", NULL);
			of_node_put(root);
		}
		display_character = rtas_token("display-character");
		set_indicator = rtas_token("set-indicator");
	}

	if (display_character == RTAS_UNKNOWN_SERVICE) {
		/* use hex display if available */
		if (set_indicator != RTAS_UNKNOWN_SERVICE)
			rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
		return;
	}

	spin_lock(&progress_lock);

	/*
	 * Last write ended with newline, but we didn't print it since
	 * it would just clear the bottom line of output. Print it now
	 * instead.
	 *
	 * If no newline is pending and form feed is supported, clear the
	 * display with a form feed; otherwise, print a CR to start output
	 * at the beginning of the line.
	 */
	if (pending_newline) {
		rtas_call(display_character, 1, 1, NULL, '\r');
		rtas_call(display_character, 1, 1, NULL, '\n');
		pending_newline = 0;
	} else {
		current_line = 0;
		if (form_feed)
			rtas_call(display_character, 1, 1, NULL,
				  (char)form_feed);
		else
			rtas_call(display_character, 1, 1, NULL, '\r');
	}

	if (row_width)
		width = row_width[current_line];
	else
		width = display_width;
	os = s;
	while (*os) {
		if (*os == '\n' || *os == '\r') {
			/* If newline is the last character, save it
			 * until next call to avoid bumping up the
			 * display output.
			 */
			if (*os == '\n' && !os[1]) {
				pending_newline = 1;
				current_line++;
				if (current_line > display_lines-1)
					current_line = display_lines-1;
				spin_unlock(&progress_lock);
				return;
			}

			/* RTAS wants CR-LF, not just LF */
			if (*os == '\n') {
				rtas_call(display_character, 1, 1, NULL, '\r');
				rtas_call(display_character, 1, 1, NULL, '\n');
			} else {
				/* CR might be used to re-draw a line, so we'll
				 * leave it alone and not add LF.
				 */
				rtas_call(display_character, 1, 1, NULL, *os);
			}

			if (row_width)
				width = row_width[current_line];
			else
				width = display_width;
		} else {
			width--;
			rtas_call(display_character, 1, 1, NULL, *os);
		}

		os++;

		/* if we overwrite the screen length */
		if (width <= 0)
			while ((*os != 0) && (*os != '\n') && (*os != '\r'))
				os++;
	}

	spin_unlock(&progress_lock);
}
EXPORT_SYMBOL(rtas_progress);		/* needed by rtas_flash module */

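/*
 * rtas_token() - look up the token for a named RTAS service in the
 * properties of the /rtas device-tree node.  Returns
 * RTAS_UNKNOWN_SERVICE if the service is not listed or RTAS has not
 * been probed yet.
 */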
int rtas_token(const char *service)
{
	const __be32 *tokp;
	if (rtas.dev == NULL)
		return RTAS_UNKNOWN_SERVICE;
	tokp = of_get_property(rtas.dev, service, NULL);
	return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_token);

int rtas_service_present(const char *service)
{
	return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_service_present);

#ifdef CONFIG_RTAS_ERROR_LOGGING
/*
 * Return the firmware-specified size of the error log buffer
 * for all rtas calls that require an error buffer argument.
 * This includes 'check-exception' and 'rtas-last-error'.
 */
int rtas_get_error_log_max(void)
{
	static int rtas_error_log_max;
	if (rtas_error_log_max)
		return rtas_error_log_max;

	rtas_error_log_max = rtas_token("rtas-error-log-max");
	if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
	    (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
		printk(KERN_WARNING "RTAS: bad log buffer size %d\n",
		       rtas_error_log_max);
		rtas_error_log_max = RTAS_ERROR_LOG_MAX;
	}
	return rtas_error_log_max;
}
EXPORT_SYMBOL(rtas_get_error_log_max);

static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
static int rtas_last_error_token;

/** Return a copy of the detailed error text associated with the
 *  most recent failed call to rtas.  Because the error text
 *  might go stale if there are any other intervening rtas calls,
 *  this routine must be called atomically with whatever produced
 *  the error (i.e. with rtas.lock still held from the previous call).
 */
static char *__fetch_rtas_last_error(char *altbuf)
{
	struct rtas_args err_args, save_args;
	u32 bufsz;
	char *buf = NULL;

	if (rtas_last_error_token == -1)
		return NULL;

	bufsz = rtas_get_error_log_max();

	err_args.token = cpu_to_be32(rtas_last_error_token);
	err_args.nargs = cpu_to_be32(2);
	err_args.nret = cpu_to_be32(1);
	err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
	err_args.args[1] = cpu_to_be32(bufsz);
	err_args.args[2] = 0;

	save_args = rtas.args;
	rtas.args = err_args;

	enter_rtas(__pa(&rtas.args));

	err_args = rtas.args;
	rtas.args = save_args;

	/* Log the error in the unlikely case that there was one. */
	if (unlikely(err_args.args[2] == 0)) {
		if (altbuf) {
			buf = altbuf;
		} else {
			buf = rtas_err_buf;
			if (slab_is_available())
				buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
		}
		if (buf)
			memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
	}

	return buf;
}

#define get_errorlog_buffer()	kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)

#else /* CONFIG_RTAS_ERROR_LOGGING */
#define __fetch_rtas_last_error(x)	NULL
#define get_errorlog_buffer()		NULL
#endif

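/*
 * Marshal a token and its arguments into the supplied rtas_args block
 * (big-endian, as RTAS expects), zero the return slots and enter RTAS.
 * The caller is responsible for any locking around @args.
 */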
static void
va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
		      va_list list)
{
	int i;

	args->token = cpu_to_be32(token);
	args->nargs = cpu_to_be32(nargs);
	args->nret  = cpu_to_be32(nret);
	args->rets  = &(args->args[nargs]);

	for (i = 0; i < nargs; ++i)
		args->args[i] = cpu_to_be32(va_arg(list, __u32));

	for (i = 0; i < nret; ++i)
		args->rets[i] = 0;

	enter_rtas(__pa(args));
}

void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
{
	va_list list;

	va_start(list, nret);
	va_rtas_call_unlocked(args, token, nargs, nret, list);
	va_end(list);
}

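/*
 * rtas_call() - make an RTAS call through the global rtas.args buffer,
 * serialized by rtas.lock.  Returns rets[0] of the call (or -1 if the
 * token is unknown) and copies any additional return words into
 * @outputs.  On a hardware error (-1) the detailed error log is
 * fetched and logged.
 */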
int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
	va_list list;
	int i;
	unsigned long s;
	struct rtas_args *rtas_args;
	char *buff_copy = NULL;
	int ret;

	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
		return -1;

	s = lock_rtas();

	/* We use the global rtas args buffer */
	rtas_args = &rtas.args;

	va_start(list, outputs);
	va_rtas_call_unlocked(rtas_args, token, nargs, nret, list);
	va_end(list);

	/* A -1 return code indicates that the last command couldn't
	 * be completed due to a hardware error.
	 */
	if (be32_to_cpu(rtas_args->rets[0]) == -1)
		buff_copy = __fetch_rtas_last_error(NULL);

	if (nret > 1 && outputs != NULL)
		for (i = 0; i < nret - 1; ++i)
			outputs[i] = be32_to_cpu(rtas_args->rets[i + 1]);
	ret = (nret > 0) ? be32_to_cpu(rtas_args->rets[0]) : 0;

	unlock_rtas(s);

	if (buff_copy) {
		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
		if (slab_is_available())
			kfree(buff_copy);
	}
	return ret;
}
EXPORT_SYMBOL(rtas_call);

/* For RTAS_BUSY (-2), delay for 1 millisecond.  For an extended busy status
 * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
 */
unsigned int rtas_busy_delay_time(int status)
{
	int order;
	unsigned int ms = 0;

	if (status == RTAS_BUSY) {
		ms = 1;
	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
		   status <= RTAS_EXTENDED_DELAY_MAX) {
		order = status - RTAS_EXTENDED_DELAY_MIN;
		for (ms = 1; order > 0; order--)
			ms *= 10;
	}

	return ms;
}
EXPORT_SYMBOL(rtas_busy_delay_time);

/* For an RTAS busy status code, perform the hinted delay. */
unsigned int rtas_busy_delay(int status)
{
	unsigned int ms;

	might_sleep();
	ms = rtas_busy_delay_time(status);
	if (ms && need_resched())
		msleep(ms);

	return ms;
}
EXPORT_SYMBOL(rtas_busy_delay);

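/* Translate the common RTAS status codes into Linux errno values. */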
static int rtas_error_rc(int rtas_rc)
{
	int rc;

	switch (rtas_rc) {
	case -1:		/* Hardware Error */
		rc = -EIO;
		break;
	case -3:		/* Bad indicator/domain/etc */
		rc = -EINVAL;
		break;
	case -9000:		/* Isolation error */
		rc = -EFAULT;
		break;
	case -9001:		/* Outstanding TCE/PTE */
		rc = -EEXIST;
		break;
	case -9002:		/* No usable slot */
		rc = -ENODEV;
		break;
	default:
		printk(KERN_ERR "%s: unexpected RTAS error %d\n",
		       __func__, rtas_rc);
		rc = -ERANGE;
		break;
	}
	return rc;
}

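/*
 * Thin wrappers around the "get-power-level" and "set-power-level" RTAS
 * services; they retry while firmware reports a busy status and map
 * RTAS error codes to errnos via rtas_error_rc().
 */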
int rtas_get_power_level(int powerdomain, int *level)
{
	int token = rtas_token("get-power-level");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
		udelay(1);

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_get_power_level);

int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
	int token = rtas_token("set-power-level");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_set_power_level);

int rtas_get_sensor(int sensor, int index, int *state)
{
	int token = rtas_token("get-sensor-state");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 2, 2, state, sensor, index);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_get_sensor);

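/*
 * Non-sleeping variant of rtas_get_sensor(): it does not retry on a
 * busy status (a busy return triggers a WARN_ON instead), so it can be
 * used from contexts that must not sleep.
 */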
int rtas_get_sensor_fast(int sensor, int index, int *state)
{
	int token = rtas_token("get-sensor-state");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	rc = rtas_call(token, 2, 2, state, sensor, index);
	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
				    rc <= RTAS_EXTENDED_DELAY_MAX));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}

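/*
 * Scan the "rtas-indicators" property for @token; if found, report the
 * maximum supported index through @maxindex and return true.
 */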
bool rtas_indicator_present(int token, int *maxindex)
{
	int proplen, count, i;
	const struct indicator_elem {
		__be32 token;
		__be32 maxindex;
	} *indicators;

	indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
	if (!indicators)
		return false;

	count = proplen / sizeof(struct indicator_elem);

	for (i = 0; i < count; i++) {
		if (__be32_to_cpu(indicators[i].token) != token)
			continue;
		if (maxindex)
			*maxindex = __be32_to_cpu(indicators[i].maxindex);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(rtas_indicator_present);

int rtas_set_indicator(int indicator, int index, int new_value)
{
	int token = rtas_token("set-indicator");
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
	} while (rtas_busy_delay(rc));

	if (rc < 0)
		return rtas_error_rc(rc);
	return rc;
}
EXPORT_SYMBOL(rtas_set_indicator);

/*
 * Ignoring RTAS extended delay
 */
int rtas_set_indicator_fast(int indicator, int index, int new_value)
{
	int rc;
	int token = rtas_token("set-indicator");

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);

	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
				    rc <= RTAS_EXTENDED_DELAY_MAX));

	if (rc < 0)
		return rtas_error_rc(rc);

	return rc;
}

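/*
 * System termination entry points.  Each one first invokes
 * rtas_flash_term_hook (if the rtas_flash module registered one) and
 * then asks firmware to reboot, power off or halt.
 */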
void __noreturn rtas_restart(char *cmd)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_RESTART);
	printk("RTAS system-reboot returned %d\n",
	       rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
	for (;;);
}

void rtas_power_off(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);
	/* allow power on only with power button press */
	printk("RTAS power-off returned %d\n",
	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
	for (;;);
}

void __noreturn rtas_halt(void)
{
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_HALT);
	/* allow power on only with power button press */
	printk("RTAS power-off returned %d\n",
	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
	for (;;);
}

/* Must be in the RMO region, so we place it here */
static char rtas_os_term_buf[2048];

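/*
 * rtas_os_term() - notify firmware of an OS panic via ibm,os-term,
 * passing a short reason string from the RMO buffer above.
 */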
void rtas_os_term(char *str)
{
	int status;

	/*
	 * Firmware with the ibm,extended-os-term property is guaranteed
	 * to always return from an ibm,os-term call. Earlier versions without
	 * this property may terminate the partition which we want to avoid
	 * since it interferes with panic_timeout.
	 */
	if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
	    RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
		return;

	snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);

	do {
		status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
				   __pa(rtas_os_term_buf));
	} while (rtas_busy_delay(status));

	if (status != 0)
		printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
}

static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
#ifdef CONFIG_PPC_PSERIES
static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	u16 slb_size = mmu_slb_size;
	int rc = H_MULTI_THREADS_ACTIVE;
	int cpu;

	slb_set_size(SLB_MIN_SIZE);
	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());

	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
	       !atomic_read(&data->error))
		rc = rtas_call(data->token, 0, 1, NULL);

	if (rc || atomic_read(&data->error)) {
		printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
		slb_set_size(slb_size);
	}

	if (atomic_read(&data->error))
		rc = atomic_read(&data->error);

	atomic_set(&data->error, rc);
	pSeries_coalesce_init();

	if (wake_when_done) {
		atomic_set(&data->done, 1);

		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}

	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);

	return rc;
}

int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data)
{
	atomic_inc(&data->working);
	return __rtas_suspend_last_cpu(data, 0);
}

static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	long rc = H_SUCCESS;
	unsigned long msr_save;
	int cpu;

	atomic_inc(&data->working);

	/* really need to ensure MSR.EE is off for H_JOIN */
	msr_save = mfmsr();
	mtmsr(msr_save & ~(MSR_EE));

	while (rc == H_SUCCESS && !atomic_read(&data->done) && !atomic_read(&data->error))
		rc = plpar_hcall_norets(H_JOIN);

	mtmsr(msr_save);

	if (rc == H_SUCCESS) {
		/* This cpu was prodded and the suspend is complete. */
		goto out;
	} else if (rc == H_CONTINUE) {
		/* All other cpus are in H_JOIN, this cpu does
		 * the suspend.
		 */
		return __rtas_suspend_last_cpu(data, wake_when_done);
	} else {
		printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
		       smp_processor_id(), rc);
		atomic_set(&data->error, rc);
	}

	if (wake_when_done) {
		atomic_set(&data->done, 1);

		/* This cpu did the suspend or got an error; in either case,
		 * we need to prod all other cpus out of join state.
		 * Extra prods are harmless.
		 */
		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}
out:
	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);
	return rc;
}

int rtas_suspend_cpu(struct rtas_suspend_me_data *data)
{
	return __rtas_suspend_cpu(data, 0);
}

static void rtas_percpu_suspend_me(void *info)
{
	__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
}

enum rtas_cpu_state {
	DOWN,
	UP,
};

#ifndef CONFIG_SMP
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				cpumask_var_t cpus)
{
	if (!cpumask_empty(cpus)) {
		cpumask_clear(cpus);
		return -EINVAL;
	} else
		return 0;
}
#else
/* On return cpumask will be altered to indicate CPUs changed.
 * CPUs with states changed will be set in the mask,
 * CPUs with status unchanged will be unset in the mask. */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				cpumask_var_t cpus)
{
	int cpu;
	int cpuret = 0;
	int ret = 0;

	if (cpumask_empty(cpus))
		return 0;

	for_each_cpu(cpu, cpus) {
		switch (state) {
		case DOWN:
			cpuret = cpu_down(cpu);
			break;
		case UP:
			cpuret = cpu_up(cpu);
			break;
		}
		if (cpuret) {
			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
				 __func__,
				 ((state == UP) ? "up" : "down"),
				 cpu, cpuret);
			if (!ret)
				ret = cpuret;
			if (state == UP) {
				/* clear bits for unchanged cpus, return */
				cpumask_shift_right(cpus, cpus, cpu);
				cpumask_shift_left(cpus, cpus, cpu);
				break;
			} else {
				/* clear bit for unchanged cpu, continue */
				cpumask_clear_cpu(cpu, cpus);
			}
		}
	}

	return ret;
}
#endif

int rtas_online_cpus_mask(cpumask_var_t cpus)
{
	int ret;

	ret = rtas_cpu_state_change_mask(UP, cpus);

	if (ret) {
		cpumask_var_t tmp_mask;

		if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
			return ret;

		/* Use tmp_mask to preserve cpus mask from first failure */
		cpumask_copy(tmp_mask, cpus);
		rtas_offline_cpus_mask(tmp_mask);
		free_cpumask_var(tmp_mask);
	}

	return ret;
}
EXPORT_SYMBOL(rtas_online_cpus_mask);

int rtas_offline_cpus_mask(cpumask_var_t cpus)
{
	return rtas_cpu_state_change_mask(DOWN, cpus);
}
EXPORT_SYMBOL(rtas_offline_cpus_mask);

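/*
 * rtas_ibm_suspend_me() - coordinate a partition suspend (e.g. for
 * migration or hibernation).  It validates the stream state with
 * H_VASI_STATE, onlines all present CPUs, then has every CPU enter
 * H_JOIN while one CPU performs the ibm,suspend-me RTAS call, and
 * finally restores the original CPU online mask.
 */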
int rtas_ibm_suspend_me(u64 handle)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);
	cpumask_var_t offline_mask;
	int cpuret;

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf, handle);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n", rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		return -EAGAIN;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		return -EIO;
	}

	if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
		return -ENOMEM;

	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;

	/* All present CPUs must be online */
	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
	cpuret = rtas_online_cpus_mask(offline_mask);
	if (cpuret) {
		pr_err("%s: Could not bring present CPUs online.\n", __func__);
		atomic_set(&data.error, cpuret);
		goto out;
	}

	stop_topology_update();

	/* Call function on all CPUs.  One of us will make the
	 * rtas call.
	 */
	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
		atomic_set(&data.error, -EINVAL);

	wait_for_completion(&done);

	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

	start_topology_update();

	/* Take down CPUs not online prior to suspend */
	cpuret = rtas_offline_cpus_mask(offline_mask);
	if (cpuret)
		pr_warn("%s: Could not restore CPUs to offline state.\n",
			__func__);

out:
	free_cpumask_var(offline_mask);
	return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
int rtas_ibm_suspend_me(u64 handle)
{
	return -ENOSYS;
}
#endif

/**
 * Find a specific pseries error log in an RTAS extended event log.
 * @log: RTAS error/event log
 * @section_id: two character section identifier
 *
 * Returns a pointer to the specified errorlog or NULL if not found.
 */
struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
					      uint16_t section_id)
{
	struct rtas_ext_event_log_v6 *ext_log =
		(struct rtas_ext_event_log_v6 *)log->buffer;
	struct pseries_errorlog *sect;
	unsigned char *p, *log_end;
	uint32_t ext_log_length = rtas_error_extended_log_length(log);
	uint8_t log_format = rtas_ext_event_log_format(ext_log);
	uint32_t company_id = rtas_ext_event_company_id(ext_log);

	/* Check that we understand the format */
	if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
	    log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
	    company_id != RTAS_V6EXT_COMPANY_ID_IBM)
		return NULL;

	log_end = log->buffer + ext_log_length;
	p = ext_log->vendor_log;

	while (p < log_end) {
		sect = (struct pseries_errorlog *)p;
		if (pseries_errorlog_id(sect) == section_id)
			return sect;
		p += pseries_errorlog_length(sect);
	}

	return NULL;
}

/* We assume to be passed big endian arguments */
SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
{
	struct rtas_args args;
	unsigned long flags;
	char *buff_copy, *errbuf = NULL;
	int nargs, nret, token;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!rtas.entry)
		return -EINVAL;

	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
		return -EFAULT;

	nargs = be32_to_cpu(args.nargs);
	nret  = be32_to_cpu(args.nret);
	token = be32_to_cpu(args.token);

	if (nargs >= ARRAY_SIZE(args.args)
	    || nret > ARRAY_SIZE(args.args)
	    || nargs + nret > ARRAY_SIZE(args.args))
		return -EINVAL;

	/* Copy in args. */
	if (copy_from_user(args.args, uargs->args,
			   nargs * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	args.rets = &args.args[nargs];
	memset(args.rets, 0, nret * sizeof(rtas_arg_t));

	/* Need to handle ibm,suspend_me call specially */
	if (token == ibm_suspend_me_token) {

		/*
		 * rtas_ibm_suspend_me assumes the streamid handle is in cpu
		 * endian, or at least the hcall within it requires it.
		 */
		int rc = 0;
		u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
			      | be32_to_cpu(args.args[1]);
		rc = rtas_ibm_suspend_me(handle);
		if (rc == -EAGAIN)
			args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
		else if (rc == -EIO)
			args.rets[0] = cpu_to_be32(-1);
		else if (rc)
			return rc;
		goto copy_return;
	}

	buff_copy = get_errorlog_buffer();

	flags = lock_rtas();

	rtas.args = args;
	enter_rtas(__pa(&rtas.args));
	args = rtas.args;

	/* A -1 return code indicates that the last command couldn't
	 * be completed due to a hardware error.
	 */
	if (be32_to_cpu(args.rets[0]) == -1)
		errbuf = __fetch_rtas_last_error(buff_copy);

	unlock_rtas(flags);

	if (buff_copy) {
		if (errbuf)
			log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
		kfree(buff_copy);
	}

copy_return:
	/* Copy out args. */
	if (copy_to_user(uargs->args + nargs,
			 args.args + nargs,
			 nret * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	return 0;
}

/*
 * Call early during boot, before mem init, to retrieve the RTAS
 * information from the device-tree and allocate the RMO buffer for userland
 * accesses.
 */
void __init rtas_initialize(void)
{
	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
	u32 base, size, entry;
	int no_base, no_size, no_entry;

	/* Get RTAS dev node and fill up our "rtas" structure with infos
	 * about it.
	 */
	rtas.dev = of_find_node_by_name(NULL, "rtas");
	if (!rtas.dev)
		return;

	no_base = of_property_read_u32(rtas.dev, "linux,rtas-base", &base);
	no_size = of_property_read_u32(rtas.dev, "rtas-size", &size);
	if (no_base || no_size) {
		of_node_put(rtas.dev);
		rtas.dev = NULL;
		return;
	}

	rtas.base = base;
	rtas.size = size;
	no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
	rtas.entry = no_entry ? rtas.base : entry;

	/* If RTAS was found, allocate the RMO buffer for it and look for
	 * the stop-self token if any
	 */
#ifdef CONFIG_PPC64
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
	}
#endif
	rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);

#ifdef CONFIG_RTAS_ERROR_LOGGING
	rtas_last_error_token = rtas_token("rtas-last-error");
#endif
}

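/*
 * Flat device-tree scan callback: record the RTAS base, entry point and
 * size, and (when the RTAS udbg console is configured) pick up the
 * put-term-char/get-term-char tokens so the early console can be
 * brought up.
 */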
int __init early_init_dt_scan_rtas(unsigned long node,
		const char *uname, int depth, void *data)
{
	const u32 *basep, *entryp, *sizep;

	if (depth != 1 || strcmp(uname, "rtas") != 0)
		return 0;

	basep  = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
	entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
	sizep  = of_get_flat_dt_prop(node, "rtas-size", NULL);

	if (basep && entryp && sizep) {
		rtas.base = *basep;
		rtas.entry = *entryp;
		rtas.size = *sizep;
	}

#ifdef CONFIG_UDBG_RTAS_CONSOLE
	basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
	if (basep)
		rtas_putchar_token = *basep;

	basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
	if (basep)
		rtas_getchar_token = *basep;

	if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
	    rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
		udbg_init_rtas_console();

#endif

	/* break now */
	return 1;
}

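/*
 * rtas_give_timebase()/rtas_take_timebase() hand the timebase from one
 * CPU to another: the giver freezes the timebase via RTAS, publishes
 * its value through the shared 'timebase' variable, waits for the
 * taker to consume it, then thaws the timebase.
 */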
static arch_spinlock_t timebase_lock;
static u64 timebase = 0;

void rtas_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();
	arch_spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	arch_spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
	local_irq_restore(flags);
}

void rtas_take_timebase(void)
{
	while (!timebase)
		barrier();
	arch_spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	arch_spin_unlock(&timebase_lock);
}