/* ds.c: Domain Services driver for Logical Domains
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/types.h>
9 #include <linux/module.h>
10 #include <linux/string.h>
11 #include <linux/slab.h>
12 #include <linux/sched.h>
13 #include <linux/delay.h>
14 #include <linux/mutex.h>
15 #include <linux/kthread.h>
16 #include <linux/cpu.h>
20 #include <asm/power.h>
21 #include <asm/mdesc.h>
24 #define DRV_MODULE_NAME "ds"
25 #define PFX DRV_MODULE_NAME ": "
26 #define DRV_MODULE_VERSION "1.0"
27 #define DRV_MODULE_RELDATE "Jul 11, 2007"
29 static char version
[] __devinitdata
=
30 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
31 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
32 MODULE_DESCRIPTION("Sun LDOM domain services driver");
33 MODULE_LICENSE("GPL");
34 MODULE_VERSION(DRV_MODULE_VERSION
);
38 #define DS_INIT_REQ 0x00
39 #define DS_INIT_ACK 0x01
40 #define DS_INIT_NACK 0x02
41 #define DS_REG_REQ 0x03
42 #define DS_REG_ACK 0x04
43 #define DS_REG_NACK 0x05
44 #define DS_UNREG_REQ 0x06
45 #define DS_UNREG_ACK 0x07
46 #define DS_UNREG_NACK 0x08
55 #define DS_REG_VER_NACK 0x01
56 #define DS_REG_DUP 0x02
57 #define DS_INV_HDL 0x03
58 #define DS_TYPE_UNKNOWN 0x04
66 struct ds_msg_tag tag
;
67 struct ds_version ver
;
71 struct ds_msg_tag tag
;
76 struct ds_msg_tag tag
;
81 struct ds_msg_tag tag
;
89 struct ds_msg_tag tag
;
95 struct ds_msg_tag tag
;
100 struct ds_unreg_req
{
101 struct ds_msg_tag tag
;
105 struct ds_unreg_ack
{
106 struct ds_msg_tag tag
;
110 struct ds_unreg_nack
{
111 struct ds_msg_tag tag
;
116 struct ds_msg_tag tag
;
120 struct ds_data_nack
{
121 struct ds_msg_tag tag
;
126 struct ds_cap_state
{
129 void (*data
)(struct ldc_channel
*lp
,
130 struct ds_cap_state
*cp
,
133 const char *service_id
;
136 #define CAP_STATE_UNKNOWN 0x00
137 #define CAP_STATE_REG_SENT 0x01
138 #define CAP_STATE_REGISTERED 0x02
141 static void md_update_data(struct ldc_channel
*lp
, struct ds_cap_state
*cp
,
143 static void domain_shutdown_data(struct ldc_channel
*lp
,
144 struct ds_cap_state
*cp
,
146 static void domain_panic_data(struct ldc_channel
*lp
,
147 struct ds_cap_state
*cp
,
149 #ifdef CONFIG_HOTPLUG_CPU
150 static void dr_cpu_data(struct ldc_channel
*lp
,
151 struct ds_cap_state
*cp
,
154 static void ds_pri_data(struct ldc_channel
*lp
,
155 struct ds_cap_state
*cp
,
157 static void ds_var_data(struct ldc_channel
*lp
,
158 struct ds_cap_state
*cp
,
161 struct ds_cap_state ds_states
[] = {
163 .service_id
= "md-update",
164 .data
= md_update_data
,
167 .service_id
= "domain-shutdown",
168 .data
= domain_shutdown_data
,
171 .service_id
= "domain-panic",
172 .data
= domain_panic_data
,
174 #ifdef CONFIG_HOTPLUG_CPU
176 .service_id
= "dr-cpu",
185 .service_id
= "var-config",
189 .service_id
= "var-config-backup",
194 static DEFINE_SPINLOCK(ds_lock
);
197 struct ldc_channel
*lp
;
199 #define DS_HS_START 0x01
200 #define DS_HS_DONE 0x02
206 static struct ds_info
*ds_info
;
208 static struct ds_cap_state
*find_cap(u64 handle
)
210 unsigned int index
= handle
>> 32;
212 if (index
>= ARRAY_SIZE(ds_states
))
214 return &ds_states
[index
];
217 static struct ds_cap_state
*find_cap_by_string(const char *name
)
221 for (i
= 0; i
< ARRAY_SIZE(ds_states
); i
++) {
222 if (strcmp(ds_states
[i
].service_id
, name
))
225 return &ds_states
[i
];
230 static int ds_send(struct ldc_channel
*lp
, void *data
, int len
)
232 int err
, limit
= 1000;
235 while (limit
-- > 0) {
236 err
= ldc_write(lp
, data
, len
);
237 if (!err
|| (err
!= -EAGAIN
))
245 struct ds_md_update_req
{
249 struct ds_md_update_res
{
254 static void md_update_data(struct ldc_channel
*lp
,
255 struct ds_cap_state
*dp
,
258 struct ds_data
*dpkt
= buf
;
259 struct ds_md_update_req
*rp
;
262 struct ds_md_update_res res
;
265 rp
= (struct ds_md_update_req
*) (dpkt
+ 1);
267 printk(KERN_INFO PFX
"Machine description update.\n");
269 memset(&pkt
, 0, sizeof(pkt
));
270 pkt
.data
.tag
.type
= DS_DATA
;
271 pkt
.data
.tag
.len
= sizeof(pkt
) - sizeof(struct ds_msg_tag
);
272 pkt
.data
.handle
= dp
->handle
;
273 pkt
.res
.req_num
= rp
->req_num
;
274 pkt
.res
.result
= DS_OK
;
276 ds_send(lp
, &pkt
, sizeof(pkt
));
281 struct ds_shutdown_req
{
286 struct ds_shutdown_res
{
292 static void domain_shutdown_data(struct ldc_channel
*lp
,
293 struct ds_cap_state
*dp
,
296 struct ds_data
*dpkt
= buf
;
297 struct ds_shutdown_req
*rp
;
300 struct ds_shutdown_res res
;
303 rp
= (struct ds_shutdown_req
*) (dpkt
+ 1);
305 printk(KERN_ALERT PFX
"Shutdown request from "
306 "LDOM manager received.\n");
308 memset(&pkt
, 0, sizeof(pkt
));
309 pkt
.data
.tag
.type
= DS_DATA
;
310 pkt
.data
.tag
.len
= sizeof(pkt
) - sizeof(struct ds_msg_tag
);
311 pkt
.data
.handle
= dp
->handle
;
312 pkt
.res
.req_num
= rp
->req_num
;
313 pkt
.res
.result
= DS_OK
;
314 pkt
.res
.reason
[0] = 0;
316 ds_send(lp
, &pkt
, sizeof(pkt
));
321 struct ds_panic_req
{
325 struct ds_panic_res
{
331 static void domain_panic_data(struct ldc_channel
*lp
,
332 struct ds_cap_state
*dp
,
335 struct ds_data
*dpkt
= buf
;
336 struct ds_panic_req
*rp
;
339 struct ds_panic_res res
;
342 rp
= (struct ds_panic_req
*) (dpkt
+ 1);
344 printk(KERN_ALERT PFX
"Panic request from "
345 "LDOM manager received.\n");
347 memset(&pkt
, 0, sizeof(pkt
));
348 pkt
.data
.tag
.type
= DS_DATA
;
349 pkt
.data
.tag
.len
= sizeof(pkt
) - sizeof(struct ds_msg_tag
);
350 pkt
.data
.handle
= dp
->handle
;
351 pkt
.res
.req_num
= rp
->req_num
;
352 pkt
.res
.result
= DS_OK
;
353 pkt
.res
.reason
[0] = 0;
355 ds_send(lp
, &pkt
, sizeof(pkt
));
357 panic("PANIC requested by LDOM manager.");
360 #ifdef CONFIG_HOTPLUG_CPU
364 #define DR_CPU_CONFIGURE 0x43
365 #define DR_CPU_UNCONFIGURE 0x55
366 #define DR_CPU_FORCE_UNCONFIGURE 0x46
367 #define DR_CPU_STATUS 0x53
370 #define DR_CPU_OK 0x6f
371 #define DR_CPU_ERROR 0x65
376 struct dr_cpu_resp_entry
{
379 #define DR_CPU_RES_OK 0x00
380 #define DR_CPU_RES_FAILURE 0x01
381 #define DR_CPU_RES_BLOCKED 0x02
382 #define DR_CPU_RES_CPU_NOT_RESPONDING 0x03
383 #define DR_CPU_RES_NOT_IN_MD 0x04
386 #define DR_CPU_STAT_NOT_PRESENT 0x00
387 #define DR_CPU_STAT_UNCONFIGURED 0x01
388 #define DR_CPU_STAT_CONFIGURED 0x02
393 /* DR cpu requests get queued onto the work list by the
394 * dr_cpu_data() callback. The list is protected by
395 * ds_lock, and processed by dr_cpu_process() in order.
397 static LIST_HEAD(dr_cpu_work_list
);
398 static DECLARE_WAIT_QUEUE_HEAD(dr_cpu_wait
);
400 struct dr_cpu_queue_entry
{
401 struct list_head list
;
405 static void __dr_cpu_send_error(struct ds_cap_state
*cp
, struct ds_data
*data
)
407 struct dr_cpu_tag
*tag
= (struct dr_cpu_tag
*) (data
+ 1);
408 struct ds_info
*dp
= ds_info
;
411 struct dr_cpu_tag tag
;
415 memset(&pkt
, 0, sizeof(pkt
));
416 pkt
.data
.tag
.type
= DS_DATA
;
417 pkt
.data
.handle
= cp
->handle
;
418 pkt
.tag
.req_num
= tag
->req_num
;
419 pkt
.tag
.type
= DR_CPU_ERROR
;
420 pkt
.tag
.num_records
= 0;
422 msg_len
= (sizeof(struct ds_data
) +
423 sizeof(struct dr_cpu_tag
));
425 pkt
.data
.tag
.len
= msg_len
- sizeof(struct ds_msg_tag
);
427 ds_send(dp
->lp
, &pkt
, msg_len
);
430 static void dr_cpu_send_error(struct ds_cap_state
*cp
, struct ds_data
*data
)
434 spin_lock_irqsave(&ds_lock
, flags
);
435 __dr_cpu_send_error(cp
, data
);
436 spin_unlock_irqrestore(&ds_lock
, flags
);
439 #define CPU_SENTINEL 0xffffffff
441 static void purge_dups(u32
*list
, u32 num_ents
)
445 for (i
= 0; i
< num_ents
; i
++) {
449 if (cpu
== CPU_SENTINEL
)
452 for (j
= i
+ 1; j
< num_ents
; j
++) {
454 list
[j
] = CPU_SENTINEL
;
459 static int dr_cpu_size_response(int ncpus
)
461 return (sizeof(struct ds_data
) +
462 sizeof(struct dr_cpu_tag
) +
463 (sizeof(struct dr_cpu_resp_entry
) * ncpus
));
466 static void dr_cpu_init_response(struct ds_data
*resp
, u64 req_num
,
467 u64 handle
, int resp_len
, int ncpus
,
468 cpumask_t
*mask
, u32 default_stat
)
470 struct dr_cpu_resp_entry
*ent
;
471 struct dr_cpu_tag
*tag
;
474 tag
= (struct dr_cpu_tag
*) (resp
+ 1);
475 ent
= (struct dr_cpu_resp_entry
*) (tag
+ 1);
477 resp
->tag
.type
= DS_DATA
;
478 resp
->tag
.len
= resp_len
- sizeof(struct ds_msg_tag
);
479 resp
->handle
= handle
;
480 tag
->req_num
= req_num
;
481 tag
->type
= DR_CPU_OK
;
482 tag
->num_records
= ncpus
;
485 for_each_cpu_mask(cpu
, *mask
) {
487 ent
[i
].result
= DR_CPU_RES_OK
;
488 ent
[i
].stat
= default_stat
;
494 static void dr_cpu_mark(struct ds_data
*resp
, int cpu
, int ncpus
,
497 struct dr_cpu_resp_entry
*ent
;
498 struct dr_cpu_tag
*tag
;
501 tag
= (struct dr_cpu_tag
*) (resp
+ 1);
502 ent
= (struct dr_cpu_resp_entry
*) (tag
+ 1);
504 for (i
= 0; i
< ncpus
; i
++) {
505 if (ent
[i
].cpu
!= cpu
)
513 static int dr_cpu_configure(struct ds_cap_state
*cp
, u64 req_num
,
516 struct ds_data
*resp
;
517 int resp_len
, ncpus
, cpu
;
520 ncpus
= cpus_weight(*mask
);
521 resp_len
= dr_cpu_size_response(ncpus
);
522 resp
= kzalloc(resp_len
, GFP_KERNEL
);
526 dr_cpu_init_response(resp
, req_num
, cp
->handle
,
527 resp_len
, ncpus
, mask
,
528 DR_CPU_STAT_CONFIGURED
);
530 mdesc_fill_in_cpu_data(*mask
);
532 for_each_cpu_mask(cpu
, *mask
) {
535 printk(KERN_INFO PFX
"Starting cpu %d...\n", cpu
);
538 __u32 res
= DR_CPU_RES_FAILURE
;
539 __u32 stat
= DR_CPU_STAT_UNCONFIGURED
;
541 if (!cpu_present(cpu
)) {
542 /* CPU not present in MD */
543 res
= DR_CPU_RES_NOT_IN_MD
;
544 stat
= DR_CPU_STAT_NOT_PRESENT
;
545 } else if (err
== -ENODEV
) {
546 /* CPU did not call in successfully */
547 res
= DR_CPU_RES_CPU_NOT_RESPONDING
;
550 printk(KERN_INFO PFX
"CPU startup failed err=%d\n",
552 dr_cpu_mark(resp
, cpu
, ncpus
, res
, stat
);
556 spin_lock_irqsave(&ds_lock
, flags
);
557 ds_send(ds_info
->lp
, resp
, resp_len
);
558 spin_unlock_irqrestore(&ds_lock
, flags
);
565 static int dr_cpu_unconfigure(struct ds_cap_state
*cp
, u64 req_num
,
568 struct ds_data
*resp
;
571 ncpus
= cpus_weight(*mask
);
572 resp_len
= dr_cpu_size_response(ncpus
);
573 resp
= kzalloc(resp_len
, GFP_KERNEL
);
577 dr_cpu_init_response(resp
, req_num
, cp
->handle
,
578 resp_len
, ncpus
, mask
,
579 DR_CPU_STAT_UNCONFIGURED
);
586 static void process_dr_cpu_list(struct ds_cap_state
*cp
)
588 struct dr_cpu_queue_entry
*qp
, *tmp
;
593 spin_lock_irqsave(&ds_lock
, flags
);
594 list_splice(&dr_cpu_work_list
, &todo
);
595 INIT_LIST_HEAD(&dr_cpu_work_list
);
596 spin_unlock_irqrestore(&ds_lock
, flags
);
598 list_for_each_entry_safe(qp
, tmp
, &todo
, list
) {
599 struct ds_data
*data
= (struct ds_data
*) qp
->req
;
600 struct dr_cpu_tag
*tag
= (struct dr_cpu_tag
*) (data
+ 1);
601 u32
*cpu_list
= (u32
*) (tag
+ 1);
602 u64 req_num
= tag
->req_num
;
607 case DR_CPU_CONFIGURE
:
608 case DR_CPU_UNCONFIGURE
:
609 case DR_CPU_FORCE_UNCONFIGURE
:
613 dr_cpu_send_error(cp
, data
);
617 purge_dups(cpu_list
, tag
->num_records
);
620 for (i
= 0; i
< tag
->num_records
; i
++) {
621 if (cpu_list
[i
] == CPU_SENTINEL
)
624 if (cpu_list
[i
] < NR_CPUS
)
625 cpu_set(cpu_list
[i
], mask
);
628 if (tag
->type
== DR_CPU_CONFIGURE
)
629 err
= dr_cpu_configure(cp
, req_num
, &mask
);
631 err
= dr_cpu_unconfigure(cp
, req_num
, &mask
);
634 dr_cpu_send_error(cp
, data
);
642 static int dr_cpu_thread(void *__unused
)
644 struct ds_cap_state
*cp
;
647 cp
= find_cap_by_string("dr-cpu");
650 prepare_to_wait(&dr_cpu_wait
, &wait
, TASK_INTERRUPTIBLE
);
651 if (list_empty(&dr_cpu_work_list
))
653 finish_wait(&dr_cpu_wait
, &wait
);
655 if (kthread_should_stop())
658 process_dr_cpu_list(cp
);
664 static void dr_cpu_data(struct ldc_channel
*lp
,
665 struct ds_cap_state
*dp
,
668 struct dr_cpu_queue_entry
*qp
;
669 struct ds_data
*dpkt
= buf
;
670 struct dr_cpu_tag
*rp
;
672 rp
= (struct dr_cpu_tag
*) (dpkt
+ 1);
674 qp
= kmalloc(sizeof(struct dr_cpu_queue_entry
) + len
, GFP_ATOMIC
);
676 struct ds_cap_state
*cp
;
678 cp
= find_cap_by_string("dr-cpu");
679 __dr_cpu_send_error(cp
, dpkt
);
681 memcpy(&qp
->req
, buf
, len
);
682 list_add_tail(&qp
->list
, &dr_cpu_work_list
);
683 wake_up(&dr_cpu_wait
);
691 #define DS_PRI_REQUEST 0x00
692 #define DS_PRI_DATA 0x01
693 #define DS_PRI_UPDATE 0x02
696 static void ds_pri_data(struct ldc_channel
*lp
,
697 struct ds_cap_state
*dp
,
700 struct ds_data
*dpkt
= buf
;
701 struct ds_pri_msg
*rp
;
703 rp
= (struct ds_pri_msg
*) (dpkt
+ 1);
705 printk(KERN_INFO PFX
"PRI REQ [%lx:%lx], len=%d\n",
706 rp
->req_num
, rp
->type
, len
);
711 #define DS_VAR_SET_REQ 0x00
712 #define DS_VAR_DELETE_REQ 0x01
713 #define DS_VAR_SET_RESP 0x02
714 #define DS_VAR_DELETE_RESP 0x03
717 struct ds_var_set_msg
{
718 struct ds_var_hdr hdr
;
719 char name_and_value
[0];
722 struct ds_var_delete_msg
{
723 struct ds_var_hdr hdr
;
728 struct ds_var_hdr hdr
;
730 #define DS_VAR_SUCCESS 0x00
731 #define DS_VAR_NO_SPACE 0x01
732 #define DS_VAR_INVALID_VAR 0x02
733 #define DS_VAR_INVALID_VAL 0x03
734 #define DS_VAR_NOT_PRESENT 0x04
737 static DEFINE_MUTEX(ds_var_mutex
);
738 static int ds_var_doorbell
;
739 static int ds_var_response
;
741 static void ds_var_data(struct ldc_channel
*lp
,
742 struct ds_cap_state
*dp
,
745 struct ds_data
*dpkt
= buf
;
746 struct ds_var_resp
*rp
;
748 rp
= (struct ds_var_resp
*) (dpkt
+ 1);
750 if (rp
->hdr
.type
!= DS_VAR_SET_RESP
&&
751 rp
->hdr
.type
!= DS_VAR_DELETE_RESP
)
754 ds_var_response
= rp
->result
;
759 void ldom_set_var(const char *var
, const char *value
)
761 struct ds_info
*dp
= ds_info
;
762 struct ds_cap_state
*cp
;
764 cp
= find_cap_by_string("var-config");
765 if (cp
->state
!= CAP_STATE_REGISTERED
)
766 cp
= find_cap_by_string("var-config-backup");
768 if (cp
->state
== CAP_STATE_REGISTERED
) {
772 struct ds_var_set_msg msg
;
780 memset(&pkt
, 0, sizeof(pkt
));
781 pkt
.header
.data
.tag
.type
= DS_DATA
;
782 pkt
.header
.data
.handle
= cp
->handle
;
783 pkt
.header
.msg
.hdr
.type
= DS_VAR_SET_REQ
;
784 base
= p
= &pkt
.header
.msg
.name_and_value
[0];
786 p
+= strlen(var
) + 1;
788 p
+= strlen(value
) + 1;
790 msg_len
= (sizeof(struct ds_data
) +
791 sizeof(struct ds_var_set_msg
) +
793 msg_len
= (msg_len
+ 3) & ~3;
794 pkt
.header
.data
.tag
.len
= msg_len
- sizeof(struct ds_msg_tag
);
796 mutex_lock(&ds_var_mutex
);
798 spin_lock_irqsave(&ds_lock
, flags
);
800 ds_var_response
= -1;
802 ds_send(dp
->lp
, &pkt
, msg_len
);
803 spin_unlock_irqrestore(&ds_lock
, flags
);
806 while (ds_var_doorbell
== 0) {
813 mutex_unlock(&ds_var_mutex
);
815 if (ds_var_doorbell
== 0 ||
816 ds_var_response
!= DS_VAR_SUCCESS
)
817 printk(KERN_ERR PFX
"var-config [%s:%s] "
818 "failed, response(%d).\n",
822 printk(KERN_ERR PFX
"var-config not registered so "
823 "could not set (%s) variable to (%s).\n",
828 void ldom_reboot(const char *boot_command
)
830 /* Don't bother with any of this if the boot_command
833 if (boot_command
&& strlen(boot_command
)) {
834 char full_boot_str
[256];
836 strcpy(full_boot_str
, "boot ");
837 strcpy(full_boot_str
+ strlen("boot "), boot_command
);
839 ldom_set_var("reboot-command", full_boot_str
);
844 void ldom_power_off(void)
849 static void ds_conn_reset(struct ds_info
*dp
)
851 printk(KERN_ERR PFX
"ds_conn_reset() from %p\n",
852 __builtin_return_address(0));
855 static int register_services(struct ds_info
*dp
)
857 struct ldc_channel
*lp
= dp
->lp
;
860 for (i
= 0; i
< ARRAY_SIZE(ds_states
); i
++) {
862 struct ds_reg_req req
;
865 struct ds_cap_state
*cp
= &ds_states
[i
];
869 if (cp
->state
== CAP_STATE_REGISTERED
)
872 new_count
= sched_clock() & 0xffffffff;
873 cp
->handle
= ((u64
) i
<< 32) | new_count
;
875 msg_len
= (sizeof(struct ds_reg_req
) +
876 strlen(cp
->service_id
));
878 memset(&pbuf
, 0, sizeof(pbuf
));
879 pbuf
.req
.tag
.type
= DS_REG_REQ
;
880 pbuf
.req
.tag
.len
= (msg_len
- sizeof(struct ds_msg_tag
));
881 pbuf
.req
.handle
= cp
->handle
;
884 strcpy(pbuf
.req
.svc_id
, cp
->service_id
);
886 err
= ds_send(lp
, &pbuf
, msg_len
);
888 cp
->state
= CAP_STATE_REG_SENT
;
893 static int ds_handshake(struct ds_info
*dp
, struct ds_msg_tag
*pkt
)
896 if (dp
->hs_state
== DS_HS_START
) {
897 if (pkt
->type
!= DS_INIT_ACK
)
900 dp
->hs_state
= DS_HS_DONE
;
902 return register_services(dp
);
905 if (dp
->hs_state
!= DS_HS_DONE
)
908 if (pkt
->type
== DS_REG_ACK
) {
909 struct ds_reg_ack
*ap
= (struct ds_reg_ack
*) pkt
;
910 struct ds_cap_state
*cp
= find_cap(ap
->handle
);
913 printk(KERN_ERR PFX
"REG ACK for unknown handle %lx\n",
917 printk(KERN_INFO PFX
"Registered %s service.\n",
919 cp
->state
= CAP_STATE_REGISTERED
;
920 } else if (pkt
->type
== DS_REG_NACK
) {
921 struct ds_reg_nack
*np
= (struct ds_reg_nack
*) pkt
;
922 struct ds_cap_state
*cp
= find_cap(np
->handle
);
925 printk(KERN_ERR PFX
"REG NACK for "
926 "unknown handle %lx\n",
930 printk(KERN_INFO PFX
"Could not register %s service\n",
932 cp
->state
= CAP_STATE_UNKNOWN
;
942 static int ds_data(struct ds_info
*dp
, struct ds_msg_tag
*pkt
, int len
)
944 struct ds_data
*dpkt
= (struct ds_data
*) pkt
;
945 struct ds_cap_state
*cp
= find_cap(dpkt
->handle
);
948 struct ds_data_nack nack
= {
951 .len
= (sizeof(struct ds_data_nack
) -
952 sizeof(struct ds_msg_tag
)),
954 .handle
= dpkt
->handle
,
955 .result
= DS_INV_HDL
,
958 printk(KERN_ERR PFX
"Data for unknown handle %lu\n",
960 ds_send(dp
->lp
, &nack
, sizeof(nack
));
962 cp
->data(dp
->lp
, cp
, dpkt
, len
);
967 static void ds_up(struct ds_info
*dp
)
969 struct ldc_channel
*lp
= dp
->lp
;
970 struct ds_ver_req req
;
973 req
.tag
.type
= DS_INIT_REQ
;
974 req
.tag
.len
= sizeof(req
) - sizeof(struct ds_msg_tag
);
978 err
= ds_send(lp
, &req
, sizeof(req
));
980 dp
->hs_state
= DS_HS_START
;
983 static void ds_event(void *arg
, int event
)
985 struct ds_info
*dp
= arg
;
986 struct ldc_channel
*lp
= dp
->lp
;
990 spin_lock_irqsave(&ds_lock
, flags
);
992 if (event
== LDC_EVENT_UP
) {
994 spin_unlock_irqrestore(&ds_lock
, flags
);
998 if (event
!= LDC_EVENT_DATA_READY
) {
999 printk(KERN_WARNING PFX
"Unexpected LDC event %d\n", event
);
1000 spin_unlock_irqrestore(&ds_lock
, flags
);
1006 struct ds_msg_tag
*tag
;
1008 err
= ldc_read(lp
, dp
->rcv_buf
, sizeof(*tag
));
1010 if (unlikely(err
< 0)) {
1011 if (err
== -ECONNRESET
)
1019 err
= ldc_read(lp
, tag
+ 1, tag
->len
);
1021 if (unlikely(err
< 0)) {
1022 if (err
== -ECONNRESET
)
1029 if (tag
->type
< DS_DATA
)
1030 err
= ds_handshake(dp
, dp
->rcv_buf
);
1032 err
= ds_data(dp
, dp
->rcv_buf
,
1033 sizeof(*tag
) + err
);
1034 if (err
== -ECONNRESET
)
1038 spin_unlock_irqrestore(&ds_lock
, flags
);
1041 static int __devinit
ds_probe(struct vio_dev
*vdev
,
1042 const struct vio_device_id
*id
)
1044 static int ds_version_printed
;
1045 struct ldc_channel_config ds_cfg
= {
1048 .mode
= LDC_MODE_STREAM
,
1050 struct ldc_channel
*lp
;
1054 if (ds_version_printed
++ == 0)
1055 printk(KERN_INFO
"%s", version
);
1057 dp
= kzalloc(sizeof(*dp
), GFP_KERNEL
);
1062 dp
->rcv_buf
= kzalloc(4096, GFP_KERNEL
);
1066 dp
->rcv_buf_len
= 4096;
1068 ds_cfg
.tx_irq
= vdev
->tx_irq
;
1069 ds_cfg
.rx_irq
= vdev
->rx_irq
;
1071 lp
= ldc_alloc(vdev
->channel_id
, &ds_cfg
, dp
);
1074 goto out_free_rcv_buf
;
1078 err
= ldc_bind(lp
, "DS");
1101 static int ds_remove(struct vio_dev
*vdev
)
1106 static struct vio_device_id ds_match
[] = {
1108 .type
= "domain-services-port",
1113 static struct vio_driver ds_driver
= {
1114 .id_table
= ds_match
,
1116 .remove
= ds_remove
,
1119 .owner
= THIS_MODULE
,
1123 static int __init
ds_init(void)
1127 for (i
= 0; i
< ARRAY_SIZE(ds_states
); i
++)
1128 ds_states
[i
].handle
= ((u64
)i
<< 32);
1130 #ifdef CONFIG_HOTPLUG_CPU
1131 kthread_run(dr_cpu_thread
, NULL
, "kdrcpud");
1134 return vio_register_driver(&ds_driver
);
1137 subsys_initcall(ds_init
);