/*
 * drivers/staging/dst/export.c
 *
 * DST server side: exports a local block device to remote clients
 * over the network (accept loop, permission checks, block IO relay).
 */
/*
 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
16 #include <linux/blkdev.h>
17 #include <linux/bio.h>
18 #include <linux/dst.h>
19 #include <linux/in.h>
20 #include <linux/in6.h>
21 #include <linux/poll.h>
22 #include <linux/slab.h>
23 #include <linux/socket.h>
25 #include <net/sock.h>
/*
 * Export bioset is used for server block IO requests: every bio
 * allocated from it carries a struct dst_export_priv in its front pad.
 */
static struct bio_set *dst_bio_set;
32 int __init dst_export_init(void)
34 int err = -ENOMEM;
36 dst_bio_set = bioset_create(32, sizeof(struct dst_export_priv));
37 if (!dst_bio_set)
38 goto err_out_exit;
40 return 0;
42 err_out_exit:
43 return err;
46 void dst_export_exit(void)
48 bioset_free(dst_bio_set);
52 * When client connects and autonegotiates with the server node,
53 * its permissions are checked in a security attributes and sent
54 * back.
56 static unsigned int dst_check_permissions(struct dst_state *main,
57 struct dst_state *st)
59 struct dst_node *n = main->node;
60 struct dst_secure *sentry;
61 struct dst_secure_user *s;
62 struct saddr *sa = &st->ctl.addr;
63 unsigned int perm = 0;
65 mutex_lock(&n->security_lock);
66 list_for_each_entry(sentry, &n->security_list, sec_entry) {
67 s = &sentry->sec;
69 if (s->addr.sa_family != sa->sa_family)
70 continue;
72 if (s->addr.sa_data_len != sa->sa_data_len)
73 continue;
76 * This '2' below is a port field. This may be very wrong to do
77 * in atalk for example though. If there will be any need
78 * to extent protocol to something else, I can create
79 * per-family helpers and use them instead of this memcmp.
81 if (memcmp(s->addr.sa_data + 2, sa->sa_data + 2,
82 sa->sa_data_len - 2))
83 continue;
85 perm = s->permissions;
87 mutex_unlock(&n->security_lock);
89 return perm;
93 * Accept new client: allocate appropriate network state and check permissions.
95 static struct dst_state *dst_accept_client(struct dst_state *st)
97 unsigned int revents = 0;
98 unsigned int err_mask = POLLERR | POLLHUP | POLLRDHUP;
99 unsigned int mask = err_mask | POLLIN;
100 struct dst_node *n = st->node;
101 int err = 0;
102 struct socket *sock = NULL;
103 struct dst_state *new;
105 while (!err && !sock) {
106 revents = dst_state_poll(st);
108 if (!(revents & mask)) {
109 DEFINE_WAIT(wait);
111 for (;;) {
112 prepare_to_wait(&st->thread_wait,
113 &wait, TASK_INTERRUPTIBLE);
114 if (!n->trans_scan_timeout || st->need_exit)
115 break;
117 revents = dst_state_poll(st);
119 if (revents & mask)
120 break;
122 if (signal_pending(current))
123 break;
126 * Magic HZ? Polling check above is not safe in
127 * all cases (like socket reset in BH context),
128 * so it is simpler just to postpone it to the
129 * process context instead of implementing
130 * special locking there.
132 schedule_timeout(HZ);
134 finish_wait(&st->thread_wait, &wait);
137 err = -ECONNRESET;
138 dst_state_lock(st);
140 dprintk("%s: st: %p, revents: %x [err: %d, in: %d].\n",
141 __func__, st, revents, revents & err_mask,
142 revents & POLLIN);
144 if (revents & err_mask) {
145 dprintk("%s: revents: %x, socket: %p, err: %d.\n",
146 __func__, revents, st->socket, err);
147 err = -ECONNRESET;
150 if (!n->trans_scan_timeout || st->need_exit)
151 err = -ENODEV;
153 if (st->socket && (revents & POLLIN))
154 err = kernel_accept(st->socket, &sock, 0);
156 dst_state_unlock(st);
159 if (err)
160 goto err_out_exit;
162 new = dst_state_alloc(st->node);
163 if (IS_ERR(new)) {
164 err = -ENOMEM;
165 goto err_out_release;
167 new->socket = sock;
169 new->ctl.addr.sa_data_len = sizeof(struct sockaddr);
170 err = kernel_getpeername(sock, (struct sockaddr *)&new->ctl.addr,
171 (int *)&new->ctl.addr.sa_data_len);
172 if (err)
173 goto err_out_put;
175 new->permissions = dst_check_permissions(st, new);
176 if (new->permissions == 0) {
177 err = -EPERM;
178 dst_dump_addr(sock, (struct sockaddr *)&new->ctl.addr,
179 "Client is not allowed to connect");
180 goto err_out_put;
183 err = dst_poll_init(new);
184 if (err)
185 goto err_out_put;
187 dst_dump_addr(sock, (struct sockaddr *)&new->ctl.addr,
188 "Connected client");
190 return new;
192 err_out_put:
193 dst_state_put(new);
194 err_out_release:
195 sock_release(sock);
196 err_out_exit:
197 return ERR_PTR(err);
201 * Each server's block request sometime finishes.
202 * Usually it happens in hard irq context of the appropriate controller,
203 * so to play good with all cases we just queue BIO into the queue
204 * and wake up processing thread, which gets completed request and
205 * send (encrypting if needed) it back to the client (if it was a read
206 * request), or sends back reply that writing successfully completed.
208 static int dst_export_process_request_queue(struct dst_state *st)
210 unsigned long flags;
211 struct dst_export_priv *p = NULL;
212 struct bio *bio;
213 int err = 0;
215 while (!list_empty(&st->request_list)) {
216 spin_lock_irqsave(&st->request_lock, flags);
217 if (!list_empty(&st->request_list)) {
218 p = list_first_entry(&st->request_list,
219 struct dst_export_priv, request_entry);
220 list_del(&p->request_entry);
222 spin_unlock_irqrestore(&st->request_lock, flags);
224 if (!p)
225 break;
227 bio = p->bio;
229 if (dst_need_crypto(st->node) && (bio_data_dir(bio) == READ))
230 err = dst_export_crypto(st->node, bio);
231 else
232 err = dst_export_send_bio(bio);
234 if (err)
235 break;
238 return err;
242 * Cleanup export state.
243 * It has to wait until all requests are finished,
244 * and then free them all.
246 static void dst_state_cleanup_export(struct dst_state *st)
248 struct dst_export_priv *p;
249 unsigned long flags;
252 * This loop waits for all pending bios to be completed and freed.
254 while (atomic_read(&st->refcnt) > 1) {
255 dprintk("%s: st: %p, refcnt: %d, list_empty: %d.\n",
256 __func__, st, atomic_read(&st->refcnt),
257 list_empty(&st->request_list));
258 wait_event_timeout(st->thread_wait,
259 (atomic_read(&st->refcnt) == 1) ||
260 !list_empty(&st->request_list),
261 HZ/2);
263 while (!list_empty(&st->request_list)) {
264 p = NULL;
265 spin_lock_irqsave(&st->request_lock, flags);
266 if (!list_empty(&st->request_list)) {
267 p = list_first_entry(&st->request_list,
268 struct dst_export_priv, request_entry);
269 list_del(&p->request_entry);
271 spin_unlock_irqrestore(&st->request_lock, flags);
273 if (p)
274 bio_put(p->bio);
276 dprintk("%s: st: %p, refcnt: %d, list_empty: %d, p: "
277 "%p.\n", __func__, st, atomic_read(&st->refcnt),
278 list_empty(&st->request_list), p);
282 dst_state_put(st);
286 * Client accepting thread.
287 * Not only accepts new connection, but also schedules receiving thread
288 * and performs request completion described above.
290 static int dst_accept(void *init_data, void *schedule_data)
292 struct dst_state *main_st = schedule_data;
293 struct dst_node *n = init_data;
294 struct dst_state *st;
295 int err;
297 while (n->trans_scan_timeout && !main_st->need_exit) {
298 dprintk("%s: main_st: %p, n: %p.\n", __func__, main_st, n);
299 st = dst_accept_client(main_st);
300 if (IS_ERR(st))
301 continue;
303 err = dst_state_schedule_receiver(st);
304 if (!err) {
305 while (n->trans_scan_timeout) {
306 err = wait_event_interruptible_timeout(st->thread_wait,
307 !list_empty(&st->request_list) ||
308 !n->trans_scan_timeout ||
309 st->need_exit,
310 HZ);
312 if (!n->trans_scan_timeout || st->need_exit)
313 break;
315 if (list_empty(&st->request_list))
316 continue;
318 err = dst_export_process_request_queue(st);
319 if (err)
320 break;
323 st->need_exit = 1;
324 wake_up(&st->thread_wait);
327 dst_state_cleanup_export(st);
330 dprintk("%s: freeing listening socket st: %p.\n", __func__, main_st);
332 dst_state_lock(main_st);
333 dst_poll_exit(main_st);
334 dst_state_socket_release(main_st);
335 dst_state_unlock(main_st);
336 dst_state_put(main_st);
337 dprintk("%s: freed listening socket st: %p.\n", __func__, main_st);
339 return 0;
342 int dst_start_export(struct dst_node *n)
344 if (list_empty(&n->security_list)) {
345 printk(KERN_ERR "You are trying to export node '%s' "
346 "without security attributes.\nNo clients will "
347 "be allowed to connect. Exiting.\n", n->name);
348 return -EINVAL;
350 return dst_node_trans_init(n, sizeof(struct dst_export_priv));
354 * Initialize listening state and schedule accepting thread.
356 int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le)
358 struct dst_state *st;
359 int err = -ENOMEM;
360 struct dst_network_ctl *ctl = &le->ctl;
362 memcpy(&n->info->net, ctl, sizeof(struct dst_network_ctl));
364 st = dst_state_alloc(n);
365 if (IS_ERR(st)) {
366 err = PTR_ERR(st);
367 goto err_out_exit;
369 memcpy(&st->ctl, ctl, sizeof(struct dst_network_ctl));
371 err = dst_state_socket_create(st);
372 if (err)
373 goto err_out_put;
375 st->socket->sk->sk_reuse = 1;
377 err = kernel_bind(st->socket, (struct sockaddr *)&ctl->addr,
378 ctl->addr.sa_data_len);
379 if (err)
380 goto err_out_socket_release;
382 err = kernel_listen(st->socket, 1024);
383 if (err)
384 goto err_out_socket_release;
385 n->state = st;
387 err = dst_poll_init(st);
388 if (err)
389 goto err_out_socket_release;
391 dst_state_get(st);
393 err = thread_pool_schedule(n->pool, dst_thread_setup,
394 dst_accept, st, MAX_SCHEDULE_TIMEOUT);
395 if (err)
396 goto err_out_poll_exit;
398 return 0;
400 err_out_poll_exit:
401 dst_poll_exit(st);
402 err_out_socket_release:
403 dst_state_socket_release(st);
404 err_out_put:
405 dst_state_put(st);
406 err_out_exit:
407 n->state = NULL;
408 return err;
412 * Free bio and related private data.
413 * Also drop a reference counter for appropriate state,
414 * which waits when there are no more block IOs in-flight.
416 static void dst_bio_destructor(struct bio *bio)
418 struct bio_vec *bv;
419 struct dst_export_priv *priv = bio->bi_private;
420 int i;
422 bio_for_each_segment(bv, bio, i) {
423 if (!bv->bv_page)
424 break;
426 __free_page(bv->bv_page);
429 if (priv)
430 dst_state_put(priv->state);
431 bio_free(bio, dst_bio_set);
435 * Block IO completion. Queue request to be sent back to
436 * the client (or just confirmation).
438 static void dst_bio_end_io(struct bio *bio, int err)
440 struct dst_export_priv *p = bio->bi_private;
441 struct dst_state *st = p->state;
442 unsigned long flags;
444 spin_lock_irqsave(&st->request_lock, flags);
445 list_add_tail(&p->request_entry, &st->request_list);
446 spin_unlock_irqrestore(&st->request_lock, flags);
448 wake_up(&st->thread_wait);
452 * Allocate read request for the server.
454 static int dst_export_read_request(struct bio *bio, unsigned int total_size)
456 unsigned int size;
457 struct page *page;
458 int err;
460 while (total_size) {
461 err = -ENOMEM;
462 page = alloc_page(GFP_KERNEL);
463 if (!page)
464 goto err_out_exit;
466 size = min_t(unsigned int, PAGE_SIZE, total_size);
468 err = bio_add_page(bio, page, size, 0);
469 dprintk("%s: bio: %llu/%u, size: %u, err: %d.\n",
470 __func__, (u64)bio->bi_sector, bio->bi_size,
471 size, err);
472 if (err <= 0)
473 goto err_out_free_page;
475 total_size -= size;
478 return 0;
480 err_out_free_page:
481 __free_page(page);
482 err_out_exit:
483 return err;
487 * Allocate write request for the server.
488 * Should not only get pages, but also read data from the network.
490 static int dst_export_write_request(struct dst_state *st,
491 struct bio *bio, unsigned int total_size)
493 unsigned int size;
494 struct page *page;
495 void *data;
496 int err;
498 while (total_size) {
499 err = -ENOMEM;
500 page = alloc_page(GFP_KERNEL);
501 if (!page)
502 goto err_out_exit;
504 data = kmap(page);
505 if (!data)
506 goto err_out_free_page;
508 size = min_t(unsigned int, PAGE_SIZE, total_size);
510 err = dst_data_recv(st, data, size);
511 if (err)
512 goto err_out_unmap_page;
514 err = bio_add_page(bio, page, size, 0);
515 if (err <= 0)
516 goto err_out_unmap_page;
518 kunmap(page);
520 total_size -= size;
523 return 0;
525 err_out_unmap_page:
526 kunmap(page);
527 err_out_free_page:
528 __free_page(page);
529 err_out_exit:
530 return err;
534 * Groovy, we've gotten an IO request from the client.
535 * Allocate BIO from the bioset, private data from the mempool
536 * and lots of pages for IO.
538 int dst_process_io(struct dst_state *st)
540 struct dst_node *n = st->node;
541 struct dst_cmd *cmd = st->data;
542 struct bio *bio;
543 struct dst_export_priv *priv;
544 int err = -ENOMEM;
546 if (unlikely(!n->bdev)) {
547 err = -EINVAL;
548 goto err_out_exit;
551 bio = bio_alloc_bioset(GFP_KERNEL,
552 PAGE_ALIGN(cmd->size) >> PAGE_SHIFT,
553 dst_bio_set);
554 if (!bio)
555 goto err_out_exit;
557 priv = (struct dst_export_priv *)(((void *)bio) -
558 sizeof (struct dst_export_priv));
560 priv->state = dst_state_get(st);
561 priv->bio = bio;
563 bio->bi_private = priv;
564 bio->bi_end_io = dst_bio_end_io;
565 bio->bi_destructor = dst_bio_destructor;
566 bio->bi_bdev = n->bdev;
569 * Server side is only interested in two low bits:
570 * uptodate (set by itself actually) and rw block
572 bio->bi_flags |= cmd->flags & 3;
574 bio->bi_rw = cmd->rw;
575 bio->bi_size = 0;
576 bio->bi_sector = cmd->sector;
578 dst_bio_to_cmd(bio, &priv->cmd, DST_IO_RESPONSE, cmd->id);
580 priv->cmd.flags = 0;
581 priv->cmd.size = cmd->size;
583 if (bio_data_dir(bio) == WRITE) {
584 err = dst_recv_cdata(st, priv->cmd.hash);
585 if (err)
586 goto err_out_free;
588 err = dst_export_write_request(st, bio, cmd->size);
589 if (err)
590 goto err_out_free;
592 if (dst_need_crypto(n))
593 return dst_export_crypto(n, bio);
594 } else {
595 err = dst_export_read_request(bio, cmd->size);
596 if (err)
597 goto err_out_free;
600 dprintk("%s: bio: %llu/%u, rw: %lu, dir: %lu, flags: %lx, phys: %d.\n",
601 __func__, (u64)bio->bi_sector, bio->bi_size,
602 bio->bi_rw, bio_data_dir(bio),
603 bio->bi_flags, bio->bi_phys_segments);
605 generic_make_request(bio);
607 return 0;
609 err_out_free:
610 bio_put(bio);
611 err_out_exit:
612 return err;
616 * Ok, block IO is ready, let's send it back to the client...
618 int dst_export_send_bio(struct bio *bio)
620 struct dst_export_priv *p = bio->bi_private;
621 struct dst_state *st = p->state;
622 struct dst_cmd *cmd = &p->cmd;
623 int err;
625 dprintk("%s: id: %llu, bio: %llu/%u, csize: %u, flags: %lu, rw: %lu.\n",
626 __func__, cmd->id, (u64)bio->bi_sector, bio->bi_size,
627 cmd->csize, bio->bi_flags, bio->bi_rw);
629 dst_convert_cmd(cmd);
631 dst_state_lock(st);
632 if (!st->socket) {
633 err = -ECONNRESET;
634 goto err_out_unlock;
637 if (bio_data_dir(bio) == WRITE) {
638 /* ... or just confirmation that writing has completed. */
639 cmd->size = cmd->csize = 0;
640 err = dst_data_send_header(st->socket, cmd,
641 sizeof(struct dst_cmd), 0);
642 if (err)
643 goto err_out_unlock;
644 } else {
645 err = dst_send_bio(st, cmd, bio);
646 if (err)
647 goto err_out_unlock;
650 dst_state_unlock(st);
652 bio_put(bio);
653 return 0;
655 err_out_unlock:
656 dst_state_unlock(st);
658 bio_put(bio);
659 return err;