/*
 * Tty buffer allocation management
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
/**
 * tty_buffer_free_all - free buffers used by a tty
 * @tty: tty to free from
 *
 * Remove all the buffers pending on a tty whether queued with data
 * or in the free ring. Must be called when the tty is no longer in use.
 *
 * Locking: none
 */
void tty_buffer_free_all(struct tty_struct *tty)
{
        struct tty_buffer *thead;

        while ((thead = tty->buf.head) != NULL) {
                tty->buf.head = thead->next;
                kfree(thead);
        }
        while ((thead = tty->buf.free) != NULL) {
                tty->buf.free = thead->next;
                kfree(thead);
        }
        tty->buf.tail = NULL;
        tty->buf.memory_used = 0;
}
/**
 * tty_buffer_alloc - allocate a tty buffer
 * @tty: tty device
 * @size: desired size (characters)
 *
 * Allocate a new tty buffer to hold the desired number of characters.
 * Return NULL if out of memory or the allocation would exceed the
 * per-device queue limit.
 *
 * Locking: Caller must hold tty->buf.lock
 */
static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
{
        struct tty_buffer *p;

        if (tty->buf.memory_used + size > 65536)
                return NULL;
        p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
        if (p == NULL)
                return NULL;
        p->used = 0;
        p->size = size;
        p->next = NULL;
        p->commit = 0;
        p->read = 0;
        p->char_buf_ptr = (char *)(p->data);
        p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
        tty->buf.memory_used += size;
        return p;
}
/**
 * tty_buffer_free - free a tty buffer
 * @tty: tty owning the buffer
 * @b: the buffer to free
 *
 * Free a tty buffer, or add it to the free list according to our
 * internal strategy.
 *
 * Locking: Caller must hold tty->buf.lock
 */
static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
{
        /* Dumb strategy for now - should keep some stats */
        tty->buf.memory_used -= b->size;
        WARN_ON(tty->buf.memory_used < 0);

        if (b->size >= 512)
                kfree(b);
        else {
                b->next = tty->buf.free;
                tty->buf.free = b;
        }
}
/**
 * __tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 *
 * Flush all the buffers containing receive data. Caller must
 * hold the buffer lock and must have ensured no parallel flush to
 * ldisc is running.
 *
 * Locking: Caller must hold tty->buf.lock
 */
static void __tty_buffer_flush(struct tty_struct *tty)
{
        struct tty_buffer *thead;

        while ((thead = tty->buf.head) != NULL) {
                tty->buf.head = thead->next;
                tty_buffer_free(tty, thead);
        }
        tty->buf.tail = NULL;
}
/**
 * tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 *
 * Flush all the buffers containing receive data. If the buffer is
 * being processed by flush_to_ldisc then we defer the processing
 * to that function.
 *
 * Locking: none
 */
void tty_buffer_flush(struct tty_struct *tty)
{
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);

        /* If the data is being pushed to the tty layer then we can't
           process it here. Instead set a flag and the flush_to_ldisc
           path will process the flush request before it exits */
        if (test_bit(TTY_FLUSHING, &tty->flags)) {
                set_bit(TTY_FLUSHPENDING, &tty->flags);
                spin_unlock_irqrestore(&tty->buf.lock, flags);
                wait_event(tty->read_wait,
                                test_bit(TTY_FLUSHPENDING, &tty->flags) == 0);
                return;
        } else
                __tty_buffer_flush(tty);
        spin_unlock_irqrestore(&tty->buf.lock, flags);
}
/**
 * tty_buffer_find - find a free tty buffer
 * @tty: tty owning the buffer
 * @size: characters wanted
 *
 * Locate an existing suitable tty buffer or if we are lacking one then
 * allocate a new one. We round our buffers off in 256 character chunks
 * to get better allocation behaviour.
 *
 * Locking: Caller must hold tty->buf.lock
 */
static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
{
        struct tty_buffer **tbh = &tty->buf.free;

        while ((*tbh) != NULL) {
                struct tty_buffer *t = *tbh;
                if (t->size >= size) {
                        *tbh = t->next;
                        t->next = NULL;
                        t->used = 0;
                        t->commit = 0;
                        t->read = 0;
                        tty->buf.memory_used += t->size;
                        return t;
                }
                tbh = &((*tbh)->next);
        }
        /* Round the buffer size out */
        size = (size + 0xFF) & ~0xFF;
        return tty_buffer_alloc(tty, size);
        /* Should possibly check if this fails for the largest buffer we
           have queued and recycle that? */
}
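/*
 * Rounding example (editor's note, not in the original source): the mask
 * arithmetic above rounds the request up to the next multiple of 256:
 *
 *	size =   1  ->  (1   + 0xFF) & ~0xFF = 0x100 = 256
 *	size = 256  ->  (256 + 0xFF) & ~0xFF = 0x100 = 256
 *	size = 257  ->  (257 + 0xFF) & ~0xFF = 0x200 = 512
 *
 * so every freshly allocated buffer size is a multiple of 256 characters.
 */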
/**
 * tty_buffer_request_room - grow tty buffer if needed
 * @tty: tty structure
 * @size: size desired
 *
 * Make at least size bytes of linear space available for the tty
 * buffer. If we fail return the size we managed to find.
 *
 * Locking: Takes tty->buf.lock
 */
int tty_buffer_request_room(struct tty_struct *tty, size_t size)
{
        struct tty_buffer *b, *n;
        int left;
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);

        /* OPTIMISATION: We could keep a per tty "zero" sized buffer to
           remove this conditional if it's worth it. This would be invisible
           to the callers */
        if ((b = tty->buf.tail) != NULL)
                left = b->size - b->used;
        else
                left = 0;

        if (left < size) {
                /* This is the slow path - looking for new buffers to use */
                if ((n = tty_buffer_find(tty, size)) != NULL) {
                        if (b != NULL) {
                                b->next = n;
                                b->commit = b->used;
                        } else
                                tty->buf.head = n;
                        tty->buf.tail = n;
                } else
                        size = left;
        }

        spin_unlock_irqrestore(&tty->buf.lock, flags);
        return size;
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
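/*
 * Usage sketch (editor's illustration, not part of this file): a driver
 * that wants to copy in a block of received bytes typically asks for room
 * first and only copies what was actually granted.  "my_rx_buf",
 * "my_rx_count" and "dev" are hypothetical driver-side names.
 *
 *	int space = tty_buffer_request_room(tty, my_rx_count);
 *
 *	if (space < my_rx_count)
 *		dev_warn(dev, "tty buffer full, dropping %d bytes\n",
 *			 my_rx_count - space);
 *	tty_insert_flip_string(tty, my_rx_buf, space);
 *	tty_flip_buffer_push(tty);
 */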
/**
 * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
 * @tty: tty structure
 * @chars: characters
 * @flag: flag value for each character
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. All the characters
 * passed are marked with the supplied flag. Returns the number added.
 *
 * Locking: Called functions may take tty->buf.lock
 */
int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
                const unsigned char *chars, char flag, size_t size)
{
        int copied = 0;
        do {
                int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
                int space = tty_buffer_request_room(tty, goal);
                struct tty_buffer *tb = tty->buf.tail;
                /* If there is no space then tb may be NULL */
                if (unlikely(space == 0))
                        break;
                memcpy(tb->char_buf_ptr + tb->used, chars, space);
                memset(tb->flag_buf_ptr + tb->used, flag, space);
                tb->used += space;
                copied += space;
                chars += space;
                /* There is a small chance that we need to split the data over
                   several buffers. If this is the case we must loop */
        } while (unlikely(size > copied));
        return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
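/*
 * Usage sketch (editor's illustration, not part of this file): push a
 * block of data where every byte carries the same status.  "buf" and
 * "len" are hypothetical; in this era of the tree the inline helper
 * tty_insert_flip_string() in <linux/tty_flip.h> is a thin wrapper that
 * calls this function with TTY_NORMAL.
 *
 *	int added = tty_insert_flip_string_fixed_flag(tty, buf,
 *						      TTY_NORMAL, len);
 *
 * If added comes back smaller than len the buffer limit was hit and the
 * remaining bytes were not queued.
 */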
/**
 * tty_insert_flip_string_flags - Add characters to the tty buffer
 * @tty: tty structure
 * @chars: characters
 * @flags: flag bytes
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. For each character
 * the flags array indicates the status of the character. Returns the
 * number added.
 *
 * Locking: Called functions may take tty->buf.lock
 */
int tty_insert_flip_string_flags(struct tty_struct *tty,
                const unsigned char *chars, const char *flags, size_t size)
{
        int copied = 0;
        do {
                int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
                int space = tty_buffer_request_room(tty, goal);
                struct tty_buffer *tb = tty->buf.tail;
                /* If there is no space then tb may be NULL */
                if (unlikely(space == 0))
                        break;
                memcpy(tb->char_buf_ptr + tb->used, chars, space);
                memcpy(tb->flag_buf_ptr + tb->used, flags, space);
                tb->used += space;
                copied += space;
                chars += space;
                flags += space;
                /* There is a small chance that we need to split the data over
                   several buffers. If this is the case we must loop */
        } while (unlikely(size > copied));
        return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);
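/*
 * Usage sketch (editor's illustration, not part of this file): per
 * character flags let a driver mark individual bytes, e.g. a parity
 * error in the middle of an otherwise clean block.
 *
 *	unsigned char data[3] = { 'a', 'b', 'c' };
 *	char fl[3] = { TTY_NORMAL, TTY_PARITY, TTY_NORMAL };
 *
 *	tty_insert_flip_string_flags(tty, data, fl, 3);
 *	tty_flip_buffer_push(tty);
 */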
/**
 * tty_schedule_flip - push characters to ldisc
 * @tty: tty to push from
 *
 * Takes any pending buffers and transfers their ownership to the
 * ldisc side of the queue. It then schedules those characters for
 * processing by the line discipline.
 *
 * Locking: Takes tty->buf.lock
 */
void tty_schedule_flip(struct tty_struct *tty)
{
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);
        if (tty->buf.tail != NULL)
                tty->buf.tail->commit = tty->buf.tail->used;
        spin_unlock_irqrestore(&tty->buf.lock, flags);
        schedule_delayed_work(&tty->buf.work, 1);
}
EXPORT_SYMBOL(tty_schedule_flip);
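/*
 * Usage sketch (editor's illustration, not part of this file): queue a
 * single received byte and let the workqueue push it later, instead of
 * flushing synchronously.  "ch" is a hypothetical received byte.
 *
 *	tty_insert_flip_char(tty, ch, TTY_NORMAL);
 *	tty_schedule_flip(tty);
 */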
/**
 * tty_prepare_flip_string - make room for characters
 * @tty: tty
 * @chars: return pointer for character write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for normal characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 *
 * Locking: May call functions taking tty->buf.lock
 */
int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
                                                                size_t size)
{
        int space = tty_buffer_request_room(tty, size);
        if (likely(space)) {
                struct tty_buffer *tb = tty->buf.tail;
                *chars = tb->char_buf_ptr + tb->used;
                memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
                tb->used += space;
        }
        return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
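/*
 * Usage sketch (editor's illustration, not part of this file): a driver
 * with its own copy routine grabs a linear area, fills it, then pushes.
 * "my_hw_read" and "port" are hypothetical; my_hw_read is assumed to copy
 * up to len bytes out of device memory.
 *
 *	unsigned char *p;
 *	int len = tty_prepare_flip_string(tty, &p, 128);
 *
 *	if (len > 0) {
 *		my_hw_read(port, p, len);
 *		tty_flip_buffer_push(tty);
 *	}
 */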
/**
 * tty_prepare_flip_string_flags - make room for characters
 * @tty: tty
 * @chars: return pointer for character write area
 * @flags: return pointer for status flag write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 *
 * Locking: May call functions taking tty->buf.lock
 */
int tty_prepare_flip_string_flags(struct tty_struct *tty,
                        unsigned char **chars, char **flags, size_t size)
{
        int space = tty_buffer_request_room(tty, size);
        if (likely(space)) {
                struct tty_buffer *tb = tty->buf.tail;
                *chars = tb->char_buf_ptr + tb->used;
                *flags = tb->flag_buf_ptr + tb->used;
                tb->used += space;
        }
        return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
/**
 * flush_to_ldisc
 * @work: tty structure passed from work queue.
 *
 * This routine is called out of the software interrupt to flush data
 * from the buffer chain to the line discipline.
 *
 * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
 * while invoking the line discipline receive_buf method. The
 * receive_buf method is single threaded for each tty instance.
 */
static void flush_to_ldisc(struct work_struct *work)
{
        struct tty_struct *tty =
                container_of(work, struct tty_struct, buf.work.work);
        unsigned long flags;
        struct tty_ldisc *disc;

        disc = tty_ldisc_ref(tty);
        if (disc == NULL)       /* !TTY_LDISC */
                return;

        spin_lock_irqsave(&tty->buf.lock, flags);

        if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
                struct tty_buffer *head;
                while ((head = tty->buf.head) != NULL) {
                        int count;
                        char *char_buf;
                        unsigned char *flag_buf;

                        count = head->commit - head->read;
                        if (!count) {
                                if (head->next == NULL)
                                        break;
                                tty->buf.head = head->next;
                                tty_buffer_free(tty, head);
                                continue;
                        }
                        /* Ldisc or user is trying to flush the buffers
                           we are feeding to the ldisc, stop feeding the
                           line discipline as we want to empty the queue */
                        if (test_bit(TTY_FLUSHPENDING, &tty->flags))
                                break;
                        if (!tty->receive_room) {
                                schedule_delayed_work(&tty->buf.work, 1);
                                break;
                        }
                        if (count > tty->receive_room)
                                count = tty->receive_room;
                        char_buf = head->char_buf_ptr + head->read;
                        flag_buf = head->flag_buf_ptr + head->read;
                        head->read += count;
                        spin_unlock_irqrestore(&tty->buf.lock, flags);
                        disc->ops->receive_buf(tty, char_buf,
                                                        flag_buf, count);
                        spin_lock_irqsave(&tty->buf.lock, flags);
                }
                clear_bit(TTY_FLUSHING, &tty->flags);
        }

        /* We may have a deferred request to flush the input buffer,
           if so pull the chain under the lock and empty the queue */
        if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
                __tty_buffer_flush(tty);
                clear_bit(TTY_FLUSHPENDING, &tty->flags);
                wake_up(&tty->read_wait);
        }
        spin_unlock_irqrestore(&tty->buf.lock, flags);

        tty_ldisc_deref(disc);
}
/**
 * tty_flush_to_ldisc
 * @tty: tty to push
 *
 * Push the terminal flip buffers to the line discipline.
 *
 * Must not be called from IRQ context.
 */
void tty_flush_to_ldisc(struct tty_struct *tty)
{
        flush_delayed_work(&tty->buf.work);
}
/**
 * tty_flip_buffer_push - push terminal flip buffers
 * @tty: tty to push
 *
 * Queue a push of the terminal flip buffers to the line discipline. This
 * function must not be called from IRQ context if tty->low_latency is set.
 *
 * In the event of the queue being busy for flipping the work will be
 * held off and retried later.
 *
 * Locking: tty buffer lock. Driver locks in low latency mode.
 */
void tty_flip_buffer_push(struct tty_struct *tty)
{
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);
        if (tty->buf.tail != NULL)
                tty->buf.tail->commit = tty->buf.tail->used;
        spin_unlock_irqrestore(&tty->buf.lock, flags);

        if (tty->low_latency)
                flush_to_ldisc(&tty->buf.work.work);
        else
                schedule_delayed_work(&tty->buf.work, 1);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
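/*
 * Usage sketch (editor's illustration, not part of this file): the common
 * driver pattern is to batch inserts and push once per interrupt.  With
 * tty->low_latency set the push runs flush_to_ldisc() synchronously, so
 * the caller must not be in IRQ context in that case.  "rx_fifo_not_empty"
 * and "rx_fifo_read" are hypothetical hardware accessors.
 *
 *	while (rx_fifo_not_empty(port))
 *		tty_insert_flip_char(tty, rx_fifo_read(port), TTY_NORMAL);
 *	tty_flip_buffer_push(tty);
 */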
/**
 * tty_buffer_init - prepare a tty buffer structure
 * @tty: tty to initialise
 *
 * Set up the initial state of the buffer management for a tty device.
 * Must be called before the other tty buffer functions are used.
 *
 * Locking: none
 */
void tty_buffer_init(struct tty_struct *tty)
{
        spin_lock_init(&tty->buf.lock);
        tty->buf.head = NULL;
        tty->buf.tail = NULL;
        tty->buf.free = NULL;
        tty->buf.memory_used = 0;
        INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
}