/arch/um/drivers/line.c
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/irqreturn.h"
#include "linux/kd.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "chan_kern.h"
#include "irq_kern.h"
#include "irq_user.h"
#include "kern_util.h"
#include "os.h"

#define LINE_BUFSIZE 4096

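/*
 * Read interrupt handler for a line's input channel: feed whatever the
 * host file descriptor has available into the channel layer, which
 * pushes it up through the attached tty (if any).
 */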
static irqreturn_t line_interrupt(int irq, void *data)
{
	struct chan *chan = data;
	struct line *line = chan->line;

	if (line)
		chan_interrupt(&line->chan_list, &line->task, line->tty, irq);
	return IRQ_HANDLED;
}

static void line_timer_cb(struct work_struct *work)
{
	struct line *line = container_of(work, struct line, task.work);

	if (!line->throttled)
		chan_interrupt(&line->chan_list, &line->task, line->tty,
			       line->driver->read_irq);
}

/*
 * Returns the free space inside the ring buffer of this line.
 *
 * Should be called while holding line->lock (this does not modify data).
 */
static int write_room(struct line *line)
{
	int n;

	if (line->buffer == NULL)
		return LINE_BUFSIZE - 1;

	/* This is for the case where the buffer is wrapped! */
	n = line->head - line->tail;

	if (n <= 0)
		n += LINE_BUFSIZE; /* The other case */
	return n - 1;
}

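/* tty_operations entry point: report the free space under line->lock. */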
int line_write_room(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int room;

	spin_lock_irqsave(&line->lock, flags);
	room = write_room(line);
	spin_unlock_irqrestore(&line->lock, flags);

	return room;
}

int line_chars_in_buffer(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&line->lock, flags);
	/*
	 * write_room() keeps one byte free so that a full buffer can be
	 * told apart from an empty one, so add that byte back here.
	 */
	ret = LINE_BUFSIZE - (write_room(line) + 1);
	spin_unlock_irqrestore(&line->lock, flags);

	return ret;
}

/*
 * This copies the content of buf into the circular buffer associated with
 * this line.
 * The return value is the number of characters actually copied, i.e. the ones
 * for which there was space: this function is not supposed to ever flush out
 * the circular buffer.
 *
 * Must be called while holding line->lock!
 */
static int buffer_data(struct line *line, const char *buf, int len)
{
	int end, room;

	if (line->buffer == NULL) {
		line->buffer = kmalloc(LINE_BUFSIZE, GFP_ATOMIC);
		if (line->buffer == NULL) {
			printk(KERN_ERR "buffer_data - atomic allocation "
			       "failed\n");
			return 0;
		}
		line->head = line->buffer;
		line->tail = line->buffer;
	}

	room = write_room(line);
	len = (len > room) ? room : len;

	end = line->buffer + LINE_BUFSIZE - line->tail;

	if (len < end) {
		memcpy(line->tail, buf, len);
		line->tail += len;
	}
	else {
		/* The circular buffer is wrapping */
		memcpy(line->tail, buf, end);
		buf += end;
		memcpy(line->buffer, buf, len - end);
		line->tail = line->buffer + len - end;
	}

	return len;
}

/*
 * Flushes the ring buffer to the output channels. That is, write_chan is
 * called, passing it line->head as buffer, and an appropriate count.
 *
 * On exit, returns 1 when the buffer is empty,
 * 0 when the buffer is not empty on exit,
 * and -errno when an error occurred.
 *
 * Must be called while holding line->lock!
 */
static int flush_buffer(struct line *line)
{
	int n, count;

	if ((line->buffer == NULL) || (line->head == line->tail))
		return 1;

	if (line->tail < line->head) {
		/* line->buffer + LINE_BUFSIZE is the end of the buffer! */
		count = line->buffer + LINE_BUFSIZE - line->head;

		n = write_chan(&line->chan_list, line->head, count,
			       line->driver->write_irq);
		if (n < 0)
			return n;
		if (n == count) {
			/*
			 * We have flushed from ->head to buffer end, now we
			 * must flush only from the beginning to ->tail.
			 */
			line->head = line->buffer;
		} else {
			line->head += n;
			return 0;
		}
	}

	count = line->tail - line->head;
	n = write_chan(&line->chan_list, line->head, count,
		       line->driver->write_irq);

	if (n < 0)
		return n;

	line->head += n;
	return line->head == line->tail;
}

void line_flush_buffer(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&line->lock, flags);
	err = flush_buffer(line);
	spin_unlock_irqrestore(&line->lock, flags);
}

/*
 * We map both ->flush_chars and ->put_char (which come as a pair) onto
 * ->flush_buffer and ->write. Hope it's not that bad.
 */
void line_flush_chars(struct tty_struct *tty)
{
	line_flush_buffer(tty);
}

int line_put_char(struct tty_struct *tty, unsigned char ch)
{
	return line_write(tty, &ch, sizeof(ch));
}

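/*
 * Write path: if the ring buffer is empty, try to push the data straight
 * out to the channel; anything the host can't take right now (or anything
 * written while the buffer is non-empty) is buffered and flushed later by
 * line_write_interrupt() when the output descriptor becomes writable.
 */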
int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int n, ret = 0;

	spin_lock_irqsave(&line->lock, flags);
	if (line->head != line->tail)
		ret = buffer_data(line, buf, len);
	else {
		n = write_chan(&line->chan_list, buf, len,
			       line->driver->write_irq);
		if (n < 0) {
			ret = n;
			goto out_up;
		}

		len -= n;
		ret += n;
		if (len > 0)
			ret += buffer_data(line, buf + n, len);
	}
out_up:
	spin_unlock_irqrestore(&line->lock, flags);
	return ret;
}

void line_set_termios(struct tty_struct *tty, struct ktermios * old)
{
	/* nothing */
}

static const struct {
	int cmd;
	char *level;
	char *name;
} tty_ioctls[] = {
	/* don't print these, they flood the log ... */
	{ TCGETS, NULL, "TCGETS" },
	{ TCSETS, NULL, "TCSETS" },
	{ TCSETSW, NULL, "TCSETSW" },
	{ TCFLSH, NULL, "TCFLSH" },
	{ TCSBRK, NULL, "TCSBRK" },

	/* general tty stuff */
	{ TCSETSF, KERN_DEBUG, "TCSETSF" },
	{ TCGETA, KERN_DEBUG, "TCGETA" },
	{ TIOCMGET, KERN_DEBUG, "TIOCMGET" },
	{ TCSBRKP, KERN_DEBUG, "TCSBRKP" },
	{ TIOCMSET, KERN_DEBUG, "TIOCMSET" },

	/* linux-specific ones */
	{ TIOCLINUX, KERN_INFO, "TIOCLINUX" },
	{ KDGKBMODE, KERN_INFO, "KDGKBMODE" },
	{ KDGKBTYPE, KERN_INFO, "KDGKBTYPE" },
	{ KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" },
};

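/*
 * No ioctl is actually implemented here: the termios and flow-control
 * commands listed below are explicitly refused with -ENOIOCTLCMD so the
 * generic tty code handles them, and anything not found in tty_ioctls[]
 * is logged as unknown before being refused as well.
 */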
int line_ioctl(struct tty_struct *tty, struct file * file,
	       unsigned int cmd, unsigned long arg)
{
	int ret;
	int i;

	ret = 0;
	switch(cmd) {
#ifdef TIOCGETP
	case TIOCGETP:
	case TIOCSETP:
	case TIOCSETN:
#endif
#ifdef TIOCGETC
	case TIOCGETC:
	case TIOCSETC:
#endif
#ifdef TIOCGLTC
	case TIOCGLTC:
	case TIOCSLTC:
#endif
	/*
	 * Note: these are out of date as we now have TCGETS2 etc but this
	 * whole lot should probably go away
	 */
	case TCGETS:
	case TCSETSF:
	case TCSETSW:
	case TCSETS:
	case TCGETA:
	case TCSETAF:
	case TCSETAW:
	case TCSETA:
	case TCXONC:
	case TCFLSH:
	case TIOCOUTQ:
	case TIOCINQ:
	case TIOCGLCKTRMIOS:
	case TIOCSLCKTRMIOS:
	case TIOCPKT:
	case TIOCGSOFTCAR:
	case TIOCSSOFTCAR:
		return -ENOIOCTLCMD;
#if 0
	case TCwhatever:
		/* do something */
		break;
#endif
	default:
		for (i = 0; i < ARRAY_SIZE(tty_ioctls); i++)
			if (cmd == tty_ioctls[i].cmd)
				break;
		if (i == ARRAY_SIZE(tty_ioctls)) {
			printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n",
			       __func__, tty->name, cmd);
		}
		ret = -ENOIOCTLCMD;
		break;
	}
	return ret;
}

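/*
 * Flow control: when the tty layer throttles us, stop watching the read
 * descriptor on the host; line_unthrottle() below drains whatever arrived
 * in the meantime and only re-enables the interrupt if that did not
 * throttle us again.
 */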
void line_throttle(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;

	deactivate_chan(&line->chan_list, line->driver->read_irq);
	line->throttled = 1;
}

void line_unthrottle(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;

	line->throttled = 0;
	chan_interrupt(&line->chan_list, &line->task, tty,
		       line->driver->read_irq);

	/*
	 * Maybe there is enough stuff pending that calling the interrupt
	 * throttles us again. In this case, line->throttled will be 1
	 * again and we shouldn't turn the interrupt back on.
	 */
	if (!line->throttled)
		reactivate_chan(&line->chan_list, line->driver->read_irq);
}

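/*
 * Write interrupt: the host descriptor became writable again, so try to
 * flush the buffered output and, if the tty is still around, wake up any
 * writers waiting for room.
 */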
static irqreturn_t line_write_interrupt(int irq, void *data)
{
	struct chan *chan = data;
	struct line *line = chan->line;
	struct tty_struct *tty = line->tty;
	int err;

	/*
	 * Interrupts are disabled here because we registered the interrupt with
	 * IRQF_DISABLED (see line_setup_irq).
	 */

	spin_lock(&line->lock);
	err = flush_buffer(line);
	if (err == 0) {
		/* The buffer still holds data - drop the lock before returning. */
		spin_unlock(&line->lock);
		return IRQ_NONE;
	} else if (err < 0) {
		line->head = line->buffer;
		line->tail = line->buffer;
	}
	spin_unlock(&line->lock);

	if (tty == NULL)
		return IRQ_NONE;

	tty_wakeup(tty);
	return IRQ_HANDLED;
}

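/* Register the read and/or write IRQs for the host descriptor backing a line. */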
int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
{
	const struct line_driver *driver = line->driver;
	int err = 0, flags = IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM;

	if (input)
		err = um_request_irq(driver->read_irq, fd, IRQ_READ,
				     line_interrupt, flags,
				     driver->read_irq_name, data);
	if (err)
		return err;
	if (output)
		err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
				     line_write_interrupt, flags,
				     driver->write_irq_name, data);
	line->have_irq = 1;
	return err;
}

/*
 * Normally, a driver like this can rely mostly on the tty layer
 * locking, particularly when it comes to the driver structure.
 * However, in this case, mconsole requests can come in "from the
 * side", and race with opens and closes.
 *
 * mconsole config requests will want to be sure the device isn't in
 * use, and get_config, open, and close will want a stable
 * configuration. The checking and modification of the configuration
 * is done under a spinlock. The device counts as being in use when
 * line->tty->count > 1; that check also happens under the spinlock.
 *
 * tty->count serves to decide whether the device should be enabled or
 * disabled on the host. If it's equal to 1, then we are doing the
 * first open or last close. Otherwise, open and close just return.
 */

int line_open(struct line *lines, struct tty_struct *tty)
{
	struct line *line = &lines[tty->index];
	int err = -ENODEV;

	spin_lock(&line->count_lock);
	if (!line->valid)
		goto out_unlock;

	err = 0;
	if (tty->count > 1)
		goto out_unlock;

	spin_unlock(&line->count_lock);

	tty->driver_data = line;
	line->tty = tty;

	err = enable_chan(line);
	if (err)
		return err;

	INIT_DELAYED_WORK(&line->task, line_timer_cb);

	if (!line->sigio) {
		chan_enable_winch(&line->chan_list, tty);
		line->sigio = 1;
	}

	chan_window_size(&line->chan_list, &tty->winsize.ws_row,
			 &tty->winsize.ws_col);

	return err;

out_unlock:
	spin_unlock(&line->count_lock);
	return err;
}

static void unregister_winch(struct tty_struct *tty);

void line_close(struct tty_struct *tty, struct file * filp)
{
	struct line *line = tty->driver_data;

	/*
	 * If line_open fails (and tty->driver_data is never set),
	 * tty_open will call line_close. So just return in this case.
	 */
	if (line == NULL)
		return;

	/* We ignore the error anyway! */
	flush_buffer(line);

	spin_lock(&line->count_lock);
	if (!line->valid)
		goto out_unlock;

	if (tty->count > 1)
		goto out_unlock;

	spin_unlock(&line->count_lock);

	line->tty = NULL;
	tty->driver_data = NULL;

	if (line->sigio) {
		unregister_winch(tty);
		line->sigio = 0;
	}

	return;

out_unlock:
	spin_unlock(&line->count_lock);
}

void close_lines(struct line *lines, int nlines)
{
	int i;

	for(i = 0; i < nlines; i++)
		close_chan(&lines[i].chan_list, 0);
}

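/*
 * Record the configuration string for a single line, but only if the new
 * setting's priority is at least the line's current one; "none" marks the
 * line as absent. Fails if the device is already open.
 */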
static int setup_one_line(struct line *lines, int n, char *init, int init_prio,
			  char **error_out)
{
	struct line *line = &lines[n];
	int err = -EINVAL;

	spin_lock(&line->count_lock);

	if (line->tty != NULL) {
		*error_out = "Device is already open";
		goto out;
	}

	if (line->init_pri <= init_prio) {
		line->init_pri = init_prio;
		if (!strcmp(init, "none"))
			line->valid = 0;
		else {
			line->init_str = init;
			line->valid = 1;
		}
	}
	err = 0;
out:
	spin_unlock(&line->count_lock);
	return err;
}

/*
 * Common setup code for both startup command line and mconsole initialization.
 * @lines contains the array (of size @num) to modify;
 * @init is the setup string;
 * @error_out is an error string in the case of failure;
 */

int line_setup(struct line *lines, unsigned int num, char *init,
	       char **error_out)
{
	int i, n, err;
	char *end;

	if (*init == '=') {
		/*
		 * We said con=/ssl= instead of con#=, so we are configuring all
		 * consoles at once.
		 */
		n = -1;
	}
	else {
		n = simple_strtoul(init, &end, 0);
		if (*end != '=') {
			*error_out = "Couldn't parse device number";
			return -EINVAL;
		}
		init = end;
	}
	init++;

	if (n >= (signed int) num) {
		*error_out = "Device number out of range";
		return -EINVAL;
	}
	else if (n >= 0) {
		err = setup_one_line(lines, n, init, INIT_ONE, error_out);
		if (err)
			return err;
	}
	else {
		for(i = 0; i < num; i++) {
			err = setup_one_line(lines, i, init, INIT_ALL,
					     error_out);
			if (err)
				return err;
		}
	}
	return n == -1 ? num : n;
}

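/*
 * mconsole "config" handler: duplicate the string (line_setup() keeps a
 * pointer to it) and set up the channel pair for the chosen device.
 */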
int line_config(struct line *lines, unsigned int num, char *str,
		const struct chan_opts *opts, char **error_out)
{
	struct line *line;
	char *new;
	int n;

	if (*str == '=') {
		*error_out = "Can't configure all devices from mconsole";
		return -EINVAL;
	}

	new = kstrdup(str, GFP_KERNEL);
	if (new == NULL) {
		*error_out = "Failed to allocate memory";
		return -ENOMEM;
	}
	n = line_setup(lines, num, new, error_out);
	if (n < 0)
		return n;

	line = &lines[n];
	return parse_chan_pair(line->init_str, line, n, opts, error_out);
}

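/*
 * mconsole "config <dev>" query: report "none" for a disabled line, the
 * saved init string for a configured but closed one, or ask the channel
 * layer for the live configuration of an open line.
 */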
int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
		    int size, char **error_out)
{
	struct line *line;
	char *end;
	int dev, n = 0;

	dev = simple_strtoul(name, &end, 0);
	if ((*end != '\0') || (end == name)) {
		*error_out = "line_get_config failed to parse device number";
		return 0;
	}

	if ((dev < 0) || (dev >= num)) {
		*error_out = "device number out of range";
		return 0;
	}

	line = &lines[dev];

	spin_lock(&line->count_lock);
	if (!line->valid)
		CONFIG_CHUNK(str, size, n, "none", 1);
	else if (line->tty == NULL)
		CONFIG_CHUNK(str, size, n, line->init_str, 1);
	else n = chan_config_string(&line->chan_list, str, size, error_out);
	spin_unlock(&line->count_lock);

	return n;
}

int line_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*str = end;
	*start_out = n;
	*end_out = n;
	return n;
}

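/*
 * Remove a device by reconfiguring it to "none"; setup_one_line() refuses
 * to do this while the device is open.
 */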
int line_remove(struct line *lines, unsigned int num, int n, char **error_out)
{
	int err;
	char config[sizeof("conxxxx=none\0")];

	sprintf(config, "%d=none", n);
	err = line_setup(lines, num, config, error_out);
	if (err >= 0)
		err = 0;
	return err;
}

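/*
 * Allocate and register the tty driver for a set of lines, unregister the
 * device nodes of lines that are not configured, and hook the driver up to
 * mconsole.
 */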
struct tty_driver *register_lines(struct line_driver *line_driver,
				  const struct tty_operations *ops,
				  struct line *lines, int nlines)
{
	int i;
	struct tty_driver *driver = alloc_tty_driver(nlines);

	if (!driver)
		return NULL;

	driver->driver_name = line_driver->name;
	driver->name = line_driver->device_name;
	driver->major = line_driver->major;
	driver->minor_start = line_driver->minor_start;
	driver->type = line_driver->type;
	driver->subtype = line_driver->subtype;
	driver->flags = TTY_DRIVER_REAL_RAW;
	driver->init_termios = tty_std_termios;
	tty_set_operations(driver, ops);

	if (tty_register_driver(driver)) {
		printk(KERN_ERR "register_lines : can't register %s driver\n",
		       line_driver->name);
		put_tty_driver(driver);
		return NULL;
	}

	for(i = 0; i < nlines; i++) {
		if (!lines[i].valid)
			tty_unregister_device(driver, i);
	}

	mconsole_register_dev(&line_driver->mc);
	return driver;
}

static DEFINE_SPINLOCK(winch_handler_lock);
static LIST_HEAD(winch_handlers);

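/*
 * Boot-time initialization: take the init strings accumulated from the
 * command line (or compiled-in defaults) and parse them into channel
 * pairs, marking a line invalid if its string cannot be parsed.
 */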
void lines_init(struct line *lines, int nlines, struct chan_opts *opts)
{
	struct line *line;
	char *error;
	int i;

	for(i = 0; i < nlines; i++) {
		line = &lines[i];
		INIT_LIST_HEAD(&line->chan_list);

		if (line->init_str == NULL)
			continue;

		line->init_str = kstrdup(line->init_str, GFP_KERNEL);
		if (line->init_str == NULL)
			printk(KERN_ERR "lines_init - kstrdup returned NULL\n");

		if (parse_chan_pair(line->init_str, line, i, opts, &error)) {
			printk(KERN_ERR "parse_chan_pair failed for "
			       "device %d : %s\n", i, error);
			line->valid = 0;
		}
	}
}

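/*
 * SIGWINCH plumbing: for each console attached to a host terminal, a
 * helper on the host (identified by winch->pid, running on winch->stack)
 * watches the terminal and writes to winch->fd when the window size
 * changes; winch_interrupt() then picks up the new size and forwards
 * SIGWINCH to the tty's foreground process group.
 */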
struct winch {
	struct list_head list;
	int fd;
	int tty_fd;
	int pid;
	struct tty_struct *tty;
	unsigned long stack;
};

static void free_winch(struct winch *winch, int free_irq_ok)
{
	if (free_irq_ok)
		free_irq(WINCH_IRQ, winch);

	list_del(&winch->list);

	if (winch->pid != -1)
		os_kill_process(winch->pid, 1);
	if (winch->fd != -1)
		os_close_file(winch->fd);
	if (winch->stack != 0)
		free_stack(winch->stack, 0);
	kfree(winch);
}

static irqreturn_t winch_interrupt(int irq, void *data)
{
	struct winch *winch = data;
	struct tty_struct *tty;
	struct line *line;
	int err;
	char c;

	if (winch->fd != -1) {
		err = generic_read(winch->fd, &c, NULL);
		if (err < 0) {
			if (err != -EAGAIN) {
				printk(KERN_ERR "winch_interrupt : "
				       "read failed, errno = %d\n", -err);
				printk(KERN_ERR "fd %d is losing SIGWINCH "
				       "support\n", winch->tty_fd);
				free_winch(winch, 0);
				return IRQ_HANDLED;
			}
			goto out;
		}
	}
	tty = winch->tty;
	if (tty != NULL) {
		line = tty->driver_data;
		if (line != NULL) {
			chan_window_size(&line->chan_list, &tty->winsize.ws_row,
					 &tty->winsize.ws_col);
			kill_pgrp(tty->pgrp, SIGWINCH, 1);
		}
	}
 out:
	if (winch->fd != -1)
		reactivate_fd(winch->fd, WINCH_IRQ);
	return IRQ_HANDLED;
}

void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
			unsigned long stack)
{
	struct winch *winch;

	winch = kmalloc(sizeof(*winch), GFP_KERNEL);
	if (winch == NULL) {
		printk(KERN_ERR "register_winch_irq - kmalloc failed\n");
		goto cleanup;
	}

	*winch = ((struct winch) { .list = LIST_HEAD_INIT(winch->list),
				   .fd = fd,
				   .tty_fd = tty_fd,
				   .pid = pid,
				   .tty = tty,
				   .stack = stack });

	if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
			   IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			   "winch", winch) < 0) {
		printk(KERN_ERR "register_winch_irq - failed to register "
		       "IRQ\n");
		goto out_free;
	}

	spin_lock(&winch_handler_lock);
	list_add(&winch->list, &winch_handlers);
	spin_unlock(&winch_handler_lock);

	return;

 out_free:
	kfree(winch);
 cleanup:
	os_kill_process(pid, 1);
	os_close_file(fd);
	if (stack != 0)
		free_stack(stack, 0);
}

static void unregister_winch(struct tty_struct *tty)
{
	struct list_head *ele;
	struct winch *winch;

	spin_lock(&winch_handler_lock);

	list_for_each(ele, &winch_handlers) {
		winch = list_entry(ele, struct winch, list);
		if (winch->tty == tty) {
			free_winch(winch, 1);
			break;
		}
	}
	spin_unlock(&winch_handler_lock);
}

static void winch_cleanup(void)
{
	struct list_head *ele, *next;
	struct winch *winch;

	spin_lock(&winch_handler_lock);

	list_for_each_safe(ele, next, &winch_handlers) {
		winch = list_entry(ele, struct winch, list);
		free_winch(winch, 1);
	}

	spin_unlock(&winch_handler_lock);
}
__uml_exitcall(winch_cleanup);

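/*
 * Append this UML instance's umid to an xterm title so that multiple
 * instances can be told apart; falls back to the plain base string if
 * there is no umid or the allocation fails.
 */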
char *add_xterm_umid(char *base)
{
	char *umid, *title;
	int len;

	umid = get_umid();
	if (*umid == '\0')
		return base;

	len = strlen(base) + strlen(" ()") + strlen(umid) + 1;
	title = kmalloc(len, GFP_KERNEL);
	if (title == NULL) {
		printk(KERN_ERR "Failed to allocate buffer for xterm title\n");
		return base;
	}

	snprintf(title, len, "%s (%s)", base, umid);
	return title;
}