/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 *  Copyright (C) 2011 Samsung Electronics
 *  Donggeun Kim <dg77.kim@samsung.com>
 *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
#include "exynos_tmu_data.h"
38 * struct exynos_tmu_data : A structure to hold the private data of the TMU
40 * @id: identifier of the one instance of the TMU controller.
41 * @pdata: pointer to the tmu platform/configuration data
42 * @base: base address of the single instance of the TMU controller.
43 * @base_common: base address of the common registers of the TMU controller.
44 * @irq: irq number of the TMU controller.
45 * @soc: id of the SOC type.
46 * @irq_work: pointer to the irq work structure.
47 * @lock: lock to implement synchronization.
48 * @clk: pointer to the clock structure.
49 * @temp_error1: fused value of the first point trim.
50 * @temp_error2: fused value of the second point trim.
51 * @reg_conf: pointer to structure to register with core thermal.
53 struct exynos_tmu_data
{
55 struct exynos_tmu_platform_data
*pdata
;
57 void __iomem
*base_common
;
60 struct work_struct irq_work
;
63 u8 temp_error1
, temp_error2
;
64 struct thermal_sensor_conf
*reg_conf
;
68 * TMU treats temperature as a mapped temperature code.
69 * The temperature is converted differently depending on the calibration type.
71 static int temp_to_code(struct exynos_tmu_data
*data
, u8 temp
)
73 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
76 if (pdata
->cal_mode
== HW_MODE
)
79 if (data
->soc
== SOC_ARCH_EXYNOS4210
)
80 /* temp should range between 25 and 125 */
81 if (temp
< 25 || temp
> 125) {
86 switch (pdata
->cal_type
) {
87 case TYPE_TWO_POINT_TRIMMING
:
88 temp_code
= (temp
- pdata
->first_point_trim
) *
89 (data
->temp_error2
- data
->temp_error1
) /
90 (pdata
->second_point_trim
- pdata
->first_point_trim
) +
93 case TYPE_ONE_POINT_TRIMMING
:
94 temp_code
= temp
+ data
->temp_error1
- pdata
->first_point_trim
;
97 temp_code
= temp
+ pdata
->default_temp_offset
;
105 * Calculate a temperature value from a temperature code.
106 * The unit of the temperature is degree Celsius.
108 static int code_to_temp(struct exynos_tmu_data
*data
, u8 temp_code
)
110 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
113 if (pdata
->cal_mode
== HW_MODE
)
116 if (data
->soc
== SOC_ARCH_EXYNOS4210
)
117 /* temp_code should range between 75 and 175 */
118 if (temp_code
< 75 || temp_code
> 175) {
123 switch (pdata
->cal_type
) {
124 case TYPE_TWO_POINT_TRIMMING
:
125 temp
= (temp_code
- data
->temp_error1
) *
126 (pdata
->second_point_trim
- pdata
->first_point_trim
) /
127 (data
->temp_error2
- data
->temp_error1
) +
128 pdata
->first_point_trim
;
130 case TYPE_ONE_POINT_TRIMMING
:
131 temp
= temp_code
- data
->temp_error1
+ pdata
->first_point_trim
;
134 temp
= temp_code
- pdata
->default_temp_offset
;
141 static int exynos_tmu_initialize(struct platform_device
*pdev
)
143 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
144 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
145 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
146 unsigned int status
, trim_info
= 0, con
;
147 unsigned int rising_threshold
= 0, falling_threshold
= 0;
148 int ret
= 0, threshold_code
, i
, trigger_levs
= 0;
150 mutex_lock(&data
->lock
);
151 clk_enable(data
->clk
);
153 if (TMU_SUPPORTS(pdata
, READY_STATUS
)) {
154 status
= readb(data
->base
+ reg
->tmu_status
);
161 if (TMU_SUPPORTS(pdata
, TRIM_RELOAD
))
162 __raw_writel(1, data
->base
+ reg
->triminfo_ctrl
);
164 if (pdata
->cal_mode
== HW_MODE
)
165 goto skip_calib_data
;
167 /* Save trimming info in order to perform calibration */
168 if (data
->soc
== SOC_ARCH_EXYNOS5440
) {
170 * For exynos5440 soc triminfo value is swapped between TMU0 and
171 * TMU2, so the below logic is needed.
175 trim_info
= readl(data
->base
+
176 EXYNOS5440_EFUSE_SWAP_OFFSET
+ reg
->triminfo_data
);
179 trim_info
= readl(data
->base
+ reg
->triminfo_data
);
182 trim_info
= readl(data
->base
-
183 EXYNOS5440_EFUSE_SWAP_OFFSET
+ reg
->triminfo_data
);
186 trim_info
= readl(data
->base
+ reg
->triminfo_data
);
188 data
->temp_error1
= trim_info
& EXYNOS_TMU_TEMP_MASK
;
189 data
->temp_error2
= ((trim_info
>> reg
->triminfo_85_shift
) &
190 EXYNOS_TMU_TEMP_MASK
);
192 if (!data
->temp_error1
||
193 (pdata
->min_efuse_value
> data
->temp_error1
) ||
194 (data
->temp_error1
> pdata
->max_efuse_value
))
195 data
->temp_error1
= pdata
->efuse_value
& EXYNOS_TMU_TEMP_MASK
;
197 if (!data
->temp_error2
)
199 (pdata
->efuse_value
>> reg
->triminfo_85_shift
) &
200 EXYNOS_TMU_TEMP_MASK
;
203 if (pdata
->max_trigger_level
> MAX_THRESHOLD_LEVS
) {
204 dev_err(&pdev
->dev
, "Invalid max trigger level\n");
208 for (i
= 0; i
< pdata
->max_trigger_level
; i
++) {
209 if (!pdata
->trigger_levels
[i
])
212 if ((pdata
->trigger_type
[i
] == HW_TRIP
) &&
213 (!pdata
->trigger_levels
[pdata
->max_trigger_level
- 1])) {
214 dev_err(&pdev
->dev
, "Invalid hw trigger level\n");
219 /* Count trigger levels except the HW trip*/
220 if (!(pdata
->trigger_type
[i
] == HW_TRIP
))
224 if (data
->soc
== SOC_ARCH_EXYNOS4210
) {
225 /* Write temperature code for threshold */
226 threshold_code
= temp_to_code(data
, pdata
->threshold
);
227 if (threshold_code
< 0) {
228 ret
= threshold_code
;
231 writeb(threshold_code
,
232 data
->base
+ reg
->threshold_temp
);
233 for (i
= 0; i
< trigger_levs
; i
++)
234 writeb(pdata
->trigger_levels
[i
], data
->base
+
235 reg
->threshold_th0
+ i
* sizeof(reg
->threshold_th0
));
237 writel(reg
->inten_rise_mask
, data
->base
+ reg
->tmu_intclear
);
239 /* Write temperature code for rising and falling threshold */
241 i
< trigger_levs
&& i
< EXYNOS_MAX_TRIGGER_PER_REG
; i
++) {
242 threshold_code
= temp_to_code(data
,
243 pdata
->trigger_levels
[i
]);
244 if (threshold_code
< 0) {
245 ret
= threshold_code
;
248 rising_threshold
|= threshold_code
<< 8 * i
;
249 if (pdata
->threshold_falling
) {
250 threshold_code
= temp_to_code(data
,
251 pdata
->trigger_levels
[i
] -
252 pdata
->threshold_falling
);
253 if (threshold_code
> 0)
255 threshold_code
<< 8 * i
;
259 writel(rising_threshold
,
260 data
->base
+ reg
->threshold_th0
);
261 writel(falling_threshold
,
262 data
->base
+ reg
->threshold_th1
);
264 writel((reg
->inten_rise_mask
<< reg
->inten_rise_shift
) |
265 (reg
->inten_fall_mask
<< reg
->inten_fall_shift
),
266 data
->base
+ reg
->tmu_intclear
);
268 /* if last threshold limit is also present */
269 i
= pdata
->max_trigger_level
- 1;
270 if (pdata
->trigger_levels
[i
] &&
271 (pdata
->trigger_type
[i
] == HW_TRIP
)) {
272 threshold_code
= temp_to_code(data
,
273 pdata
->trigger_levels
[i
]);
274 if (threshold_code
< 0) {
275 ret
= threshold_code
;
278 if (i
== EXYNOS_MAX_TRIGGER_PER_REG
- 1) {
279 /* 1-4 level to be assigned in th0 reg */
280 rising_threshold
|= threshold_code
<< 8 * i
;
281 writel(rising_threshold
,
282 data
->base
+ reg
->threshold_th0
);
283 } else if (i
== EXYNOS_MAX_TRIGGER_PER_REG
) {
284 /* 5th level to be assigned in th2 reg */
286 threshold_code
<< reg
->threshold_th3_l0_shift
;
287 writel(rising_threshold
,
288 data
->base
+ reg
->threshold_th2
);
290 con
= readl(data
->base
+ reg
->tmu_ctrl
);
291 con
|= (1 << reg
->therm_trip_en_shift
);
292 writel(con
, data
->base
+ reg
->tmu_ctrl
);
295 /*Clear the PMIN in the common TMU register*/
296 if (reg
->tmu_pmin
&& !data
->id
)
297 writel(0, data
->base_common
+ reg
->tmu_pmin
);
299 clk_disable(data
->clk
);
300 mutex_unlock(&data
->lock
);
305 static void exynos_tmu_control(struct platform_device
*pdev
, bool on
)
307 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
308 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
309 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
310 unsigned int con
, interrupt_en
, cal_val
;
312 mutex_lock(&data
->lock
);
313 clk_enable(data
->clk
);
315 con
= readl(data
->base
+ reg
->tmu_ctrl
);
317 if (pdata
->reference_voltage
) {
318 con
&= ~(reg
->buf_vref_sel_mask
<< reg
->buf_vref_sel_shift
);
319 con
|= pdata
->reference_voltage
<< reg
->buf_vref_sel_shift
;
323 con
&= ~(reg
->buf_slope_sel_mask
<< reg
->buf_slope_sel_shift
);
324 con
|= (pdata
->gain
<< reg
->buf_slope_sel_shift
);
327 if (pdata
->noise_cancel_mode
) {
328 con
&= ~(reg
->therm_trip_mode_mask
<<
329 reg
->therm_trip_mode_shift
);
330 con
|= (pdata
->noise_cancel_mode
<< reg
->therm_trip_mode_shift
);
333 if (pdata
->cal_mode
== HW_MODE
) {
334 con
&= ~(reg
->calib_mode_mask
<< reg
->calib_mode_shift
);
336 switch (pdata
->cal_type
) {
337 case TYPE_TWO_POINT_TRIMMING
:
340 case TYPE_ONE_POINT_TRIMMING_85
:
343 case TYPE_ONE_POINT_TRIMMING_25
:
349 dev_err(&pdev
->dev
, "Invalid calibration type, using none\n");
351 con
|= cal_val
<< reg
->calib_mode_shift
;
355 con
|= (1 << reg
->core_en_shift
);
357 pdata
->trigger_enable
[3] << reg
->inten_rise3_shift
|
358 pdata
->trigger_enable
[2] << reg
->inten_rise2_shift
|
359 pdata
->trigger_enable
[1] << reg
->inten_rise1_shift
|
360 pdata
->trigger_enable
[0] << reg
->inten_rise0_shift
;
361 if (TMU_SUPPORTS(pdata
, FALLING_TRIP
))
363 interrupt_en
<< reg
->inten_fall0_shift
;
365 con
&= ~(1 << reg
->core_en_shift
);
366 interrupt_en
= 0; /* Disable all interrupts */
368 writel(interrupt_en
, data
->base
+ reg
->tmu_inten
);
369 writel(con
, data
->base
+ reg
->tmu_ctrl
);
371 clk_disable(data
->clk
);
372 mutex_unlock(&data
->lock
);
375 static int exynos_tmu_read(struct exynos_tmu_data
*data
)
377 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
378 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
382 mutex_lock(&data
->lock
);
383 clk_enable(data
->clk
);
385 temp_code
= readb(data
->base
+ reg
->tmu_cur_temp
);
386 temp
= code_to_temp(data
, temp_code
);
388 clk_disable(data
->clk
);
389 mutex_unlock(&data
->lock
);
394 #ifdef CONFIG_THERMAL_EMULATION
395 static int exynos_tmu_set_emulation(void *drv_data
, unsigned long temp
)
397 struct exynos_tmu_data
*data
= drv_data
;
398 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
399 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
403 if (!TMU_SUPPORTS(pdata
, EMULATION
))
406 if (temp
&& temp
< MCELSIUS
)
409 mutex_lock(&data
->lock
);
410 clk_enable(data
->clk
);
412 val
= readl(data
->base
+ reg
->emul_con
);
417 if (TMU_SUPPORTS(pdata
, EMUL_TIME
)) {
418 val
&= ~(EXYNOS_EMUL_TIME_MASK
<< reg
->emul_time_shift
);
419 val
|= (EXYNOS_EMUL_TIME
<< reg
->emul_time_shift
);
421 val
&= ~(EXYNOS_EMUL_DATA_MASK
<< reg
->emul_temp_shift
);
422 val
|= (temp_to_code(data
, temp
) << reg
->emul_temp_shift
) |
425 val
&= ~EXYNOS_EMUL_ENABLE
;
428 writel(val
, data
->base
+ reg
->emul_con
);
430 clk_disable(data
->clk
);
431 mutex_unlock(&data
->lock
);
437 static int exynos_tmu_set_emulation(void *drv_data
, unsigned long temp
)
439 #endif/*CONFIG_THERMAL_EMULATION*/
441 static void exynos_tmu_work(struct work_struct
*work
)
443 struct exynos_tmu_data
*data
= container_of(work
,
444 struct exynos_tmu_data
, irq_work
);
445 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
446 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
447 unsigned int val_irq
, val_type
;
449 /* Find which sensor generated this interrupt */
450 if (reg
->tmu_irqstatus
) {
451 val_type
= readl(data
->base_common
+ reg
->tmu_irqstatus
);
452 if (!((val_type
>> data
->id
) & 0x1))
456 exynos_report_trigger(data
->reg_conf
);
457 mutex_lock(&data
->lock
);
458 clk_enable(data
->clk
);
460 /* TODO: take action based on particular interrupt */
461 val_irq
= readl(data
->base
+ reg
->tmu_intstat
);
462 /* clear the interrupts */
463 writel(val_irq
, data
->base
+ reg
->tmu_intclear
);
465 clk_disable(data
->clk
);
466 mutex_unlock(&data
->lock
);
468 enable_irq(data
->irq
);
471 static irqreturn_t
exynos_tmu_irq(int irq
, void *id
)
473 struct exynos_tmu_data
*data
= id
;
475 disable_irq_nosync(irq
);
476 schedule_work(&data
->irq_work
);
482 static const struct of_device_id exynos_tmu_match
[] = {
484 .compatible
= "samsung,exynos4210-tmu",
485 .data
= (void *)EXYNOS4210_TMU_DRV_DATA
,
488 .compatible
= "samsung,exynos4412-tmu",
489 .data
= (void *)EXYNOS5250_TMU_DRV_DATA
,
492 .compatible
= "samsung,exynos5250-tmu",
493 .data
= (void *)EXYNOS5250_TMU_DRV_DATA
,
496 .compatible
= "samsung,exynos5440-tmu",
497 .data
= (void *)EXYNOS5440_TMU_DRV_DATA
,
501 MODULE_DEVICE_TABLE(of
, exynos_tmu_match
);
504 static inline struct exynos_tmu_platform_data
*exynos_get_driver_data(
505 struct platform_device
*pdev
, int id
)
508 struct exynos_tmu_init_data
*data_table
;
509 struct exynos_tmu_platform_data
*tmu_data
;
510 if (pdev
->dev
.of_node
) {
511 const struct of_device_id
*match
;
512 match
= of_match_node(exynos_tmu_match
, pdev
->dev
.of_node
);
515 data_table
= (struct exynos_tmu_init_data
*) match
->data
;
516 if (!data_table
|| id
>= data_table
->tmu_count
)
518 tmu_data
= data_table
->tmu_data
;
519 return (struct exynos_tmu_platform_data
*) (tmu_data
+ id
);
525 static int exynos_map_dt_data(struct platform_device
*pdev
)
527 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
528 struct exynos_tmu_platform_data
*pdata
;
534 data
->id
= of_alias_get_id(pdev
->dev
.of_node
, "tmuctrl");
538 data
->irq
= irq_of_parse_and_map(pdev
->dev
.of_node
, 0);
539 if (data
->irq
<= 0) {
540 dev_err(&pdev
->dev
, "failed to get IRQ\n");
544 if (of_address_to_resource(pdev
->dev
.of_node
, 0, &res
)) {
545 dev_err(&pdev
->dev
, "failed to get Resource 0\n");
549 data
->base
= devm_ioremap(&pdev
->dev
, res
.start
, resource_size(&res
));
551 dev_err(&pdev
->dev
, "Failed to ioremap memory\n");
552 return -EADDRNOTAVAIL
;
555 pdata
= exynos_get_driver_data(pdev
, data
->id
);
557 dev_err(&pdev
->dev
, "No platform init data supplied.\n");
562 * Check if the TMU shares some registers and then try to map the
563 * memory of common registers.
565 if (!TMU_SUPPORTS(pdata
, SHARED_MEMORY
))
568 if (of_address_to_resource(pdev
->dev
.of_node
, 1, &res
)) {
569 dev_err(&pdev
->dev
, "failed to get Resource 1\n");
573 data
->base_common
= devm_ioremap(&pdev
->dev
, res
.start
,
574 resource_size(&res
));
576 dev_err(&pdev
->dev
, "Failed to ioremap memory\n");
583 static int exynos_tmu_probe(struct platform_device
*pdev
)
585 struct exynos_tmu_data
*data
;
586 struct exynos_tmu_platform_data
*pdata
;
587 struct thermal_sensor_conf
*sensor_conf
;
590 data
= devm_kzalloc(&pdev
->dev
, sizeof(struct exynos_tmu_data
),
593 dev_err(&pdev
->dev
, "Failed to allocate driver structure\n");
597 platform_set_drvdata(pdev
, data
);
598 mutex_init(&data
->lock
);
600 ret
= exynos_map_dt_data(pdev
);
606 INIT_WORK(&data
->irq_work
, exynos_tmu_work
);
608 data
->clk
= devm_clk_get(&pdev
->dev
, "tmu_apbif");
609 if (IS_ERR(data
->clk
)) {
610 dev_err(&pdev
->dev
, "Failed to get clock\n");
611 return PTR_ERR(data
->clk
);
614 ret
= clk_prepare(data
->clk
);
618 if (pdata
->type
== SOC_ARCH_EXYNOS
||
619 pdata
->type
== SOC_ARCH_EXYNOS4210
||
620 pdata
->type
== SOC_ARCH_EXYNOS5440
)
621 data
->soc
= pdata
->type
;
624 dev_err(&pdev
->dev
, "Platform not supported\n");
628 ret
= exynos_tmu_initialize(pdev
);
630 dev_err(&pdev
->dev
, "Failed to initialize TMU\n");
634 exynos_tmu_control(pdev
, true);
636 /* Allocate a structure to register with the exynos core thermal */
637 sensor_conf
= devm_kzalloc(&pdev
->dev
,
638 sizeof(struct thermal_sensor_conf
), GFP_KERNEL
);
640 dev_err(&pdev
->dev
, "Failed to allocate registration struct\n");
644 sprintf(sensor_conf
->name
, "therm_zone%d", data
->id
);
645 sensor_conf
->read_temperature
= (int (*)(void *))exynos_tmu_read
;
646 sensor_conf
->write_emul_temp
=
647 (int (*)(void *, unsigned long))exynos_tmu_set_emulation
;
648 sensor_conf
->driver_data
= data
;
649 sensor_conf
->trip_data
.trip_count
= pdata
->trigger_enable
[0] +
650 pdata
->trigger_enable
[1] + pdata
->trigger_enable
[2]+
651 pdata
->trigger_enable
[3];
653 for (i
= 0; i
< sensor_conf
->trip_data
.trip_count
; i
++) {
654 sensor_conf
->trip_data
.trip_val
[i
] =
655 pdata
->threshold
+ pdata
->trigger_levels
[i
];
656 sensor_conf
->trip_data
.trip_type
[i
] =
657 pdata
->trigger_type
[i
];
660 sensor_conf
->trip_data
.trigger_falling
= pdata
->threshold_falling
;
662 sensor_conf
->cooling_data
.freq_clip_count
= pdata
->freq_tab_count
;
663 for (i
= 0; i
< pdata
->freq_tab_count
; i
++) {
664 sensor_conf
->cooling_data
.freq_data
[i
].freq_clip_max
=
665 pdata
->freq_tab
[i
].freq_clip_max
;
666 sensor_conf
->cooling_data
.freq_data
[i
].temp_level
=
667 pdata
->freq_tab
[i
].temp_level
;
669 sensor_conf
->dev
= &pdev
->dev
;
670 /* Register the sensor with thermal management interface */
671 ret
= exynos_register_thermal(sensor_conf
);
673 dev_err(&pdev
->dev
, "Failed to register thermal interface\n");
676 data
->reg_conf
= sensor_conf
;
678 ret
= devm_request_irq(&pdev
->dev
, data
->irq
, exynos_tmu_irq
,
679 IRQF_TRIGGER_RISING
| IRQF_SHARED
, dev_name(&pdev
->dev
), data
);
681 dev_err(&pdev
->dev
, "Failed to request irq: %d\n", data
->irq
);
687 clk_unprepare(data
->clk
);
691 static int exynos_tmu_remove(struct platform_device
*pdev
)
693 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
695 exynos_tmu_control(pdev
, false);
697 exynos_unregister_thermal(data
->reg_conf
);
699 clk_unprepare(data
->clk
);
#ifdef CONFIG_PM_SLEEP
/* Power down the TMU core on suspend. */
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

/* Reprogram thresholds/trim (registers are lost) and re-enable on resume. */
static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif
729 static struct platform_driver exynos_tmu_driver
= {
731 .name
= "exynos-tmu",
732 .owner
= THIS_MODULE
,
734 .of_match_table
= of_match_ptr(exynos_tmu_match
),
736 .probe
= exynos_tmu_probe
,
737 .remove
= exynos_tmu_remove
,
740 module_platform_driver(exynos_tmu_driver
);
742 MODULE_DESCRIPTION("EXYNOS TMU Driver");
743 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
744 MODULE_LICENSE("GPL");
745 MODULE_ALIAS("platform:exynos-tmu");