/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 *  Copyright (C) 2011 Samsung Electronics
 *  Donggeun Kim <dg77.kim@samsung.com>
 *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
#include "exynos_tmu_data.h"
39 * struct exynos_tmu_data : A structure to hold the private data of the TMU
41 * @id: identifier of the one instance of the TMU controller.
42 * @pdata: pointer to the tmu platform/configuration data
43 * @base: base address of the single instance of the TMU controller.
44 * @base_common: base address of the common registers of the TMU controller.
45 * @irq: irq number of the TMU controller.
46 * @soc: id of the SOC type.
47 * @irq_work: pointer to the irq work structure.
48 * @lock: lock to implement synchronization.
49 * @clk: pointer to the clock structure.
50 * @temp_error1: fused value of the first point trim.
51 * @temp_error2: fused value of the second point trim.
52 * @regulator: pointer to the TMU regulator structure.
53 * @reg_conf: pointer to structure to register with core thermal.
55 struct exynos_tmu_data
{
57 struct exynos_tmu_platform_data
*pdata
;
59 void __iomem
*base_common
;
62 struct work_struct irq_work
;
65 u8 temp_error1
, temp_error2
;
66 struct regulator
*regulator
;
67 struct thermal_sensor_conf
*reg_conf
;
71 * TMU treats temperature as a mapped temperature code.
72 * The temperature is converted differently depending on the calibration type.
74 static int temp_to_code(struct exynos_tmu_data
*data
, u8 temp
)
76 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
79 if (pdata
->cal_mode
== HW_MODE
)
82 if (data
->soc
== SOC_ARCH_EXYNOS4210
)
83 /* temp should range between 25 and 125 */
84 if (temp
< 25 || temp
> 125) {
89 switch (pdata
->cal_type
) {
90 case TYPE_TWO_POINT_TRIMMING
:
91 temp_code
= (temp
- pdata
->first_point_trim
) *
92 (data
->temp_error2
- data
->temp_error1
) /
93 (pdata
->second_point_trim
- pdata
->first_point_trim
) +
96 case TYPE_ONE_POINT_TRIMMING
:
97 temp_code
= temp
+ data
->temp_error1
- pdata
->first_point_trim
;
100 temp_code
= temp
+ pdata
->default_temp_offset
;
108 * Calculate a temperature value from a temperature code.
109 * The unit of the temperature is degree Celsius.
111 static int code_to_temp(struct exynos_tmu_data
*data
, u8 temp_code
)
113 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
116 if (pdata
->cal_mode
== HW_MODE
)
119 if (data
->soc
== SOC_ARCH_EXYNOS4210
)
120 /* temp_code should range between 75 and 175 */
121 if (temp_code
< 75 || temp_code
> 175) {
126 switch (pdata
->cal_type
) {
127 case TYPE_TWO_POINT_TRIMMING
:
128 temp
= (temp_code
- data
->temp_error1
) *
129 (pdata
->second_point_trim
- pdata
->first_point_trim
) /
130 (data
->temp_error2
- data
->temp_error1
) +
131 pdata
->first_point_trim
;
133 case TYPE_ONE_POINT_TRIMMING
:
134 temp
= temp_code
- data
->temp_error1
+ pdata
->first_point_trim
;
137 temp
= temp_code
- pdata
->default_temp_offset
;
144 static int exynos_tmu_initialize(struct platform_device
*pdev
)
146 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
147 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
148 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
149 unsigned int status
, trim_info
= 0, con
;
150 unsigned int rising_threshold
= 0, falling_threshold
= 0;
151 int ret
= 0, threshold_code
, i
, trigger_levs
= 0;
153 mutex_lock(&data
->lock
);
154 clk_enable(data
->clk
);
156 if (TMU_SUPPORTS(pdata
, READY_STATUS
)) {
157 status
= readb(data
->base
+ reg
->tmu_status
);
164 if (TMU_SUPPORTS(pdata
, TRIM_RELOAD
))
165 __raw_writel(1, data
->base
+ reg
->triminfo_ctrl
);
167 if (pdata
->cal_mode
== HW_MODE
)
168 goto skip_calib_data
;
170 /* Save trimming info in order to perform calibration */
171 if (data
->soc
== SOC_ARCH_EXYNOS5440
) {
173 * For exynos5440 soc triminfo value is swapped between TMU0 and
174 * TMU2, so the below logic is needed.
178 trim_info
= readl(data
->base
+
179 EXYNOS5440_EFUSE_SWAP_OFFSET
+ reg
->triminfo_data
);
182 trim_info
= readl(data
->base
+ reg
->triminfo_data
);
185 trim_info
= readl(data
->base
-
186 EXYNOS5440_EFUSE_SWAP_OFFSET
+ reg
->triminfo_data
);
189 trim_info
= readl(data
->base
+ reg
->triminfo_data
);
191 data
->temp_error1
= trim_info
& EXYNOS_TMU_TEMP_MASK
;
192 data
->temp_error2
= ((trim_info
>> reg
->triminfo_85_shift
) &
193 EXYNOS_TMU_TEMP_MASK
);
195 if (!data
->temp_error1
||
196 (pdata
->min_efuse_value
> data
->temp_error1
) ||
197 (data
->temp_error1
> pdata
->max_efuse_value
))
198 data
->temp_error1
= pdata
->efuse_value
& EXYNOS_TMU_TEMP_MASK
;
200 if (!data
->temp_error2
)
202 (pdata
->efuse_value
>> reg
->triminfo_85_shift
) &
203 EXYNOS_TMU_TEMP_MASK
;
206 if (pdata
->max_trigger_level
> MAX_THRESHOLD_LEVS
) {
207 dev_err(&pdev
->dev
, "Invalid max trigger level\n");
211 for (i
= 0; i
< pdata
->max_trigger_level
; i
++) {
212 if (!pdata
->trigger_levels
[i
])
215 if ((pdata
->trigger_type
[i
] == HW_TRIP
) &&
216 (!pdata
->trigger_levels
[pdata
->max_trigger_level
- 1])) {
217 dev_err(&pdev
->dev
, "Invalid hw trigger level\n");
222 /* Count trigger levels except the HW trip*/
223 if (!(pdata
->trigger_type
[i
] == HW_TRIP
))
227 if (data
->soc
== SOC_ARCH_EXYNOS4210
) {
228 /* Write temperature code for threshold */
229 threshold_code
= temp_to_code(data
, pdata
->threshold
);
230 if (threshold_code
< 0) {
231 ret
= threshold_code
;
234 writeb(threshold_code
,
235 data
->base
+ reg
->threshold_temp
);
236 for (i
= 0; i
< trigger_levs
; i
++)
237 writeb(pdata
->trigger_levels
[i
], data
->base
+
238 reg
->threshold_th0
+ i
* sizeof(reg
->threshold_th0
));
240 writel(reg
->inten_rise_mask
, data
->base
+ reg
->tmu_intclear
);
242 /* Write temperature code for rising and falling threshold */
244 i
< trigger_levs
&& i
< EXYNOS_MAX_TRIGGER_PER_REG
; i
++) {
245 threshold_code
= temp_to_code(data
,
246 pdata
->trigger_levels
[i
]);
247 if (threshold_code
< 0) {
248 ret
= threshold_code
;
251 rising_threshold
|= threshold_code
<< 8 * i
;
252 if (pdata
->threshold_falling
) {
253 threshold_code
= temp_to_code(data
,
254 pdata
->trigger_levels
[i
] -
255 pdata
->threshold_falling
);
256 if (threshold_code
> 0)
258 threshold_code
<< 8 * i
;
262 writel(rising_threshold
,
263 data
->base
+ reg
->threshold_th0
);
264 writel(falling_threshold
,
265 data
->base
+ reg
->threshold_th1
);
267 writel((reg
->inten_rise_mask
<< reg
->inten_rise_shift
) |
268 (reg
->inten_fall_mask
<< reg
->inten_fall_shift
),
269 data
->base
+ reg
->tmu_intclear
);
271 /* if last threshold limit is also present */
272 i
= pdata
->max_trigger_level
- 1;
273 if (pdata
->trigger_levels
[i
] &&
274 (pdata
->trigger_type
[i
] == HW_TRIP
)) {
275 threshold_code
= temp_to_code(data
,
276 pdata
->trigger_levels
[i
]);
277 if (threshold_code
< 0) {
278 ret
= threshold_code
;
281 if (i
== EXYNOS_MAX_TRIGGER_PER_REG
- 1) {
282 /* 1-4 level to be assigned in th0 reg */
283 rising_threshold
|= threshold_code
<< 8 * i
;
284 writel(rising_threshold
,
285 data
->base
+ reg
->threshold_th0
);
286 } else if (i
== EXYNOS_MAX_TRIGGER_PER_REG
) {
287 /* 5th level to be assigned in th2 reg */
289 threshold_code
<< reg
->threshold_th3_l0_shift
;
290 writel(rising_threshold
,
291 data
->base
+ reg
->threshold_th2
);
293 con
= readl(data
->base
+ reg
->tmu_ctrl
);
294 con
|= (1 << reg
->therm_trip_en_shift
);
295 writel(con
, data
->base
+ reg
->tmu_ctrl
);
298 /*Clear the PMIN in the common TMU register*/
299 if (reg
->tmu_pmin
&& !data
->id
)
300 writel(0, data
->base_common
+ reg
->tmu_pmin
);
302 clk_disable(data
->clk
);
303 mutex_unlock(&data
->lock
);
308 static void exynos_tmu_control(struct platform_device
*pdev
, bool on
)
310 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
311 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
312 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
313 unsigned int con
, interrupt_en
, cal_val
;
315 mutex_lock(&data
->lock
);
316 clk_enable(data
->clk
);
318 con
= readl(data
->base
+ reg
->tmu_ctrl
);
321 con
|= (pdata
->test_mux
<< reg
->test_mux_addr_shift
);
323 if (pdata
->reference_voltage
) {
324 con
&= ~(reg
->buf_vref_sel_mask
<< reg
->buf_vref_sel_shift
);
325 con
|= pdata
->reference_voltage
<< reg
->buf_vref_sel_shift
;
329 con
&= ~(reg
->buf_slope_sel_mask
<< reg
->buf_slope_sel_shift
);
330 con
|= (pdata
->gain
<< reg
->buf_slope_sel_shift
);
333 if (pdata
->noise_cancel_mode
) {
334 con
&= ~(reg
->therm_trip_mode_mask
<<
335 reg
->therm_trip_mode_shift
);
336 con
|= (pdata
->noise_cancel_mode
<< reg
->therm_trip_mode_shift
);
339 if (pdata
->cal_mode
== HW_MODE
) {
340 con
&= ~(reg
->calib_mode_mask
<< reg
->calib_mode_shift
);
342 switch (pdata
->cal_type
) {
343 case TYPE_TWO_POINT_TRIMMING
:
346 case TYPE_ONE_POINT_TRIMMING_85
:
349 case TYPE_ONE_POINT_TRIMMING_25
:
355 dev_err(&pdev
->dev
, "Invalid calibration type, using none\n");
357 con
|= cal_val
<< reg
->calib_mode_shift
;
361 con
|= (1 << reg
->core_en_shift
);
363 pdata
->trigger_enable
[3] << reg
->inten_rise3_shift
|
364 pdata
->trigger_enable
[2] << reg
->inten_rise2_shift
|
365 pdata
->trigger_enable
[1] << reg
->inten_rise1_shift
|
366 pdata
->trigger_enable
[0] << reg
->inten_rise0_shift
;
367 if (TMU_SUPPORTS(pdata
, FALLING_TRIP
))
369 interrupt_en
<< reg
->inten_fall0_shift
;
371 con
&= ~(1 << reg
->core_en_shift
);
372 interrupt_en
= 0; /* Disable all interrupts */
374 writel(interrupt_en
, data
->base
+ reg
->tmu_inten
);
375 writel(con
, data
->base
+ reg
->tmu_ctrl
);
377 clk_disable(data
->clk
);
378 mutex_unlock(&data
->lock
);
381 static int exynos_tmu_read(struct exynos_tmu_data
*data
)
383 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
384 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
388 mutex_lock(&data
->lock
);
389 clk_enable(data
->clk
);
391 temp_code
= readb(data
->base
+ reg
->tmu_cur_temp
);
392 temp
= code_to_temp(data
, temp_code
);
394 clk_disable(data
->clk
);
395 mutex_unlock(&data
->lock
);
400 #ifdef CONFIG_THERMAL_EMULATION
401 static int exynos_tmu_set_emulation(void *drv_data
, unsigned long temp
)
403 struct exynos_tmu_data
*data
= drv_data
;
404 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
405 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
409 if (!TMU_SUPPORTS(pdata
, EMULATION
))
412 if (temp
&& temp
< MCELSIUS
)
415 mutex_lock(&data
->lock
);
416 clk_enable(data
->clk
);
418 val
= readl(data
->base
+ reg
->emul_con
);
423 if (TMU_SUPPORTS(pdata
, EMUL_TIME
)) {
424 val
&= ~(EXYNOS_EMUL_TIME_MASK
<< reg
->emul_time_shift
);
425 val
|= (EXYNOS_EMUL_TIME
<< reg
->emul_time_shift
);
427 val
&= ~(EXYNOS_EMUL_DATA_MASK
<< reg
->emul_temp_shift
);
428 val
|= (temp_to_code(data
, temp
) << reg
->emul_temp_shift
) |
431 val
&= ~EXYNOS_EMUL_ENABLE
;
434 writel(val
, data
->base
+ reg
->emul_con
);
436 clk_disable(data
->clk
);
437 mutex_unlock(&data
->lock
);
443 static int exynos_tmu_set_emulation(void *drv_data
, unsigned long temp
)
445 #endif/*CONFIG_THERMAL_EMULATION*/
447 static void exynos_tmu_work(struct work_struct
*work
)
449 struct exynos_tmu_data
*data
= container_of(work
,
450 struct exynos_tmu_data
, irq_work
);
451 struct exynos_tmu_platform_data
*pdata
= data
->pdata
;
452 const struct exynos_tmu_registers
*reg
= pdata
->registers
;
453 unsigned int val_irq
, val_type
;
455 /* Find which sensor generated this interrupt */
456 if (reg
->tmu_irqstatus
) {
457 val_type
= readl(data
->base_common
+ reg
->tmu_irqstatus
);
458 if (!((val_type
>> data
->id
) & 0x1))
462 exynos_report_trigger(data
->reg_conf
);
463 mutex_lock(&data
->lock
);
464 clk_enable(data
->clk
);
466 /* TODO: take action based on particular interrupt */
467 val_irq
= readl(data
->base
+ reg
->tmu_intstat
);
468 /* clear the interrupts */
469 writel(val_irq
, data
->base
+ reg
->tmu_intclear
);
471 clk_disable(data
->clk
);
472 mutex_unlock(&data
->lock
);
474 enable_irq(data
->irq
);
477 static irqreturn_t
exynos_tmu_irq(int irq
, void *id
)
479 struct exynos_tmu_data
*data
= id
;
481 disable_irq_nosync(irq
);
482 schedule_work(&data
->irq_work
);
487 static const struct of_device_id exynos_tmu_match
[] = {
489 .compatible
= "samsung,exynos4210-tmu",
490 .data
= (void *)EXYNOS4210_TMU_DRV_DATA
,
493 .compatible
= "samsung,exynos4412-tmu",
494 .data
= (void *)EXYNOS4412_TMU_DRV_DATA
,
497 .compatible
= "samsung,exynos5250-tmu",
498 .data
= (void *)EXYNOS5250_TMU_DRV_DATA
,
501 .compatible
= "samsung,exynos5440-tmu",
502 .data
= (void *)EXYNOS5440_TMU_DRV_DATA
,
506 MODULE_DEVICE_TABLE(of
, exynos_tmu_match
);
508 static inline struct exynos_tmu_platform_data
*exynos_get_driver_data(
509 struct platform_device
*pdev
, int id
)
511 struct exynos_tmu_init_data
*data_table
;
512 struct exynos_tmu_platform_data
*tmu_data
;
513 const struct of_device_id
*match
;
515 match
= of_match_node(exynos_tmu_match
, pdev
->dev
.of_node
);
518 data_table
= (struct exynos_tmu_init_data
*) match
->data
;
519 if (!data_table
|| id
>= data_table
->tmu_count
)
521 tmu_data
= data_table
->tmu_data
;
522 return (struct exynos_tmu_platform_data
*) (tmu_data
+ id
);
525 static int exynos_map_dt_data(struct platform_device
*pdev
)
527 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
528 struct exynos_tmu_platform_data
*pdata
;
532 if (!data
|| !pdev
->dev
.of_node
)
536 * Try enabling the regulator if found
537 * TODO: Add regulator as an SOC feature, so that regulator enable
538 * is a compulsory call.
540 data
->regulator
= devm_regulator_get(&pdev
->dev
, "vtmu");
541 if (!IS_ERR(data
->regulator
)) {
542 ret
= regulator_enable(data
->regulator
);
544 dev_err(&pdev
->dev
, "failed to enable vtmu\n");
548 dev_info(&pdev
->dev
, "Regulator node (vtmu) not found\n");
551 data
->id
= of_alias_get_id(pdev
->dev
.of_node
, "tmuctrl");
555 data
->irq
= irq_of_parse_and_map(pdev
->dev
.of_node
, 0);
556 if (data
->irq
<= 0) {
557 dev_err(&pdev
->dev
, "failed to get IRQ\n");
561 if (of_address_to_resource(pdev
->dev
.of_node
, 0, &res
)) {
562 dev_err(&pdev
->dev
, "failed to get Resource 0\n");
566 data
->base
= devm_ioremap(&pdev
->dev
, res
.start
, resource_size(&res
));
568 dev_err(&pdev
->dev
, "Failed to ioremap memory\n");
569 return -EADDRNOTAVAIL
;
572 pdata
= exynos_get_driver_data(pdev
, data
->id
);
574 dev_err(&pdev
->dev
, "No platform init data supplied.\n");
579 * Check if the TMU shares some registers and then try to map the
580 * memory of common registers.
582 if (!TMU_SUPPORTS(pdata
, SHARED_MEMORY
))
585 if (of_address_to_resource(pdev
->dev
.of_node
, 1, &res
)) {
586 dev_err(&pdev
->dev
, "failed to get Resource 1\n");
590 data
->base_common
= devm_ioremap(&pdev
->dev
, res
.start
,
591 resource_size(&res
));
592 if (!data
->base_common
) {
593 dev_err(&pdev
->dev
, "Failed to ioremap memory\n");
600 static int exynos_tmu_probe(struct platform_device
*pdev
)
602 struct exynos_tmu_data
*data
;
603 struct exynos_tmu_platform_data
*pdata
;
604 struct thermal_sensor_conf
*sensor_conf
;
607 data
= devm_kzalloc(&pdev
->dev
, sizeof(struct exynos_tmu_data
),
610 dev_err(&pdev
->dev
, "Failed to allocate driver structure\n");
614 platform_set_drvdata(pdev
, data
);
615 mutex_init(&data
->lock
);
617 ret
= exynos_map_dt_data(pdev
);
623 INIT_WORK(&data
->irq_work
, exynos_tmu_work
);
625 data
->clk
= devm_clk_get(&pdev
->dev
, "tmu_apbif");
626 if (IS_ERR(data
->clk
)) {
627 dev_err(&pdev
->dev
, "Failed to get clock\n");
628 return PTR_ERR(data
->clk
);
631 ret
= clk_prepare(data
->clk
);
635 if (pdata
->type
== SOC_ARCH_EXYNOS4210
||
636 pdata
->type
== SOC_ARCH_EXYNOS4412
||
637 pdata
->type
== SOC_ARCH_EXYNOS5250
||
638 pdata
->type
== SOC_ARCH_EXYNOS5440
)
639 data
->soc
= pdata
->type
;
642 dev_err(&pdev
->dev
, "Platform not supported\n");
646 ret
= exynos_tmu_initialize(pdev
);
648 dev_err(&pdev
->dev
, "Failed to initialize TMU\n");
652 exynos_tmu_control(pdev
, true);
654 /* Allocate a structure to register with the exynos core thermal */
655 sensor_conf
= devm_kzalloc(&pdev
->dev
,
656 sizeof(struct thermal_sensor_conf
), GFP_KERNEL
);
658 dev_err(&pdev
->dev
, "Failed to allocate registration struct\n");
662 sprintf(sensor_conf
->name
, "therm_zone%d", data
->id
);
663 sensor_conf
->read_temperature
= (int (*)(void *))exynos_tmu_read
;
664 sensor_conf
->write_emul_temp
=
665 (int (*)(void *, unsigned long))exynos_tmu_set_emulation
;
666 sensor_conf
->driver_data
= data
;
667 sensor_conf
->trip_data
.trip_count
= pdata
->trigger_enable
[0] +
668 pdata
->trigger_enable
[1] + pdata
->trigger_enable
[2]+
669 pdata
->trigger_enable
[3];
671 for (i
= 0; i
< sensor_conf
->trip_data
.trip_count
; i
++) {
672 sensor_conf
->trip_data
.trip_val
[i
] =
673 pdata
->threshold
+ pdata
->trigger_levels
[i
];
674 sensor_conf
->trip_data
.trip_type
[i
] =
675 pdata
->trigger_type
[i
];
678 sensor_conf
->trip_data
.trigger_falling
= pdata
->threshold_falling
;
680 sensor_conf
->cooling_data
.freq_clip_count
= pdata
->freq_tab_count
;
681 for (i
= 0; i
< pdata
->freq_tab_count
; i
++) {
682 sensor_conf
->cooling_data
.freq_data
[i
].freq_clip_max
=
683 pdata
->freq_tab
[i
].freq_clip_max
;
684 sensor_conf
->cooling_data
.freq_data
[i
].temp_level
=
685 pdata
->freq_tab
[i
].temp_level
;
687 sensor_conf
->dev
= &pdev
->dev
;
688 /* Register the sensor with thermal management interface */
689 ret
= exynos_register_thermal(sensor_conf
);
691 dev_err(&pdev
->dev
, "Failed to register thermal interface\n");
694 data
->reg_conf
= sensor_conf
;
696 ret
= devm_request_irq(&pdev
->dev
, data
->irq
, exynos_tmu_irq
,
697 IRQF_TRIGGER_RISING
| IRQF_SHARED
, dev_name(&pdev
->dev
), data
);
699 dev_err(&pdev
->dev
, "Failed to request irq: %d\n", data
->irq
);
705 clk_unprepare(data
->clk
);
709 static int exynos_tmu_remove(struct platform_device
*pdev
)
711 struct exynos_tmu_data
*data
= platform_get_drvdata(pdev
);
713 exynos_tmu_control(pdev
, false);
715 exynos_unregister_thermal(data
->reg_conf
);
717 clk_unprepare(data
->clk
);
719 if (!IS_ERR(data
->regulator
))
720 regulator_disable(data
->regulator
);
725 #ifdef CONFIG_PM_SLEEP
726 static int exynos_tmu_suspend(struct device
*dev
)
728 exynos_tmu_control(to_platform_device(dev
), false);
733 static int exynos_tmu_resume(struct device
*dev
)
735 struct platform_device
*pdev
= to_platform_device(dev
);
737 exynos_tmu_initialize(pdev
);
738 exynos_tmu_control(pdev
, true);
743 static SIMPLE_DEV_PM_OPS(exynos_tmu_pm
,
744 exynos_tmu_suspend
, exynos_tmu_resume
);
745 #define EXYNOS_TMU_PM (&exynos_tmu_pm)
747 #define EXYNOS_TMU_PM NULL
750 static struct platform_driver exynos_tmu_driver
= {
752 .name
= "exynos-tmu",
753 .owner
= THIS_MODULE
,
755 .of_match_table
= exynos_tmu_match
,
757 .probe
= exynos_tmu_probe
,
758 .remove
= exynos_tmu_remove
,
761 module_platform_driver(exynos_tmu_driver
);
763 MODULE_DESCRIPTION("EXYNOS TMU Driver");
764 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
765 MODULE_LICENSE("GPL");
766 MODULE_ALIAS("platform:exynos-tmu");