drivers/gpu/drm/amd/powerplay/amd_powerplay.c (Linux 4.19-rc7)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"

static const struct amd_pm_funcs pp_dpm_funcs;
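
/*
 * Allocate the pp_hwmgr instance for this device and hook it up to
 * adev->powerplay (the handle plus the pp_dpm_funcs dispatch table defined
 * at the bottom of this file). DPM is only enabled when the amdgpu_dpm
 * module parameter is set and the device is not an SR-IOV virtual function.
 */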
static int amd_powerplay_create(struct amdgpu_device *adev)
{
        struct pp_hwmgr *hwmgr;

        if (adev == NULL)
                return -EINVAL;

        hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
        if (hwmgr == NULL)
                return -ENOMEM;

        hwmgr->adev = adev;
        hwmgr->not_vf = !amdgpu_sriov_vf(adev);
        hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false;
        hwmgr->device = amdgpu_cgs_create_device(adev);
        mutex_init(&hwmgr->smu_lock);
        hwmgr->chip_family = adev->family;
        hwmgr->chip_id = adev->asic_type;
        hwmgr->feature_mask = adev->powerplay.pp_feature;
        hwmgr->display_config = &adev->pm.pm_display_cfg;
        adev->powerplay.pp_handle = hwmgr;
        adev->powerplay.pp_funcs = &pp_dpm_funcs;
        return 0;
}

static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        kfree(hwmgr->hardcode_pp_table);
        hwmgr->hardcode_pp_table = NULL;

        kfree(hwmgr);
        hwmgr = NULL;
}

static int pp_early_init(void *handle)
{
        int ret;
        struct amdgpu_device *adev = handle;

        ret = amd_powerplay_create(adev);

        if (ret != 0)
                return ret;

        ret = hwmgr_early_init(adev->powerplay.pp_handle);
        if (ret)
                return -EINVAL;

        return 0;
}

static int pp_sw_init(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        int ret = 0;

        ret = hwmgr_sw_init(hwmgr);

        pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");

        return ret;
}

static int pp_sw_fini(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        hwmgr_sw_fini(hwmgr);

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
                release_firmware(adev->pm.fw);
                adev->pm.fw = NULL;
                amdgpu_ucode_fini_bo(adev);
        }

        return 0;
}

static int pp_hw_init(void *handle)
{
        int ret = 0;
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
                amdgpu_ucode_init_bo(adev);

        ret = hwmgr_hw_init(hwmgr);

        if (ret)
                pr_err("powerplay hw init failed\n");

        return ret;
}

static int pp_hw_fini(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        hwmgr_hw_fini(hwmgr);

        return 0;
}
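
/*
 * Reserve a GTT buffer of adev->pm.smu_prv_buffer_size bytes as SMU private
 * memory and pass its CPU and GPU addresses to the SMU through the hwmgr
 * notify_cac_buffer_info callback. On failure the buffer is freed again and
 * only an error is logged; this is not fatal for driver load.
 */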
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
        int r = -EINVAL;
        void *cpu_ptr = NULL;
        uint64_t gpu_addr;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
                                    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                    &adev->pm.smu_prv_buffer,
                                    &gpu_addr,
                                    &cpu_ptr)) {
                DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
                return;
        }

        if (hwmgr->hwmgr_func->notify_cac_buffer_info)
                r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
                                        lower_32_bits((unsigned long)cpu_ptr),
                                        upper_32_bits((unsigned long)cpu_ptr),
                                        lower_32_bits(gpu_addr),
                                        upper_32_bits(gpu_addr),
                                        adev->pm.smu_prv_buffer_size);

        if (r) {
                amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
                adev->pm.smu_prv_buffer = NULL;
                DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
        }
}
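
/*
 * Late init: let the hwmgr run its COMPLETE_INIT task now that the other IP
 * blocks are up, then reserve the optional SMU private buffer if a size was
 * requested.
 */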
static int pp_late_init(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        if (hwmgr && hwmgr->pm_en) {
                mutex_lock(&hwmgr->smu_lock);
                hwmgr_handle_task(hwmgr,
                                  AMD_PP_TASK_COMPLETE_INIT, NULL);
                mutex_unlock(&hwmgr->smu_lock);
        }
        if (adev->pm.smu_prv_buffer_size != 0)
                pp_reserve_vram_for_smu(adev);

        return 0;
}

static void pp_late_fini(void *handle)
{
        struct amdgpu_device *adev = handle;

        if (adev->pm.smu_prv_buffer)
                amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
        amd_powerplay_destroy(adev);
}

static bool pp_is_idle(void *handle)
{
        return false;
}

static int pp_wait_for_idle(void *handle)
{
        return 0;
}

static int pp_sw_reset(void *handle)
{
        return 0;
}

static int pp_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

static int pp_suspend(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        return hwmgr_suspend(hwmgr);
}

static int pp_resume(void *handle)
{
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

        return hwmgr_resume(hwmgr);
}

static int pp_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static const struct amd_ip_funcs pp_ip_funcs = {
        .name = "powerplay",
        .early_init = pp_early_init,
        .late_init = pp_late_init,
        .sw_init = pp_sw_init,
        .sw_fini = pp_sw_fini,
        .hw_init = pp_hw_init,
        .hw_fini = pp_hw_fini,
        .late_fini = pp_late_fini,
        .suspend = pp_suspend,
        .resume = pp_resume,
        .is_idle = pp_is_idle,
        .wait_for_idle = pp_wait_for_idle,
        .soft_reset = pp_sw_reset,
        .set_clockgating_state = pp_set_clockgating_state,
        .set_powergating_state = pp_set_powergating_state,
};

const struct amdgpu_ip_block_version pp_smu_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SMC,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &pp_ip_funcs,
};

static int pp_dpm_load_fw(void *handle)
{
        return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
        return 0;
}

static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
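
/*
 * Switching into one of the PROFILE_* forced levels ("UMD pstate") saves the
 * current DPM level and ungates GFX clock/power gating so userspace profiling
 * tools see stable clocks; leaving profile mode restores the saved level and
 * re-enables gating.
 */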
static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
                                 enum amd_dpm_forced_level *level)
{
        uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                        AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

        if (!(hwmgr->dpm_level & profile_mode_mask)) {
                /* enter umd pstate, save current level, disable gfx cg */
                if (*level & profile_mode_mask) {
                        hwmgr->saved_dpm_level = hwmgr->dpm_level;
                        hwmgr->en_umd_pstate = true;
                        amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
                                                AMD_IP_BLOCK_TYPE_GFX,
                                                AMD_CG_STATE_UNGATE);
                        amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                                AMD_IP_BLOCK_TYPE_GFX,
                                                AMD_PG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg */
                if (!(*level & profile_mode_mask)) {
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = hwmgr->saved_dpm_level;
                        hwmgr->en_umd_pstate = false;
                        amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
                                                AMD_IP_BLOCK_TYPE_GFX,
                                                AMD_CG_STATE_GATE);
                        amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                                AMD_IP_BLOCK_TYPE_GFX,
                                                AMD_PG_STATE_GATE);
                }
        }
}
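
/*
 * Apply a new forced performance level: handle UMD-pstate enter/exit first,
 * record the requested level and ask the hwmgr to readjust the power state,
 * all under smu_lock.
 */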
static int pp_dpm_force_performance_level(void *handle,
                                          enum amd_dpm_forced_level level)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (level == hwmgr->dpm_level)
                return 0;

        mutex_lock(&hwmgr->smu_lock);
        pp_dpm_en_umd_pstate(hwmgr, &level);
        hwmgr->request_dpm_level = level;
        hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
        mutex_unlock(&hwmgr->smu_lock);

        return 0;
}

static enum amd_dpm_forced_level pp_dpm_get_performance_level(
                                                                void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        enum amd_dpm_forced_level level;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        level = hwmgr->dpm_level;
        mutex_unlock(&hwmgr->smu_lock);
        return level;
}

static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
        struct pp_hwmgr *hwmgr = handle;
        uint32_t clk = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return 0;

        if (hwmgr->hwmgr_func->get_sclk == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
        mutex_unlock(&hwmgr->smu_lock);
        return clk;
}

static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
        struct pp_hwmgr *hwmgr = handle;
        uint32_t clk = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return 0;

        if (hwmgr->hwmgr_func->get_mclk == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
        mutex_unlock(&hwmgr->smu_lock);
        return clk;
}

static void pp_dpm_powergate_vce(void *handle, bool gate)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return;

        if (hwmgr->hwmgr_func->powergate_vce == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return;
        }
        mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
        mutex_unlock(&hwmgr->smu_lock);
}

static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return;

        if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return;
        }
        mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
        mutex_unlock(&hwmgr->smu_lock);
}
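
/*
 * Forward an AMD_PP_TASK_* request, plus an optional user power-state hint,
 * to the hwmgr task handler under smu_lock.
 */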
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
                enum amd_pm_state_type *user_state)
{
        int ret = 0;
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr_handle_task(hwmgr, task_id, user_state);
        mutex_unlock(&hwmgr->smu_lock);

        return ret;
}

static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        struct pp_power_state *state;
        enum amd_pm_state_type pm_type;

        if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        state = hwmgr->current_ps;

        switch (state->classification.ui_label) {
        case PP_StateUILabel_Battery:
                pm_type = POWER_STATE_TYPE_BATTERY;
                break;
        case PP_StateUILabel_Balanced:
                pm_type = POWER_STATE_TYPE_BALANCED;
                break;
        case PP_StateUILabel_Performance:
                pm_type = POWER_STATE_TYPE_PERFORMANCE;
                break;
        default:
                if (state->classification.flags & PP_StateClassificationFlag_Boot)
                        pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
                else
                        pm_type = POWER_STATE_TYPE_DEFAULT;
                break;
        }
        mutex_unlock(&hwmgr->smu_lock);

        return pm_type;
}

static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return;

        if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return;
        }
        mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
        mutex_unlock(&hwmgr->smu_lock);
}

static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        uint32_t mode = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return 0;

        if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
        mutex_unlock(&hwmgr->smu_lock);
        return mode;
}

static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_get_pp_num_states(void *handle,
                struct pp_states_info *data)
{
        struct pp_hwmgr *hwmgr = handle;
        int i;

        memset(data, 0, sizeof(*data));

        if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        data->nums = hwmgr->num_ps;

        for (i = 0; i < hwmgr->num_ps; i++) {
                struct pp_power_state *state = (struct pp_power_state *)
                                ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
                switch (state->classification.ui_label) {
                case PP_StateUILabel_Battery:
                        data->states[i] = POWER_STATE_TYPE_BATTERY;
                        break;
                case PP_StateUILabel_Balanced:
                        data->states[i] = POWER_STATE_TYPE_BALANCED;
                        break;
                case PP_StateUILabel_Performance:
                        data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
                        break;
                default:
                        if (state->classification.flags & PP_StateClassificationFlag_Boot)
                                data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
                        else
                                data->states[i] = POWER_STATE_TYPE_DEFAULT;
                }
        }
        mutex_unlock(&hwmgr->smu_lock);
        return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
        struct pp_hwmgr *hwmgr = handle;
        int size = 0;

        if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        *table = (char *)hwmgr->soft_pp_table;
        size = hwmgr->soft_pp_table_size;
        mutex_unlock(&hwmgr->smu_lock);
        return size;
}
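
/*
 * Soft reset of the powerplay stack: tear down and re-initialize the hwmgr
 * hardware state, then rerun the COMPLETE_INIT task. Used after the pp table
 * has been replaced from userspace.
 */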
static int amd_powerplay_reset(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret;

        ret = hwmgr_hw_fini(hwmgr);
        if (ret)
                return ret;

        ret = hwmgr_hw_init(hwmgr);
        if (ret)
                return ret;

        return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}
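
/*
 * Install a user-supplied soft pp table: keep a writable copy in
 * hardcode_pp_table, overlay the new bytes, point soft_pp_table at it and
 * reset powerplay so the new table takes effect. AVFS is disabled afterwards
 * when the backend provides an avfs_control hook.
 */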
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = -ENOMEM;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        if (!hwmgr->hardcode_pp_table) {
                hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
                                                   hwmgr->soft_pp_table_size,
                                                   GFP_KERNEL);
                if (!hwmgr->hardcode_pp_table)
                        goto err;
        }

        memcpy(hwmgr->hardcode_pp_table, buf, size);

        hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

        ret = amd_powerplay_reset(handle);
        if (ret)
                goto err;

        if (hwmgr->hwmgr_func->avfs_control) {
                ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
                if (ret)
                        goto err;
        }
        mutex_unlock(&hwmgr->smu_lock);
        return 0;
err:
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_force_clock_level(void *handle,
                enum pp_clock_type type, uint32_t mask)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->force_clock_level == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        mutex_lock(&hwmgr->smu_lock);
        if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
                ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
        else
                ret = -EINVAL;
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_print_clock_levels(void *handle,
                enum pp_clock_type type, char *buf)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_get_sclk_od(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_get_mclk_od(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
        mutex_lock(&hwmgr->smu_lock);
        ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_read_sensor(void *handle, int idx,
                              void *value, int *size)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en || !value)
                return -EINVAL;

        switch (idx) {
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
                *((uint32_t *)value) = hwmgr->pstate_sclk;
                return 0;
        case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
                *((uint32_t *)value) = hwmgr->pstate_mclk;
                return 0;
        default:
                mutex_lock(&hwmgr->smu_lock);
                ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
                mutex_unlock(&hwmgr->smu_lock);
                return ret;
        }
}

static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return NULL;

        if (idx < hwmgr->num_vce_state_tables)
                return &hwmgr->vce_states[idx];
        return NULL;
}

static int pp_get_power_profile_mode(void *handle, char *buf)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en || !buf)
                return -EINVAL;

        if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return snprintf(buf, PAGE_SIZE, "\n");
        }

        return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = -EINVAL;

        if (!hwmgr || !hwmgr->pm_en)
                return ret;

        if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return ret;
        }
        mutex_lock(&hwmgr->smu_lock);
        if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
                ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return -EINVAL;
        }

        return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
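
/*
 * Enable or disable one of the predefined SMC power profiles. The profile is
 * tracked in workload_mask by priority; the highest-priority profile still
 * enabled is handed to set_power_profile_mode unless the DPM level has been
 * forced to manual.
 */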
static int pp_dpm_switch_power_profile(void *handle,
                enum PP_SMC_POWER_PROFILE type, bool en)
{
        struct pp_hwmgr *hwmgr = handle;
        long workload;
        uint32_t index;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return -EINVAL;
        }

        if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        if (!en) {
                hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
                index = fls(hwmgr->workload_mask);
                index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
                workload = hwmgr->workload_setting[index];
        } else {
                hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
                index = fls(hwmgr->workload_mask);
                index = index <= Workload_Policy_Max ? index - 1 : 0;
                workload = hwmgr->workload_setting[index];
        }

        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
                hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
        mutex_unlock(&hwmgr->smu_lock);

        return 0;
}
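
/*
 * Set the SMU power limit. A value of 0 restores the default limit; requests
 * above the default limit are rejected.
 */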
static int pp_set_power_limit(void *handle, uint32_t limit)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->set_power_limit == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return -EINVAL;
        }

        if (limit == 0)
                limit = hwmgr->default_power_limit;

        if (limit > hwmgr->default_power_limit)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
        hwmgr->power_limit = limit;
        mutex_unlock(&hwmgr->smu_lock);
        return 0;
}

static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en || !limit)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        if (default_limit)
                *limit = hwmgr->default_power_limit;
        else
                *limit = hwmgr->power_limit;

        mutex_unlock(&hwmgr->smu_lock);

        return 0;
}

static int pp_display_configuration_change(void *handle,
        const struct amd_pp_display_configuration *display_config)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        phm_store_dal_configuration_data(hwmgr, display_config);
        mutex_unlock(&hwmgr->smu_lock);
        return 0;
}

static int pp_get_display_power_level(void *handle,
                struct amd_pp_simple_clock_info *output)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en || !output)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = phm_get_dal_power_level(hwmgr, output);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}
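
/*
 * Report the engine/memory clock ranges and bus bandwidth of the current
 * power state to the display code, using the power-containment numbers when
 * that platform cap is enabled and the activity-based ones otherwise.
 */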
static int pp_get_current_clocks(void *handle,
                struct amd_pp_clock_info *clocks)
{
        struct amd_pp_simple_clock_info simple_clocks = { 0 };
        struct pp_clock_info hw_clocks;
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        phm_get_dal_power_level(hwmgr, &simple_clocks);

        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_PowerContainment))
                ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
                                &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
        else
                ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
                                &hw_clocks, PHM_PerformanceLevelDesignation_Activity);

        if (ret) {
                pr_info("Error in phm_get_clock_info\n");
                mutex_unlock(&hwmgr->smu_lock);
                return -EINVAL;
        }

        clocks->min_engine_clock = hw_clocks.min_eng_clk;
        clocks->max_engine_clock = hw_clocks.max_eng_clk;
        clocks->min_memory_clock = hw_clocks.min_mem_clk;
        clocks->max_memory_clock = hw_clocks.max_mem_clk;
        clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
        clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

        clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
        clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

        if (simple_clocks.level == 0)
                clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
        else
                clocks->max_clocks_state = simple_clocks.level;

        if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
                clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
                clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
        }
        mutex_unlock(&hwmgr->smu_lock);
        return 0;
}

static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (clocks == NULL)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = phm_get_clock_by_type(hwmgr, type, clocks);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_get_clock_by_type_with_latency(void *handle,
                enum amd_pp_clock_type type,
                struct pp_clock_levels_with_latency *clocks)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en || !clocks)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_get_clock_by_type_with_voltage(void *handle,
                enum amd_pp_clock_type type,
                struct pp_clock_levels_with_voltage *clocks)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en || !clocks)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);

        ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);

        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_set_watermarks_for_clocks_ranges(void *handle,
                void *clock_ranges)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
                        clock_ranges);
        mutex_unlock(&hwmgr->smu_lock);

        return ret;
}

static int pp_display_clock_voltage_request(void *handle,
                struct pp_display_clock_request *clock)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en || !clock)
                return -EINVAL;

        mutex_lock(&hwmgr->smu_lock);
        ret = phm_display_clock_voltage_request(hwmgr, clock);
        mutex_unlock(&hwmgr->smu_lock);

        return ret;
}

static int pp_get_display_mode_validation_clocks(void *handle,
                struct amd_pp_simple_clock_info *clocks)
{
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;

        if (!hwmgr || !hwmgr->pm_en || !clocks)
                return -EINVAL;

        clocks->level = PP_DAL_POWERLEVEL_7;

        mutex_lock(&hwmgr->smu_lock);

        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
                ret = phm_get_max_high_clocks(hwmgr, clocks);

        mutex_unlock(&hwmgr->smu_lock);
        return ret;
}

static int pp_dpm_powergate_mmhub(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
}

static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return 0;

        if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }

        return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}
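
/*
 * Route a powergating request from the common amdgpu code to the per-block
 * powerplay helper: UVD/VCN and VCE gating, MMHUB powergating for GMC, and
 * GFX powergating. Blocks that powerplay does not manage are silently
 * ignored.
 */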
static int pp_set_powergating_by_smu(void *handle,
                                uint32_t block_type, bool gate)
{
        int ret = 0;

        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
        case AMD_IP_BLOCK_TYPE_VCN:
                pp_dpm_powergate_uvd(handle, gate);
                break;
        case AMD_IP_BLOCK_TYPE_VCE:
                pp_dpm_powergate_vce(handle, gate);
                break;
        case AMD_IP_BLOCK_TYPE_GMC:
                pp_dpm_powergate_mmhub(handle);
                break;
        case AMD_IP_BLOCK_TYPE_GFX:
                ret = pp_dpm_powergate_gfx(handle, gate);
                break;
        default:
                break;
        }
        return ret;
}

static int pp_notify_smu_enable_pwe(void *handle)
{
        struct pp_hwmgr *hwmgr = handle;

        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;

        if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return -EINVAL;
        }

        mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
        mutex_unlock(&hwmgr->smu_lock);

        return 0;
}
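
/*
 * Dispatch table wired into adev->powerplay.pp_funcs by
 * amd_powerplay_create(); the amdgpu DPM code and the display core (the
 * entries below the "export to DC" marker) call into powerplay through it.
 */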
static const struct amd_pm_funcs pp_dpm_funcs = {
        .load_firmware = pp_dpm_load_fw,
        .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
        .force_performance_level = pp_dpm_force_performance_level,
        .get_performance_level = pp_dpm_get_performance_level,
        .get_current_power_state = pp_dpm_get_current_power_state,
        .dispatch_tasks = pp_dpm_dispatch_tasks,
        .set_fan_control_mode = pp_dpm_set_fan_control_mode,
        .get_fan_control_mode = pp_dpm_get_fan_control_mode,
        .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
        .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
        .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
        .get_pp_num_states = pp_dpm_get_pp_num_states,
        .get_pp_table = pp_dpm_get_pp_table,
        .set_pp_table = pp_dpm_set_pp_table,
        .force_clock_level = pp_dpm_force_clock_level,
        .print_clock_levels = pp_dpm_print_clock_levels,
        .get_sclk_od = pp_dpm_get_sclk_od,
        .set_sclk_od = pp_dpm_set_sclk_od,
        .get_mclk_od = pp_dpm_get_mclk_od,
        .set_mclk_od = pp_dpm_set_mclk_od,
        .read_sensor = pp_dpm_read_sensor,
        .get_vce_clock_state = pp_dpm_get_vce_clock_state,
        .switch_power_profile = pp_dpm_switch_power_profile,
        .set_clockgating_by_smu = pp_set_clockgating_by_smu,
        .set_powergating_by_smu = pp_set_powergating_by_smu,
        .get_power_profile_mode = pp_get_power_profile_mode,
        .set_power_profile_mode = pp_set_power_profile_mode,
        .odn_edit_dpm_table = pp_odn_edit_dpm_table,
        .set_power_limit = pp_set_power_limit,
        .get_power_limit = pp_get_power_limit,
/* export to DC */
        .get_sclk = pp_dpm_get_sclk,
        .get_mclk = pp_dpm_get_mclk,
        .display_configuration_change = pp_display_configuration_change,
        .get_display_power_level = pp_get_display_power_level,
        .get_current_clocks = pp_get_current_clocks,
        .get_clock_by_type = pp_get_clock_by_type,
        .get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
        .get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
        .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
        .display_clock_voltage_request = pp_display_clock_voltage_request,
        .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
        .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
};