/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

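/*
 * RC6 residency accounting: on most platforms the hardware residency
 * counters tick once per 1.28us, so calc_residency() below converts the
 * raw count to milliseconds with 64b fixed point math:
 *
 *	ms = ticks * 1.28us / 1000 = ticks * 128 / 100000
 *
 * which is exactly what units = 128 and div = 100000 encode. VLV/CHV
 * count in CZ clock units and Broxton in 833.33ns units instead, so
 * those paths substitute their own units/div pairs.
 */
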
#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		units = 1;
		div = dev_priv->czclk_freq;

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;
	} else if (IS_BROXTON(dev)) {
		units = 1;
		div = 1200;		/* 833.33ns */
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

	intel_runtime_pm_put(dev_priv);
	return ret;
}

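/*
 * The show_*() handlers below back read-only sysfs files that are merged
 * into the device's "power" attribute group (all the groups further down
 * use power_group_name), so they typically surface as e.g.
 *
 *	$ cat /sys/class/drm/card0/power/rc6_residency_ms
 *
 * Each read takes a fresh snapshot of the relevant residency counter.
 */
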
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_get_drvdata(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_get_drvdata(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}

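/*
 * The l3_parity files expose the per-slice L3 remap table as a binary
 * blob of GEN7_L3LOG_SIZE bytes. The table is accessed in u32
 * granularity, which is why l3_access_valid() rejects offsets that are
 * not 4-byte aligned and i915_l3_read() rounds the count down to a
 * multiple of 4.
 */
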
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};

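/*
 * Frequency reporting: gt_act_freq_mhz reads the frequency the hardware
 * is actually running at (from the punit on VLV/CHV, from RPSTAT1
 * elsewhere), while gt_cur_freq_mhz reports the last frequency software
 * requested (dev_priv->rps.cur_freq); the two can differ, e.g. while the
 * GPU is idle or being thermally throttled.
 */
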
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE,
			"%d\n",
			intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

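/*
 * The max/min stores accept a plain MHz value from userspace, e.g.
 *
 *	# echo 500 > /sys/class/drm/card0/gt_min_freq_mhz
 *
 * kstrtou32() parses it, intel_freq_opcode() converts MHz into the
 * hardware ratio, the result is validated against the hardware limits
 * and the opposite soft limit, and the current request is re-clamped
 * into the new soft range.
 */
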
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

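/*
 * The three RP attributes map onto the cached hardware capabilities:
 * gt_RP0_freq_mhz is the maximum non-overclocked frequency (rp0_freq),
 * gt_RP1_freq_mhz is nominally the most efficient frequency (rp1_freq),
 * and gt_RPn_freq_mhz is the minimum frequency (min_freq). All three
 * share one show() routine, which dispatches on the attribute pointer.
 */
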
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{

	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

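/*
 * The "error" file gives userspace a crash dump interface: reading it
 * streams out the last recorded GPU error state (if any) via
 * i915_error_state_to_str(), and writing anything to it, e.g.
 *
 *	# echo 1 > /sys/class/drm/card0/error
 *
 * discards the recorded state so a subsequent hang can be captured.
 */
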
void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}