/* BEGIN CSTYLED */

/*
 * i915_drv.c -- Intel i915 driver -*- linux-c -*-
 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
 */

/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009, Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2014 RackTop Systems.
 */

/*
 * I915 DRM Driver for Solaris
 *
 * This driver provides the hardware 3D acceleration support for Intel
 * integrated video devices (e.g. i8xx/i915/i945 series chipsets), under the
 * DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager) here
 * means the kernel device driver in DRI.
 *
 * I915 driver is a device dependent driver only, it depends on a misc module
 * named drm for generic DRM operations.
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_pciids.h"

/*
 * copied from vgasubr.h
 */
struct vgaregmap {
	uint8_t *addr;
	ddi_acc_handle_t handle;
	boolean_t mapped;
};

enum pipe {
	PIPE_A = 0,
	PIPE_B,
};

/*
 * cb_ops entrypoint
 */
extern struct cb_ops drm_cb_ops;

/*
 * module entrypoint
 */
static int i915_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int i915_attach(dev_info_t *, ddi_attach_cmd_t);
static int i915_detach(dev_info_t *, ddi_detach_cmd_t);

/* drv_PCI_IDs comes from drm_pciids.h */
static drm_pci_id_list_t i915_pciidlist[] = {
	i915_PCI_IDS
};

/*
 * Local routines
 */
static void i915_configure(drm_driver_t *);
static int i915_quiesce(dev_info_t *dip);

/*
 * DRM driver
 */
static drm_driver_t i915_driver = {0};

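/*
 * Note: i915_driver is zero-initialized here; its ops vector and feature
 * flags are filled in by i915_configure() from _init() before the module
 * is installed.
 */
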
static struct dev_ops i915_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	i915_info,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	i915_attach,		/* devo_attach */
	i915_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&drm_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	NULL,			/* power */
	i915_quiesce,		/* devo_quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* drv_modops */
	"I915 DRM driver",	/* drv_linkinfo */
	&i915_dev_ops,		/* drv_dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &modldrv, NULL
};

static ddi_device_acc_attr_t s3_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC	/* must be DDI_STRICTORDER_ACC */
};

/*
 * softstate head
 */
static void *i915_statep;

int
_init(void)
{
	int error;

	i915_configure(&i915_driver);

	if ((error = ddi_soft_state_init(&i915_statep,
	    sizeof (drm_device_t), DRM_MAX_INSTANCES)) != 0)
		return (error);

	if ((error = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&i915_statep);
		return (error);
	}

	return (error);

}	/* _init() */

int
_fini(void)
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0)
		return (error);

	(void) ddi_soft_state_fini(&i915_statep);

	return (0);

}	/* _fini() */

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));

}	/* _info() */

/*
 * off range: 0x3b0 ~ 0x3ff
 */
static void
vga_reg_put8(struct vgaregmap *regmap, uint16_t off, uint8_t val)
{
	ASSERT((off >= 0x3b0) && (off <= 0x3ff));

	ddi_put8(regmap->handle, regmap->addr + off, val);
}

/*
 * off range: 0x3b0 ~ 0x3ff
 */
static uint8_t
vga_reg_get8(struct vgaregmap *regmap, uint16_t off)
{
	ASSERT((off >= 0x3b0) && (off <= 0x3ff));

	return (ddi_get8(regmap->handle, regmap->addr + off));
}

static void
i915_write_indexed(struct vgaregmap *regmap,
    uint16_t index_port, uint16_t data_port, uint8_t index, uint8_t val)
{
	vga_reg_put8(regmap, index_port, index);
	vga_reg_put8(regmap, data_port, val);
}

static uint8_t
i915_read_indexed(struct vgaregmap *regmap,
    uint16_t index_port, uint16_t data_port, uint8_t index)
{
	vga_reg_put8(regmap, index_port, index);
	return (vga_reg_get8(regmap, data_port));
}

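/*
 * The VGA attribute controller ("AR") shares a single port for its index
 * and data phases and sequences them with an internal flip-flop; reading
 * Input Status Register 1 (ST01) resets the flip-flop to the index phase.
 * That is why the AR helpers below begin each access with a throwaway
 * ST01 read.
 */
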
static void
i915_write_ar(struct vgaregmap *regmap, uint16_t st01,
    uint8_t reg, uint8_t val, uint8_t palette_enable)
{
	(void) vga_reg_get8(regmap, st01);
	vga_reg_put8(regmap, VGA_AR_INDEX, palette_enable | reg);
	vga_reg_put8(regmap, VGA_AR_DATA_WRITE, val);
}

static uint8_t
i915_read_ar(struct vgaregmap *regmap, uint16_t st01,
    uint8_t index, uint8_t palette_enable)
{
	(void) vga_reg_get8(regmap, st01);
	vga_reg_put8(regmap, VGA_AR_INDEX, index | palette_enable);
	return (vga_reg_get8(regmap, VGA_AR_DATA_READ));
}

static int
i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
	struct s3_i915_private *s3_priv = dev->s3_private;

	if (pipe == PIPE_A)
		return (S3_READ(DPLL_A) & DPLL_VCO_ENABLE);
	else
		return (S3_READ(DPLL_B) & DPLL_VCO_ENABLE);
}

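/*
 * Each pipe's palette is an array of 256 32-bit entries spaced 4 bytes
 * apart, hence the (i << 2) stride in the loops below. Save/restore is
 * skipped when the pipe's DPLL is off, presumably because the palette is
 * only meaningful while the pipe is running.
 */
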
static void
i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
	uint32_t *array;
	int i;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	if (pipe == PIPE_A)
		array = s3_priv->save_palette_a;
	else
		array = s3_priv->save_palette_b;

	for (i = 0; i < 256; i++)
		array[i] = S3_READ(reg + (i << 2));
}

static void
i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
	uint32_t *array;
	int i;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	if (pipe == PIPE_A)
		array = s3_priv->save_palette_a;
	else
		array = s3_priv->save_palette_b;

	for (i = 0; i < 256; i++)
		S3_WRITE(reg + (i << 2), array[i]);
}

static void
i915_save_vga(struct drm_device *dev)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;
	uint16_t cr_index, cr_data, st01;
	struct vgaregmap regmap;

	regmap.addr = (uint8_t *)s3_priv->saveAddr;
	regmap.handle = s3_priv->saveHandle;

	/* VGA color palette registers */
	s3_priv->saveDACMASK = vga_reg_get8(&regmap, VGA_DACMASK);
	/* DACCRX automatically increments during read */
	vga_reg_put8(&regmap, VGA_DACRX, 0);
	/* Read 3 bytes of color data from each index */
	for (i = 0; i < 256 * 3; i++)
		s3_priv->saveDACDATA[i] = vga_reg_get8(&regmap, VGA_DACDATA);

	/* MSR bits */
	s3_priv->saveMSR = vga_reg_get8(&regmap, VGA_MSR_READ);
	if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
		cr_index = VGA_CR_INDEX_CGA;
		cr_data = VGA_CR_DATA_CGA;
		st01 = VGA_ST01_CGA;
	} else {
		cr_index = VGA_CR_INDEX_MDA;
		cr_data = VGA_CR_DATA_MDA;
		st01 = VGA_ST01_MDA;
	}

	/* CRT controller regs */
	i915_write_indexed(&regmap, cr_index, cr_data, 0x11,
	    i915_read_indexed(&regmap, cr_index, cr_data, 0x11) & (~0x80));
	for (i = 0; i <= 0x24; i++)
		s3_priv->saveCR[i] =
		    i915_read_indexed(&regmap, cr_index, cr_data, i);
	/* Make sure we don't turn off CR group 0 writes */
	s3_priv->saveCR[0x11] &= ~0x80;

	/* Attribute controller registers */
	(void) vga_reg_get8(&regmap, st01);
	s3_priv->saveAR_INDEX = vga_reg_get8(&regmap, VGA_AR_INDEX);
	for (i = 0; i <= 0x14; i++)
		s3_priv->saveAR[i] = i915_read_ar(&regmap, st01, i, 0);
	(void) vga_reg_get8(&regmap, st01);
	vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX);
	(void) vga_reg_get8(&regmap, st01);

	/* Graphics controller registers */
	for (i = 0; i < 9; i++)
		s3_priv->saveGR[i] =
		    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i);

	s3_priv->saveGR[0x10] =
	    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
	s3_priv->saveGR[0x11] =
	    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
	s3_priv->saveGR[0x18] =
	    i915_read_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18);

	/* Sequencer registers */
	for (i = 0; i < 8; i++)
		s3_priv->saveSR[i] =
		    i915_read_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i);
}

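/*
 * i915_restore_vga() below mirrors i915_save_vga(): the sequencer, CRT
 * controller, graphics controller and attribute controller registers are
 * written back through the same index/data helpers, and the DAC palette
 * is reloaded last. The attribute index is restored with bit 5 (0x20)
 * set, which re-enables normal display output after the register accesses.
 */
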
static void
i915_restore_vga(struct drm_device *dev)
{
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;
	uint16_t cr_index, cr_data, st01;
	struct vgaregmap regmap;

	regmap.addr = (uint8_t *)s3_priv->saveAddr;
	regmap.handle = s3_priv->saveHandle;

	/*
	 * I/O Address Select. This bit selects 3Bxh or 3Dxh as the
	 * I/O address for the CRT Controller registers,
	 * the Feature Control Register (FCR), and Input Status Register
	 * 1 (ST01). Presently ignored (whole range is claimed), but
	 * will "ignore" 3Bx for color configuration or 3Dx for monochrome.
	 * Note that it is typical in AGP chipsets to shadow this bit
	 * and properly steer I/O cycles to the proper bus for operation
	 * where a MDA exists on another bus such as ISA.
	 * 0 = Select 3Bxh I/O address (MDA emulation) (default).
	 * 1 = Select 3Dxh I/O address (CGA emulation).
	 */
	vga_reg_put8(&regmap, VGA_MSR_WRITE, s3_priv->saveMSR);

	if (s3_priv->saveMSR & VGA_MSR_CGA_MODE) {
		cr_index = VGA_CR_INDEX_CGA;
		cr_data = VGA_CR_DATA_CGA;
		st01 = VGA_ST01_CGA;
	} else {
		cr_index = VGA_CR_INDEX_MDA;
		cr_data = VGA_CR_DATA_MDA;
		st01 = VGA_ST01_MDA;
	}

	/* Sequencer registers, don't write SR07 */
	for (i = 0; i < 7; i++)
		i915_write_indexed(&regmap, VGA_SR_INDEX, VGA_SR_DATA, i,
		    s3_priv->saveSR[i]);
	/* CRT controller regs */
	/* Enable CR group 0 writes */
	i915_write_indexed(&regmap, cr_index, cr_data,
	    0x11, s3_priv->saveCR[0x11]);
	for (i = 0; i <= 0x24; i++)
		i915_write_indexed(&regmap, cr_index,
		    cr_data, i, s3_priv->saveCR[i]);

	/* Graphics controller regs */
	for (i = 0; i < 9; i++)
		i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, i,
		    s3_priv->saveGR[i]);

	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
	    s3_priv->saveGR[0x10]);
	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
	    s3_priv->saveGR[0x11]);
	i915_write_indexed(&regmap, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
	    s3_priv->saveGR[0x18]);

	/* Attribute controller registers */
	(void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
	for (i = 0; i <= 0x14; i++)
		i915_write_ar(&regmap, st01, i, s3_priv->saveAR[i], 0);
	(void) vga_reg_get8(&regmap, st01); /* switch back to index mode */
	vga_reg_put8(&regmap, VGA_AR_INDEX, s3_priv->saveAR_INDEX | 0x20);
	(void) vga_reg_get8(&regmap, st01); /* switch back to index mode */

	/* VGA color palette registers */
	vga_reg_put8(&regmap, VGA_DACMASK, s3_priv->saveDACMASK);
	/* DACCWX automatically increments during write */
	vga_reg_put8(&regmap, VGA_DACWX, 0);
	/* Write 3 bytes of color data to each index */
	for (i = 0; i < 256 * 3; i++)
		vga_reg_put8(&regmap, VGA_DACDATA, s3_priv->saveDACDATA[i]);
}

/*
 * i915_save_display - save display & mode info
 * @dev: DRM device
 *
 * Save mode timings and display info.
 */
void i915_save_display(struct drm_device *dev)
{
	struct s3_i915_private *s3_priv = dev->s3_private;

	/* Display arbitration control */
	s3_priv->saveDSPARB = S3_READ(DSPARB);

	/*
	 * Pipe & plane A info.
	 */
	s3_priv->savePIPEACONF = S3_READ(PIPEACONF);
	s3_priv->savePIPEASRC = S3_READ(PIPEASRC);
	s3_priv->saveFPA0 = S3_READ(FPA0);
	s3_priv->saveFPA1 = S3_READ(FPA1);
	s3_priv->saveDPLL_A = S3_READ(DPLL_A);
	if (IS_I965G(dev))
		s3_priv->saveDPLL_A_MD = S3_READ(DPLL_A_MD);
	s3_priv->saveHTOTAL_A = S3_READ(HTOTAL_A);
	s3_priv->saveHBLANK_A = S3_READ(HBLANK_A);
	s3_priv->saveHSYNC_A = S3_READ(HSYNC_A);
	s3_priv->saveVTOTAL_A = S3_READ(VTOTAL_A);
	s3_priv->saveVBLANK_A = S3_READ(VBLANK_A);
	s3_priv->saveVSYNC_A = S3_READ(VSYNC_A);
	s3_priv->saveBCLRPAT_A = S3_READ(BCLRPAT_A);

	s3_priv->saveDSPACNTR = S3_READ(DSPACNTR);
	s3_priv->saveDSPASTRIDE = S3_READ(DSPASTRIDE);
	s3_priv->saveDSPASIZE = S3_READ(DSPASIZE);
	s3_priv->saveDSPAPOS = S3_READ(DSPAPOS);
	s3_priv->saveDSPABASE = S3_READ(DSPABASE);
	if (IS_I965G(dev)) {
		s3_priv->saveDSPASURF = S3_READ(DSPASURF);
		s3_priv->saveDSPATILEOFF = S3_READ(DSPATILEOFF);
	}
	i915_save_palette(dev, PIPE_A);
	s3_priv->savePIPEASTAT = S3_READ(PIPEASTAT);

	/*
	 * Pipe & plane B info
	 */
	s3_priv->savePIPEBCONF = S3_READ(PIPEBCONF);
	s3_priv->savePIPEBSRC = S3_READ(PIPEBSRC);
	s3_priv->saveFPB0 = S3_READ(FPB0);
	s3_priv->saveFPB1 = S3_READ(FPB1);
	s3_priv->saveDPLL_B = S3_READ(DPLL_B);
	if (IS_I965G(dev))
		s3_priv->saveDPLL_B_MD = S3_READ(DPLL_B_MD);
	s3_priv->saveHTOTAL_B = S3_READ(HTOTAL_B);
	s3_priv->saveHBLANK_B = S3_READ(HBLANK_B);
	s3_priv->saveHSYNC_B = S3_READ(HSYNC_B);
	s3_priv->saveVTOTAL_B = S3_READ(VTOTAL_B);
	s3_priv->saveVBLANK_B = S3_READ(VBLANK_B);
	s3_priv->saveVSYNC_B = S3_READ(VSYNC_B);
	s3_priv->saveBCLRPAT_B = S3_READ(BCLRPAT_B);

	s3_priv->saveDSPBCNTR = S3_READ(DSPBCNTR);
	s3_priv->saveDSPBSTRIDE = S3_READ(DSPBSTRIDE);
	s3_priv->saveDSPBSIZE = S3_READ(DSPBSIZE);
	s3_priv->saveDSPBPOS = S3_READ(DSPBPOS);
	s3_priv->saveDSPBBASE = S3_READ(DSPBBASE);
	if (IS_I965GM(dev) || IS_GM45(dev)) {
		s3_priv->saveDSPBSURF = S3_READ(DSPBSURF);
		s3_priv->saveDSPBTILEOFF = S3_READ(DSPBTILEOFF);
	}
	i915_save_palette(dev, PIPE_B);
	s3_priv->savePIPEBSTAT = S3_READ(PIPEBSTAT);

	/*
	 * CRT state
	 */
	s3_priv->saveADPA = S3_READ(ADPA);

	/*
	 * LVDS state
	 */
	s3_priv->savePP_CONTROL = S3_READ(PP_CONTROL);
	s3_priv->savePFIT_PGM_RATIOS = S3_READ(PFIT_PGM_RATIOS);
	s3_priv->saveBLC_PWM_CTL = S3_READ(BLC_PWM_CTL);
	if (IS_I965G(dev))
		s3_priv->saveBLC_PWM_CTL2 = S3_READ(BLC_PWM_CTL2);
	if (IS_MOBILE(dev) && !IS_I830(dev))
		s3_priv->saveLVDS = S3_READ(LVDS);
	if (!IS_I830(dev) && !IS_845G(dev))
		s3_priv->savePFIT_CONTROL = S3_READ(PFIT_CONTROL);
	s3_priv->saveLVDSPP_ON = S3_READ(LVDSPP_ON);
	s3_priv->saveLVDSPP_OFF = S3_READ(LVDSPP_OFF);
	s3_priv->savePP_CYCLE = S3_READ(PP_CYCLE);

	/* FIXME: save TV & SDVO state */

	/* FBC state */
	s3_priv->saveFBC_CFB_BASE = S3_READ(FBC_CFB_BASE);
	s3_priv->saveFBC_LL_BASE = S3_READ(FBC_LL_BASE);
	s3_priv->saveFBC_CONTROL2 = S3_READ(FBC_CONTROL2);
	s3_priv->saveFBC_CONTROL = S3_READ(FBC_CONTROL);

	/* VGA state */
	s3_priv->saveVCLK_DIVISOR_VGA0 = S3_READ(VCLK_DIVISOR_VGA0);
	s3_priv->saveVCLK_DIVISOR_VGA1 = S3_READ(VCLK_DIVISOR_VGA1);
	s3_priv->saveVCLK_POST_DIV = S3_READ(VCLK_POST_DIV);
	s3_priv->saveVGACNTRL = S3_READ(VGACNTRL);

	i915_save_vga(dev);
}

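/*
 * In the restore path below each DPLL is first written with its VCO
 * enable bit cleared and the FP dividers reprogrammed, then the saved
 * DPLL value is written back, presumably so the PLL can relock cleanly;
 * the drv_usecwait(150) calls give it time to settle between steps.
 */
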
void i915_restore_display(struct drm_device *dev)
{
	struct s3_i915_private *s3_priv = dev->s3_private;

	S3_WRITE(DSPARB, s3_priv->saveDSPARB);

	/*
	 * Pipe & plane A info
	 * Prime the clock
	 */
	if (s3_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
		S3_WRITE(DPLL_A, s3_priv->saveDPLL_A &
		    ~DPLL_VCO_ENABLE);
		drv_usecwait(150);
	}
	S3_WRITE(FPA0, s3_priv->saveFPA0);
	S3_WRITE(FPA1, s3_priv->saveFPA1);
	/* Actually enable it */
	S3_WRITE(DPLL_A, s3_priv->saveDPLL_A);
	drv_usecwait(150);
	if (IS_I965G(dev))
		S3_WRITE(DPLL_A_MD, s3_priv->saveDPLL_A_MD);
	drv_usecwait(150);

	/* Restore mode */
	S3_WRITE(HTOTAL_A, s3_priv->saveHTOTAL_A);
	S3_WRITE(HBLANK_A, s3_priv->saveHBLANK_A);
	S3_WRITE(HSYNC_A, s3_priv->saveHSYNC_A);
	S3_WRITE(VTOTAL_A, s3_priv->saveVTOTAL_A);
	S3_WRITE(VBLANK_A, s3_priv->saveVBLANK_A);
	S3_WRITE(VSYNC_A, s3_priv->saveVSYNC_A);
	S3_WRITE(BCLRPAT_A, s3_priv->saveBCLRPAT_A);

	/* Restore plane info */
	S3_WRITE(DSPASIZE, s3_priv->saveDSPASIZE);
	S3_WRITE(DSPAPOS, s3_priv->saveDSPAPOS);
	S3_WRITE(PIPEASRC, s3_priv->savePIPEASRC);
	S3_WRITE(DSPABASE, s3_priv->saveDSPABASE);
	S3_WRITE(DSPASTRIDE, s3_priv->saveDSPASTRIDE);
	if (IS_I965G(dev)) {
		S3_WRITE(DSPASURF, s3_priv->saveDSPASURF);
		S3_WRITE(DSPATILEOFF, s3_priv->saveDSPATILEOFF);
	}
	S3_WRITE(PIPEACONF, s3_priv->savePIPEACONF);
	i915_restore_palette(dev, PIPE_A);
	/* Enable the plane */
	S3_WRITE(DSPACNTR, s3_priv->saveDSPACNTR);
	S3_WRITE(DSPABASE, S3_READ(DSPABASE));

	/* Pipe & plane B info */
	if (s3_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
		S3_WRITE(DPLL_B, s3_priv->saveDPLL_B &
		    ~DPLL_VCO_ENABLE);
		drv_usecwait(150);
	}
	S3_WRITE(FPB0, s3_priv->saveFPB0);
	S3_WRITE(FPB1, s3_priv->saveFPB1);
	/* Actually enable it */
	S3_WRITE(DPLL_B, s3_priv->saveDPLL_B);
	drv_usecwait(150);
	if (IS_I965G(dev))
		S3_WRITE(DPLL_B_MD, s3_priv->saveDPLL_B_MD);
	drv_usecwait(150);

	/* Restore mode */
	S3_WRITE(HTOTAL_B, s3_priv->saveHTOTAL_B);
	S3_WRITE(HBLANK_B, s3_priv->saveHBLANK_B);
	S3_WRITE(HSYNC_B, s3_priv->saveHSYNC_B);
	S3_WRITE(VTOTAL_B, s3_priv->saveVTOTAL_B);
	S3_WRITE(VBLANK_B, s3_priv->saveVBLANK_B);
	S3_WRITE(VSYNC_B, s3_priv->saveVSYNC_B);
	S3_WRITE(BCLRPAT_B, s3_priv->saveBCLRPAT_B);

	/* Restore plane info */
	S3_WRITE(DSPBSIZE, s3_priv->saveDSPBSIZE);
	S3_WRITE(DSPBPOS, s3_priv->saveDSPBPOS);
	S3_WRITE(PIPEBSRC, s3_priv->savePIPEBSRC);
	S3_WRITE(DSPBBASE, s3_priv->saveDSPBBASE);
	S3_WRITE(DSPBSTRIDE, s3_priv->saveDSPBSTRIDE);
	if (IS_I965G(dev)) {
		S3_WRITE(DSPBSURF, s3_priv->saveDSPBSURF);
		S3_WRITE(DSPBTILEOFF, s3_priv->saveDSPBTILEOFF);
	}
	S3_WRITE(PIPEBCONF, s3_priv->savePIPEBCONF);
	i915_restore_palette(dev, PIPE_B);
	/* Enable the plane */
	S3_WRITE(DSPBCNTR, s3_priv->saveDSPBCNTR);
	S3_WRITE(DSPBBASE, S3_READ(DSPBBASE));
	/* CRT state */
	S3_WRITE(ADPA, s3_priv->saveADPA);

	/* LVDS state */
	if (IS_I965G(dev))
		S3_WRITE(BLC_PWM_CTL2, s3_priv->saveBLC_PWM_CTL2);
	if (IS_MOBILE(dev) && !IS_I830(dev))
		S3_WRITE(LVDS, s3_priv->saveLVDS);
	if (!IS_I830(dev) && !IS_845G(dev))
		S3_WRITE(PFIT_CONTROL, s3_priv->savePFIT_CONTROL);

	S3_WRITE(PFIT_PGM_RATIOS, s3_priv->savePFIT_PGM_RATIOS);
	S3_WRITE(BLC_PWM_CTL, s3_priv->saveBLC_PWM_CTL);
	S3_WRITE(LVDSPP_ON, s3_priv->saveLVDSPP_ON);
	S3_WRITE(LVDSPP_OFF, s3_priv->saveLVDSPP_OFF);
	S3_WRITE(PP_CYCLE, s3_priv->savePP_CYCLE);
	S3_WRITE(PP_CONTROL, s3_priv->savePP_CONTROL);

	/* FIXME: restore TV & SDVO state */

	/* FBC info */
	S3_WRITE(FBC_CFB_BASE, s3_priv->saveFBC_CFB_BASE);
	S3_WRITE(FBC_LL_BASE, s3_priv->saveFBC_LL_BASE);
	S3_WRITE(FBC_CONTROL2, s3_priv->saveFBC_CONTROL2);
	S3_WRITE(FBC_CONTROL, s3_priv->saveFBC_CONTROL);

	/* VGA state */
	S3_WRITE(VGACNTRL, s3_priv->saveVGACNTRL);
	S3_WRITE(VCLK_DIVISOR_VGA0, s3_priv->saveVCLK_DIVISOR_VGA0);
	S3_WRITE(VCLK_DIVISOR_VGA1, s3_priv->saveVCLK_DIVISOR_VGA1);
	S3_WRITE(VCLK_POST_DIV, s3_priv->saveVCLK_POST_DIV);
	drv_usecwait(150);

	i915_restore_vga(dev);
}

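/*
 * i915_resume() handles the DDI_RESUME command (see i915_attach). Beyond
 * the display state restored above, it rewrites clock gating, cache mode,
 * memory arbitration, the scratch (SWF) registers and the page table
 * control register, and finally asks the DRM layer to rebind AGP memory.
 */
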
static int
i915_resume(struct drm_device *dev)
{
	ddi_acc_handle_t conf_hdl;
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;

	if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
		DRM_ERROR(("i915_resume: pci_config_setup fail"));
		return (DDI_FAILURE);
	}
	/*
	 * Nexus driver will resume pci config space and set the power state
	 * for its children. So we needn't resume them explicitly here.
	 * see pci_pre_resume for detail.
	 */
	pci_config_put8(conf_hdl, LBB, s3_priv->saveLBB);

	if (IS_I965G(dev) && IS_MOBILE(dev))
		S3_WRITE(MCHBAR_RENDER_STANDBY, s3_priv->saveRENDERSTANDBY);
	if (IS_I965GM(dev))
		(void) S3_READ(MCHBAR_RENDER_STANDBY);

	S3_WRITE(HWS_PGA, s3_priv->saveHWS);
	if (IS_I965GM(dev))
		(void) S3_READ(HWS_PGA);

	i915_restore_display(dev);

	/* Clock gating state */
	S3_WRITE(D_STATE, s3_priv->saveD_STATE);
	S3_WRITE(CG_2D_DIS, s3_priv->saveCG_2D_DIS);

	/* Cache mode state */
	S3_WRITE(CACHE_MODE_0, s3_priv->saveCACHE_MODE_0 | 0xffff0000);

	/* Memory arbitration state */
	S3_WRITE(MI_ARB_STATE, s3_priv->saveMI_ARB_STATE | 0xffff0000);

	for (i = 0; i < 16; i++) {
		S3_WRITE(SWF0 + (i << 2), s3_priv->saveSWF0[i]);
		S3_WRITE(SWF10 + (i << 2), s3_priv->saveSWF1[i]);
	}
	for (i = 0; i < 3; i++)
		S3_WRITE(SWF30 + (i << 2), s3_priv->saveSWF2[i]);

	S3_WRITE(I915REG_PGTBL_CTRL, s3_priv->pgtbl_ctl);

	(void) pci_config_teardown(&conf_hdl);

	drm_agp_rebind(dev);

	return (DDI_SUCCESS);
}

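/*
 * i915_suspend() is the DDI_SUSPEND path, called from i915_detach(). It
 * captures the register state the device loses across S3 so i915_resume()
 * can put it back; PCI config space itself is handled by the nexus driver,
 * as noted in the comments of both routines.
 */
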
static int
i915_suspend(struct drm_device *dev)
{
	ddi_acc_handle_t conf_hdl;
	struct s3_i915_private *s3_priv = dev->s3_private;
	int i;

	if (pci_config_setup(dev->dip, &conf_hdl) != DDI_SUCCESS) {
		DRM_ERROR(("i915_suspend: pci_config_setup fail"));
		return (DDI_FAILURE);
	}

	/*
	 * Nexus driver will resume pci config space for its children.
	 * So pci config registers are not saved here.
	 */
	s3_priv->saveLBB = pci_config_get8(conf_hdl, LBB);

	if (IS_I965G(dev) && IS_MOBILE(dev))
		s3_priv->saveRENDERSTANDBY = S3_READ(MCHBAR_RENDER_STANDBY);

	/* Hardware status page */
	s3_priv->saveHWS = S3_READ(HWS_PGA);

	i915_save_display(dev);

	/* Interrupt state */
	s3_priv->saveIIR = S3_READ(IIR);
	s3_priv->saveIER = S3_READ(IER);
	s3_priv->saveIMR = S3_READ(IMR);

	/* Clock gating state */
	s3_priv->saveD_STATE = S3_READ(D_STATE);
	s3_priv->saveCG_2D_DIS = S3_READ(CG_2D_DIS);

	/* Cache mode state */
	s3_priv->saveCACHE_MODE_0 = S3_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	s3_priv->saveMI_ARB_STATE = S3_READ(MI_ARB_STATE);

	/* Scratch space */
	for (i = 0; i < 16; i++) {
		s3_priv->saveSWF0[i] = S3_READ(SWF0 + (i << 2));
		s3_priv->saveSWF1[i] = S3_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		s3_priv->saveSWF2[i] = S3_READ(SWF30 + (i << 2));

	/*
	 * Save page table control register
	 */
	s3_priv->pgtbl_ctl = S3_READ(I915REG_PGTBL_CTRL);

	(void) pci_config_teardown(&conf_hdl);

	return (DDI_SUCCESS);
}

/*
 * This function checks the length of the memory-mapped IO space to find the
 * right BAR. There are two common possibilities here:
 * 1. The MMIO registers are in a memory-mapped IO BAR of 1M size. The bottom
 *    half of the 1M space is the MMIO registers.
 * 2. The MMIO registers are in a memory-mapped IO BAR of 512K size. The whole
 *    512K space is the MMIO registers.
 * The probe below also accepts a 4M BAR; in every case only the first 512K
 * (0x80000) is mapped.
 */
static int
i915_map_regs(dev_info_t *dip, caddr_t *save_addr, ddi_acc_handle_t *handlep)
{
	int rnumber;
	int nregs;
	off_t size = 0;

	if (ddi_dev_nregs(dip, &nregs)) {
		cmn_err(CE_WARN, "i915_map_regs: failed to get nregs");
		return (DDI_FAILURE);
	}

	for (rnumber = 1; rnumber < nregs; rnumber++) {
		(void) ddi_dev_regsize(dip, rnumber, &size);
		if ((size == 0x80000) ||
		    (size == 0x100000) ||
		    (size == 0x400000))
			break;
	}

	if (rnumber >= nregs) {
		cmn_err(CE_WARN,
		    "i915_map_regs: failed to find MMIO registers");
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, rnumber, save_addr,
	    0, 0x80000, &s3_attr, handlep)) {
		cmn_err(CE_WARN,
		    "i915_map_regs: failed to map bar %d", rnumber);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
i915_unmap_regs(ddi_acc_handle_t *handlep)
{
	ddi_regs_map_free(handlep);
}

static int
i915_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	drm_device_t *statep;
	s3_i915_private_t *s3_private;
	void *handle;
	int unit;

	unit = ddi_get_instance(dip);
	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		statep = ddi_get_soft_state(i915_statep, unit);
		return (i915_resume(statep));
	default:
		DRM_ERROR("i915_attach: "
		    "only attach and resume ops are supported");
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(i915_statep, unit) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "i915_attach: failed to alloc softstate");
		return (DDI_FAILURE);
	}
	statep = ddi_get_soft_state(i915_statep, unit);
	statep->dip = dip;
	statep->driver = &i915_driver;

	statep->s3_private = drm_alloc(sizeof (s3_i915_private_t),
	    DRM_MEM_DRIVER);

	if (statep->s3_private == NULL) {
		cmn_err(CE_WARN, "i915_attach: failed to allocate s3 priv");
		goto err_exit1;
	}

	/*
	 * Map in the mmio register space for s3.
	 */
	s3_private = (s3_i915_private_t *)statep->s3_private;

	if (i915_map_regs(dip, &s3_private->saveAddr,
	    &s3_private->saveHandle)) {
		cmn_err(CE_WARN, "i915_attach: failed to map MMIO");
		goto err_exit2;
	}

	/*
	 * Call drm_supp_register to create minor nodes for us
	 */
	handle = drm_supp_register(dip, statep);
	if (handle == NULL) {
		DRM_ERROR("i915_attach: drm_supp_register failed");
		goto err_exit3;
	}
	statep->drm_handle = handle;

	/*
	 * After drm_supp_register, we can call drm_xxx routine
	 */
	statep->drm_supported = DRM_UNSUPPORT;
	if (drm_probe(statep, i915_pciidlist) != DDI_SUCCESS) {
		DRM_ERROR("i915_attach: "
		    "DRM currently doesn't support this graphics card");
		goto err_exit4;
	}
	statep->drm_supported = DRM_SUPPORT;

	/* call common attach code */
	if (drm_attach(statep) != DDI_SUCCESS) {
		DRM_ERROR("i915_attach: drm_attach failed");
		goto err_exit4;
	}

	return (DDI_SUCCESS);

err_exit4:
	(void) drm_supp_unregister(handle);
err_exit3:
	i915_unmap_regs(&s3_private->saveHandle);
err_exit2:
	drm_free(statep->s3_private, sizeof (s3_i915_private_t),
	    DRM_MEM_DRIVER);
err_exit1:
	(void) ddi_soft_state_free(i915_statep, unit);

	return (DDI_FAILURE);

}	/* i915_attach() */

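/*
 * i915_detach() handles both DDI_DETACH and DDI_SUSPEND; the suspend case
 * is delegated to i915_suspend() above, while a real detach unmaps the S3
 * register space, frees the S3 state and unregisters from the DRM layer.
 */
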
static int
i915_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	drm_device_t *statep;
	int unit;
	s3_i915_private_t *s3_private;

	if ((cmd != DDI_SUSPEND) && (cmd != DDI_DETACH)) {
		DRM_ERROR("i915_detach: "
		    "only detach and suspend ops are supported");
		return (DDI_FAILURE);
	}

	unit = ddi_get_instance(dip);
	statep = ddi_get_soft_state(i915_statep, unit);
	if (statep == NULL) {
		DRM_ERROR("i915_detach: can not get soft state");
		return (DDI_FAILURE);
	}

	if (cmd == DDI_SUSPEND)
		return (i915_suspend(statep));

	s3_private = (s3_i915_private_t *)statep->s3_private;
	ddi_regs_map_free(&s3_private->saveHandle);

	/*
	 * Free the struct for context saving in S3
	 */
	drm_free(statep->s3_private, sizeof (s3_i915_private_t),
	    DRM_MEM_DRIVER);

	(void) drm_detach(statep);
	(void) drm_supp_unregister(statep->drm_handle);
	(void) ddi_soft_state_free(i915_statep, unit);

	return (DDI_SUCCESS);

}	/* i915_detach() */

/*ARGSUSED*/
static int
i915_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	drm_device_t *statep;
	int error = DDI_SUCCESS;
	int unit;

	unit = drm_dev_to_instance((dev_t)arg);
	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		statep = ddi_get_soft_state(i915_statep, unit);
		if (statep == NULL || statep->dip == NULL) {
			error = DDI_FAILURE;
		} else {
			*result = (void *) statep->dip;
			error = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)unit;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
		break;
	}
	return (error);

}	/* i915_info() */

static void i915_configure(drm_driver_t *driver)
{
	driver->buf_priv_size = 1;	/* No dev_priv */
	driver->load = i915_driver_load;
	driver->unload = i915_driver_unload;
	driver->open = i915_driver_open;
	driver->preclose = i915_driver_preclose;
	driver->postclose = i915_driver_postclose;
	driver->lastclose = i915_driver_lastclose;
	driver->device_is_agp = i915_driver_device_is_agp;
	driver->enable_vblank = i915_enable_vblank;
	driver->disable_vblank = i915_disable_vblank;
	driver->irq_preinstall = i915_driver_irq_preinstall;
	driver->irq_postinstall = i915_driver_irq_postinstall;
	driver->irq_uninstall = i915_driver_irq_uninstall;
	driver->irq_handler = i915_driver_irq_handler;

	driver->gem_init_object = i915_gem_init_object;
	driver->gem_free_object = i915_gem_free_object;

	driver->driver_ioctls = i915_ioctls;
	driver->max_driver_ioctl = i915_max_ioctl;

	driver->driver_name = DRIVER_NAME;
	driver->driver_desc = DRIVER_DESC;
	driver->driver_date = DRIVER_DATE;
	driver->driver_major = DRIVER_MAJOR;
	driver->driver_minor = DRIVER_MINOR;
	driver->driver_patchlevel = DRIVER_PATCHLEVEL;

	driver->use_agp = 1;
	driver->require_agp = 1;
	driver->use_irq = 1;
}

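/*
 * quiesce(9E) entry point, used for fast reboot. It is expected to quiet
 * the device without blocking or relying on interrupts, so all that is
 * done here is to tear down interrupt delivery via
 * i915_driver_irq_uninstall().
 */
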
static int i915_quiesce(dev_info_t *dip)
{
	drm_device_t *statep;
	int unit;

	unit = ddi_get_instance(dip);
	statep = ddi_get_soft_state(i915_statep, unit);
	if (statep == NULL) {
		return (DDI_FAILURE);
	}
	i915_driver_irq_uninstall(statep);

	return (DDI_SUCCESS);
}