/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
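
/*
 * This file implements the CLP (Call Logical Processor) services used to
 * list, query, enable and disable PCI functions on s390. Each service
 * operates on a CLP_BLK_SIZE request/response block passed to the CLP
 * instruction.
 */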
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
        struct {
                unsigned int rsp;
                int rc;
        } __packed data = {rsp, rc};

        zpci_err_hex(&data, sizeof(data));
}
/*
 * Call Logical Processor
 * Retry logic is handled by the caller.
 */
static inline u8 clp_instr(void *data)
{
        struct { u8 _[CLP_BLK_SIZE]; } *req = data;
        u64 ignored;
        u8 cc;

        asm volatile (
                "       .insn   rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
                "       ipm     %[cc]\n"
                "       srl     %[cc],28\n"
                : [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
                : [req] "a" (req)
                : "cc");
        return cc;
}
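
/*
 * CLP request/response blocks must be CLP_BLK_SIZE bytes; allocate them as
 * whole pages so the block is physically contiguous.
 */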
static void *clp_alloc_block(gfp_t gfp_mask)
{
        return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
        free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}
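
/* Store the function group attributes returned by CLP_QUERY_PCI_FNGRP */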
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
                                      struct clp_rsp_query_pci_grp *response)
{
        zdev->tlb_refresh = response->refresh;
        zdev->dma_mask = response->dasm;
        zdev->msi_addr = response->msia;
        zdev->fmb_update = response->mui;

        switch (response->version) {
        case 1:
                zdev->max_bus_speed = PCIE_SPEED_5_0GT;
                break;
        default:
                zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
                break;
        }
}
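
/*
 * Query the PCI function group identified by pfgid and cache the group
 * attributes in the zpci_dev.
 */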
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
        struct clp_req_rsp_query_pci_grp *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        memset(rrb, 0, sizeof(*rrb));
        rrb->request.hdr.len = sizeof(rrb->request);
        rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
        rrb->response.hdr.len = sizeof(rrb->response);
        rrb->request.pfgid = pfgid;

        rc = clp_instr(rrb);
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                clp_store_query_pci_fngrp(zdev, &rrb->response);
        else {
                zpci_err("Q PCI FGRP:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
        return rc;
}
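
/* Store the BARs, DMA range and group id from the CLP_QUERY_PCI_FN response */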
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
                                  struct clp_rsp_query_pci *response)
{
        int i;

        for (i = 0; i < PCI_BAR_COUNT; i++) {
                zdev->bars[i].val = le32_to_cpu(response->bar[i]);
                zdev->bars[i].size = response->bar_size[i];
        }
        zdev->start_dma = response->sdma;
        zdev->end_dma = response->edma;
        zdev->pchid = response->pchid;
        zdev->pfgid = response->pfgid;
        return 0;
}
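
/*
 * Query a single PCI function by its handle and, if it belongs to a
 * function group, query the group attributes as well.
 */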
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
        struct clp_req_rsp_query_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        memset(rrb, 0, sizeof(*rrb));
        rrb->request.hdr.len = sizeof(rrb->request);
        rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
        rrb->response.hdr.len = sizeof(rrb->response);
        rrb->request.fh = fh;

        rc = clp_instr(rrb);
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
                rc = clp_store_query_pci_fn(zdev, &rrb->response);
                if (rc)
                        goto out;
                if (rrb->response.pfgid)
                        rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
        } else {
                zpci_err("Q PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
out:
        clp_free_block(rrb);
        return rc;
}
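
/* Allocate and register a zpci_dev for a newly discovered PCI function */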
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
        struct zpci_dev *zdev;
        int rc;

        zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
        zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
        if (!zdev)
                return -ENOMEM;

        zdev->fh = fh;
        zdev->fid = fid;

        /* Query function properties and update zdev */
        rc = clp_query_pci_fn(zdev, fh);
        if (rc)
                goto error;

        if (configured)
                zdev->state = ZPCI_FN_STATE_CONFIGURED;
        else
                zdev->state = ZPCI_FN_STATE_STANDBY;

        rc = zpci_create_device(zdev);
        if (rc)
                goto error;
        return 0;

error:
        kfree(zdev);
        return rc;
}
/*
 * Enable/Disable a given PCI function defined by its function handle.
 */
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
        struct clp_req_rsp_set_pci *rrb;
        int rc, retries = 100;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        do {
                memset(rrb, 0, sizeof(*rrb));
                rrb->request.hdr.len = sizeof(rrb->request);
                rrb->request.hdr.cmd = CLP_SET_PCI_FN;
                rrb->response.hdr.len = sizeof(rrb->response);
                rrb->request.fh = *fh;
                rrb->request.oc = command;
                rrb->request.ndas = nr_dma_as;

                rc = clp_instr(rrb);
                if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
                        retries--;
                        if (retries < 0)
                                break;
                        msleep(20);
                }
        } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                *fh = rrb->response.fh;
        else {
                zpci_err("Set PCI FN:\n");
                zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
        return rc;
}
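
/* Enable or disable a function and keep the updated handle in zdev */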
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
        u32 fh = zdev->fh;
        int rc;

        rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
        if (!rc)
                /* Success -> store enabled handle in zdev */
                zdev->fh = fh;

        zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
        return rc;
}
int clp_disable_fh(struct zpci_dev *zdev)
{
        u32 fh = zdev->fh;
        int rc;

        if (!zdev_enabled(zdev))
                return 0;

        rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
        if (!rc)
                /* Success -> store disabled handle in zdev */
                zdev->fh = fh;

        zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
        return rc;
}
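
/*
 * Retrieve the PCI function handle list in CLP_BLK_SIZE chunks and invoke
 * the callback for every entry. The resume token from the previous response
 * is passed back until the list is exhausted.
 */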
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
                        void (*cb)(struct clp_fh_list_entry *entry))
{
        u64 resume_token = 0;
        int entries, i, rc;

        do {
                memset(rrb, 0, sizeof(*rrb));
                rrb->request.hdr.len = sizeof(rrb->request);
                rrb->request.hdr.cmd = CLP_LIST_PCI;
                /* store as many entries as possible */
                rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
                rrb->request.resume_token = resume_token;

                /* Get PCI function handle list */
                rc = clp_instr(rrb);
                if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
                        zpci_err("List PCI FN:\n");
                        zpci_err_clp(rrb->response.hdr.rsp, rc);
                        rc = -EIO;
                        goto out;
                }

                WARN_ON_ONCE(rrb->response.entry_size !=
                        sizeof(struct clp_fh_list_entry));

                entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
                        rrb->response.entry_size;

                resume_token = rrb->response.resume_token;
                for (i = 0; i < entries; i++)
                        cb(&rrb->response.fh_list[i]);
        } while (resume_token);
out:
        return rc;
}
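
/*
 * Callbacks for clp_list_pci(): entries without a vendor id are skipped;
 * the rest are added, rescanned or have their function handle updated.
 */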
static void __clp_add(struct clp_fh_list_entry *entry)
{
        if (!entry->vendor_id)
                return;

        clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}
static void __clp_rescan(struct clp_fh_list_entry *entry)
{
        struct zpci_dev *zdev;

        if (!entry->vendor_id)
                return;

        zdev = get_zdev_by_fid(entry->fid);
        if (!zdev) {
                clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
                return;
        }

        if (!entry->config_state) {
                /*
                 * The handle is already disabled, that means no iota/irq freeing via
                 * the firmware interfaces anymore. Need to free resources manually
                 * (DMA memory, debug, sysfs)...
                 */
                zpci_stop_device(zdev);
        }
}
static void __clp_update(struct clp_fh_list_entry *entry)
{
        struct zpci_dev *zdev;

        if (!entry->vendor_id)
                return;

        zdev = get_zdev_by_fid(entry->fid);
        if (!zdev)
                return;

        zdev->fh = entry->fh;
}
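
/*
 * Entry points: clp_scan_pci_devices() adds all functions found via
 * CLP_LIST_PCI, clp_rescan_pci_devices() re-walks the list and handles
 * functions that appeared or disappeared, and clp_rescan_pci_devices_simple()
 * only refreshes the stored function handles; it uses GFP_NOWAIT so it can
 * be called from non-sleeping context.
 */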
int clp_scan_pci_devices(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, __clp_add);

        clp_free_block(rrb);
        return rc;
}
int clp_rescan_pci_devices(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_KERNEL);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, __clp_rescan);

        clp_free_block(rrb);
        return rc;
}
int clp_rescan_pci_devices_simple(void)
{
        struct clp_req_rsp_list_pci *rrb;
        int rc;

        rrb = clp_alloc_block(GFP_NOWAIT);
        if (!rrb)
                return -ENOMEM;

        rc = clp_list_pci(rrb, __clp_update);

        clp_free_block(rrb);
        return rc;
}