PR rtl-optimization/57003
[official-gcc.git] / libgo / go / reflect / makefuncgo_amd64.go
blob7118951d1fd98c6624a9bf67cf80bee55b0394cf
1 // Copyright 2013 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // MakeFunc amd64 implementation.
7 package reflect
9 import "unsafe"
// The assembler stub will pass a pointer to this structure.
// This will come in holding all the registers that might hold
// function parameters. On return we will set the registers that
// might hold result values.
type amd64Regs struct {
	rax uint64 // first integer result register
	rdi uint64 // integer argument registers, in ABI order
	rsi uint64
	rdx uint64 // also the second integer result register
	rcx uint64
	r8  uint64
	r9  uint64
	rsp uint64 // stack pointer; base address of memory-passed arguments

	// SSE argument/result registers. Each holds 16 bytes, but only
	// the low 8 bytes are used for the Go types handled here (see
	// amd64SSEregVal).
	xmm0 [2]uint64
	xmm1 [2]uint64
	xmm2 [2]uint64
	xmm3 [2]uint64
	xmm4 [2]uint64
	xmm5 [2]uint64
	xmm6 [2]uint64
	xmm7 [2]uint64
}
// Argument classifications. The amd64 ELF ABI uses several more, but
// these are the only ones that arise for Go types.
type amd64Class int

const (
	amd64Integer amd64Class = iota // passed in a general-purpose register
	amd64SSE                       // passed in an SSE (xmm) register
	amd64NoClass                   // register not used (zero-size value, or unused second half)
	amd64Memory                    // passed in memory (on the stack)
)
// amd64Classify returns the one or two register classes needed to
// pass the value of type. Go types never need more than two
// registers. amd64Memory means the value is stored in memory.
// amd64NoClass means the register is not used.
func amd64Classify(typ *rtype) (amd64Class, amd64Class) {
	switch typ.Kind() {
	default:
		panic("internal error--unknown kind in amd64Classify")

	case Bool, Int, Int8, Int16, Int32, Int64,
		Uint, Uint8, Uint16, Uint32, Uint64,
		Uintptr, Chan, Func, Map, Ptr, UnsafePointer:

		// All scalar integers and pointer-shaped values fit in
		// one integer register.
		return amd64Integer, amd64NoClass

	case Float32, Float64, Complex64:
		// These fit in a single SSE register (a complex64 is
		// two float32's packed into 8 bytes).
		return amd64SSE, amd64NoClass

	case Complex128:
		// Real and imaginary parts go in two SSE registers.
		return amd64SSE, amd64SSE

	case Array:
		if typ.size == 0 {
			return amd64NoClass, amd64NoClass
		} else if typ.size > 16 {
			// Anything larger than two eightbytes goes in memory.
			return amd64Memory, amd64NoClass
		}
		atyp := (*arrayType)(unsafe.Pointer(typ))
		eclass1, eclass2 := amd64Classify(atyp.elem)
		if eclass1 == amd64Memory {
			return amd64Memory, amd64NoClass
		}
		// If the element uses one register but the array spills
		// past 8 bytes, the second eightbyte has the same class.
		if eclass2 == amd64NoClass && typ.size > 8 {
			eclass2 = eclass1
		}
		return eclass1, eclass2

	case Interface:
		// Two words: type/method table pointer and data pointer.
		return amd64Integer, amd64Integer

	case Slice:
		// A slice header is three words (> 16 bytes), so memory.
		return amd64Memory, amd64NoClass

	case String:
		// Two words: data pointer and length.
		return amd64Integer, amd64Integer

	case Struct:
		if typ.size == 0 {
			return amd64NoClass, amd64NoClass
		} else if typ.size > 16 {
			return amd64Memory, amd64NoClass
		}

		// Walk the fields, merging classes per eightbyte.
		// f accumulates the class of the eightbyte currently
		// being filled; onFirst tracks which eightbyte that is.
		var first, second amd64Class
		f := amd64NoClass
		onFirst := true
		styp := (*structType)(unsafe.Pointer(typ))
		for _, field := range styp.fields {
			// A field starting at offset >= 8 closes out the
			// first eightbyte.
			if onFirst && field.offset >= 8 {
				first = f
				f = amd64NoClass
				onFirst = false
			}
			fclass1, fclass2 := amd64Classify(field.typ)
			f = amd64MergeClasses(f, fclass1)
			if fclass2 != amd64NoClass {
				// A two-register field must begin in the first
				// eightbyte; otherwise the struct would exceed
				// 16 bytes, already handled above.
				if !onFirst {
					panic("amd64Classify inconsistent")
				}
				first = f
				f = fclass2
				onFirst = false
			}
		}
		if onFirst {
			first = f
			second = amd64NoClass
		} else {
			second = f
		}
		// If either eightbyte requires memory, the whole struct
		// is passed in memory.
		if first == amd64Memory || second == amd64Memory {
			return amd64Memory, amd64NoClass
		}
		return first, second
	}
}
131 // amd64MergeClasses merges two register classes as described in the
132 // amd64 ELF ABI.
133 func amd64MergeClasses(c1, c2 amd64Class) amd64Class {
134 switch {
135 case c1 == c2:
136 return c1
137 case c1 == amd64NoClass:
138 return c2
139 case c2 == amd64NoClass:
140 return c1
141 case c1 == amd64Memory || c2 == amd64Memory:
142 return amd64Memory
143 case c1 == amd64Integer || c2 == amd64Integer:
144 return amd64Integer
145 default:
146 return amd64SSE
// MakeFuncStubGo implements the amd64 calling convention for
// MakeFunc. This should not be called. It is exported so that
// assembly code can call it.

func MakeFuncStubGo(regs *amd64Regs, c *makeFuncImpl) {
	ftyp := c.typ

	// See if the result requires a struct. If it does, the first
	// parameter is a pointer to the struct.
	var ret1, ret2 amd64Class
	switch len(ftyp.out) {
	case 0:
		ret1, ret2 = amd64NoClass, amd64NoClass
	case 1:
		ret1, ret2 = amd64Classify(ftyp.out[0])
	default:
		// Multiple results: classify them as if they were the
		// fields of a struct, mirroring the eightbyte walk in
		// amd64Classify's Struct case.
		off := uintptr(0)
		f := amd64NoClass
		onFirst := true
		for _, rt := range ftyp.out {
			off = align(off, uintptr(rt.fieldAlign))

			// A result starting at offset >= 8 closes out the
			// first eightbyte.
			if onFirst && off >= 8 {
				ret1 = f
				f = amd64NoClass
				onFirst = false
			}

			off += rt.size
			if off > 16 {
				// Too big for registers; decided below.
				break
			}

			fclass1, fclass2 := amd64Classify(rt)
			f = amd64MergeClasses(f, fclass1)
			if fclass2 != amd64NoClass {
				if !onFirst {
					panic("amd64Classify inconsistent")
				}
				ret1 = f
				f = fclass2
				onFirst = false
			}
		}
		if off > 16 {
			ret1, ret2 = amd64Memory, amd64NoClass
		} else {
			if onFirst {
				ret1, ret2 = f, amd64NoClass
			} else {
				ret2 = f
			}
		}
		if ret1 == amd64Memory || ret2 == amd64Memory {
			ret1, ret2 = amd64Memory, amd64NoClass
		}
	}

	// Gather the incoming arguments from registers and memory.
	in := make([]Value, 0, len(ftyp.in))
	intreg := 0
	ssereg := 0
	ap := uintptr(regs.rsp) // address of memory-passed arguments

	maxIntregs := 6 // When we support Windows, this would be 4.
	maxSSEregs := 8

	if ret1 == amd64Memory {
		// We are returning a value in memory, which means
		// that the first argument is a hidden parameter
		// pointing to that return area.
		intreg++
	}

argloop:
	for _, rt := range ftyp.in {
		c1, c2 := amd64Classify(rt)

		fl := flag(rt.Kind()) << flagKindShift
		if c2 == amd64NoClass {

			// Argument is passed in a single register or
			// in memory.

			switch c1 {
			case amd64NoClass:
				// Zero-size argument; no data to fetch.
				v := Value{rt, nil, fl | flagIndir}
				in = append(in, v)
				continue argloop
			case amd64Integer:
				if intreg < maxIntregs {
					reg := amd64IntregVal(regs, intreg)
					iw := unsafe.Pointer(reg)
					// Non-pointer kinds are stored indirectly:
					// the Value holds a pointer to the word.
					if k := rt.Kind(); k != Ptr && k != UnsafePointer {
						iw = unsafe.Pointer(&reg)
						fl |= flagIndir
					}
					v := Value{rt, iw, fl}
					in = append(in, v)
					intreg++
					continue argloop
				}
			case amd64SSE:
				if ssereg < maxSSEregs {
					reg := amd64SSEregVal(regs, ssereg)
					v := Value{rt, unsafe.Pointer(&reg), fl | flagIndir}
					in = append(in, v)
					ssereg++
					continue argloop
				}
			}

			// Registers exhausted: argument is in memory.
			in, ap = amd64Memarg(in, ap, rt)
			continue argloop
		}

		// Argument is passed in two registers.

		nintregs := 0
		nsseregs := 0
		switch c1 {
		case amd64Integer:
			nintregs++
		case amd64SSE:
			nsseregs++
		default:
			panic("inconsistent")
		}
		switch c2 {
		case amd64Integer:
			nintregs++
		case amd64SSE:
			nsseregs++
		default:
			panic("inconsistent")
		}

		// If the whole argument does not fit in registers, it
		// is passed in memory.

		if intreg+nintregs > maxIntregs || ssereg+nsseregs > maxSSEregs {
			in, ap = amd64Memarg(in, ap, rt)
			continue argloop
		}

		// Fetch each eightbyte from its register class.
		var word1, word2 uintptr
		switch c1 {
		case amd64Integer:
			word1 = amd64IntregVal(regs, intreg)
			intreg++
		case amd64SSE:
			word1 = amd64SSEregVal(regs, ssereg)
			ssereg++
		}
		switch c2 {
		case amd64Integer:
			word2 = amd64IntregVal(regs, intreg)
			intreg++
		case amd64SSE:
			word2 = amd64SSEregVal(regs, ssereg)
			ssereg++
		}

		// Reassemble the two words into a heap copy of the value.
		p := unsafe_New(rt)
		*(*uintptr)(p) = word1
		*(*uintptr)(unsafe.Pointer(uintptr(p) + ptrSize)) = word2
		v := Value{rt, p, fl | flagIndir}
		in = append(in, v)
	}

	// All the real arguments have been found and turned into
	// Value's. Call the real function.

	out := c.call(in)

	if len(out) != len(ftyp.out) {
		panic("reflect: wrong return count from function created by MakeFunc")
	}

	// Validate the returned Values against the declared result types.
	for i, typ := range ftyp.out {
		v := out[i]
		if v.typ != typ {
			panic("reflect: function created by MakeFunc using " + funcName(c.fn) +
				" returned wrong type: have " +
				out[i].typ.String() + " for " + typ.String())
		}
		if v.flag&flagRO != 0 {
			panic("reflect: function created by MakeFunc using " + funcName(c.fn) +
				" returned value obtained from unexported field")
		}
	}

	if ret1 == amd64NoClass {
		// No result registers to set.
		return
	}

	if ret1 == amd64Memory {
		// The address of the memory area was passed as a
		// hidden parameter in %rdi.
		ptr := unsafe.Pointer(uintptr(regs.rdi))
		off := uintptr(0)
		for i, typ := range ftyp.out {
			v := out[i]
			off = align(off, uintptr(typ.fieldAlign))
			addr := unsafe.Pointer(uintptr(ptr) + off)
			// Direct pointer Values store the pointer itself;
			// everything else is copied from the indirect data.
			if v.flag&flagIndir == 0 && (v.kind() == Ptr || v.kind() == UnsafePointer) {
				*(*unsafe.Pointer)(addr) = v.ptr
			} else {
				memmove(addr, v.ptr, typ.size)
			}
			off += typ.size
		}
		return
	}

	if len(out) == 1 && ret2 == amd64NoClass {
		// Single result in a single register.
		v := out[0]
		var w unsafe.Pointer
		if v.Kind() == Ptr || v.Kind() == UnsafePointer {
			w = v.pointer()
		} else {
			w = unsafe.Pointer(loadScalar(v.ptr, v.typ.size))
		}
		switch ret1 {
		case amd64Integer:
			regs.rax = uint64(uintptr(w))
		case amd64SSE:
			regs.xmm0[0] = uint64(uintptr(w))
			regs.xmm0[1] = 0
		default:
			panic("inconsistency")
		}
		return
	}

	// Results fit in two registers: lay them out in a 16-byte
	// buffer as a struct, then load each eightbyte into the
	// register chosen by its class.
	var buf [2]unsafe.Pointer
	ptr := unsafe.Pointer(&buf[0])
	off := uintptr(0)
	for i, typ := range ftyp.out {
		v := out[i]
		off = align(off, uintptr(typ.fieldAlign))
		addr := unsafe.Pointer(uintptr(ptr) + off)
		if v.flag&flagIndir == 0 && (v.kind() == Ptr || v.kind() == UnsafePointer) {
			*(*unsafe.Pointer)(addr) = v.ptr
		} else {
			memmove(addr, v.ptr, typ.size)
		}
		off += uintptr(typ.size)
	}

	switch ret1 {
	case amd64Integer:
		regs.rax = *(*uint64)(unsafe.Pointer(&buf[0]))
	case amd64SSE:
		regs.xmm0[0] = *(*uint64)(unsafe.Pointer(&buf[0]))
		regs.xmm0[1] = 0
	default:
		panic("inconsistency")
	}

	switch ret2 {
	case amd64Integer:
		reg := *(*uint64)(unsafe.Pointer(&buf[1]))
		// The second result takes the first free register of its
		// class: rdx if rax is taken, otherwise rax.
		if ret1 == amd64Integer {
			regs.rdx = reg
		} else {
			regs.rax = reg
		}
	case amd64SSE:
		reg := *(*uint64)(unsafe.Pointer(&buf[1]))
		// Likewise: xmm1 if xmm0 is taken, otherwise xmm0.
		if ret1 == amd64Integer {
			regs.xmm0[0] = reg
			regs.xmm0[1] = 0
		} else {
			regs.xmm1[0] = reg
			regs.xmm1[1] = 0
		}
	case amd64NoClass:
	default:
		panic("inconsistency")
	}
}
// The amd64Memarg function adds an argument passed in memory. ap is
// the address of the next stack argument slot; the updated slice and
// the address past this argument are returned.
func amd64Memarg(in []Value, ap uintptr, rt *rtype) ([]Value, uintptr) {
	// Stack arguments are at least word-aligned, and further
	// aligned to the type's own requirement.
	ap = align(ap, ptrSize)
	ap = align(ap, uintptr(rt.align))

	// We have to copy the argument onto the heap in case the
	// function hangs onto the reflect.Value we pass it.
	p := unsafe_New(rt)
	memmove(p, unsafe.Pointer(ap), rt.size)

	v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir}
	in = append(in, v)
	ap += rt.size
	return in, ap
}
448 // The amd64IntregVal function returns the value of integer register i.
449 func amd64IntregVal(regs *amd64Regs, i int) uintptr {
450 var r uint64
451 switch i {
452 case 0:
453 r = regs.rdi
454 case 1:
455 r = regs.rsi
456 case 2:
457 r = regs.rdx
458 case 3:
459 r = regs.rcx
460 case 4:
461 r = regs.r8
462 case 5:
463 r = regs.r9
464 default:
465 panic("amd64IntregVal: bad index")
467 return uintptr(r)
470 // The amd64SSEregVal function returns the value of SSE register i.
471 // Note that although SSE registers can hold two uinptr's, for the
472 // types we use in Go we only ever use the least significant one. The
473 // most significant one would only be used for 128 bit types.
474 func amd64SSEregVal(regs *amd64Regs, i int) uintptr {
475 var r uint64
476 switch i {
477 case 0:
478 r = regs.xmm0[0]
479 case 1:
480 r = regs.xmm1[0]
481 case 2:
482 r = regs.xmm2[0]
483 case 3:
484 r = regs.xmm3[0]
485 case 4:
486 r = regs.xmm4[0]
487 case 5:
488 r = regs.xmm5[0]
489 case 6:
490 r = regs.xmm6[0]
491 case 7:
492 r = regs.xmm7[0]
494 return uintptr(r)