Reverting merge from trunk
[official-gcc.git] / libgo / go / reflect / makefuncgo_amd64.go
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// MakeFunc amd64 implementation.

package reflect

import "unsafe"

// The assembler stub will pass a pointer to this structure.
// This will come in holding all the registers that might hold
// function parameters. On return we will set the registers that
// might hold result values.
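//
// Under the amd64 ELF (System V) ABI these are the argument registers
// %rdi, %rsi, %rdx, %rcx, %r8, %r9 and %xmm0-%xmm7, and the result
// registers %rax, %rdx, %xmm0 and %xmm1, which is how the helper
// functions below index into this structure.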
type amd64Regs struct {
	rax  uint64
	rdi  uint64
	rsi  uint64
	rdx  uint64
	rcx  uint64
	r8   uint64
	r9   uint64
	rsp  uint64
	xmm0 [2]uint64
	xmm1 [2]uint64
	xmm2 [2]uint64
	xmm3 [2]uint64
	xmm4 [2]uint64
	xmm5 [2]uint64
	xmm6 [2]uint64
	xmm7 [2]uint64
}

// Argument classifications. The amd64 ELF ABI uses several more, but
// these are the only ones that arise for Go types.
type amd64Class int

const (
	amd64Integer amd64Class = iota
	amd64SSE
	amd64NoClass
	amd64Memory
)

// amd64Classify returns the one or two register classes needed to
// pass the value of type. Go types never need more than two
// registers. amd64Memory means the value is stored in memory.
// amd64NoClass means the register is not used.
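//
// For example (illustrative, following the rules below): a string is
// (amd64Integer, amd64Integer), a complex128 is (amd64SSE, amd64SSE),
// a struct{ x, y float64 } is (amd64SSE, amd64SSE), and any struct or
// array larger than 16 bytes is (amd64Memory, amd64NoClass).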
func amd64Classify(typ *rtype) (amd64Class, amd64Class) {
	switch typ.Kind() {
	default:
		panic("internal error--unknown kind in amd64Classify")

	case Bool, Int, Int8, Int16, Int32, Int64,
		Uint, Uint8, Uint16, Uint32, Uint64,
		Uintptr, Chan, Func, Map, Ptr, UnsafePointer:

		return amd64Integer, amd64NoClass

	case Float32, Float64, Complex64:
		return amd64SSE, amd64NoClass

	case Complex128:
		return amd64SSE, amd64SSE

	case Array:
		if typ.size == 0 {
			return amd64NoClass, amd64NoClass
		} else if typ.size > 16 {
			return amd64Memory, amd64NoClass
		}
		atyp := (*arrayType)(unsafe.Pointer(typ))
		eclass1, eclass2 := amd64Classify(atyp.elem)
		if eclass1 == amd64Memory {
			return amd64Memory, amd64NoClass
		}
		if eclass2 == amd64NoClass && typ.size > 8 {
			eclass2 = eclass1
		}
		return eclass1, eclass2

	case Interface:
		return amd64Integer, amd64Integer

	case Slice:
		return amd64Memory, amd64NoClass

	case String:
		return amd64Integer, amd64Integer

	case Struct:
		if typ.size == 0 {
			return amd64NoClass, amd64NoClass
		} else if typ.size > 16 {
			return amd64Memory, amd64NoClass
		}
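		// Classify each eightbyte (8-byte chunk) of the struct by
		// merging the classes of the fields that fall into it, as
		// the ELF ABI does for small aggregates; f accumulates the
		// class of the eightbyte currently being scanned.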
		var first, second amd64Class
		f := amd64NoClass
		onFirst := true
		styp := (*structType)(unsafe.Pointer(typ))
		for _, field := range styp.fields {
			if onFirst && field.offset >= 8 {
				first = f
				f = amd64NoClass
				onFirst = false
			}
			fclass1, fclass2 := amd64Classify(field.typ)
			f = amd64MergeClasses(f, fclass1)
			if fclass2 != amd64NoClass {
				if !onFirst {
					panic("amd64Classify inconsistent")
				}
				first = f
				f = fclass2
				onFirst = false
			}
		}
		if onFirst {
			first = f
			second = amd64NoClass
		} else {
			second = f
		}
		if first == amd64Memory || second == amd64Memory {
			return amd64Memory, amd64NoClass
		}
		return first, second
	}
}

// amd64MergeClasses merges two register classes as described in the
// amd64 ELF ABI.
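//
// For example, merging amd64Integer with amd64SSE yields amd64Integer,
// and merging anything with amd64Memory yields amd64Memory.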
func amd64MergeClasses(c1, c2 amd64Class) amd64Class {
	switch {
	case c1 == c2:
		return c1
	case c1 == amd64NoClass:
		return c2
	case c2 == amd64NoClass:
		return c1
	case c1 == amd64Memory || c2 == amd64Memory:
		return amd64Memory
	case c1 == amd64Integer || c2 == amd64Integer:
		return amd64Integer
	default:
		return amd64SSE
	}
}

// MakeFuncStubGo implements the amd64 calling convention for
// MakeFunc. This should not be called. It is exported so that
// assembly code can call it.
func MakeFuncStubGo(regs *amd64Regs, c *makeFuncImpl) {
	ftyp := c.typ

	// See if the result requires a struct. If it does, the first
	// parameter is a pointer to the struct.
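	// With more than one result, the results are classified as if
	// they were the fields of a single struct, mirroring the Struct
	// case in amd64Classify.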
	var ret1, ret2 amd64Class
	switch len(ftyp.out) {
	case 0:
		ret1, ret2 = amd64NoClass, amd64NoClass
	case 1:
		ret1, ret2 = amd64Classify(ftyp.out[0])
	default:
		off := uintptr(0)
		f := amd64NoClass
		onFirst := true
		for _, rt := range ftyp.out {
			off = align(off, uintptr(rt.fieldAlign))

			if onFirst && off >= 8 {
				ret1 = f
				f = amd64NoClass
				onFirst = false
			}

			off += rt.size
			if off > 16 {
				break
			}

			fclass1, fclass2 := amd64Classify(rt)
			f = amd64MergeClasses(f, fclass1)
			if fclass2 != amd64NoClass {
				if !onFirst {
					panic("amd64Classify inconsistent")
				}
				ret1 = f
				f = fclass2
				onFirst = false
			}
		}
		if off > 16 {
			ret1, ret2 = amd64Memory, amd64NoClass
		} else {
			if onFirst {
				ret1, ret2 = f, amd64NoClass
			} else {
				ret2 = f
			}
		}
		if ret1 == amd64Memory || ret2 == amd64Memory {
			ret1, ret2 = amd64Memory, amd64NoClass
		}
	}

	in := make([]Value, 0, len(ftyp.in))
	intreg := 0
	ssereg := 0
	ap := uintptr(regs.rsp)

	maxIntregs := 6 // When we support Windows, this would be 4.
	maxSSEregs := 8

	if ret1 == amd64Memory {
		// We are returning a value in memory, which means
		// that the first argument is a hidden parameter
		// pointing to that return area.
		intreg++
	}
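
	// Walk the declared parameters, fetching each one from the
	// register or stack slot the ABI assigns to it and wrapping it
	// in a Value.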
argloop:
	for _, rt := range ftyp.in {
		c1, c2 := amd64Classify(rt)

		fl := flag(rt.Kind()) << flagKindShift
		if c2 == amd64NoClass {

			// Argument is passed in a single register or
			// in memory.

			switch c1 {
			case amd64NoClass:
				v := Value{rt, nil, fl | flagIndir}
				in = append(in, v)
				continue argloop
			case amd64Integer:
				if intreg < maxIntregs {
					reg := amd64IntregVal(regs, intreg)
					iw := unsafe.Pointer(reg)
					if k := rt.Kind(); k != Ptr && k != UnsafePointer {
						iw = unsafe.Pointer(&reg)
						fl |= flagIndir
					}
					v := Value{rt, iw, fl}
					in = append(in, v)
					intreg++
					continue argloop
				}
			case amd64SSE:
				if ssereg < maxSSEregs {
					reg := amd64SSEregVal(regs, ssereg)
					v := Value{rt, unsafe.Pointer(&reg), fl | flagIndir}
					in = append(in, v)
					ssereg++
					continue argloop
				}
			}

			in, ap = amd64Memarg(in, ap, rt)
			continue argloop
		}

		// Argument is passed in two registers.

		nintregs := 0
		nsseregs := 0
		switch c1 {
		case amd64Integer:
			nintregs++
		case amd64SSE:
			nsseregs++
		default:
			panic("inconsistent")
		}
		switch c2 {
		case amd64Integer:
			nintregs++
		case amd64SSE:
			nsseregs++
		default:
			panic("inconsistent")
		}

		// If the whole argument does not fit in registers, it
		// is passed in memory.

		if intreg+nintregs > maxIntregs || ssereg+nsseregs > maxSSEregs {
			in, ap = amd64Memarg(in, ap, rt)
			continue argloop
		}
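
		// The argument fits in registers: pull one word from each
		// assigned register and store them as the two halves of a
		// freshly allocated copy of the value.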
		var word1, word2 uintptr
		switch c1 {
		case amd64Integer:
			word1 = amd64IntregVal(regs, intreg)
			intreg++
		case amd64SSE:
			word1 = amd64SSEregVal(regs, ssereg)
			ssereg++
		}
		switch c2 {
		case amd64Integer:
			word2 = amd64IntregVal(regs, intreg)
			intreg++
		case amd64SSE:
			word2 = amd64SSEregVal(regs, ssereg)
			ssereg++
		}

		p := unsafe_New(rt)
		*(*uintptr)(p) = word1
		*(*uintptr)(unsafe.Pointer(uintptr(p) + ptrSize)) = word2
		v := Value{rt, p, fl | flagIndir}
		in = append(in, v)
	}

	// All the real arguments have been found and turned into
	// Value's. Call the real function.

	out := c.fn(in)

	if len(out) != len(ftyp.out) {
		panic("reflect: wrong return count from function created by MakeFunc")
	}

	for i, typ := range ftyp.out {
		v := out[i]
		if v.typ != typ {
			panic("reflect: function created by MakeFunc using " + funcName(c.fn) +
				" returned wrong type: have " +
				out[i].typ.String() + " for " + typ.String())
		}
		if v.flag&flagRO != 0 {
			panic("reflect: function created by MakeFunc using " + funcName(c.fn) +
				" returned value obtained from unexported field")
		}
	}

	if ret1 == amd64NoClass {
		return
	}

	if ret1 == amd64Memory {
		// The address of the memory area was passed as a
		// hidden parameter in %rdi.
		ptr := unsafe.Pointer(uintptr(regs.rdi))
		off := uintptr(0)
		for i, typ := range ftyp.out {
			v := out[i]
			off = align(off, uintptr(typ.fieldAlign))
			addr := unsafe.Pointer(uintptr(ptr) + off)
			if v.flag&flagIndir == 0 && (v.kind() == Ptr || v.kind() == UnsafePointer) {
				storeIword(addr, iword(v.val), typ.size)
			} else {
				memmove(addr, v.val, typ.size)
			}
			off += typ.size
		}
		return
	}
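
	// A single result that needs only one register goes directly
	// into %rax or %xmm0.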
	if len(out) == 1 && ret2 == amd64NoClass {
		v := out[0]
		w := v.iword()
		if v.Kind() != Ptr && v.Kind() != UnsafePointer {
			w = loadIword(unsafe.Pointer(w), v.typ.size)
		}
		switch ret1 {
		case amd64Integer:
			regs.rax = uint64(uintptr(w))
		case amd64SSE:
			regs.xmm0[0] = uint64(uintptr(w))
			regs.xmm0[1] = 0
		default:
			panic("inconsistency")
		}
		return
	}
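
	// Otherwise two result registers are needed. Lay the results
	// out in a two-word buffer the way the ABI would lay out a
	// small struct, then load each word into its result register.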
	var buf [2]unsafe.Pointer
	ptr := unsafe.Pointer(&buf[0])
	off := uintptr(0)
	for i, typ := range ftyp.out {
		v := out[i]
		off = align(off, uintptr(typ.fieldAlign))
		addr := unsafe.Pointer(uintptr(ptr) + off)
		if v.flag&flagIndir == 0 && (v.kind() == Ptr || v.kind() == UnsafePointer) {
			storeIword(addr, iword(v.val), typ.size)
		} else {
			memmove(addr, v.val, typ.size)
		}
		off += uintptr(typ.size)
	}

	switch ret1 {
	case amd64Integer:
		regs.rax = *(*uint64)(unsafe.Pointer(&buf[0]))
	case amd64SSE:
		regs.xmm0[0] = *(*uint64)(unsafe.Pointer(&buf[0]))
		regs.xmm0[1] = 0
	default:
		panic("inconsistency")
	}

	switch ret2 {
	case amd64Integer:
		reg := *(*uint64)(unsafe.Pointer(&buf[1]))
		if ret1 == amd64Integer {
			regs.rdx = reg
		} else {
			regs.rax = reg
		}
	case amd64SSE:
		reg := *(*uint64)(unsafe.Pointer(&buf[1]))
		if ret1 == amd64Integer {
			regs.xmm0[0] = reg
			regs.xmm0[1] = 0
		} else {
			regs.xmm1[0] = reg
			regs.xmm1[1] = 0
		}
	case amd64NoClass:
	default:
		panic("inconsistency")
	}
}

// The amd64Memarg function adds an argument passed in memory.
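// Stack arguments are read from successive aligned slots starting at
// the stack address the assembler stub recorded in regs.rsp.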
func amd64Memarg(in []Value, ap uintptr, rt *rtype) ([]Value, uintptr) {
	ap = align(ap, ptrSize)
	ap = align(ap, uintptr(rt.align))

	// We have to copy the argument onto the heap in case the
	// function hangs onto the reflect.Value we pass it.
	p := unsafe_New(rt)
	memmove(p, unsafe.Pointer(ap), rt.size)

	v := Value{rt, p, flag(rt.Kind()<<flagKindShift) | flagIndir}
	in = append(in, v)
	ap += rt.size
	return in, ap
}

// The amd64IntregVal function returns the value of integer register i.
func amd64IntregVal(regs *amd64Regs, i int) uintptr {
	var r uint64
	switch i {
	case 0:
		r = regs.rdi
	case 1:
		r = regs.rsi
	case 2:
		r = regs.rdx
	case 3:
		r = regs.rcx
	case 4:
		r = regs.r8
	case 5:
		r = regs.r9
	default:
		panic("amd64IntregVal: bad index")
	}
	return uintptr(r)
}

// The amd64SSEregVal function returns the value of SSE register i.
// Note that although SSE registers can hold two uintptrs, for the
// types we use in Go we only ever use the least significant one. The
// most significant one would only be used for 128 bit types.
func amd64SSEregVal(regs *amd64Regs, i int) uintptr {
	var r uint64
	switch i {
	case 0:
		r = regs.xmm0[0]
	case 1:
		r = regs.xmm1[0]
	case 2:
		r = regs.xmm2[0]
	case 3:
		r = regs.xmm3[0]
	case 4:
		r = regs.xmm4[0]
	case 5:
		r = regs.xmm5[0]
	case 6:
		r = regs.xmm6[0]
	case 7:
		r = regs.xmm7[0]
	}
	return uintptr(r)
}