2 * The memory management and garbage collection module
4 * ---------------------------------------------------------------
5 * Little Smalltalk, Version 5
7 * Copyright (C) 1987-2005 by Timothy A. Budd
8 * Copyright (C) 2007 by Charles R. Childers
9 * Copyright (C) 2005-2007 by Danny Reinhold
10 * Copyright (C) 2010 by Ketmar // Vampire Avalon
12 * ============================================================================
13 * This license applies to the virtual machine and to the initial image of
14 * the Little Smalltalk system and to all files in the Little Smalltalk
15 * packages except the files explicitly licensed with another license(s).
16 * ============================================================================
17 * Permission is hereby granted, free of charge, to any person obtaining a copy
18 * of this software and associated documentation files (the "Software"), to deal
19 * in the Software without restriction, including without limitation the rights
20 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
21 * copies of the Software, and to permit persons to whom the Software is
22 * furnished to do so, subject to the following conditions:
24 * The above copyright notice and this permission notice shall be included in
25 * all copies or substantial portions of the Software.
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
30 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
31 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
32 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
33 * DEALINGS IN THE SOFTWARE.
35 /* Uses baker two-space garbage collection algorithm */
36 #include "lst_dpname.c"
/* WARNING! LST_MEM_ALIGN MUST BE POWER OF 2! */
#define LST_MEM_ALIGN 2
#if LST_MEM_ALIGN != 1
/* round an address up to the next LST_MEM_ALIGN boundary
 * (fix: restore the #else/#endif lost in extraction; parenthesize the
 * macro argument and keep the mask in pointer-width arithmetic) */
# define lstAlignAddr(n) ((void *)(((uintptr_t)(n)+(LST_MEM_ALIGN-1))&(~(uintptr_t)(LST_MEM_ALIGN-1))))
#else
/* alignment of 1: every address is already aligned */
# define lstAlignAddr(n) ((void *)(n))
#endif
/* NOTE(review): the extractor split each statement over several lines and kept
 * the original line numbers as stray text; code below is left byte-identical. */
47 static const char *imgSign
= LST_IMAGE_SIGNATURE
;
/* number of garbage collections performed so far */
50 unsigned int lstGCCount
= 0;
/* NOTE(review): the next four pointers look like the members of the
 * LstMemSpace struct -- the 'typedef struct {' / '} LstMemSpace;' wrapper
 * lines appear to have been lost. TODO confirm against the original file. */
53 unsigned char *realStart
; /* can be unaligned */
54 unsigned char *start
; /* starting address */
55 unsigned char *end
; /* first byte after the end */
56 unsigned char *cur
; /* current free */
59 #define STATIC_SPACE 2
60 /* 0 and 1: dynamic; 2: static */
61 static LstMemSpace memSpaces
[3];
/* the dynamic semi-space allocations currently go to */
62 static LstMemSpace
*curSpace
;
63 static int curSpaceNo
; /* to avoid checks */
65 static LstFinLink
*finListHead
= NULL
; /* list of objects with finalizers */
66 static LstFinLink
*newFinListHead
= NULL
; /* list of alive objects with finalizers */
68 static LstFinLink
*stFinListHead
= NULL
; /* st-finalizable objects */
69 static LstFinLink
*stNewFinListHead
= NULL
; /* st-finalizable objects, alive */
/* when non-zero, entering the GC is a fatal error (checked in the GC body) */
70 static int lstGCSpecialLock
= 0;
72 * what we will do with 'st-finalized' is this:
73 * a) include them in list (to faster scanning);
74 * b) after GC cycle, create new process group for each
75 * object that must be finalized (if it has 'finalize' method);
77 * so interpreter will execute all finalizers in the normal sheduling process
78 * (and 'finalizable' object will be kept alive either until it's process
79 * group is alive, or if it anchors itself in some way)
83 * roots for memory access
84 * used as bases for garbage collection algorithm
86 lstObject
*lstRootStack
[LST_ROOTSTACK_LIMIT
];
87 LstUInt lstRootTop
= 0;
89 /* 4096 should be more than enough to recompile the whole image */
90 #define STATICROOTLIMIT 4096
91 static lstObject
**staticRoots
[STATICROOTLIMIT
];
92 static LstUInt staticRootTop
= 0;
95 lstObject
**lstTempStack
[LST_TS_LIMIT
];
98 /* test to see if a pointer is in dynamic memory area or not */
99 #define LST_IS_STATIC(x) \
100 ((const unsigned char *)(x) >= memSpaces[STATIC_SPACE].start && \
101 (const unsigned char *)(x) < memSpaces[STATIC_SPACE].end)
104 static void allocMemSpace (LstMemSpace
*spc
, int size
) {
106 unsigned char *n
= realloc(spc
->start
, size
);
107 if (!n
) free(spc
->start
);
108 spc
->start
= n
? n
: malloc(size
);
110 spc
->start
= malloc(size
);
112 spc
->realStart
= spc
->start
;
113 spc
->start
= lstAlignAddr(spc
->start
);
114 spc
->end
= spc
->start
+size
;
115 spc
->cur
= spc
->start
;
119 /* initialize the memory management system */
120 void lstMemoryInit (int staticsz
, int dynamicsz
) {
121 /* allocate the memory areas */
122 memset(memSpaces
, sizeof(memSpaces
), 0);
123 allocMemSpace(&memSpaces
[0], dynamicsz
);
124 allocMemSpace(&memSpaces
[1], dynamicsz
);
125 allocMemSpace(&memSpaces
[2], staticsz
);
126 if (!memSpaces
[0].start
|| !memSpaces
[1].start
|| !memSpaces
[2].start
) lstFatal("not enough memory for space allocations\n", 0);
130 lstExecUserBreak
= 0;
133 curSpace
= &memSpaces
[curSpaceNo
];
134 finListHead
= newFinListHead
= stFinListHead
= NULL
;
137 lstGCSpecialLock
= 0;
138 /* allocate 'main' process group; this group is for sheduler, for example */
139 /* note that this group should be ALWAYS alive */
140 runGroups
= curGroup
= calloc(1, sizeof(LstRunGroup
));
141 lstFlushMethodCache();
145 void lstMemoryDeinit (void) {
/* NOTE(review): tears down what lstMemoryInit() built: runs pending C
 * finalizers, frees finalizer links, event handlers, run groups/contexts,
 * then the three memory spaces. Several source lines (loop advances and
 * closers) were lost in extraction; code is left byte-identical. */
146 /*fprintf(stderr, "staticRootTop: %d\n", staticRootTop);*/
147 /* finalize all unfinalized objects */
148 while (finListHead
) {
149 LstFinLink
*n
= finListHead
->next
;
/* invoke the registered C finalizer, if any */
150 if (finListHead
->fin
) finListHead
->fin(finListHead
->obj
, finListHead
->udata
);
154 /* free all st-finalizable objects */
155 while (stFinListHead
) {
156 LstFinLink
*n
= stFinListHead
->next
;
160 /* free groups and other structures */
162 LstEventHandler
*n
= ehList
->next
;
167 LstRunGroup
*n
= runGroups
->next
;
168 LstRunContext
*ctx
= runGroups
->group
;
170 LstRunContext
*p
= ctx
->prev
;
178 LstRunContext
*p
= rsFree
->prev
;
/* finally release the raw buffers of the three memory spaces */
185 free(memSpaces
[2].realStart
);
186 free(memSpaces
[1].realStart
);
187 free(memSpaces
[0].realStart
);
191 static inline void lstRemoveFromFList (LstFinLink
**head
, LstFinLink
*item
) {
192 if (item
->prev
) item
->prev
->next
= item
->next
; else *head
= item
->next
;
193 if (item
->next
) item
->next
->prev
= item
->prev
;
197 static inline void lstAddToFList (LstFinLink
**head
, LstFinLink
*item
) {
199 if ((item
->next
= *head
)) item
->next
->prev
= item
;
206 * this is a classic Cheney two-finger collector
209 /* curSpace and curSpaceNo should be set to the new space */
210 /* copy object to the new space, return new address */
/* NOTE(review): lines lost in extraction: the local declarations
 * (res/next/size), the code marking 'o' as moved and storing the forwarding
 * pointer in its class slot, the 'next += size' advance, and the final
 * 'return res;'. Code below is left byte-identical. */
211 static lstObject
*gcMoveObject (lstObject
*o
) {
/* nil and SmallInts are not heap cells */
215 if (!o
|| LST_IS_SMALLINT(o
)) return o
; /* use as-is */
/* the forwarding pointer lives in the class slot once moved */
216 if (LST_IS_MOVED(o
)) return o
->stclass
; /* already relocated */
217 if (LST_IS_STATIC(o
)) return o
; /* static object, use as-is */
219 /* move to 'alive with finalizers' list */
220 if (LST_IS_STFIN(o
)) {
222 lstRemoveFromFList(&stFinListHead
, o
->fin
);
223 lstAddToFList(&stNewFinListHead
, o
->fin
);
225 lstRemoveFromFList(&finListHead
, o
->fin
);
226 lstAddToFList(&newFinListHead
, o
->fin
);
229 /* copy object to another space and setup redirection pointer */
231 res
= (lstObject
*)curSpace
->cur
;
232 memcpy(res
, o
, sizeof(lstObject
));
233 if (o
->fin
) o
->fin
->obj
= res
; /* fix finowner */
234 /* setup redirection pointer */
237 next
= curSpace
->cur
+sizeof(lstObject
);
238 if (!LST_IS_BYTES(o
)) size
*= LST_BYTES_PER_WORD
; else ++size
; /* byte objects are always has 0 as the last item */
239 if (size
> 0) memcpy(&res
->data
, &o
->data
, size
);
241 curSpace
->cur
= lstAlignAddr(next
);
246 #define GC_KEEP_METHOD_CACHE
248 /* garbage collection entry point */
254 if (curSpace
->cur
== curSpace
->start
) return; /* nothing to do */
255 if (lstGCSpecialLock
) lstFatal("out of memory for finalizer groups", lstGCSpecialLock
);
257 newFinListHead
= NULL
; stNewFinListHead
= NULL
;
258 /* first change spaces */
259 unsigned char *scanPtr
;
261 curSpace
= &memSpaces
[curSpaceNo
];
262 curSpace
->cur
= curSpace
->start
;
264 for (f
= 0; f
< lstRootTop
; ++f
) lstRootStack
[f
] = gcMoveObject(lstRootStack
[f
]);
265 for (f
= 0; f
< staticRootTop
; ++f
) (*staticRoots
[f
]) = gcMoveObject(*staticRoots
[f
]);
266 for (f
= 0; f
< lstTempSP
; ++f
) (*lstTempStack
[f
]) = gcMoveObject(*lstTempStack
[f
]);
267 /* the following are mostly static, but who knows... */
268 for (f
= 0; clInfo
[f
].name
; ++f
) (*(clInfo
[f
].eptr
)) = gcMoveObject(*(clInfo
[f
].eptr
));
269 for (f
= 0; epInfo
[f
].name
; ++f
) (*(epInfo
[f
].eptr
)) = gcMoveObject(*(epInfo
[f
].eptr
));
270 lstNilObj
= gcMoveObject(lstNilObj
);
271 lstTrueObj
= gcMoveObject(lstTrueObj
);
272 lstFalseObj
= gcMoveObject(lstFalseObj
);
273 lstGlobalObj
= gcMoveObject(lstGlobalObj
);
274 lstBadMethodSym
= gcMoveObject(lstBadMethodSym
);
275 for (f
= 0; f
< LST_MAX_BIN_MSG
; ++f
) lstBinMsgs
[f
] = gcMoveObject(lstBinMsgs
[f
]);
276 /* mark process groups */
279 for (grp
= runGroups
; grp
; grp
= grp
->next
) {
281 for (ctx
= grp
->group
; ctx
; ctx
= ctx
->prev
) ctx
->process
= gcMoveObject(ctx
->process
);
284 #ifdef GC_KEEP_METHOD_CACHE
285 /* fix method cache; this have some sense, as many method calls are stdlib calls */
286 /* when we'll do generational GC, this will have even better impact on cache hits */
287 /* note that this is completely safe: SendMessage will do the necessary checks */
288 for (f
= 0; f
< MTD_CACHE_SIZE
+MTD_CACHE_EXTRA
; ++f
) {
291 cache
[f
].name
= gcMoveObject(cache
[f
].name
);
292 cache
[f
].stclass
= gcMoveObject(cache
[f
].stclass
);
293 cache
[f
].method
= gcMoveObject(cache
[f
].method
);
294 if (cache
[f
].analyzed
== 1 && cache
[f
].ivarNum
< 0) cache
[f
].mConst
= gcMoveObject(cache
[f
].mConst
);
295 cache
[f
].badHits
= MTD_BAD_HIT_MAX
-2;
299 #ifdef INLINE_SOME_METHODS
300 lstMetaCharClass
= gcMoveObject(lstMetaCharClass
);
303 for (f
= 0; lstInlineMethodList
[f
].name
; ++f
) {
304 (*lstInlineMethodList
[f
].method
) = gcMoveObject((*lstInlineMethodList
[f
].method
));
308 /* now walk thru the objects, fix pointers, move other objects, etc.
309 * note that curSpace->cur will grow in the process */
310 scanPtr
= curSpace
->start
;
312 while (scanPtr
< curSpace
->cur
) {
313 lstObject
*o
= (lstObject
*)scanPtr
;
314 scanPtr
+= sizeof(lstObject
);
316 o
->stclass
= gcMoveObject(o
->stclass
);
317 int size
= LST_SIZE(o
);
318 if (LST_IS_BYTES(o
)) {
319 /* nothing to scan here */
320 scanPtr
+= size
+1; /* skip zero byte too */
322 /* process object data */
323 scanPtr
+= size
*LST_BYTES_PER_WORD
;
324 while (--size
>= 0) o
->data
[size
] = gcMoveObject(o
->data
[size
]);
326 scanPtr
= lstAlignAddr(scanPtr
);
331 /* and ST finalizers, if any */
333 dprintf("FOUND SOME ST-FINALIZERS!\n");
334 while (stFinListHead
) {
335 lstObject
*o
= stFinListHead
->obj
;
336 assert(o
->fin
== stFinListHead
);
337 LstFinLink
*n
= stFinListHead
->next
;
340 /* now remove the flag and create new process group */
342 o
->fin
= NULL
; /* it is already freed */
344 dprintf("FINOBJ: %p\n", o
);
345 lstCreateFinalizePGroup(o
);
347 /* scan new space to save ST-F anchors; no need to rescan process groups though */
350 stFinListHead
= stNewFinListHead
;
351 stNewFinListHead
= NULL
;
352 /* here we can process C finalizers, if any */
353 while (finListHead
) {
354 LstFinLink
*n
= finListHead
->next
;
355 if (finListHead
->fin
) finListHead
->fin(finListHead
->obj
, finListHead
->udata
);
359 finListHead
= newFinListHead
; /* 'alive' list becomes the current one */
360 /* now check if we have some new groups with finalizers */
364 for (grp
= runGroups
; grp
; grp
= grp
->next
) {
365 if (!grp
->group
|| grp
->group
->lockCount
!= -666) continue;
366 /* and here we have a newly created group; convert it to the real one */
367 LstRunContext
*ctx
= grp
->group
;
368 lstObject
*obj
= ctx
->process
;
369 dprintf("FOUND some finalizing process group (%p) obj(%p)\n", grp
, obj
);
370 lstObject
*process
, *context
;
371 lstObject
*method
= lstFindMethod(obj
->stclass
, "finalize");
372 if (!method
) lstFatal("no 'finalize' method found for object marked for finalization", 0x29a);
373 process
= lstAllocInstance(lstProcessSize
, lstProcessClass
);
374 context
= lstAllocInstance(lstContextSize
, lstContextClass
);
375 process
->data
[lstIVcontextInProcess
] = context
;
376 process
->data
[lstIVrunningInProcess
] = lstTrueObj
;
378 context
->data
[lstIVmethodInContext
] = method
;
379 /* build arguments array */
380 lstObject
*args
= lstAllocInstance(1, lstArrayClass
);
381 args
->data
[0] = obj
; /* self */
382 context
->data
[lstIVargumentsInContext
] = args
;
383 context
->data
[lstIVtemporariesInContext
] = lstAllocInstance(lstIntValue(method
->data
[lstIVtemporarySizeInMethod
]), lstArrayClass
);
384 context
->data
[lstIVstackInContext
] = lstAllocInstance(lstIntValue(method
->data
[lstIVstackSizeInMethod
]), lstArrayClass
);
385 context
->data
[lstIVbytePointerInContext
] = lstNewInt(0);
386 context
->data
[lstIVstackTopInContext
] = lstNewInt(0);
387 context
->data
[lstIVpreviousContextInContext
] = lstNilObj
;
388 context
->data
[lstIVprocOwnerInContext
] = process
;
389 ctx
->process
= process
;
391 ctx
->ticksLeft
= 10000;
393 /*lstDebugFlag = 1;*/
398 /* invalidate method cache */
399 #ifndef GC_KEEP_METHOD_CACHE
400 lstFlushMethodCache();
402 //if (lstMemoryPointer < lstMemoryBase) lstFatal("insufficient memory after garbage collection", sz);
405 dprintf("GC: %d objects alive; %u bytes used\n", saved, (uintptr_t)curSpace->cur-(uintptr_t)curSpace->start);
/* method-call statistics buffer, filled by lstProcessSpace() and printed
 * by lstShowCalledMethods() */
417 static LstCallInfo
*cinfo
= NULL
;
418 static int cinfoUsed
= 0;
/* NOTE(review): scans one memory space, recording name/class/invoke-count
 * of every Method object into 'cinfo'. Lines lost in extraction: the
 * invoke-count test, '++cinfoUsed', branch closers and the non-byte-object
 * else path. Code below is left byte-identical. */
420 static void lstProcessSpace (int num
) {
421 LstMemSpace
*curSpace
= &memSpaces
[num
];
422 unsigned char *scanPtr
= curSpace
->start
;
423 while (scanPtr
< curSpace
->cur
) {
424 lstObject
*o
= (lstObject
*)scanPtr
;
425 if (LST_CLASS(o
) == lstMethodClass
) {
426 lstObject
*op
= o
->data
[lstIVinvokeCountInMethod
];
427 int cc
= lstIntValue(op
);
/* grow the table by one entry (realloc of the whole array each time) */
429 cinfo
= realloc(cinfo
, sizeof(LstCallInfo
)*(cinfoUsed
+1));
430 lstGetString(cinfo
[cinfoUsed
].mtname
, sizeof(cinfo
[cinfoUsed
].mtname
), o
->data
[lstIVnameInMethod
]);
431 op
= o
->data
[lstIVclassInMethod
];
432 lstGetString(cinfo
[cinfoUsed
].clname
, sizeof(cinfo
[cinfoUsed
].clname
), op
->data
[lstIVnameInClass
]);
433 cinfo
[cinfoUsed
].callCount
= cc
;
/* step over this object's header and body */
437 scanPtr
+= sizeof(lstObject
);
438 int size
= LST_SIZE(o
);
439 if (LST_IS_BYTES(o
)) {
442 scanPtr
+= size
*LST_BYTES_PER_WORD
;
444 scanPtr
= lstAlignAddr(scanPtr
);
/* NOTE(review): dumps "[Class>>method]: callCount" for every method, sorted
 * by descending call count; 'xcmp' is a GCC nested function used as the
 * qsort comparator. Loop/function closers were lost in extraction; code is
 * left byte-identical. */
449 void lstShowCalledMethods (void) {
451 int xcmp (const void *i0
, const void *i1
) {
452 const LstCallInfo
*l0
= (const LstCallInfo
*)i0
;
453 const LstCallInfo
*l1
= (const LstCallInfo
*)i1
;
/* descending order: bigger call counts first */
454 return l1
->callCount
-l0
->callCount
;
456 lstGC(); /* compact objects */
457 lstProcessSpace(2); /* static */
458 lstProcessSpace(curSpaceNo
); /* dynamic */
460 qsort(cinfo
, cinfoUsed
, sizeof(LstCallInfo
), xcmp
);
461 for (f
= 0; f
< cinfoUsed
; ++f
) {
462 fprintf(stderr
, "[%s>>%s]: %d\n", cinfo
[f
].clname
, cinfo
[f
].mtname
, cinfo
[f
].callCount
);
/* NOTE(review): bump-allocates an object of size 'sz' from the static space
 * into 'res'; presumably the line advancing 'next' by the caller's realSz
 * (original line 474) was lost in extraction -- TODO confirm. Macro body is
 * left byte-identical. */
471 #define STATIC_ALLOC \
472 lstObject *res = (lstObject *)memSpaces[STATIC_SPACE].cur; \
473 unsigned char *next = memSpaces[STATIC_SPACE].cur; \
475 next = lstAlignAddr(next); \
476 if (next > memSpaces[STATIC_SPACE].end) lstFatal("insufficient static memory", sz); \
477 memSpaces[STATIC_SPACE].cur = next; \
478 LST_SETSIZE(res, sz); \
483 * static allocation -- tries to allocate values in an area
484 * that will not be subject to garbage collection
/* NOTE(review): allocates a byte object of 'sz' data bytes (+header +NUL) in
 * the static space; the STATIC_ALLOC invocation that defines 'res' was lost
 * in extraction. Code below is left byte-identical. */
486 lstByteObject
*lstStaticAllocBin (int sz
) {
487 int realSz
= sz
+sizeof(lstObject
)+1;
490 lstBytePtr(res
)[sz
] = '\0';
491 return (lstByteObject
*)res
;
/* NOTE(review): allocates a pointer object with 'sz' word-sized slots in the
 * static space; the STATIC_ALLOC invocation and the return statement were
 * lost in extraction. Code below is left byte-identical. */
495 lstObject
*lstStaticAlloc (int sz
) {
496 int realSz
= sz
*LST_BYTES_PER_WORD
+sizeof(lstObject
);
/* NOTE(review): bump-allocates from the current dynamic semi-space; on
 * overflow it retries once (the lost lines between the two attempts
 * presumably ran the garbage collector -- TODO confirm), then dies with
 * lstFatal. Macro body is left byte-identical. */
502 #define DYNAMIC_ALLOC \
503 lstObject *res = (lstObject *)curSpace->cur; \
504 unsigned char *next = curSpace->cur; \
506 next = lstAlignAddr(next); \
507 if (next > curSpace->end) { \
509 res = (lstObject *)curSpace->cur; \
510 next = curSpace->cur; \
512 next = lstAlignAddr(next); \
513 if (next > curSpace->end) lstFatal("insufficient memory", sz); \
515 curSpace->cur = next; \
516 LST_SETSIZE(res, sz); \
/* NOTE(review): allocates a byte object of 'sz' data bytes (+header +NUL
 * terminator) from dynamic memory; the DYNAMIC_ALLOC invocation that defines
 * 'res' was lost in extraction. Code below is left byte-identical. */
521 lstByteObject
*lstMemAllocBin (int sz
) {
522 int realSz
= sz
+sizeof(lstObject
)+1;
525 lstBytePtr(res
)[sz
] = '\0';
526 return (lstByteObject
*)res
;
/* NOTE(review): allocates a pointer object with 'sz' word-sized slots from
 * dynamic memory; the DYNAMIC_ALLOC invocation and the return statement were
 * lost in extraction. Code below is left byte-identical. */
530 lstObject
*lstMemAlloc (int sz
) {
531 int realSz
= sz
*LST_BYTES_PER_WORD
+sizeof(lstObject
);
537 #include "lst_imagerw.c"
541 * Add another object root off a static object
543 * Static objects, in general, do not get garbage collected.
544 * When a static object is discovered adding a reference to a
545 * non-static object, we link on the reference to our staticRoot
546 * table so we can give it proper treatment during garbage collection.
548 void lstAddStaticRoot (lstObject
**objp
) {
550 for (f
= 0; f
< staticRootTop
; ++f
) {
551 if (objp
== staticRoots
[f
]) return;
552 if (rfree
< 0 && !staticRoots
[f
]) rfree
= f
;
555 if (staticRootTop
>= STATICROOTLIMIT
) lstFatal("lstAddStaticRoot: too many static references", (intptr_t)objp
);
556 rfree
= staticRootTop
++;
558 staticRoots
[rfree
] = objp
;
562 void lstWriteBarrier (lstObject
**dest
, lstObject
*src
) {
563 if (LST_IS_STATIC(dest
) && !LST_IS_SMALLINT(src
) && !LST_IS_STATIC(src
)) {
565 for (f
= 0; f
< staticRootTop
; ++f
) {
566 if (staticRoots
[f
] == dest
) goto doit
; /* let TEH GOTO be here! */
567 if (rfree
< 0 && !staticRoots
[f
]) rfree
= f
;
570 if (staticRootTop
>= STATICROOTLIMIT
) lstFatal("lstWriteBarrier: too many static references", (intptr_t)dest
);
571 rfree
= staticRootTop
++;
573 staticRoots
[rfree
] = dest
;
580 /* fix an OOP if needed, based on values to be exchanged */
581 static void map (lstObject
**oop
, lstObject
*a1
, lstObject
*a2
, int size
) {
583 lstObject
*oo
= *oop
;
584 for (x
= 0; x
< size
; ++x
) {
585 if (a1
->data
[x
] == oo
) {
589 if (a2
->data
[x
] == oo
) {
597 /* traverse an object space */
598 static void walk (lstObject
*base
, lstObject
*top
, lstObject
*array1
, lstObject
*array2
, LstUInt size
) {
599 lstObject
*op
, *opnext
;
601 for (op
= base
; op
< top
; op
= opnext
) {
602 /* re-map the class pointer, in case that's the object which has been remapped */
603 map(&op
->stclass
, array1
, array2
, size
);
604 /* skip our argument arrays, since otherwise things get rather circular */
606 if (op
== array1
|| op
== array2
) {
607 unsigned char *t
= (unsigned char *)op
;
608 t
+= sz
*LST_BYTES_PER_WORD
+sizeof(lstObject
);
609 opnext
= lstAlignAddr(t
);
612 /* don't have to worry about instance variables if it's a binary format */
613 if (LST_IS_BYTES(op
)) {
614 LstUInt realSize
= sz
+sizeof(lstObject
)+1;
615 unsigned char *t
= (unsigned char *)op
;
617 opnext
= lstAlignAddr(t
);
620 /* for each instance variable slot, fix up the pointer if needed */
621 for (x
= 0; x
< sz
; ++x
) map(&op
->data
[x
], array1
, array2
, size
);
622 /* walk past this object */
624 LstUInt realSize
= sz
*LST_BYTES_PER_WORD
+sizeof(lstObject
);
625 unsigned char *t
= (unsigned char *)op
;
627 opnext
= lstAlignAddr(t
);
634 * Bulk exchange of object identities
636 * For each index to array1/array2, all references in current object
637 * memory are modified so that references to the object in array1[]
638 * become references to the corresponding object in array2[]. References
639 * to the object in array2[] similarly become references to the
640 * object in array1[].
642 void lstSwapObjects (lstObject
*array1
, lstObject
*array2
, LstUInt size
) {
644 /* Convert our memory spaces */
645 walk((lstObject
*)curSpace
->start
, (lstObject
*)curSpace
->cur
, array1
, array2
, size
);
646 walk((lstObject
*)memSpaces
[STATIC_SPACE
].start
, (lstObject
*)memSpaces
[STATIC_SPACE
].cur
, array1
, array2
, size
);
647 /* Fix up the root pointers, too */
648 for (x
= 0; x
< lstRootTop
; x
++) map(&lstRootStack
[x
], array1
, array2
, size
);
649 for (x
= 0; x
< staticRootTop
; x
++) map(staticRoots
[x
], array1
, array2
, size
);
654 * Implement replaceFrom:to:with:startingAt: as a primitive
656 * Return 1 if we couldn't do it, 0 on success.
657 * This routine has distinct code paths for plain old byte type arrays,
658 * and for arrays of lstObject pointers; the latter must handle the
659 * special case of static pointers. It looks hairy (and it is), but
660 * it's still much faster than executing the block move in Smalltalk
663 int lstBulkReplace (lstObject
*dest
, lstObject
*aFrom
, lstObject
*aTo
, lstObject
*aWith
, lstObject
*startAt
) {
664 LstUInt irepStart
, istart
, istop
, count
;
665 /* we only handle simple 31-bit integer indices; map the values onto 0-based C array type values */
666 if (!LST_IS_SMALLINT(startAt
) || !LST_IS_SMALLINT(aFrom
) || !LST_IS_SMALLINT(aTo
)) return 1;
667 if (LST_IS_SMALLINT(dest
) || LST_IS_SMALLINT(aWith
)) return 1;
668 irepStart
= lstIntValue(startAt
)-1;
669 istart
= lstIntValue(aFrom
)-1;
670 istop
= lstIntValue(aTo
)-1;
671 count
= (istop
-istart
)+1;
672 /* defend against goofy negative indices */
673 if (count
<= 0) return 0;
674 if (irepStart
< 0 || istart
< 0 || istop
< 0) return 1;
676 if (LST_SIZE(dest
) < istop
|| LST_SIZE(aWith
) < irepStart
+count
) return 1;
677 /* if both source and dest are binary, just copy some bytes */
678 if (LST_IS_BYTES(aWith
) && LST_IS_BYTES(dest
)) {
679 memmove(lstBytePtr(dest
)+istart
, lstBytePtr(aWith
)+irepStart
, count
);
682 /* fail if only one of objects is binary */
683 if (LST_IS_BYTES(aWith
) || LST_IS_BYTES(dest
)) return 1;
684 /* if we're fiddling pointers between static and dynamic memory, register roots */
685 /* note that moving from static to dynamic is ok, but the reverse needs some housekeeping */
686 if (LST_IS_STATIC(dest
) && !LST_IS_STATIC(aWith
)) {
688 /*fprintf(stderr, "!!!: count=%u\n", count);*/
689 for (f
= 0; f
< count
; ++f
) lstAddStaticRoot(&dest
->data
[istart
+f
]);
691 /* copy object pointer fields */
692 memmove(&dest
->data
[istart
], &aWith
->data
[irepStart
], LST_BYTES_PER_WORD
*count
);
697 lstObject
*lstNewString (const char *str
) {
698 int l
= str
? strlen(str
) : 0;
699 lstByteObject
*strobj
= lstMemAllocBin(l
);
700 strobj
->stclass
= lstStringClass
;
701 if (l
> 0) memcpy(lstBytePtr(strobj
), str
, l
);
702 lstBytePtr(strobj
)[l
] = '\0';
703 return (lstObject
*)strobj
;
707 int lstGetString (char *buf
, int bufsize
, const lstObject
*str
) {
708 int fsize
= LST_SIZE(str
), csz
= fsize
;
709 if (csz
> bufsize
-1) csz
= bufsize
-1;
710 if (buf
&& csz
> 0) memcpy(buf
, &str
->data
, csz
);
711 if (csz
>= 0) buf
[csz
] = '\0'; /* put null terminator at end */
712 if (fsize
> bufsize
-1) return fsize
+1;
717 char *lstGetStringPtr (const lstObject
*str
) {
718 return (char *)(lstBytePtr(str
));
722 lstObject
*lstNewBinary (const void *p
, LstUInt l
) {
723 lstByteObject
*bobj
= lstMemAllocBin(l
);
724 bobj
->stclass
= lstByteArrayClass
;
726 if (p
) memcpy(lstBytePtr(bobj
), p
, l
); else memset(lstBytePtr(bobj
), 0, l
);
728 return (lstObject
*)bobj
;
732 lstObject
*lstNewBCode (const void *p
, LstUInt l
) {
733 lstByteObject
*bobj
= lstMemAllocBin(l
);
734 bobj
->stclass
= lstByteCodeClass
;
736 if (p
) memcpy(lstBytePtr(bobj
), p
, l
); else memset(lstBytePtr(bobj
), 0, l
);
738 return (lstObject
*)bobj
;
742 lstObject
*lstAllocInstance (int size
, lstObject
*cl
) {
744 if (size
< 0) return NULL
;
745 lstRootStack
[lstRootTop
++] = cl
;
746 lstObject
*obj
= lstMemAlloc(size
);
747 obj
->stclass
= lstRootStack
[--lstRootTop
];
748 for (f
= 0; f
< size
; ++f
) obj
->data
[f
] = lstNilObj
;
753 /* create new Integer (64-bit) */
754 lstObject
*lstNewLongInt (LstLInt val
) {
755 lstByteObject
*res
= lstMemAllocBin(sizeof(LstLInt
));
756 res
->stclass
= lstIntegerClass
;
757 memcpy(lstBytePtr(res
), &val
, sizeof(val
));
758 return (lstObject
*)res
;
762 lstObject
*lstNewFloat (LstFloat val
) {
763 lstByteObject
*res
= lstMemAllocBin(sizeof(LstFloat
));
764 res
->stclass
= lstFloatClass
;
765 memcpy(lstBytePtr(res
), &val
, sizeof(val
));
766 return (lstObject
*)res
;
770 lstObject
*lstNewArray (int size
) {
771 if (size
< 0) return NULL
;
772 return lstAllocInstance(size
, lstArrayClass
);
/* compare two symbol byte buffers strcmp-style: memcmp over the common
 * prefix decides first; otherwise the shorter buffer sorts first.
 * (fix: restore the 'int i;' declaration lost in extraction; skip memcmp
 * entirely for an empty common prefix) */
static int symbolBareCmp (const char *left, int leftsize, const char *right, int rightsize) {
  int minsize = leftsize;
  int i;
  if (rightsize < minsize) minsize = rightsize;
  if (minsize > 0) {
    if ((i = memcmp(left, right, minsize))) return i;
  }
  return leftsize-rightsize;
}
788 static int symbolCmp (const lstObject *s0, const lstObject *s1) {
789 return symbolBareCmp((const char *)lstBytePtr(s0), LST_SIZE(s0), (const char *)lstBytePtr(s1), LST_SIZE(s1));
794 lstObject
*lstDictFind (const lstObject
*dict
, const char *name
) {
796 const lstObject
*keys
= dict
->data
[0];
797 int l
= 0, h
= LST_SIZE(keys
)-1, nlen
= strlen(name
);
800 const lstObject
*key
= keys
->data
[mid
];
801 int res
= symbolBareCmp(name
, nlen
, (char *)lstBytePtr(key
), LST_SIZE(key
));
802 if (res
== 0) return dict
->data
[1]->data
[mid
];
803 if (res
< 0) h
= mid
-1; else l
= mid
+1;
/* NOTE(review): answers whether 'obj' is an instance of 'aclass' or of one of
 * its ancestors; SmallInts are mapped onto lstSmallIntClass first. The cycle
 * detection uses a second pointer 'pclass' walking the chain -- the
 * declaration/toggling of 'stC' and the closing 'return 0;' were lost in
 * extraction, so the exact stepping scheme cannot be confirmed here. Code is
 * left byte-identical. */
809 int lstIsKindOf (const lstObject
*obj
, const lstObject
*aclass
) {
810 const lstObject
*pclass
= obj
;
812 if (!obj
|| !aclass
) return 0;
813 if (obj
== aclass
) return 1;
814 if (LST_IS_SMALLINT(obj
)) {
815 if (LST_IS_SMALLINT(aclass
)) return 1;
816 obj
= lstSmallIntClass
;
818 if (LST_IS_SMALLINT(aclass
)) aclass
= lstSmallIntClass
;
821 while (obj
&& obj
!= lstNilObj
) {
822 /*printf(" : [%s]\n", lstBytePtr(obj->data[lstIVnameInClass]));*/
823 if (obj
== aclass
) return 1;
824 obj
= obj
->data
[lstIVparentClassInClass
];
825 if (stC
) { if (pclass
&& pclass
!= lstNilObj
) pclass
= pclass
->data
[lstIVparentClassInClass
]; }
826 else if (pclass
== obj
) return 0; /* avoid cycles */
833 lstObject
*lstFindGlobal (const char *name
) {
834 if (!name
|| !name
[0]) return NULL
;
835 return lstDictFind(lstGlobalObj
, name
);
839 lstObject
*lstFindMethod (lstObject
*stclass
, const char *method
) {
840 lstObject
*dict
, *res
;
841 /* scan upward through the class hierarchy */
842 for (; stclass
&& stclass
!= lstNilObj
; stclass
= stclass
->data
[lstIVparentClassInClass
]) {
843 /* consider the Dictionary of methods for this Class */
844 #if 0 & defined(DEBUG)
846 fprintf(stderr
, "st=%p; u=%p; sz=%d\n", stclass
, lstNilObj
, LST_SIZE(stclass
));
847 fprintf(stderr
, " [%s]\n", lstGetStringPtr(stclass
->data
[lstIVnameInClass
]));
851 if (LST_IS_SMALLINT(stclass
)) lstFatal("lookupMethod: looking in SmallInt instance", 0);
852 if (LST_IS_BYTES(stclass
)) lstFatal("lookupMethod: looking in binary object", 0);
853 if (LST_SIZE(stclass
) < lstClassSize
) lstFatal("lookupMethod: looking in non-class object", 0);
855 dict
= stclass
->data
[lstIVmethodsInClass
];
857 if (!dict
) lstFatal("lookupMethod: NULL dictionary", 0);
858 if (LST_IS_SMALLINT(dict
)) lstFatal("lookupMethod: SmallInt dictionary", 0);
859 if (dict
->stclass
!= lstFindGlobal("Dictionary")) lstFatal("lookupMethod: method list is not a dictionary", 0);
861 res
= lstDictFind(dict
, method
);
868 lstObject
*lstNewSymbol (const char *name
) {
869 lstObject
*res
= NULL
;
870 if (!name
|| !name
[0]) return NULL
;
871 lstObject
*str
= lstNewString(name
);
872 if (lstRunMethodWithArg(lstNewSymMethod
, NULL
, str
, &res
, 1) != lstReturnReturned
) return NULL
;
873 if (!res
|| res
->stclass
!= lstSymbolClass
) return NULL
;
878 int lstSetGlobal (const char *name
, lstObject
*val
) {
879 if (!name
|| !name
[0]) return -2;
880 lstRootStack
[lstRootTop
++] = val
;
881 lstObject
*aa
= lstNewArray(2);
882 lstRootStack
[lstRootTop
++] = aa
;
883 lstRootStack
[lstRootTop
-1]->data
[0] = lstNewString(name
);
884 lstRootStack
[lstRootTop
-1]->data
[1] = lstRootStack
[lstRootTop
-2];
885 aa
= lstRootStack
[--lstRootTop
];
887 if (lstRunMethodWithArg(lstSetGlobMethod
, NULL
, aa
, NULL
, 1) != lstReturnReturned
) return -1;
/* NOTE(review): attach C finalizer 'fn' with user data 'udata' to object 'o'
 * via an LstFinLink kept on 'finListHead'. The lines allocating/initializing
 * the link's obj/fin fields and the branch distinguishing "already has a
 * link" from "fresh malloc" were lost in extraction; presumably the existing
 * link is unlinked, (re)initialized and re-added -- TODO confirm. Code below
 * is left byte-identical. */
892 void lstSetFinalizer (lstObject
*o
, LstFinalizerFn fn
, void *udata
) {
893 if (LST_IS_SMALLINT(o
) || LST_IS_STATIC(o
)) return; /* note that static objects can't have finalizer */
895 lstRemoveFromFList(&finListHead
, o
->fin
);
897 o
->fin
= malloc(sizeof(LstFinLink
));
900 o
->fin
->udata
= udata
;
902 lstAddToFList(&finListHead
, o
->fin
);
906 void *lstGetUData (lstObject
*o
) {
907 if (LST_IS_SMALLINT(o
) || !o
->fin
) return NULL
;
908 return o
->fin
->udata
;
912 lstObject
*lstNewChar (int ch
) {
913 if (ch
< 0 || ch
> 255) return NULL
;
914 return lstCharClass
->data
[lstIVcharsInMetaChar
]->data
[ch
];