###
// up to 100 blocks of 100,000 atoms

#define M 100
#define N 100000

U *mem[M];
int mcount;

U *free_list;
int free_count;

U *
alloc(void)
{
  U *p;
  if (free_count == 0) {
    if (mcount == 0)
      alloc_mem();
    else {
      gc();
      if (free_count < N * mcount / 2)
        alloc_mem();
    }
    if (free_count == 0)
      stop("atom space exhausted");
  }
  p = free_list;
  free_list = free_list->u.cons.cdr;
  free_count--;
  return p;
}
###
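# In this CoffeeScript port the C allocator above is kept for reference only:
# atoms are created with a plain `new U()` and reclaimed by the JavaScript
# runtime's own garbage collector, so there is no free list to maintain here.
# The one allocation helper that stays live is alloc_tensor below, which
# builds a TENSOR atom and fills its elements with `zero`.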
|
allocatedId = 0
alloc_tensor = (nelem) ->
  i = 0
  p = new U()
  p.k = TENSOR
  p.tensor = new tensor()

  p.tensor.nelem = nelem
  for i in [0...nelem]
    p.tensor.elem[i] = zero

  p.tensor.allocatedId = allocatedId
  #if allocatedId == 9
  #  debugger
  allocatedId++

  check_tensor_dimensions p

  return p
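# Usage sketch (illustrative, not part of this module): a caller is expected
# to set the rank and dimensions itself after allocation. The field names
# ndim and dim are assumptions taken from the tensor class this file relies
# on; adjust them if that class differs.
#
#   v = alloc_tensor(3)     # 3 elements, each initialised to `zero`
#   v.tensor.ndim = 1       # rank-1 tensor, i.e. a vector
#   v.tensor.dim[0] = 3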
|
###
// garbage collector

void
gc(void)
{
  int i, j;
  U *p;

  // tag everything

  for (i = 0; i < mcount; i++) {
    p = mem[i];
    for (j = 0; j < N; j++)
      p[j].tag = 1;
  }

  // untag what's used

  untag(p0);
  untag(p1);
  untag(p2);
  untag(p3);
  untag(p4);
  untag(p5);
  untag(p6);
  untag(p7);
  untag(p8);
  untag(p9);

  untag(one);
  untag(zero);
  untag(imaginaryunit);

  for (i = 0; i < NSYM; i++) {
    untag(binding[i]);
    untag(arglist[i]);
  }

  for (i = 0; i < tos; i++)
    untag(stack[i]);

  for (i = (int) (frame - stack); i < TOS; i++)
    untag(stack[i]);

  // collect everything that's still tagged

  free_count = 0;

  for (i = 0; i < mcount; i++) {
    p = mem[i];
    for (j = 0; j < N; j++) {
      if (p[j].tag == 0)
        continue;
      // still tagged so it's unused, put on free list
      switch (p[j].k) {
      case TENSOR:
        free(p[j].u.tensor);
        break;
      case STR:
        free(p[j].u.str);
        break;
      case NUM:
        mfree(p[j].u.q.a);
        mfree(p[j].u.q.b);
        break;
      }
      p[j].k = CONS; // so no double free occurs above
      p[j].u.cons.cdr = free_list;
      free_list = p + j;
      free_count++;
    }
  }
}
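// Note: gc() works in reverse of a textbook mark-and-sweep. It first tags
// every atom in every block, then untags whatever is reachable from the
// roots (registers p0-p9, the constants one/zero/imaginaryunit, the symbol
// bindings and argument lists, and both ends of the evaluation stack), and
// finally sweeps the atoms that are still tagged back onto the free list,
// releasing any heap payload (tensor, string, or the two halves of a
// rational number) first.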
|
void
untag(U *p)
{
  int i;

  if (iscons(p)) {
    do {
      if (p->tag == 0)
        return;
      p->tag = 0;
      untag(p->u.cons.car);
      p = p->u.cons.cdr;
    } while (iscons(p));
    untag(p);
    return;
  }

  if (p->tag) {
    p->tag = 0;
    if (istensor(p)) {
      for (i = 0; i < p->u.tensor->nelem; i++)
        untag(p->u.tensor->elem[i]);
    }
  }
}
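// untag() clears marks iteratively along the cdr chain of a list (so long
// lists do not deepen the C call stack), recursing only into each car and
// into tensor elements. Hitting an already-clear tag means the node was
// visited before, which is what terminates traversal of shared structure.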
|
// get memory for 100,000 atoms

void
alloc_mem(void)
{
  int i;
  U *p;
  if (mcount == M)
    return;
  p = (U *) malloc(N * sizeof (struct U));
  if (p == NULL)
    return;
  mem[mcount++] = p;
  for (i = 0; i < N; i++) {
    p[i].k = CONS; // so no free in gc
    p[i].u.cons.cdr = p + i + 1;
  }
  p[N - 1].u.cons.cdr = free_list;
  free_list = p;
  free_count += N;
}
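// alloc_mem() threads the fresh block onto the free list through the
// cons.cdr field of each atom. Setting k = CONS up front matters: if gc()
// runs before an atom has ever been handed out, the sweep will not try to
// free a tensor/string/number payload that was never allocated.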
|
void
print_mem_info(void)
{
  char buf[100];

  sprintf(buf, "%d blocks (%d bytes/block)\n", N * mcount, (int) sizeof (U));
  printstr(buf);

  sprintf(buf, "%d free\n", free_count);
  printstr(buf);

  sprintf(buf, "%d used\n", N * mcount - free_count);
  printstr(buf);
}
###
|