2 * Copyright (C) 1994, Index Data I/S
4 * Sebastian Hammer, Adam Dickmeiss
7 * Revision 1.11 1996-02-10 12:20:58 quinn
8 * *** empty log message ***
10 * Revision 1.10 1995/12/12 14:12:47 quinn
11 * *** empty log message ***
13 * Revision 1.9 1995/12/06 15:48:46 quinn
14 * Fixed update-problem.
16 * Revision 1.8 1995/12/06 14:48:27 quinn
17 * Fixed some strange bugs.
19 * Revision 1.7 1995/12/06 09:59:46 quinn
20 * Fixed memory-consumption bug in memory.c
21 * Added more blocksizes to the default ISAM configuration.
23 * Revision 1.6 1995/09/04 12:33:47 adam
24 * Various cleanup. YAZ util used instead.
26 * Revision 1.5 1994/09/28 16:58:33 quinn
29 * Revision 1.4 1994/09/27 20:03:52 quinn
30 * Seems relatively bug-free.
32 * Revision 1.3 1994/09/26 17:11:30 quinn
35 * Revision 1.2 1994/09/26 17:06:35 quinn
38 * Revision 1.1 1994/09/26 16:07:56 quinn
39 * Most of the functionality in place.
44 * This module accesses and rearranges the records of the tables.
/* Payload capacity (bytes) per mbuf type.  Index 0 is zero-sized —
 * presumably the "reference" type whose data pointer aliases another
 * buffer (see xmalloc_mbuf's refcount handling) — TODO confirm. */
53 int is_mbuf_size[3] = { 0, 1024, 4096 };
/* Module-global recycling lists for mblock and (per-type) mbuf
 * structures.  Mutated without any locking visible here, so this
 * module is not thread-safe as written. */
55 static is_mblock *mblock_tmplist = 0, *mblock_freelist = 0;
56 static is_mbuf *mbuf_freelist[3] = {0, 0, 0};
/* Number of is_mblock structures xmalloc'ed per freelist refill batch. */
58 #define MALLOC_CHUNK 20
/* Obtain an is_mblock, recycling from mblock_freelist; when the list is
 * empty, xmalloc a batch of MALLOC_CHUNK structures and chain them up.
 * The block handed back is marked IS_MBSTATE_UNREAD.
 * NOTE(review): this view is missing several original lines (local
 * declarations, the refill `if`, the trailing field resets and the
 * `return`); comments describe only what is visible. */
60 is_mblock *xmalloc_mblock()
/* Freelist refill: allocate one contiguous batch and link the elements
 * into a singly linked list. */
67 mblock_freelist = xmalloc(sizeof(is_mblock) * MALLOC_CHUNK)
68 for (i = 0; i < MALLOC_CHUNK - 1; i++)
69 mblock_freelist[i].next = &mblock_freelist[i+1];
/* i == MALLOC_CHUNK-1 here: terminate the chain. */
70 mblock_freelist[i].next = 0;
/* Pop the head of the freelist for the caller. */
72 tmp = mblock_freelist;
73 mblock_freelist = mblock_freelist->next;
75 tmp->state = IS_MBSTATE_UNREAD;
/* Obtain an is_mbuf of the given type (index into is_mbuf_size),
 * recycling from mbuf_freelist[type] when possible, else xmalloc'ing
 * header plus payload in one allocation.
 * NOTE(review): the enclosing braces / else and the `return` are not
 * visible in this chunk. */
80 is_mbuf *xmalloc_mbuf(int type)
84 if (mbuf_freelist[type])
/* Reuse a previously freed buffer of the same type. */
86 tmp = mbuf_freelist[type];
87 mbuf_freelist[type] = tmp->next;
/* Fresh allocation: payload lives immediately after the header. */
91 tmp = xmalloc(sizeof(is_mbuf) + is_mbuf_size[type]);
/* Type-0 buffers start with refcount 0 — presumably because they only
 * reference another buffer's data rather than owning it; verify against
 * the is_mbuf declaration. */
94 tmp->refcount = type ? 1 : 0;
95 tmp->offset = tmp->num = tmp->cur_record = 0;
/* Point data just past the header within the same allocation. */
96 tmp->data = (char*) tmp + sizeof(is_mbuf);
/* Return a single mbuf to the freelist for its type.  The memory is
 * never handed back to the system — it is recycled by xmalloc_mbuf. */
101 void xfree_mbuf(is_mbuf *p)
103 p->next = mbuf_freelist[p->type];
104 mbuf_freelist[p->type] = p;
/* Release an entire chain of mbufs (walks l->next, presumably calling
 * xfree_mbuf on each — body not visible in this chunk). */
107 void xfree_mbufs(is_mbuf *l)
/* Return an mblock to the freelist, first releasing its mbuf chain.
 * NOTE(review): the line re-pointing mblock_freelist to p is not
 * visible here but is implied by the push at line "122". */
119 void xfree_mblock(is_mblock *p)
121 xfree_mbufs(p->data);
122 p->next = mblock_freelist;
/* Move an mblock onto the temporary hold list rather than the freelist —
 * presumably so it stays valid until is_m_release_tab flushes
 * mblock_tmplist; confirm against callers. */
126 void xrelease_mblock(is_mblock *p)
128 p->next = mblock_tmplist;
/* Release an entire chain of mblocks (walks l->next, presumably via
 * xfree_mblock — body not visible in this chunk). */
132 void xfree_mblocks(is_mblock *l)
/* Attach an in-memory table structure to an ISAM position.  Two setups
 * are visible: one for an existing on-disk block (record count unknown,
 * state UNREAD, data fetched lazily) and one for a brand-new empty table
 * (state CLEAN, no disk position, one fresh LARGE mbuf).
 * NOTE(review): the `if (pos)` / `else` lines selecting between the two
 * branches are missing from this view — the split is inferred from the
 * contrasting field values. */
144 void is_m_establish_tab(ISAM is, is_mtable *tab, ISAM_P pos)
146 tab->data = xmalloc_mblock();
/* Existing table: decode type and block number from pos; counts are -1
 * (unknown) until the block is read from disk. */
149 tab->pos_type = is_type(pos);
150 tab->num_records = -1;
151 tab->data->num_records = -1;
152 tab->data->diskpos = is_block(pos);
153 tab->data->state = IS_MBSTATE_UNREAD;
155 tab->cur_mblock = tab->data;
156 tab->cur_mblock->cur_mbuf = 0;
/* New table: empty, clean, not yet backed by any disk block
 * (diskpos -1), with one large mbuf ready for writes. */
161 tab->num_records = 0;
162 tab->data->num_records = 0;
163 tab->data->diskpos = -1;
164 tab->data->state = IS_MBSTATE_CLEAN;
165 tab->data->data = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
166 tab->cur_mblock = tab->data;
167 tab->cur_mblock->cur_mbuf = tab->data->data;
168 tab->cur_mblock->cur_mbuf->cur_record = 0;
/* Tear down a table: free its mblock chain and also flush the global
 * temporary list populated by xrelease_mblock.
 * NOTE(review): the reset of mblock_tmplist to 0 is presumably on a line
 * not visible here — confirm, otherwise the list would dangle. */
173 void is_m_release_tab(is_mtable *tab)
175 xfree_mblocks(tab->data);
176 xfree_mblocks(mblock_tmplist);
/* Reset the read cursor to the first record of the first mblock.  The
 * inner resets are presumably guarded by null checks on lines missing
 * from this view (tab->data / tab->data->data may be 0 for an unread
 * table) — verify. */
180 void is_m_rewind(is_mtable *tab)
182 tab->cur_mblock = tab->data;
185 tab->data->cur_mbuf = tab->data->data;
187 tab->data->data->cur_record = 0;
/* Pull an mblock fully into memory via is_p_read_full, link in a
 * placeholder successor mblock if the on-disk block has a continuation
 * (nextpos) that is not yet represented, and reset the block's cursor.
 * Returns an int status; the error path after a failed read and the
 * final `return` are on lines missing from this view. */
191 static int read_current_full(is_mtable *tab, is_mblock *mblock)
193 if (is_p_read_full(tab, mblock) < 0)
/* Chain exists on disk but not in memory yet: create an UNREAD stub for
 * lazy loading. */
195 if (mblock->nextpos && !mblock->next)
197 mblock->next = xmalloc_mblock();
198 mblock->next->diskpos = mblock->nextpos;
199 mblock->next->state = IS_MBSTATE_UNREAD;
200 mblock->next->data = 0;
/* Rewind the freshly read block's cursor to its first record. */
202 mblock->cur_mbuf = mblock->data;
203 mblock->data->cur_record = 0;
/* Public wrapper around the static read_current_full helper. */
207 int is_m_read_full(is_mtable *tab, is_mblock *mblock)
209 return read_current_full(tab, mblock);
213 * replace the record right behind the pointer.
215 void is_m_replace_record(is_mtable *tab, const void *rec)
217 is_mbuf *mbuf = tab->cur_mblock->cur_mbuf;
219 /* we assume that block is already in memory and that we are in the
220 * right mbuf, and that it has space for us. */
/* cur_record is one past the record just read, hence the -1 to address
 * the record "behind" the cursor; keys are fixed-size (is_keysize). */
221 memcpy(mbuf->data + mbuf->offset + (mbuf->cur_record - 1) *
222 is_keysize(tab->is), rec, is_keysize(tab->is));
/* In-memory copy now differs from disk. */
223 tab->cur_mblock->state = IS_MBSTATE_DIRTY;
227 * Delete the record right behind the pointer.
229 void is_m_delete_record(is_mtable *tab)
233 mbuf = tab->cur_mblock->cur_mbuf;
/* Case 1: deleting the last record of the mbuf — the shrink itself
 * (presumably mbuf->num-- and cursor step-back) is on lines missing
 * from this view. */
234 if (mbuf->cur_record >= mbuf->num) /* top of mbuf */
/* Case 2: deleting the first record — skip it by advancing the data
 * offset one keysize and resetting the cursor. */
239 else if (mbuf->cur_record == 1) /* beginning of mbuf */
242 mbuf->offset +=is_keysize(tab->is);
243 mbuf->cur_record = 0;
/* Case 3: deleting from the middle — split the mbuf in two around the
 * deleted record instead of shifting bytes. */
245 else /* middle of mbuf */
247 /* insert block after current one */
248 new = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
249 new->next = mbuf->next;
252 /* virtually transfer everything after current record to new one. */
/* The SMALL mbuf aliases the old buffer's data (no copy); the original
 * buffer's refcount bump is presumably on a missing line — confirm. */
253 new->data = mbuf->data;
255 new->offset = mbuf->offset + mbuf->cur_record * is_keysize(tab->is);
256 new->num = mbuf->num - mbuf->cur_record;
258 /* old buf now only contains stuff before current record */
259 mbuf->num = mbuf->cur_record -1;
260 tab->cur_mblock->cur_mbuf = new;
/* Common bookkeeping: one fewer record, both the current and the head
 * mblock are marked dirty. */
263 tab->cur_mblock->num_records--;
264 tab->cur_mblock->state = tab->data->state = IS_MBSTATE_DIRTY;
/* Insert a record at the current cursor position.  Ensures the block is
 * in memory, makes room (appending a new mbuf, or splitting the current
 * one when inserting mid-buffer), copies the key in, and marks the
 * blocks dirty.  Returns an int status; the `return` statements and
 * some brace/else lines are missing from this view. */
267 int is_m_write_record(is_mtable *tab, const void *rec)
269 is_mbuf *mbuf, *oldnext, *dmbuf;
271 /* make sure block is all in memory */
272 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
273 if (read_current_full(tab, tab->cur_mblock) < 0)
275 mbuf = tab->cur_mblock->cur_mbuf;
/* Appending at the end of the current mbuf. */
276 if (mbuf->cur_record >= mbuf->num) /* top of mbuf */
278 /* mbuf is reference or full */
/* Cannot write in place if the buffer is shared (refcount != 1) or one
 * more key would overflow its capacity: link in a fresh LARGE mbuf. */
279 if (mbuf->refcount != 1 || mbuf->offset + (mbuf->num + 1) *
280 is_keysize(tab->is) > is_mbuf_size[mbuf->type])
282 oldnext = mbuf->next;
283 mbuf->next = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
284 mbuf->next->next = oldnext;
/* Presumably mbuf has been advanced to the new buffer on a line missing
 * from this view before these cursor resets — confirm. */
286 tab->cur_mblock->cur_mbuf = mbuf;
287 mbuf->cur_record = 0;
/* Inserting mid-buffer: split into a new MEDIUM mbuf (for the insert)
 * followed by a SMALL reference mbuf aliasing the tail of the old data
 * — same virtual-split technique as is_m_delete_record. */
292 oldnext = mbuf->next;
293 mbuf->next = xmalloc_mbuf(IS_MBUF_TYPE_MEDIUM);
294 mbuf->next->next = dmbuf = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
295 dmbuf->data = mbuf->data;
296 dmbuf->next = oldnext;
297 dmbuf->offset = mbuf->offset + mbuf->cur_record * is_keysize(tab->is);
298 dmbuf->num = mbuf->num - mbuf->cur_record;
299 mbuf->num -= dmbuf->num;
/* Continue writing into the freshly inserted mbuf. */
301 mbuf = tab->cur_mblock->cur_mbuf = mbuf->next;
302 mbuf->cur_record = 0;
304 logf (LOG_DEBUG, "is_m_write_rec(rec == %d)", mbuf->cur_record);
/* Copy the fixed-size key into place. */
305 memcpy(mbuf->data + mbuf->offset + mbuf->cur_record * is_keysize(tab->is),
306 rec, is_keysize(tab->is));
/* Bookkeeping: count the new record, mark current and head blocks dirty.
 * Increments of mbuf->num / cur_record are presumably on missing lines. */
310 tab->cur_mblock->num_records++;
311 tab->cur_mblock->state = tab->data->state = IS_MBSTATE_DIRTY;
/* Step the cursor back one record within the current mbuf, undoing the
 * last read.  Asserts that there is a record to step back over; cannot
 * cross an mbuf boundary. */
315 void is_m_unread_record(is_mtable *tab)
317 assert(tab->cur_mblock->cur_mbuf->cur_record);
318 tab->cur_mblock->cur_mbuf->cur_record--;
322 * non-destructive read.
/* Copy the record at the cursor into rec WITHOUT advancing the table's
 * own cursor (local mblock/mbuf variables are advanced instead).
 * Returns 0 at end of table; the success `return` (presumably 1) is on
 * a line missing from this view. */
324 int is_m_peek_record(is_mtable *tab, void *rec)
329 /* make sure block is all in memory */
330 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
331 if (read_current_full(tab, tab->cur_mblock) < 0)
333 mblock = tab->cur_mblock;
334 mbuf = mblock->cur_mbuf;
/* Walk forward over empty/exhausted mbufs, crossing into the next
 * mblock (faulting it in if needed) when the chain runs out. */
335 if (mbuf->cur_record >= mbuf->num) /* are we at end of mbuf? */
337 if (!mbuf->next) /* end of mblock */
341 mblock = mblock->next;
342 if (mblock->state <= IS_MBSTATE_PARTIAL)
343 if (read_current_full(tab, mblock) < 0)
348 return 0; /* EOTable */
352 mbuf->cur_record = 0;
/* Copy out one fixed-size key; cur_record was only moved on locals, so
 * the table's real position is untouched. */
354 memcpy(rec, mbuf->data + mbuf->offset + mbuf->cur_record *
355 is_keysize(tab->is), is_keysize(tab->is));
/* Sequential read: copy the next record into buf and advance the
 * cursor.  When `keep` is false, a clean block that has a disk copy is
 * discarded from memory as the cursor leaves it (streaming mode).
 * Returns 0 at end of table; the success `return` and the cur_record
 * increment are on lines missing from this view. */
359 int is_m_read_record(is_mtable *tab, void *buf, int keep)
363 /* make sure block is all in memory */
364 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
365 if (read_current_full(tab, tab->cur_mblock) < 0)
367 mbuf = tab->cur_mblock->cur_mbuf;
368 if (mbuf->cur_record >= mbuf->num) /* are we at end of mbuf? */
370 if (!mbuf->next) /* end of mblock */
/* Leaving a clean, disk-backed block we won't revisit: drop its mbufs
 * and revert it to UNREAD so memory use stays bounded. */
372 if (!keep && tab->cur_mblock->state == IS_MBSTATE_CLEAN &&
373 tab->cur_mblock->diskpos > 0)
375 xfree_mbufs(tab->cur_mblock->data);
376 tab->cur_mblock->data = 0;
377 tab->cur_mblock->state = IS_MBSTATE_UNREAD;
/* Advance into the next mblock, faulting it in if necessary. */
379 if (tab->cur_mblock->next)
381 tab->cur_mblock = tab->cur_mblock->next;
382 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
383 if (read_current_full(tab, tab->cur_mblock) < 0)
385 tab->cur_mblock->cur_mbuf = mbuf = tab->cur_mblock->data;
388 return 0; /* EOTable */
/* Same mblock, next mbuf in the chain. */
391 tab->cur_mblock->cur_mbuf = mbuf = mbuf->next;
392 mbuf->cur_record = 0;
394 memcpy(buf, mbuf->data + mbuf->offset + mbuf->cur_record *
395 is_keysize(tab->is), is_keysize(tab->is));
401 * TODO: optimize this function by introducing a higher-level search.
/* Linear scan forward until a record >= rec (per the table's cmp
 * callback) is found, then step back so the cursor sits just before it.
 * The loop construct, the rs local, the == 0 handling and the returns
 * are on lines missing from this view. */
403 int is_m_seek_record(is_mtable *tab, const void *rec)
405 char peek[IS_MAX_RECORD];
/* keep=1: do not discard clean blocks while scanning. */
410 if (is_m_read_record(tab, &peek, 1) <= 0)
412 if ((rs = (*tab->is->cmp)(peek, rec)) > 0)
/* Overshot: push the record back so the cursor is positioned before it. */
414 is_m_unread_record(tab);
/* Return the number of records in the table, forcing a full read of the
 * head block first if the count is not yet known (num_records is -1
 * until an UNREAD block is loaded — see is_m_establish_tab).
 * NOTE(review): the error `return` after the LOG_FATAL, and presumably
 * the line updating tab->num_records from the block header, are missing
 * from this view. */
422 int is_m_num_records(is_mtable *tab)
424 if (tab->data->state < IS_MBSTATE_PARTIAL)
425 if (read_current_full(tab, tab->data) < 0)
427 logf (LOG_FATAL, "read full failed");
430 return tab->num_records;