2 * Copyright (C) 1994-1999, Index Data
4 * Sebastian Hammer, Adam Dickmeiss
7 * Revision 1.16 1999-02-02 14:51:20 adam
8 * Updated WIN32 code specific sections. Changed header.
10 * Revision 1.15 1997/09/09 13:38:11 adam
11 * Partial port to WIN95/NT.
13 * Revision 1.14 1996/10/29 13:56:56 adam
14 * Include of zebrautl.h instead of alexutil.h.
16 * Revision 1.13 1996/03/20 13:29:16 quinn
19 * Revision 1.12 1996/03/11 14:52:23 quinn
20 * Fixed update bug. Repeated insertion in the same area sometimes caused
23 * Revision 1.11 1996/02/10 12:20:58 quinn
24 * *** empty log message ***
26 * Revision 1.10 1995/12/12 14:12:47 quinn
27 * *** empty log message ***
29 * Revision 1.9 1995/12/06 15:48:46 quinn
30 * Fixed update-problem.
32 * Revision 1.8 1995/12/06 14:48:27 quinn
33 * Fixed some strange bugs.
35 * Revision 1.7 1995/12/06 09:59:46 quinn
36 * Fixed memory-consumption bug in memory.c
37 * Added more blocksizes to the default ISAM configuration.
39 * Revision 1.6 1995/09/04 12:33:47 adam
40 * Various cleanup. YAZ util used instead.
42 * Revision 1.5 1994/09/28 16:58:33 quinn
45 * Revision 1.4 1994/09/27 20:03:52 quinn
46 * Seems relatively bug-free.
48 * Revision 1.3 1994/09/26 17:11:30 quinn
51 * Revision 1.2 1994/09/26 17:06:35 quinn
54 * Revision 1.1 1994/09/26 16:07:56 quinn
55 * Most of the functionality in place.
60 * This module accesses and rearranges the records of the tables.
/* Payload capacity in bytes per mbuf type; index 0 carries no payload.
 * NOTE(review): presumably indexed by IS_MBUF_TYPE_{SMALL,MEDIUM,LARGE}
 * from the header -- confirm the enum ordering there. */
71 int is_mbuf_size[3] = { 0, 1024, 4096 };
/* Free-lists recycling mblock/mbuf nodes instead of repeated allocation;
 * mblock_tmplist collects blocks parked by xrelease_mblock() until
 * is_m_release_tab() frees them. */
73 static is_mblock *mblock_tmplist = 0, *mblock_freelist = 0;
74 static is_mbuf *mbuf_freelist[3] = {0, 0, 0};
/* Number of is_mblock nodes carved out per batch when the free-list is empty. */
76 #define MALLOC_CHUNK 20
/*
 * Pop an is_mblock off the free-list; when the list is empty, xmalloc a
 * contiguous batch of MALLOC_CHUNK nodes and chain them first.  The node
 * is handed back in IS_MBSTATE_UNREAD state.
 * NOTE(review): intervening source lines (declarations of tmp/i, the
 * empty-list guard, remaining field resets and the return) are missing
 * from this view.
 */
78 is_mblock *xmalloc_mblock()
85 mblock_freelist = xmalloc(sizeof(is_mblock) * MALLOC_CHUNK);
86 for (i = 0; i < MALLOC_CHUNK - 1; i++)
87 mblock_freelist[i].next = &mblock_freelist[i+1];
/* terminate the freshly chained batch */
88 mblock_freelist[i].next = 0;
/* pop the head node off the free-list */
90 tmp = mblock_freelist;
91 mblock_freelist = mblock_freelist->next;
93 tmp->state = IS_MBSTATE_UNREAD;
/*
 * Allocate an is_mbuf of the given type: reuse the per-type free-list when
 * non-empty, otherwise xmalloc header plus payload in a single chunk.
 * Counters are reset and the data pointer aimed just past the header.
 * NOTE(review): lines between the sampled ones (declaration of tmp, the
 * else branch that records tmp->type, and the return) are missing from
 * this view.
 */
98 is_mbuf *xmalloc_mbuf(int type)
102 if (mbuf_freelist[type])
104 tmp = mbuf_freelist[type];
105 mbuf_freelist[type] = tmp->next;
109 tmp = xmalloc(sizeof(is_mbuf) + is_mbuf_size[type]);
/* type-0 mbufs start unreferenced; payload-bearing types start at refcount 1 */
112 tmp->refcount = type ? 1 : 0;
113 tmp->offset = tmp->num = tmp->cur_record = 0;
/* payload lives immediately after the header in the same allocation */
114 tmp->data = (char*) tmp + sizeof(is_mbuf);
/* Return one mbuf to the free-list matching its type (no actual free). */
119 void xfree_mbuf(is_mbuf *p)
121 p->next = mbuf_freelist[p->type];
122 mbuf_freelist[p->type] = p;
/* Release a whole chain of mbufs back to their per-type free-lists.
 * NOTE(review): the loop body is missing from this view. */
125 void xfree_mbufs(is_mbuf *l)
/* Return an mblock (first releasing its mbuf chain) to the mblock free-list.
 * NOTE(review): the line re-pointing mblock_freelist at p is missing from
 * this view. */
137 void xfree_mblock(is_mblock *p)
139 xfree_mbufs(p->data);
140 p->next = mblock_freelist;
/* Park an mblock on the temporary list; it is reclaimed later when
 * is_m_release_tab() frees mblock_tmplist.
 * NOTE(review): the line re-pointing mblock_tmplist at p is missing from
 * this view. */
144 void xrelease_mblock(is_mblock *p)
146 p->next = mblock_tmplist;
/* Release a chain of mblocks back to the free-list.
 * NOTE(review): the loop body is missing from this view. */
150 void xfree_mblocks(is_mblock *l)
/*
 * Bind table state either to an existing on-disk ISAM position (pos) or to
 * a fresh empty in-memory table.  Existing tables start with an UNREAD head
 * block (record count unknown, faulted in on demand); fresh tables get one
 * LARGE mbuf and start CLEAN with no disk backing.
 * NOTE(review): the if/else structure and several assignments between the
 * sampled lines are missing from this view.
 */
162 void is_m_establish_tab(ISAM is, is_mtable *tab, ISAM_P pos)
164 tab->data = xmalloc_mblock();
167 tab->pos_type = is_type(pos);
/* -1 == record count unknown until the block is read from disk */
168 tab->num_records = -1;
169 tab->data->num_records = -1;
170 tab->data->diskpos = is_block(pos);
171 tab->data->state = IS_MBSTATE_UNREAD;
173 tab->cur_mblock = tab->data;
174 tab->cur_mblock->cur_mbuf = 0;
/* fresh (unpositioned) table: empty, clean, no disk block assigned yet */
180 tab->num_records = 0;
181 tab->data->num_records = 0;
182 tab->data->diskpos = -1;
183 tab->data->state = IS_MBSTATE_CLEAN;
184 tab->data->data = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
185 tab->cur_mblock = tab->data;
186 tab->cur_mblock->cur_mbuf = tab->data->data;
187 tab->cur_mblock->cur_mbuf->cur_record = 0;
/* Free all memory held by a table: its mblock chain plus every block parked
 * on the temporary release list.
 * NOTE(review): the line resetting mblock_tmplist afterwards is missing
 * from this view. */
193 void is_m_release_tab(is_mtable *tab)
195 xfree_mblocks(tab->data);
196 xfree_mblocks(mblock_tmplist);
/* Reset the read cursor to the first record of the first mblock.
 * NOTE(review): guard conditions around the inner resets (e.g. null checks
 * for tab->data->data) are missing from this view. */
200 void is_m_rewind(is_mtable *tab)
202 tab->cur_mblock = tab->data;
205 tab->data->cur_mbuf = tab->data->data;
207 tab->data->data->cur_record = 0;
/*
 * Fault an entire mblock into memory via is_p_read_full() and, if the block
 * names a successor on disk (nextpos) that has no in-memory node yet,
 * pre-create an UNREAD placeholder mblock for it so sequential reads can
 * continue.  Resets the block's read cursor to its first record.
 * NOTE(review): the failure-handling/return lines between the sampled
 * lines are missing from this view.
 */
211 static int read_current_full(is_mtable *tab, is_mblock *mblock)
213 if (is_p_read_full(tab, mblock) < 0)
/* lazily chain the next disk block onto the in-memory list */
215 if (mblock->nextpos && !mblock->next)
217 mblock->next = xmalloc_mblock();
218 mblock->next->diskpos = mblock->nextpos;
219 mblock->next->state = IS_MBSTATE_UNREAD;
220 mblock->next->data = 0;
222 mblock->cur_mbuf = mblock->data;
223 mblock->data->cur_record = 0;
/* Public wrapper around the static read_current_full(). */
227 int is_m_read_full(is_mtable *tab, is_mblock *mblock)
229 return read_current_full(tab, mblock);
233 * replace the record right behind the pointer.
235 void is_m_replace_record(is_mtable *tab, const void *rec)
237 is_mbuf *mbuf = tab->cur_mblock->cur_mbuf;
239 /* we assume that block is already in memory and that we are in the
240 * right mbuf, and that it has space for us. */
/* overwrite the record the cursor just stepped past: index cur_record - 1 */
241 memcpy(mbuf->data + mbuf->offset + (mbuf->cur_record - 1) *
242 is_keysize(tab->is), rec, is_keysize(tab->is));
/* mark the block dirty so it is written back */
243 tab->cur_mblock->state = IS_MBSTATE_DIRTY;
247 * Delete the record right behind the pointer.
249 void is_m_delete_record(is_mtable *tab)
253 mbuf = tab->cur_mblock->cur_mbuf;
/* case 1: record is the last one in this mbuf.
 * NOTE(review): this branch body is missing from this view. */
254 if (mbuf->cur_record >= mbuf->num) /* top of mbuf */
259 else if (mbuf->cur_record == 1) /* beginning of mbuf */
/* case 2: drop the first record by sliding the payload window forward */
262 mbuf->offset +=is_keysize(tab->is);
263 mbuf->cur_record = 0;
265 else /* middle of mbuf */
267 /* insert block after current one */
268 new = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
269 new->next = mbuf->next;
272 /* virtually transfer everything after current record to new one. */
/* the SMALL mbuf aliases the old payload buffer -- no data is copied */
273 new->data = mbuf->data;
275 new->offset = mbuf->offset + mbuf->cur_record * is_keysize(tab->is);
276 new->num = mbuf->num - mbuf->cur_record;
278 /* old buf now only contains stuff before current record */
279 mbuf->num = mbuf->cur_record -1;
280 tab->cur_mblock->cur_mbuf = new;
283 tab->cur_mblock->num_records--;
/* both the current block and the head block are now dirty */
284 tab->cur_mblock->state = tab->data->state = IS_MBSTATE_DIRTY;
/*
 * Insert a record at the cursor position of the current mblock.
 * Writes in place when the current mbuf is writable and has room;
 * otherwise splits the mbuf chain: at end-of-mbuf a fresh LARGE mbuf is
 * linked in, mid-buffer a MEDIUM mbuf receives the new record while a
 * SMALL mbuf aliases the tail of the old payload (no copying).
 * NOTE(review): several lines (braces, cursor advances, the return and
 * part of the branch structure) are missing from this view.
 */
287 int is_m_write_record(is_mtable *tab, const void *rec)
289 is_mbuf *mbuf, *oldnext, *dmbuf;
291 /* make sure block is all in memory */
292 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
293 if (read_current_full(tab, tab->cur_mblock) < 0)
295 mbuf = tab->cur_mblock->cur_mbuf;
296 if (mbuf->cur_record >= mbuf->num) /* top of mbuf */
298 /* mbuf is reference or full */
299 if (mbuf->refcount != 1 || mbuf->offset + (mbuf->num + 1) *
300 is_keysize(tab->is) > is_mbuf_size[mbuf->type])
/* append case: link in a fresh LARGE mbuf after the current one */
302 oldnext = mbuf->next;
303 mbuf->next = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
304 mbuf->next->next = oldnext;
306 tab->cur_mblock->cur_mbuf = mbuf;
307 mbuf->cur_record = 0;
/* mid-buffer insert: MEDIUM mbuf takes the new record, SMALL mbuf
 * aliases the tail of the old payload */
312 oldnext = mbuf->next;
313 mbuf->next = xmalloc_mbuf(IS_MBUF_TYPE_MEDIUM);
314 mbuf->next->next = dmbuf = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
315 dmbuf->data = mbuf->data;
316 dmbuf->next = oldnext;
317 dmbuf->offset = mbuf->offset + mbuf->cur_record * is_keysize(tab->is);
318 dmbuf->num = mbuf->num - mbuf->cur_record;
319 mbuf->num -= dmbuf->num;
321 mbuf = tab->cur_mblock->cur_mbuf = mbuf->next;
322 mbuf->cur_record = 0;
325 logf (LOG_DEBUG, "is_m_write_rec(rec == %d)", mbuf->cur_record);
327 memcpy(mbuf->data + mbuf->offset + mbuf->cur_record * is_keysize(tab->is),
328 rec, is_keysize(tab->is));
332 tab->cur_mblock->num_records++;
/* dirty both the current block and the head block for write-back */
333 tab->cur_mblock->state = tab->data->state = IS_MBSTATE_DIRTY;
/* Step the cursor one record backwards (undo the previous read); uses
 * tab->last_mbuf to cross an mbuf boundary backwards.
 * NOTE(review): the branch structure around the sampled lines is missing
 * from this view -- confirm when last_mbuf vs. the plain decrement applies. */
337 void is_m_unread_record(is_mtable *tab)
339 assert(tab->cur_mblock->cur_mbuf->cur_record);
341 tab->cur_mblock->cur_mbuf = tab->last_mbuf;
343 tab->cur_mblock->cur_mbuf->cur_record--;
347 * non-destructive read.
/* Copy the next record into rec without advancing the table cursor;
 * walks forward across mbuf/mblock boundaries (faulting blocks in as
 * needed) using local mblock/mbuf variables only.  Returns 0 at end of
 * table.
 * NOTE(review): braces, the mbuf advance, and the final return are missing
 * from this view. */
349 int is_m_peek_record(is_mtable *tab, void *rec)
354 /* make sure block is all in memory */
355 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
356 if (read_current_full(tab, tab->cur_mblock) < 0)
358 mblock = tab->cur_mblock;
359 mbuf = mblock->cur_mbuf;
360 if (mbuf->cur_record >= mbuf->num) /* are we at end of mbuf? */
362 if (!mbuf->next) /* end of mblock */
366 mblock = mblock->next;
367 if (mblock->state <= IS_MBSTATE_PARTIAL)
368 if (read_current_full(tab, mblock) < 0)
373 return 0; /* EOTable */
377 mbuf->cur_record = 0;
/* copy the record out; tab's own cursor state is left untouched */
379 memcpy(rec, mbuf->data + mbuf->offset + mbuf->cur_record *
380 is_keysize(tab->is), is_keysize(tab->is));
/*
 * Destructive sequential read: copy the next record into buf and advance
 * the cursor.  When `keep` is false, a CLEAN disk-backed block that has
 * been fully consumed has its mbufs released and is dropped back to
 * UNREAD, bounding memory use during long scans.  Returns 0 at end of
 * table.  Records the previous mbuf in tab->last_mbuf so that
 * is_m_unread_record() can step back across the boundary.
 * NOTE(review): braces, cursor increments and returns between the sampled
 * lines are missing from this view.
 */
384 int is_m_read_record(is_mtable *tab, void *buf, int keep)
388 /* make sure block is all in memory */
389 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
390 if (read_current_full(tab, tab->cur_mblock) < 0)
392 mbuf = tab->cur_mblock->cur_mbuf;
393 if (mbuf->cur_record >= mbuf->num) /* are we at end of mbuf? */
395 if (!mbuf->next) /* end of mblock */
/* discard the payload of a clean, disk-backed, fully-read block */
397 if (!keep && tab->cur_mblock->state == IS_MBSTATE_CLEAN &&
398 tab->cur_mblock->diskpos > 0)
400 xfree_mbufs(tab->cur_mblock->data);
401 tab->cur_mblock->data = 0;
402 tab->cur_mblock->state = IS_MBSTATE_UNREAD;
404 if (tab->cur_mblock->next)
406 tab->cur_mblock = tab->cur_mblock->next;
407 if (tab->cur_mblock->state <= IS_MBSTATE_PARTIAL)
408 if (read_current_full(tab, tab->cur_mblock) < 0)
410 tab->cur_mblock->cur_mbuf = mbuf = tab->cur_mblock->data;
414 return 0; /* EOTable */
/* remember where we came from, for is_m_unread_record() */
418 tab->last_mbuf = mbuf;
419 tab->cur_mblock->cur_mbuf = mbuf = mbuf->next;
421 mbuf->cur_record = 0;
425 memcpy(buf, mbuf->data + mbuf->offset + mbuf->cur_record *
426 is_keysize(tab->is), is_keysize(tab->is));
432 * TODO: optimize this function by introducing a higher-level search.
/* Linear scan forward until a record comparing >= rec is found, then
 * un-read the overshooting record so the cursor rests just before it.
 * NOTE(review): the surrounding loop, the equality branch, and the return
 * value semantics are missing from this view. */
434 int is_m_seek_record(is_mtable *tab, const void *rec)
436 char peek[IS_MAX_RECORD];
441 if (is_m_read_record(tab, &peek, 1) <= 0)
/* cmp > 0: we just passed the insertion point -- step back one record */
443 if ((rs = (*tab->is->cmp)(peek, rec)) > 0)
445 is_m_unread_record(tab);
/* Return the table's record count, first faulting in the head block when it
 * has never been read (the count stays at -1 until then); logs a fatal
 * message when the read fails. */
453 int is_m_num_records(is_mtable *tab)
455 if (tab->data->state < IS_MBSTATE_PARTIAL)
456 if (read_current_full(tab, tab->data) < 0)
458 logf (LOG_FATAL, "read full failed");
461 return tab->num_records;