+ unsigned n = 0;
+ unsigned w = 1;
+ (*len) = 0;
+
+ while (buf[*len] > 127)
+ {
+ n += w*(buf[*len] & 127);
+ w = w << 7;
+ (*len)++;
+ }
+ n += w * buf[*len];
+ (*len)++;
+ *np = n;
+}
+
+/* Encode n as a little-endian base-128 (varint) sequence into buf.
+ * Each byte carries 7 value bits; the high bit (128) flags that more
+ * bytes follow.  On return *len is the number of bytes written. */
+static void rec_encode_zint (zint n, unsigned char *buf, int *len)
+{
+ int pos = 0;
+
+ while (n > 127)
+ {
+ buf[pos] = (unsigned) (128 + (n & 127));
+ n = n >> 7;
+ pos++;
+ }
+ buf[pos] = (unsigned) n; /* final byte: high bit clear */
+ pos++;
+ *len = pos;
+}
+
+/* Decode a base-128 (varint) sequence from buf into *np.
+ * Bytes with the high bit set contribute 7 bits each; a byte <= 127
+ * terminates the value.  On return *len is the number of bytes read.
+ * Inverse of rec_encode_zint. */
+static void rec_decode_zint(zint *np, unsigned char *buf, int *len)
+{
+ zint result = 0;
+ zint weight = 1;
+ int pos = 0;
+
+ while (buf[pos] > 127)
+ {
+ result += weight * (buf[pos] & 127);
+ weight = weight << 7;
+ pos++;
+ }
+ result += weight * buf[pos];
+ pos++;
+ *np = result;
+ *len = pos;
+}
+
+/* Append one record to the in-memory flush buffer.
+ * Encoding: the record's sysno (base-128 zint), then one
+ * length-prefixed entry per info field.  Prefix values:
+ *   1        = field is empty (size 0), no data follows
+ *   0        = field is byte-identical to the same field of last_rec
+ *              (delta against the previously flushed record)
+ *   size + 1 = size bytes of literal field data follow
+ * *out_buf / *out_size / *out_offset describe the output buffer and
+ * are updated in place; the buffer is reallocated when it would
+ * overflow.  last_rec may be NULL (no previous record to delta
+ * against). */
+static void rec_cache_flush_block1 (Records p, Record rec, Record last_rec,
+ char **out_buf, int *out_size,
+ int *out_offset)
+{
+ int i;
+ int len;
+
+ for (i = 0; i<REC_NO_INFO; i++)
+ {
+ /* grow the buffer if needed; the extra 20 bytes leave headroom
+    for the encoded sysno and the length prefix written below in
+    addition to the field data itself */
+ if (*out_offset + (int) rec->size[i] + 20 > *out_size)
+ {
+ int new_size = *out_offset + rec->size[i] + 65536;
+ char *np = (char *) xmalloc (new_size);
+ if (*out_offset)
+ memcpy (np, *out_buf, *out_offset);
+ xfree (*out_buf);
+ *out_size = new_size;
+ *out_buf = np;
+ }
+ if (i == 0)
+ {
+ /* record header: the system number is encoded once, before
+    the first field */
+ rec_encode_zint (rec->sysno, *out_buf + *out_offset, &len);
+ (*out_offset) += len;
+ }
+ if (rec->size[i] == 0)
+ {
+ /* empty field: prefix 1, no data */
+ rec_encode_unsigned (1, *out_buf + *out_offset, &len);
+ (*out_offset) += len;
+ }
+ else if (last_rec && rec->size[i] == last_rec->size[i] &&
+ !memcmp (rec->info[i], last_rec->info[i], rec->size[i]))
+ {
+ /* unchanged from previous record: prefix 0, no data */
+ rec_encode_unsigned (0, *out_buf + *out_offset, &len);
+ (*out_offset) += len;
+ }
+ else
+ {
+ /* literal field: prefix size+1 (so 0 and 1 stay reserved
+    for the cases above), then the raw bytes */
+ rec_encode_unsigned (rec->size[i]+1, *out_buf + *out_offset, &len);
+ (*out_offset) += len;
+ memcpy (*out_buf + *out_offset, rec->info[i], rec->size[i]);
+ (*out_offset) += rec->size[i];
+ }
+ }
+}
+
+/* Write all cache entries except the newest saveCount ones to disk as
+ * a single, optionally compressed, multi-record block.
+ * New/updated records are serialized with rec_cache_flush_block1 and
+ * their flags reset to recordFlagNop; deleted records are removed one
+ * by one.  The block assembled in p->tmp_buf is laid out as:
+ *   [zint: leading slot left untouched here - presumably filled in by
+ *    rec_write_tmp_buf, confirm] [short ref_count]
+ *   [char compression_method] [payload]
+ * sysnos collects the sysno of every serialized record, terminated by
+ * -1, and is passed to rec_write_tmp_buf. */
+static void rec_write_multiple (Records p, int saveCount)
+{
+ int i;
+ short ref_count = 0; /* number of records in this block */
+ char compression_method;
+ Record last_rec = 0; /* previous record, for delta encoding */
+ int out_size = 1000;
+ int out_offset = 0;
+ char *out_buf = (char *) xmalloc (out_size);
+ SYSNO *sysnos = (SYSNO *) xmalloc (sizeof(*sysnos) * (p->cache_cur + 1));
+ SYSNO *sysnop = sysnos;
+
+ for (i = 0; i<p->cache_cur - saveCount; i++)
+ {
+ struct record_cache_entry *e = p->record_cache + i;
+ switch (e->flag)
+ {
+ case recordFlagNew:
+ rec_cache_flush_block1 (p, e->rec, last_rec, &out_buf,
+ &out_size, &out_offset);
+ *sysnop++ = e->rec->sysno;
+ ref_count++;
+ e->flag = recordFlagNop;
+ last_rec = e->rec;
+ break;
+ case recordFlagWrite:
+ /* rewrite: free the record's old on-disk blocks first */
+ rec_release_blocks (p, e->rec->sysno);
+ rec_cache_flush_block1 (p, e->rec, last_rec, &out_buf,
+ &out_size, &out_offset);
+ *sysnop++ = e->rec->sysno;
+ ref_count++;
+ e->flag = recordFlagNop;
+ last_rec = e->rec;
+ break;
+ case recordFlagDelete:
+ rec_delete_single (p, e->rec);
+ e->flag = recordFlagNop;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* terminate the sysno list */
+ *sysnop = -1;
+ if (ref_count)
+ {
+ int csize = 0; /* indicate compression "not performed yet" */
+ compression_method = p->compression_method;
+ switch (compression_method)
+ {
+ case REC_COMPRESS_BZIP2:
+#if HAVE_BZLIB_H
+ /* worst-case destination size: input plus ~1.5% plus slack,
+    per bzlib's "1% larger + 600 bytes" guidance */
+ csize = out_offset + (out_offset >> 6) + 620;
+ rec_tmp_expand (p, csize);
+#ifdef BZ_CONFIG_ERROR
+ i = BZ2_bzBuffToBuffCompress
+#else
+ i = bzBuffToBuffCompress
+#endif
+ (p->tmp_buf+sizeof(zint)+sizeof(short)+
+ sizeof(char),
+ &csize, out_buf, out_offset, 1, 0, 30);
+ if (i != BZ_OK)
+ {
+ yaz_log (YLOG_WARN, "bzBuffToBuffCompress error code=%d", i);
+ csize = 0; /* fall back to uncompressed below */
+ }
+ yaz_log (YLOG_LOG, "compress %4d %5d %5d", ref_count, out_offset,
+ csize);
+#endif
+ break;
+ case REC_COMPRESS_NONE:
+ break;
+ }
+ if (!csize)
+ {
+ /* either no compression or compression not supported ... */
+ csize = out_offset;
+ rec_tmp_expand (p, csize);
+ memcpy (p->tmp_buf + sizeof(zint) + sizeof(short) + sizeof(char),
+ out_buf, out_offset);
+ csize = out_offset;
+ compression_method = REC_COMPRESS_NONE;
+ }
+ /* header: record count and compression method, after the
+    reserved leading zint */
+ memcpy (p->tmp_buf + sizeof(zint), &ref_count, sizeof(ref_count));
+ memcpy (p->tmp_buf + sizeof(zint)+sizeof(short),
+ &compression_method, sizeof(compression_method));
+
+ /* -------- compression */
+ rec_write_tmp_buf (p, csize + sizeof(short) + sizeof(char), sysnos);
+ }
+ xfree (out_buf);
+ xfree (sysnos);
+}
+
+/* Flush the record cache: write out everything except the newest
+ * saveCount entries, free the flushed records, then slide the kept
+ * entries down to the start of the cache array. */
+static void rec_cache_flush (Records p, int saveCount)
+{
+ int src, dst;
+
+ if (saveCount >= p->cache_cur)
+ saveCount = 0;
+
+ rec_write_multiple (p, saveCount);
+
+ /* release every record that was just written out */
+ for (src = 0; src < p->cache_cur - saveCount; src++)
+ rec_rm (&(p->record_cache + src)->rec);
+ /* compact: src always exceeds dst, so entries never overlap */
+ for (dst = 0; dst < saveCount; dst++, src++)
+ p->record_cache[dst] = p->record_cache[src];
+ p->cache_cur = saveCount;
+}
+
+static Record *rec_cache_lookup (Records p, SYSNO sysno,
+ enum recordCacheFlag flag)
+{
+ int i;
+ for (i = 0; i<p->cache_cur; i++)
+ {
+ struct record_cache_entry *e = p->record_cache + i;
+ if (e->rec->sysno == sysno)
+ {
+ if (flag != recordFlagNop && e->flag == recordFlagNop)
+ e->flag = flag;
+ return &e->rec;
+ }
+ }