linear_hash.c
1 /******************************************************************************/
35 /******************************************************************************/
36 
37 #include "linear_hash.h"
38 #include <math.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <string.h>
42 
43 #define GET_BUCKET_RECORDS_LOC(bucket_loc) ((bucket_loc) + sizeof(linear_hash_bucket_t))
44 
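/*
 * On-disk layout, as implied by the read/write routines below: each bucket is a
 * linear_hash_bucket_t header followed by records_per_bucket fixed-size record slots,
 * and each record slot is a one-byte status flag, then the key bytes, then the value bytes.
 */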
45 /* initialization function */
46 ion_err_t
47 linear_hash_init(
48  ion_dictionary_id_t id,
49  ion_dictionary_size_t dictionary_size,
50  ion_key_type_t key_type,
51  ion_key_size_t key_size,
52  ion_value_size_t value_size,
53  int initial_size,
54  int split_threshold,
55  int records_per_bucket,
56  linear_hash_table_t *linear_hash
57 ) {
58  /* error code returned by the initialization steps below */
59  ion_err_t err;
60 
61  /* store the dictionary parameters in the parent structure (dictionary_size is kept but not otherwise used here) */
62  linear_hash->super.id = id;
63  linear_hash->dictionary_size = dictionary_size;
64  linear_hash->super.key_type = key_type;
65  linear_hash->super.record.key_size = key_size;
66  linear_hash->super.record.value_size = value_size;
67 
68  /* initialize linear_hash fields */
69  linear_hash->initial_size = initial_size;
70  linear_hash->num_buckets = initial_size;
71  linear_hash->num_records = 0;
72  linear_hash->next_split = 0;
73  linear_hash->split_threshold = split_threshold;
74  linear_hash->records_per_bucket = records_per_bucket;
75  linear_hash->record_total_size = key_size + value_size + sizeof(ion_byte_t);
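	/* the cache is a fixed 128-byte scratch buffer used to hold the values of deleted
	 * records during linear_hash_delete so that split can re-insert them; it is assumed
	 * to be large enough for value_size times the number of matching records */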
76  linear_hash->cache = malloc(128);
77 
78  char data_filename[ION_MAX_FILENAME_LENGTH];
79 
80  dictionary_get_filename(linear_hash->super.id, "lhd", data_filename);
81 
82  char state_filename[ION_MAX_FILENAME_LENGTH];
83 
84  dictionary_get_filename(linear_hash->super.id, "lhs", state_filename);
85 
86  /* mapping of buckets to file offsets */
87  array_list_t *bucket_map;
88 
89  bucket_map = malloc(sizeof(array_list_t));
90 
91  if (NULL == bucket_map) {
92  /* clean up resources before returning if out of memory */
93  linear_hash_close(linear_hash);
94  return err_out_of_memory;
95  }
96 
97  err = array_list_init(5, bucket_map);
98 
99  if (err != err_ok) {
100  /* clean up resources before returning if out of memory */
101  linear_hash_close(linear_hash);
102  return err;
103  }
104 
105  linear_hash->bucket_map = bucket_map;
106  linear_hash->database = fopen(data_filename, "r+b");
107 
108  if (NULL == linear_hash->database) {
109  linear_hash->database = fopen(data_filename, "w+b");
110 
111  if (NULL == linear_hash->database) {
112  return err_file_open_error;
113  }
114 
115  int i;
116 
117  for (i = 0; i < linear_hash->initial_size; i++) {
118  err = write_new_bucket(i, linear_hash);
119 
120  if (err != err_ok) {
121  linear_hash_close(linear_hash);
122  return err;
123  }
124  }
125  }
126 
127  linear_hash->state = fopen(state_filename, "r+b");
128 
129  if (NULL == linear_hash->state) {
130  linear_hash->state = fopen(state_filename, "w+b");
131 
132  if (NULL == linear_hash->state) {
133  return err_file_open_error;
134  }
135 
136  err = linear_hash_write_state(linear_hash);
137 
138  if (err != err_ok) {
139  return err;
140  }
141  }
142  else {
143  err = linear_hash_read_state(linear_hash);
144 
145  if (err != err_ok) {
146  return err;
147  }
148  }
149 
150  err = linear_hash_write_state(linear_hash);
151 
152  if (err != err_ok) {
153  return err;
154  }
155 
156  /* return pointer to the linear_hash that is sitting in memory */
157  return err_ok;
158 }
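/*
 * Illustrative use only (not part of the original file): a table with 4 initial buckets,
 * a split threshold of 85 (percent), and 4 records per bucket might be set up roughly as
 * follows; the key type constant and the int-sized key/value are assumptions.
 *
 *	linear_hash_table_t table;
 *	ion_err_t e = linear_hash_init(1, 0, key_type_numeric_signed, sizeof(int),
 *			sizeof(int), 4, 85, 4, &table);
 */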
159 
167 ion_err_t
168 linear_hash_write_state(
169  linear_hash_table_t *linear_hash
170 ) {
171  if ((0 != fseek(linear_hash->state, 0, SEEK_SET)) || (1 != fwrite(&linear_hash->initial_size, sizeof(linear_hash->initial_size), 1, linear_hash->state))) {
172  return err_file_write_error;
173  }
174 
175  if (1 != fwrite(&linear_hash->next_split, sizeof(linear_hash->next_split), 1, linear_hash->state)) {
176  return err_file_write_error;
177  }
178 
179  if (1 != fwrite(&linear_hash->split_threshold, sizeof(linear_hash->split_threshold), 1, linear_hash->state)) {
180  return err_file_write_error;
181  }
182 
183  if (1 != fwrite(&linear_hash->num_buckets, sizeof(linear_hash->num_buckets), 1, linear_hash->state)) {
184  return err_file_write_error;
185  }
186 
187  if (1 != fwrite(&linear_hash->num_records, sizeof(linear_hash->num_records), 1, linear_hash->state)) {
188  return err_file_write_error;
189  }
190 
191  if (1 != fwrite(&linear_hash->records_per_bucket, sizeof(linear_hash->records_per_bucket), 1, linear_hash->state)) {
192  return err_file_write_error;
193  }
194 
195  if (1 != fwrite(&linear_hash->bucket_map->current_size, sizeof(int), 1, linear_hash->state)) {
196  return err_file_write_error;
197  }
198 
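	/* the bucket map is written padded out to current_size entries: the first num_buckets
	 * entries hold real offsets and any remaining slots are zero-filled */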
199  ion_byte_t *cached_bucket_map = alloca(sizeof(ion_fpos_t) * linear_hash->bucket_map->current_size);
200 
201  memset(cached_bucket_map, 0, sizeof(ion_fpos_t) * linear_hash->bucket_map->current_size);
202  memcpy(cached_bucket_map, linear_hash->bucket_map->data, linear_hash->num_buckets * sizeof(ion_fpos_t));
203 
204  if (1 != fwrite(cached_bucket_map, sizeof(ion_fpos_t) * linear_hash->bucket_map->current_size, 1, linear_hash->state)) {
205  return err_file_write_error;
206  }
207 
208  return err_ok;
209 }
210 
218 ion_err_t
219 linear_hash_read_state(
220  linear_hash_table_t *linear_hash
221 ) {
222  if (0 != fseek(linear_hash->state, 0, SEEK_SET)) {
223  return err_file_bad_seek;
224  }
225 
226  if (1 != fread(&(linear_hash->initial_size), sizeof(linear_hash->initial_size), 1, linear_hash->state)) {
227  return err_file_read_error;
228  }
229 
230  if (1 != fread(&linear_hash->next_split, sizeof(linear_hash->next_split), 1, linear_hash->state)) {
231  return err_file_read_error;
232  }
233 
234  if (1 != fread(&linear_hash->split_threshold, sizeof(linear_hash->split_threshold), 1, linear_hash->state)) {
235  return err_file_read_error;
236  }
237 
238  if (1 != fread(&linear_hash->num_buckets, sizeof(linear_hash->num_buckets), 1, linear_hash->state)) {
239  return err_file_read_error;
240  }
241 
242  if (1 != fread(&linear_hash->num_records, sizeof(linear_hash->num_records), 1, linear_hash->state)) {
243  return err_file_read_error;
244  }
245 
246  if (1 != fread(&linear_hash->records_per_bucket, sizeof(linear_hash->records_per_bucket), 1, linear_hash->state)) {
247  return err_file_read_error;
248  }
249 
250  if (1 != fread(&linear_hash->bucket_map->current_size, sizeof(int), 1, linear_hash->state)) {
251  return err_file_read_error;
252  }
253  /* grow the bucket map to the persisted size, then read the stored offsets back */
254  linear_hash->bucket_map->data = realloc(linear_hash->bucket_map->data, sizeof(ion_fpos_t) * linear_hash->bucket_map->current_size);
255  if ((NULL == linear_hash->bucket_map->data) || (1 != fread(linear_hash->bucket_map->data, sizeof(ion_fpos_t) * linear_hash->num_buckets, 1, linear_hash->state))) {
256  return err_file_read_error;
257  }
258  return err_ok;
259 }
260 
269 int
270 linear_hash_bucket_is_full(
271  linear_hash_bucket_t bucket,
272  linear_hash_table_t *linear_hash
273 ) {
274  return bucket.record_count == linear_hash->records_per_bucket;
275 }
276 
284 ion_err_t
285 linear_hash_increment_num_records(
286  linear_hash_table_t *linear_hash
287 ) {
288  linear_hash->num_records++;
289 
290  ion_err_t err = err_ok;
291 
292  if (linear_hash_above_threshold(linear_hash)) {
293  err = write_new_bucket(linear_hash->num_buckets, linear_hash);
294 
295  if (err != err_ok) {
296  return err;
297  }
298 
299  linear_hash_increment_num_buckets(linear_hash);
300  err = split(linear_hash);
301  }
302 
303  return err;
304 }
305 
312 ion_err_t
313 linear_hash_decrement_num_records(
314  linear_hash_table_t *linear_hash
315 ) {
316  linear_hash->num_records--;
317  return err_ok;
318 }
319 
327 ion_err_t
328 linear_hash_increment_num_buckets(
329  linear_hash_table_t *linear_hash
330 ) {
331  linear_hash->num_buckets++;
332 
333  if (linear_hash->num_buckets == 2 * linear_hash->initial_size + 1) {
334  linear_hash->initial_size = linear_hash->initial_size * 2;
335  linear_hash->next_split = 0;
336  }
337 
338  return err_ok;
339 }
340 
348 ion_err_t
349 linear_hash_increment_next_split(
350  linear_hash_table_t *linear_hash
351 ) {
352  linear_hash->next_split++;
353  return err_ok;
354 }
355 
363 ion_err_t
364 split(
365  linear_hash_table_t *linear_hash
366 ) {
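	/* Split walks the bucket chain at next_split and re-hashes every live record with the
	 * larger hash function; records whose new bucket differs from their current one are
	 * deleted from the chain (their values are cached by linear_hash_delete) and then
	 * re-inserted into the bucket they now hash to. */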
367  /* status to hold amount of records deleted */
368  ion_status_t status = ION_STATUS_INITIALIZE;
369 
370  /* get bucket to split */
371  ion_fpos_t bucket_loc = bucket_idx_to_ion_fpos_t(linear_hash->next_split, linear_hash);
372  linear_hash_bucket_t bucket;
373 
374  status.error = linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
375 
376  if (status.error != err_ok) {
377  return status.error;
378  }
379 
380  /* stores for record data */
381  ion_byte_t *record_key = alloca(linear_hash->super.record.key_size);
382  ion_byte_t *record_value = alloca(linear_hash->super.record.value_size);
383  ion_byte_t record_status;
384 
385  int split_hash_key;
386  int insert_hash_key;
387 
388  int i, j;
389  ion_byte_t *records = alloca(linear_hash->record_total_size * linear_hash->records_per_bucket);
390 
391  memset(records, 0, linear_hash->record_total_size * linear_hash->records_per_bucket);
392 
393  ion_fpos_t record_offset = 0;
394  ion_boolean_t terminal = boolean_false;
395 
396  while (terminal == boolean_false) {
397  /* if the bucket is not empty */
398  if (bucket.record_count > 0) {
399  /* read all records into memory */
400  fseek(linear_hash->database, bucket_loc + sizeof(linear_hash_bucket_t), SEEK_SET);
401  fread(records, linear_hash->record_total_size, linear_hash->records_per_bucket, linear_hash->database);
402 
403  /* scan records for records that should be placed in the new bucket */
404  for (i = 0; i < bucket.record_count; i++) {
405  memcpy(&record_status, records + record_offset, sizeof(record_status));
406  memcpy(record_key, records + record_offset + sizeof(record_status), linear_hash->super.record.key_size);
407  memcpy(record_value, records + record_offset + sizeof(record_status) + linear_hash->super.record.key_size, linear_hash->super.record.value_size);
408 
409  insert_hash_key = insert_hash_to_bucket(record_key, linear_hash);
410 
411  split_hash_key = hash_to_bucket(record_key, linear_hash);
412 
413  /* if the record is not a tombstone and h0(k) != h1(k) */
414  if ((record_status == linear_hash_record_status_full) && (insert_hash_key != split_hash_key)) {
415  /* delete all records with this key from the table */
416  status = linear_hash_delete(record_key, linear_hash);
417 
418  if (status.error != err_ok) {
419  return status.error;
420  }
421 
422  int num_deleted = status.count;
423 
424  linear_hash->last_cache_idx = 0;
425 
426  /* insert that many records into the table */
427  for (j = 0; j < num_deleted; j++) {
428  memcpy(record_value, linear_hash->cache + linear_hash->last_cache_idx * linear_hash->super.record.value_size, linear_hash->super.record.value_size);
429  linear_hash->last_cache_idx++;
430 
431  status = linear_hash_insert(record_key, record_value, hash_to_bucket(record_key, linear_hash), linear_hash);
432 
433  if (status.error != err_ok) {
434  return status.error;
435  }
436  }
437 
438  /* refresh cached data and restart iteration and offset tracker */
439  fseek(linear_hash->database, bucket_loc + sizeof(linear_hash_bucket_t), SEEK_SET);
440  fread(records, linear_hash->record_total_size, linear_hash->records_per_bucket, linear_hash->database);
441  status.error = linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
442  i = -1;
443  record_offset = -1 * linear_hash->record_total_size;
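	/* i and record_offset are set to -1 and -record_total_size here because the
	 * for-loop increment and the offset update at the end of this iteration advance
	 * them back to 0, restarting the scan of the freshly re-read bucket */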
444  }
445 
446  /* track offset from locations of records in memory the next record is at */
447  record_offset += linear_hash->record_total_size;
448  }
449 
450  /* reset offset */
451  record_offset = 0;
452  }
453 
455  terminal = boolean_true;
456  }
457  else {
458  bucket_loc = bucket.overflow_location;
459  status.error = linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
460 
461  if (status.error != err_ok) {
462  return status.error;
463  }
464  }
465  }
466 
467  return linear_hash_increment_next_split(linear_hash);
468 }
469 
476 ion_boolean_t
477 linear_hash_above_threshold(
478  linear_hash_table_t *linear_hash
479 ) {
480  double numerator = (double) (100 * (linear_hash->num_records));
481  double denominator = (double) (linear_hash->num_buckets * linear_hash->records_per_bucket);
482 
483  double load = numerator / denominator;
484 
485  ion_boolean_t above_threshold = (load > linear_hash->split_threshold);
486 
487  return above_threshold;
488 }
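/*
 * Example (illustrative): with 4 buckets of 4 records each, inserting the 14th record brings
 * the load to 100 * 14 / 16 = 87.5, which exceeds a split_threshold of 85 and triggers a split.
 */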
489 
506 ion_err_t
507 linear_hash_get_bucket_swap_record(
508  int bucket_idx,
509  ion_fpos_t *record_loc,
510  ion_byte_t *key,
511  ion_byte_t *value,
512  ion_byte_t *status,
513  linear_hash_table_t *linear_hash
514 ) {
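	/* Fetches the last live record of the chain head for bucket_idx, marks that slot as
	 * deleted, and hands its key/value back to the caller so the caller can overwrite the
	 * record actually being deleted (swap-with-last deletion). */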
515  /* read in bucket currently swapping to obtain the last record */
516  ion_fpos_t bucket_loc = array_list_get(bucket_idx, linear_hash->bucket_map);
517  linear_hash_bucket_t bucket;
518 
519  linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
520 
521  ion_fpos_t swap_record_loc = bucket_loc + sizeof(linear_hash_bucket_t) + ((bucket.record_count - 1) * linear_hash->record_total_size);
522 
523  /* read in the record to swap with next */
524  ion_err_t err = linear_hash_get_record(swap_record_loc, key, value, status, linear_hash);
525 
526  if (err != err_ok) {
527  return err;
528  }
529 
530  ion_byte_t deleted_status = linear_hash_record_status_empty;
531 
532  /* TODO JUST NEED TO WRITE STATUS HERE */
533  err = linear_hash_write_record(swap_record_loc, key, value, &deleted_status, linear_hash);
534 
535  if (err != err_ok) {
536  return err;
537  }
538 
539  *record_loc = swap_record_loc;
540  bucket.record_count--;
541 
542  /* guarantee the bucket in the bucket map has records in it - THIS LEAVES EMPTY BUCKETS FLOATING ABOUT */
543  if ((bucket.record_count == 0) && (bucket.overflow_location != linear_hash_end_of_list)) {
544  array_list_insert(bucket.idx, bucket.overflow_location, linear_hash->bucket_map);
545  }
546 
547  /* update the bucket header, even if it has been left behind as an empty (junk) bucket */
548  err = linear_hash_update_bucket(bucket_loc, &bucket, linear_hash);
550 
551  if (err != err_ok) {
552  return err;
553  }
554 
555  return err;
556 }
557 
558 /* linear hash operations */
572 ion_status_t
573 linear_hash_insert(
574  ion_key_t key,
575  ion_value_t value,
576  int hash_bucket_idx,
577  linear_hash_table_t *linear_hash
578 ) {
579  ion_status_t status = ION_STATUS_INITIALIZE;
580 
581  if (hash_bucket_idx < linear_hash->next_split) {
582  hash_bucket_idx = hash_to_bucket(key, linear_hash);
583  }
584 
585  /* create a linear_hash_record with the desired key, value, and status of full*/
586  ion_byte_t *record_key = alloca(linear_hash->super.record.key_size);
587  ion_byte_t *record_value = alloca(linear_hash->super.record.value_size);
588  ion_byte_t record_status = linear_hash_record_status_full;
589 
590  memcpy(record_key, key, linear_hash->super.record.key_size);
591  memcpy(record_value, value, linear_hash->super.record.value_size);
592 
593  /* get the appropriate bucket for insertion */
594  ion_fpos_t bucket_loc = bucket_idx_to_ion_fpos_t(hash_bucket_idx, linear_hash);
595  linear_hash_bucket_t bucket;
596 
597  status.error = linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
598 
599  if (status.error != err_ok) {
600  return status;
601  }
602 
603  /* location of the records in the bucket to be stored in */
604  ion_fpos_t bucket_records_loc = GET_BUCKET_RECORDS_LOC(bucket_loc);
605  ion_fpos_t record_loc;
606 
607  /* Case the bucket is empty */
608  if (bucket.record_count == 0) {
609  record_loc = bucket_records_loc;
610  }
611  else {
612  /* Case that the bucket is full but there is not yet an overflow bucket */
613  if (linear_hash_bucket_is_full(bucket, linear_hash)) {
614  /* Get location of overflow bucket and update the tail record for the linked list of buckets storing
615  * items that hash to this bucket and update the tail bucket with the overflow's location */
616  ion_fpos_t *overflow_location = alloca(sizeof(ion_fpos_t));
617 
618  status.error = create_overflow_bucket(bucket.idx, overflow_location, linear_hash);
619 
620  if (err_ok != status.error) {
621  return status;
622  }
623 
624  /* update parameters of bucket to match that of overflow just created */
625  bucket.record_count = 0;
626  bucket.overflow_location = bucket_loc;
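	/* the freshly created overflow bucket becomes the new head of this bucket's chain in
	 * the bucket map (see create_overflow_bucket), and its overflow_location links back to
	 * the previous head, so chains grow by insertion at the front */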
627 
628  /* update the locations to write to in the file for the record and bucket */
629  record_loc = GET_BUCKET_RECORDS_LOC(*overflow_location);
630  bucket_loc = *overflow_location;
631  }
632  /* case there is >= 1 record in the bucket but it is not full */
633  else {
634  /* compute the location of the next free record slot in this bucket */
635  record_loc = bucket_loc + sizeof(linear_hash_bucket_t) + bucket.record_count * linear_hash->record_total_size;
636  }
637  }
638 
639  /* write new record to the db */
640  status.error = linear_hash_write_record(record_loc, record_key, record_value, &record_status, linear_hash);
641 
642  if (status.error != err_ok) {
643  return status;
644  }
645 
646  status.count++;
647 
648  /* update bucket */
649  bucket.record_count++;
650  status.error = linear_hash_update_bucket(bucket_loc, &bucket, linear_hash);
651 
652  if (status.error != err_ok) {
653  return status;
654  }
655 
656  status.error = err_ok;
658  return status;
659 }
660 
661 /* linear hash operations */
672 ion_status_t
673 linear_hash_get(
674  ion_byte_t *key,
675  ion_byte_t *value,
676  linear_hash_table_t *linear_hash
677 ) {
678  /* status for result count */
679  ion_status_t status = ION_STATUS_INITIALIZE;
680  /* get the index of the bucket to read */
681  int bucket_idx = insert_hash_to_bucket(key, linear_hash);
682 
683  if (bucket_idx < linear_hash->next_split) {
684  bucket_idx = hash_to_bucket(key, linear_hash);
685  }
686 
687  /* get the bucket where the record would be located */
688  ion_fpos_t bucket_loc = bucket_idx_to_ion_fpos_t(bucket_idx, linear_hash);
689  linear_hash_bucket_t bucket;
690 
691  status.error = linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
692 
693  if (status.error != err_ok) {
694  return status;
695  }
696 
697  /* temporary storage for record data read from the bucket */
698  ion_byte_t *record_key = alloca(linear_hash->super.record.key_size);
699  ion_byte_t *record_value = alloca(linear_hash->super.record.value_size);
700  ion_byte_t record_status;
701 
702  int found = boolean_false;
703  int i;
704 
705  ion_byte_t *records = alloca(linear_hash->record_total_size * linear_hash->records_per_bucket);
706 
707  memset(records, 0, linear_hash->record_total_size * linear_hash->records_per_bucket);
708 
709  ion_fpos_t record_offset = 0;
710  ion_fpos_t record_loc;
711  ion_boolean_t terminal = boolean_false;
712 
713  while (terminal == boolean_false && found == boolean_false) {
714  record_loc = bucket_loc + sizeof(linear_hash_bucket_t);
715  fseek(linear_hash->database, bucket_loc + sizeof(linear_hash_bucket_t), SEEK_SET);
716  fread(records, linear_hash->record_total_size, linear_hash->records_per_bucket, linear_hash->database);
717 
718  for (i = 0; i < linear_hash->records_per_bucket; i++) {
719  memcpy(&record_status, records + record_offset, sizeof(record_status));
720  memcpy(record_key, records + record_offset + sizeof(record_status), linear_hash->super.record.key_size);
721  memcpy(record_value, records + record_offset + sizeof(record_status) + linear_hash->super.record.key_size, linear_hash->super.record.value_size);
722 
723  if (record_status != 0) {
724  if (linear_hash->super.compare(record_key, key, linear_hash->super.record.key_size) == 0) {
725  status.count++;
726  memcpy(value, record_value, linear_hash->super.record.value_size);
727  found = boolean_true;
728  break;
729  }
730  }
731 
732  record_offset += linear_hash->record_total_size;
733  record_loc += linear_hash->record_total_size;
734  }
735 
736  if (found == boolean_false) {
737  if (bucket.overflow_location == linear_hash_end_of_list) {
738  terminal = boolean_true;
739  }
740  else {
741  record_offset = 0;
742  bucket_loc = bucket.overflow_location;
743  status.error = linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
744 
745  if (status.error != err_ok) {
746  return status;
747  }
748  }
749  }
750  }
751 
752  if (status.count == 0) {
753  status.error = err_item_not_found;
754  }
755  else {
756  status.error = err_ok;
757  }
758 
759  return status;
760 }
761 
762 /* linear hash operations */
773 ion_status_t
774 linear_hash_update(
775  ion_key_t key,
776  ion_value_t value,
777  linear_hash_table_t *linear_hash
778 ) {
779  ion_status_t status = ION_STATUS_INITIALIZE;
780  /* get the index of the bucket to read */
781  int bucket_idx = insert_hash_to_bucket(key, linear_hash);
782 
783  if (bucket_idx < linear_hash->next_split) {
784  bucket_idx = hash_to_bucket(key, linear_hash);
785  }
786 
787  /* get the bucket where the record would be located */
788  ion_fpos_t bucket_loc = bucket_idx_to_ion_fpos_t(bucket_idx, linear_hash);
789  linear_hash_bucket_t bucket;
790 
791  status.error = linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
792 
793  if (status.error != err_ok) {
794  return status;
795  }
796 
797  /* create a temporary store for records that are read */
798  ion_byte_t *record_key = alloca(linear_hash->super.record.key_size);
799  ion_byte_t *record_value = alloca(linear_hash->super.record.value_size);
800  ion_byte_t record_status;
801 
802  ion_fpos_t record_loc;
803 
804  int i;
805  ion_boolean_t terminal = boolean_false;
806 
807  while (terminal == boolean_false) {
808  record_loc = bucket_loc + sizeof(linear_hash_bucket_t);
809 
810  for (i = 0; i < linear_hash->records_per_bucket; i++) {
811  status.error = linear_hash_get_record(record_loc, record_key, record_value, &record_status, linear_hash);
812 
813  if (status.error != err_ok) {
814  return status;
815  }
816 
817  if (record_status == linear_hash_record_status_full) {
818  if (linear_hash->super.compare(record_key, key, linear_hash->super.record.key_size) == 0) {
819  status.error = linear_hash_write_record(record_loc, record_key, value, &record_status, linear_hash);
820 
821  if (status.error != err_ok) {
822  return status;
823  }
824 
825  status.count++;
826  }
827  }
828 
829  record_loc += linear_hash->record_total_size;
830  }
831 
832  if (bucket.overflow_location == linear_hash_end_of_list) {
833  terminal = boolean_true;
834  }
835  else {
836  bucket_loc = bucket.overflow_location;
837  status.error = linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
838 
839  if (status.error != err_ok) {
840  return status;
841  }
842  }
843  }
844 
845  if (status.count == 0) {
846  status.error = err_item_not_found;
847  return linear_hash_insert(key, value, insert_hash_to_bucket(key, linear_hash), linear_hash);
848  }
849  else {
850  status.error = err_ok;
851  }
852 
853  return status;
854 }
855 
856 /* linear hash operations */
866 ion_status_t
867 linear_hash_delete(
868  ion_byte_t *key,
869  linear_hash_table_t *linear_hash
870 ) {
871  /* status for result count */
872  ion_status_t status = ION_STATUS_INITIALIZE;
873  /* get the index of the bucket to read */
874  int bucket_idx = insert_hash_to_bucket(key, linear_hash);
875 
876  if (bucket_idx < linear_hash->next_split) {
877  bucket_idx = hash_to_bucket(key, linear_hash);
878  }
879 
880  /* get the bucket where the record would be located */
881  ion_fpos_t bucket_loc = bucket_idx_to_ion_fpos_t(bucket_idx, linear_hash);
882  linear_hash_bucket_t bucket;
883 
884  status.error = linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
885 
886  if (status.error != err_ok) {
887  return status;
888  }
889 
890  /* temporary storage for record data read from the bucket */
891  ion_byte_t *record_key = alloca(linear_hash->super.record.key_size);
892  ion_byte_t *record_value = alloca(linear_hash->super.record.value_size);
893  ion_byte_t record_status;
894 
895  int i;
896 
897  ion_byte_t *records = alloca(linear_hash->record_total_size * linear_hash->records_per_bucket);
898 
899  memset(records, 0, linear_hash->record_total_size * linear_hash->records_per_bucket);
900 
901  ion_fpos_t record_offset = 0;
902  ion_fpos_t record_loc;
903 
904  /* storage for the terminal (last) record of the chain, used for swap-on-delete */
905  ion_byte_t *terminal_record_key = alloca(linear_hash->super.record.key_size);
906  ion_byte_t *terminal_record_value = alloca(linear_hash->super.record.value_size);
907  ion_byte_t terminal_record_status = linear_hash_record_status_empty;
908 
909  linear_hash->last_cache_idx = 0;
910 
911  ion_boolean_t terminal = boolean_false;
912 
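	/* Records that match the key are removed by overwriting them with the last live record
	 * of the chain head (obtained through linear_hash_get_bucket_swap_record); each deleted
	 * value is also appended to linear_hash->cache so that split can re-insert it. */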
913  while (terminal == boolean_false) {
914  record_loc = bucket_loc + sizeof(linear_hash_bucket_t);
915  fseek(linear_hash->database, bucket_loc + sizeof(linear_hash_bucket_t), SEEK_SET);
916  fread(records, linear_hash->record_total_size, linear_hash->records_per_bucket, linear_hash->database);
917 
918  for (i = 0; i < bucket.record_count; i++) {
919  /* read in record */
920  memcpy(&record_status, records + record_offset, sizeof(record_status));
921  memcpy(record_key, records + record_offset + sizeof(record_status), linear_hash->super.record.key_size);
922  memcpy(record_value, records + record_offset + sizeof(record_status) + linear_hash->super.record.key_size, linear_hash->super.record.value_size);
923 
924  if (record_status != 0) {
925  if (linear_hash->super.compare(record_key, key, linear_hash->super.record.key_size) == 0) {
926  /* TODO Create wrapper methods and implement proper error propagation */
927  /* cache the record being deleted's value */
928  memcpy(linear_hash->cache + linear_hash->last_cache_idx * linear_hash->super.record.value_size, record_value, linear_hash->super.record.value_size);
929  linear_hash->last_cache_idx++;
930 
931  /* seed the write-back parameter with record_loc; the call below overwrites it with the swap record's location */
932  ion_fpos_t swap_record_loc = record_loc;
933 
934  /* obtain the swap record */
935  linear_hash_get_bucket_swap_record(bucket_idx, &swap_record_loc, terminal_record_key, terminal_record_value, &terminal_record_status, linear_hash);
936 
937  /* skip over swap records that also match the key, since they are going to be deleted anyway */
938  while (linear_hash->super.compare(terminal_record_key, key, linear_hash->super.record.key_size) == 0) {
939  memcpy(linear_hash->cache + linear_hash->last_cache_idx * linear_hash->super.record.value_size, terminal_record_value, linear_hash->super.record.value_size);
940  linear_hash->last_cache_idx++;
941 
942  /* if the swap record is the record being deleted itself, */
943  /* there is nothing left to swap with, so stop searching */
944  if (record_loc == swap_record_loc) {
945  break;
946  }
947 
948  linear_hash_get_bucket_swap_record(bucket_idx, &swap_record_loc, terminal_record_key, terminal_record_value, &terminal_record_status, linear_hash);
949 
950  if (terminal_record_status == linear_hash_record_status_empty) {
951  break;
952  }
953 
954  status.count++;
955  }
956 
957  /* if we are not trying to swap a record with itself */
958  if (record_loc != swap_record_loc) {
959  /* write the swapped record to record_loc */
960  status.error = linear_hash_write_record(record_loc, terminal_record_key, terminal_record_value, &terminal_record_status, linear_hash);
961  }
962 
963  status.count++;
964 
965  if (status.error != err_ok) {
966  return status;
967  }
968 
969  linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
970  }
971  }
972 
973  record_loc += linear_hash->record_total_size;
974  record_offset += linear_hash->record_total_size;
975  }
976 
977  if (bucket.overflow_location == linear_hash_end_of_list) {
978  terminal = boolean_true;
979  }
980  else {
981  record_offset = 0;
982  bucket_loc = bucket.overflow_location;
983  status.error = linear_hash_get_bucket(bucket_loc, &bucket, linear_hash);
984 
985  if (status.error != err_ok) {
986  return status;
987  }
988  }
989  }
990 
991  if (status.count == 0) {
992  status.error = err_item_not_found;
993  }
994  else {
995  status.error = err_ok;
996  }
997 
998  return status;
999 }
1000 
1001 /* returns the struct representing the bucket at the specified index */
1016 ion_err_t
1017 linear_hash_get_record(
1018  ion_fpos_t loc,
1019  ion_byte_t *key,
1020  ion_byte_t *value,
1021  ion_byte_t *status,
1022  linear_hash_table_t *linear_hash
1023 ) {
1024  /* seek to location of record in file */
1025  if (0 != fseek(linear_hash->database, loc, SEEK_SET)) {
1026  return err_file_bad_seek;
1027  }
1028 
1029  /* cache record data from file */
1030  ion_byte_t *record = alloca(linear_hash->record_total_size);
1031 
1032  if (1 != fread(record, linear_hash->record_total_size, 1, linear_hash->database)) {
1033  return err_file_read_error;
1034  }
1035 
1036  /* read record data elements */
1037  memcpy(status, record, sizeof(*status));
1038  memcpy(key, record + sizeof(*status), linear_hash->super.record.key_size);
1039  memcpy(value, record + sizeof(*status) + linear_hash->super.record.key_size, linear_hash->super.record.value_size);
1040 
1041  return err_ok;
1042 }
1043 
1058 ion_err_t
1059 linear_hash_write_record(
1060  ion_fpos_t record_loc,
1061  ion_byte_t *key,
1062  ion_byte_t *value,
1063  ion_byte_t *status,
1064  linear_hash_table_t *linear_hash
1065 ) {
1066  /* check the file is open */
1067  if (!linear_hash->database) {
1068  return err_file_close_error;
1069  }
1070 
1071  /* seek to the location of the record in the data file */
1072  if (0 != fseek(linear_hash->database, record_loc, SEEK_SET)) {
1073  return err_file_bad_seek;
1074  }
1075 
1076  ion_byte_t *record = alloca(linear_hash->record_total_size);
1077 
1078  memcpy(record, status, sizeof(*status));
1079  memcpy(record + sizeof(*status), key, linear_hash->super.record.key_size);
1080  memcpy(record + linear_hash->super.record.key_size + sizeof(*status), value, linear_hash->super.record.value_size);
1081 
1082  if (1 != fwrite(record, linear_hash->record_total_size, 1, linear_hash->database)) {
1083  return err_file_write_error;
1084  }
1085 
1086  return err_ok;
1087 }
1088 
1098 ion_err_t
1099 write_new_bucket(
1100  int idx,
1101  linear_hash_table_t *linear_hash
1102 ) {
1103  if (linear_hash->database == NULL) {
1104  return err_file_open_error;
1105  }
1106 
1107  linear_hash_bucket_t bucket;
1108 
1109  /* initialize bucket fields */
1110  bucket.idx = idx;
1111  bucket.record_count = 0;
1112  bucket.overflow_location = linear_hash_end_of_list;
1113 
1114  /* seek to end of file to append new bucket */
1115  ion_fpos_t bucket_loc;
1116 
1117  if (0 != fseek(linear_hash->database, 0, SEEK_END)) {
1118  return err_file_bad_seek;
1119  }
1120 
1121  bucket_loc = ftell(linear_hash->database);
1122 
1123  /* write bucket data to file */
1124  if (1 != fwrite(&bucket, sizeof(linear_hash_bucket_t), 1, linear_hash->database)) {
1125  return err_file_write_error;
1126  }
1127 
1128  /* fill the new bucket with blank (empty) records */
1129  ion_byte_t record_blank[linear_hash->super.record.key_size + linear_hash->super.record.value_size + sizeof(linear_hash_record_status_empty)];
1130 
1131  memset(record_blank, 0, linear_hash->super.record.key_size + linear_hash->super.record.value_size + sizeof(linear_hash_record_status_empty));
1132 
1133  int i;
1134 
1135  for (i = 0; i < linear_hash->records_per_bucket; i++) {
1136  if (1 != fwrite(record_blank, linear_hash->super.record.key_size + linear_hash->super.record.value_size + sizeof(linear_hash_record_status_empty), 1, linear_hash->database)) {
1137  return err_file_write_error;
1138  }
1139  }
1140 
1141  /* write bucket_loc in mapping */
1142  /* store_bucket_loc_in_map(idx, bucket_loc, linear_hash); */
1143  ion_err_t err = array_list_insert(idx, bucket_loc, linear_hash->bucket_map);
1144 
1145  if (err != err_ok) {
1146  return err;
1147  }
1148 
1149  return err_ok;
1150 }
1151 
1162 ion_err_t
1163 linear_hash_get_bucket(
1164  ion_fpos_t bucket_loc,
1165  linear_hash_bucket_t *bucket,
1166  linear_hash_table_t *linear_hash
1167 ) {
1169 
1170  /* check if file is open */
1171  if (!linear_hash->database) {
1172  return err_file_close_error;
1173  }
1174 
1175  /* seek to the location of the bucket in the file */
1176  if (0 != fseek(linear_hash->database, bucket_loc, SEEK_SET)) {
1177  return err_file_bad_seek;
1178  }
1179 
1180  ion_byte_t *bucket_cache = alloca(sizeof(linear_hash_bucket_t));
1181 
1182  if (1 != fread(bucket_cache, sizeof(linear_hash_bucket_t), 1, linear_hash->database)) {
1183  return err_file_read_error;
1184  }
1185 
1186  /* read record data elements */
1187  memcpy(&bucket->idx, bucket_cache, sizeof(int));
1188  memcpy(&bucket->record_count, bucket_cache + sizeof(int), sizeof(int));
1189  memcpy(&bucket->overflow_location, bucket_cache + 2 * sizeof(int), sizeof(ion_fpos_t));
1190 
1191  return err_ok;
1192 }
1193 
1204 ion_err_t
1205 linear_hash_update_bucket(
1206  ion_fpos_t bucket_loc,
1207  linear_hash_bucket_t *bucket,
1208  linear_hash_table_t *linear_hash
1209 ) {
1210  /* check the file is open */
1211  if (NULL == linear_hash->database) {
1212  return err_file_open_error;
1213  }
1214 
1215  /* seek to the location of the bucket in the file */
1216  if (0 != fseek(linear_hash->database, bucket_loc, SEEK_SET)) {
1217  return err_file_bad_seek;
1218  }
1219 
1220  /* write bucket data to file */
1221  if (1 != fwrite(bucket, sizeof(linear_hash_bucket_t), 1, linear_hash->database)) {
1222  return err_file_write_error;
1223  }
1224 
1225  return err_ok;
1226 }
1227 
1239 ion_err_t
1240 create_overflow_bucket(
1241  int bucket_idx,
1242  ion_fpos_t *overflow_loc,
1243  linear_hash_table_t *linear_hash
1244 ) {
1245  ion_err_t err;
1246 
1247  /* initialize bucket fields */
1248  linear_hash_bucket_t bucket;
1249 
1250  bucket.idx = bucket_idx;
1251  bucket.record_count = 0;
1252  bucket.overflow_location = array_list_get(bucket_idx, linear_hash->bucket_map);
1253 
1254  /* seek to end of file to append new bucket */
1255  if (0 != fseek(linear_hash->database, 0, SEEK_END)) {
1256  return err_file_bad_seek;
1257  }
1258 
1259  /* get overflow location for new overflow bucket */
1260  *overflow_loc = ftell(linear_hash->database);
1261 
1262  err = array_list_insert(bucket.idx, *overflow_loc, linear_hash->bucket_map);
1263 
1264  if (err != err_ok) {
1265  return err;
1266  }
1267 
1268  /* write to file */
1269  if (1 != fwrite(&bucket, sizeof(linear_hash_bucket_t), 1, linear_hash->database)) {
1270  return err_file_write_error;
1271  }
1272 
1273  /* fill the new overflow bucket with blank (empty) records */
1274  ion_byte_t record_blank[linear_hash->super.record.key_size + linear_hash->super.record.value_size + sizeof(linear_hash_record_status_empty)];
1275 
1276  memset(record_blank, 0, linear_hash->super.record.key_size + linear_hash->super.record.value_size + sizeof(linear_hash_record_status_empty));
1277 
1278  int i;
1279 
1280  for (i = 0; i < linear_hash->records_per_bucket; i++) {
1281  if (1 != fwrite(record_blank, linear_hash->super.record.key_size + linear_hash->super.record.value_size + sizeof(linear_hash_record_status_empty), 1, linear_hash->database)) {
1282  return err_file_write_error;
1283  }
1284  }
1285 
1286  return err_ok;
1287 }
1288 
1297 ion_fpos_t
1298 bucket_idx_to_ion_fpos_t(
1299  int idx,
1300  linear_hash_table_t *linear_hash
1301 ) {
1302  return array_list_get(idx, linear_hash->bucket_map);
1303 }
1304 
1314 int
1315 key_bytes_to_int(
1316  ion_byte_t *key,
1317  linear_hash_table_t *linear_hash
1318 ) {
1319  int i;
1320  int key_bytes_as_int = 0;
1321  static int coefficients[] = { 3, 5, 7, 11, 13, 17, 19 };
1322 
1323  for (i = 0; i < linear_hash->super.record.key_size - 1; i++) {
1324  key_bytes_as_int += *(key + i) * coefficients[i + 1] - *(key + i) * coefficients[i];
1325  }
1326 
1327  return key_bytes_as_int;
1328 }
1329 
1338 int
1339 hash_to_bucket(
1340  ion_byte_t *key,
1341  linear_hash_table_t *linear_hash
1342 ) {
1343  /* Case the record we are looking for was in a bucket that has already been split and h1 was used */
1344  int key_bytes_as_int = key_bytes_to_int(key, linear_hash);
1345 
1346  return key_bytes_as_int & ((2 * linear_hash->initial_size) - 1);
1347 }
1348 
1357 int
1358 insert_hash_to_bucket(
1359  ion_byte_t *key,
1360  linear_hash_table_t *linear_hash
1361 ) {
1362  int key_bytes_as_int = key_bytes_to_int(key, linear_hash);
1363 
1364  return key_bytes_as_int & (linear_hash->initial_size - 1);
1365 }
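/*
 * The two hash functions assume initial_size is a power of two: insert_hash_to_bucket is
 * h0(k) = k & (initial_size - 1) and hash_to_bucket is h1(k) = k & (2 * initial_size - 1),
 * so h1 maps a key either to its h0 bucket or to that bucket plus initial_size. For example
 * (illustrative), with initial_size = 4 a key that folds to 13 gives h0 = 13 & 3 = 1 and
 * h1 = 13 & 7 = 5, i.e. bucket 1 before it is split and bucket 5 afterwards. Note that
 * key_bytes_to_int relies on a seven-entry coefficient table, so it is only well defined
 * for keys of at most seven bytes.
 */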
1366 
1367 /* ARRAY LIST METHODS */
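/*
 * The array list backs the bucket map: index i holds the file offset of the head of bucket
 * i's chain in the .lhd file, and the backing array grows by doubling whenever an index
 * beyond the current capacity is written.
 */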
1376 ion_err_t
1377 array_list_init(
1378  int init_size,
1379  array_list_t *array_list
1380 ) {
1381  array_list->current_size = init_size;
1382  array_list->data = malloc(init_size * sizeof(ion_fpos_t));
1383 
1384  if (NULL == array_list->data) {
1385  return err_out_of_memory;
1386  }
1387 
1388  memset(array_list->data, 0, sizeof(ion_fpos_t) * init_size);
1389  return err_ok;
1390 }
1391 
1403 ion_err_t
1404 array_list_insert(
1405  int bucket_idx,
1406  ion_fpos_t bucket_loc,
1407  array_list_t *array_list
1408 ) {
1409  /* case we need to expand array */
1410  if (bucket_idx >= array_list->current_size) {
1411  int old_size = array_list->current_size;
1412 
1413  array_list->current_size = array_list->current_size * 2;
1414 
1415  ion_byte_t *bucket_map_cache = alloca(old_size * sizeof(ion_fpos_t));
1416 
1417  memcpy(bucket_map_cache, array_list->data, old_size * sizeof(ion_fpos_t));
1418  free(array_list->data);
1419  array_list->data = NULL;
1420  array_list->data = malloc(2 * old_size * sizeof(ion_fpos_t));
1421 
1422  if (NULL == array_list->data) {
1423  return err_out_of_memory;
1424  }
1425 
1426  memset(array_list->data, 0, array_list->current_size * sizeof(ion_fpos_t));
1427  memcpy(array_list->data, bucket_map_cache, old_size * sizeof(ion_fpos_t));
1428  }
1429 
1430  array_list->data[bucket_idx] = bucket_loc;
1431 
1432  return err_ok;
1433 }
1434 
1443 ion_fpos_t
1444 array_list_get(
1445  int bucket_idx,
1446  array_list_t *array_list
1447 ) {
1448  /* case bucket_idx is outside of current size of array */
1449  if (bucket_idx >= array_list->current_size) {
1450  return linear_hash_end_of_list;
1451  }
1452  /* case bucket_idx is inside array */
1453  else {
1454  return array_list->data[bucket_idx];
1455  }
1456 }
1457 
1465 ion_err_t
1466 linear_hash_close(
1467  linear_hash_table_t *linear_hash
1468 ) {
1469  /* persist the in-memory state, then close the state file */
1470  linear_hash_write_state(linear_hash);
1471  if (0 != fclose(linear_hash->state)) {
1472  return err_file_close_error;
1473  }
1474  if (linear_hash->bucket_map->data != NULL) {
1475  free(linear_hash->bucket_map->data);
1476  linear_hash->bucket_map->data = NULL;
1477  }
1478 
1479  if (linear_hash->bucket_map != NULL) {
1480  free(linear_hash->bucket_map);
1481  linear_hash->bucket_map = NULL;
1482  }
1483 
1484  if (0 != fclose(linear_hash->database)) {
1485  return err_file_close_error;
1486  }
1487 
1488  if (linear_hash->cache != NULL) {
1489  free(linear_hash->cache);
1490  linear_hash->cache = NULL;
1491  }
1492 
1493  linear_hash->database = NULL;
1494 
1495  linear_hash->state = NULL;
1496 
1497  return err_ok;
1498 }
1499 
1506 ion_err_t
1507 linear_hash_destroy(
1508  linear_hash_table_t *linear_hash
1509 ) {
1510  ion_err_t err = linear_hash_close(linear_hash);
1511 
1512  if (err_ok != err) {
1513  return err;
1514  }
1515 
1516  char filename[ION_MAX_FILENAME_LENGTH];
1517 
1518  dictionary_get_filename(linear_hash->super.id, "lhs", filename);
1519 
1520  if (0 != fremove(filename)) {
1521  return err_file_delete_error;
1522  }
1523 
1524  dictionary_get_filename(linear_hash->super.id, "lhd", filename);
1525 
1526  if (0 != fremove(filename)) {
1527  return err_file_delete_error;
1528  }
1529 
1530  return err_ok;
1531 }
unsigned char ion_byte_t
A byte type.
Definition: kv_system.h:232
ion_status_t linear_hash_get(ion_byte_t *key, ion_byte_t *value, linear_hash_table_t *linear_hash)
Retrieve a record from the linear hash. The key and value will be written to the key and value pointers provided.
Definition: linear_hash.c:673
ion_err_t linear_hash_destroy(linear_hash_table_t *linear_hash)
Close a linear hash instance and delete its associated .lhs and .lhd files.
Definition: linear_hash.c:1507
enum ION_KEY_TYPE ion_key_type_t
This is the available key types for ION_DB. All types will be based on system defines.
ion_record_info_t record
ion_err_t linear_hash_get_record(ion_fpos_t loc, ion_byte_t *key, ion_byte_t *value, ion_byte_t *status, linear_hash_table_t *linear_hash)
Read the record data at the location specified from the linear hash's .lhd file.
Definition: linear_hash.c:1017
ion_err_t linear_hash_write_state(linear_hash_table_t *linear_hash)
Writes the current state of the linear hash to a .lhs file.
Definition: linear_hash.c:168
int ion_value_size_t
The size (length) of a dictionary value in bytes.
Definition: kv_system.h:256
ion_fpos_t * data
ion_err_t linear_hash_get_bucket_swap_record(int bucket_idx, ion_fpos_t *record_loc, ion_byte_t *key, ion_byte_t *value, ion_byte_t *status, linear_hash_table_t *linear_hash)
Obtains the data associated with the last record in the bucket chain associated with bucket_idx and deletes that record.
Definition: linear_hash.c:507
ion_err_t array_list_insert(int bucket_idx, ion_fpos_t bucket_loc, array_list_t *array_list)
Insert a value into an array list.
Definition: linear_hash.c:1404
int linear_hash_bucket_is_full(linear_hash_bucket_t bucket, linear_hash_table_t *linear_hash)
Helper method to check if a linear hash bucket is full.
Definition: linear_hash.c:270
#define GET_BUCKET_RECORDS_LOC(bucket_loc)
Definition: linear_hash.c:43
ion_err_t write_new_bucket(int idx, linear_hash_table_t *linear_hash)
Write a new bucket to the linear hash's .lhd file.
Definition: linear_hash.c:1099
ion_fpos_t array_list_get(int bucket_idx, array_list_t *array_list)
Retrieve a value from an array list.
Definition: linear_hash.c:1444
unsigned int ion_dictionary_id_t
A type used to identify dictionaries, specifically in the master table.
unsigned int ion_dictionary_size_t
The implementation specific size of the dictionary.
Definition: kv_system.h:264
ion_dictionary_size_t dictionary_size
ion_err_t linear_hash_close(linear_hash_table_t *linear_hash)
Close a linear hash instance with proper resource clean-up.
Definition: linear_hash.c:1466
ion_err_t split(linear_hash_table_t *linear_hash)
Performs the split operation on a linear hash instance.
Definition: linear_hash.c:364
ion_fpos_t bucket_idx_to_ion_fpos_t(int idx, linear_hash_table_t *linear_hash)
Helper method to get the location of a bucket chain from the linear hash's bucket map...
Definition: linear_hash.c:1298
int insert_hash_to_bucket(ion_byte_t *key, linear_hash_table_t *linear_hash)
Map a key to the address space of the linear hash. Used to map records to buckets with an index less ...
Definition: linear_hash.c:1358
ion_status_t linear_hash_update(ion_key_t key, ion_value_t value, linear_hash_table_t *linear_hash)
Update the value of the first record matching the key specified in the linear hash.
Definition: linear_hash.c:774
ion_err_t error
Definition: kv_system.h:291
ion_dictionary_parent_t super
#define fremove(x)
Definition: kv_system.h:56
char ion_err_t
The error type used to store error codes.
Definition: kv_system.h:226
ion_boolean_t linear_hash_above_threshold(linear_hash_table_t *linear_hash)
Helper method to check if a linear hash's load is above its split threshold.
Definition: linear_hash.c:477
int dictionary_get_filename(ion_dictionary_id_t id, char *ext, char *filename)
Given the ID, implementation specific extension, and a buffer to write to, writes back the formatted filename.
Definition: dictionary.c:41
void * ion_key_t
A dictionary key.
Definition: kv_system.h:241
ion_err_t linear_hash_increment_next_split(linear_hash_table_t *linear_hash)
Helper method to increment the next split index of the linear hash.
Definition: linear_hash.c:349
void * ion_value_t
A dictionary value.
Definition: kv_system.h:246
ion_fpos_t record_total_size
ion_err_t linear_hash_decrement_num_records(linear_hash_table_t *linear_hash)
Helper method to decrement the number of records in the linear hash.
Definition: linear_hash.c:313
int hash_to_bucket(ion_byte_t *key, linear_hash_table_t *linear_hash)
Map a key to the address space of the linear hash. Used to map records to buckets with an index great...
Definition: linear_hash.c:1339
Header for a linear hash.
ion_err_t linear_hash_init(ion_dictionary_id_t id, ion_dictionary_size_t dictionary_size, ion_key_type_t key_type, ion_key_size_t key_size, ion_value_size_t value_size, int initial_size, int split_threshold, int records_per_bucket, linear_hash_table_t *linear_hash)
Definition: linear_hash.c:47
#define linear_hash_record_status_full
ion_dictionary_id_t id
#define ION_MAX_FILENAME_LENGTH
Since the Arduino conforms to 8.3 syntax, that's 8 + 3 = 11 + 1 (null terminator) characters...
Definition: kv_system.h:73
ion_result_count_t count
Definition: kv_system.h:293
ion_key_size_t key_size
Definition: kv_system.h:307
ion_status_t linear_hash_delete(ion_byte_t *key, linear_hash_table_t *linear_hash)
Delete all records with keys matching the key specified in the linear hash.
Definition: linear_hash.c:867
#define linear_hash_end_of_list
ion_err_t array_list_init(int init_size, array_list_t *array_list)
Initialize an array list.
Definition: linear_hash.c:1377
ion_err_t linear_hash_get_bucket(ion_fpos_t bucket_loc, linear_hash_bucket_t *bucket, linear_hash_table_t *linear_hash)
Read the bucket at the location specified from the linear hash&#39;s .lhd file.
Definition: linear_hash.c:1163
ion_err_t linear_hash_increment_num_records(linear_hash_table_t *linear_hash)
Helper method to increment the number of records in the linear hash.
Definition: linear_hash.c:285
ion_err_t linear_hash_increment_num_buckets(linear_hash_table_t *linear_hash)
Helper method to increment the number of buckets in the linear hash.
Definition: linear_hash.c:328
ion_key_type_t key_type
ion_err_t linear_hash_update_bucket(ion_fpos_t bucket_loc, linear_hash_bucket_t *bucket, linear_hash_table_t *linear_hash)
Write the bucket provided at the location specified in the linear hash's .lhd file.
Definition: linear_hash.c:1205
long ion_fpos_t
A file position type.
Definition: kv_system.h:237
#define linear_hash_record_status_empty
ion_dictionary_compare_t compare
int key_bytes_to_int(ion_byte_t *key, linear_hash_table_t *linear_hash)
Transform a key to an integer.
Definition: linear_hash.c:1315
array_list_t * bucket_map
int ion_key_size_t
The size (length) of a dictionary key in bytes.
Definition: kv_system.h:251
char ion_boolean_t
A boolean type.
Definition: kv_system.h:269
ion_err_t create_overflow_bucket(int bucket_idx, ion_fpos_t *overflow_loc, linear_hash_table_t *linear_hash)
Create an overflow bucket and write it to the linear hash&#39;s .lhd file.
Definition: linear_hash.c:1240
ion_status_t linear_hash_insert(ion_key_t key, ion_value_t value, int hash_bucket_idx, linear_hash_table_t *linear_hash)
Insert a record into the linear hash.
Definition: linear_hash.c:573
ion_value_size_t value_size
Definition: kv_system.h:309
#define ION_STATUS_INITIALIZE
Definition: kv_system.h:107
ion_err_t linear_hash_write_record(ion_fpos_t record_loc, ion_byte_t *key, ion_byte_t *value, ion_byte_t *status, linear_hash_table_t *linear_hash)
Write record data to the location specified from the linear hash&#39;s .lhd file.
Definition: linear_hash.c:1059
ion_err_t linear_hash_read_state(linear_hash_table_t *linear_hash)
Read the state of a linear hash from a .lhs file.
Definition: linear_hash.c:219
A status object that describes the result of a dictionary operation.
Definition: kv_system.h:290