
hash.h

/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996-2005
 *      Sleepycat Software.  All rights reserved.
 */
/*
 * Copyright (c) 1990, 1993, 1994
 *      Margo Seltzer.  All rights reserved.
 */
/*
 * Copyright (c) 1990, 1993, 1994
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Margo Seltzer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: hash.h,v 12.1 2005/06/16 20:21:47 bostic Exp $
 */

#ifndef _DB_HASH_H_
#define _DB_HASH_H_

/* Hash internal structure. */
typedef struct hash_t {
        db_pgno_t meta_pgno;    /* Page number of the meta data page. */
        u_int32_t h_ffactor;    /* Fill factor. */
        u_int32_t h_nelem;      /* Number of elements. */
                                /* Hash function. */
        u_int32_t (*h_hash) __P((DB *, const void *, u_int32_t));
} HASH;
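
/*
 * Illustrative sketch (not part of the original header): h_hash points at a
 * function with the signature above, typically installed through the public
 * DB->set_h_hash() method.  A custom hash might look like the following
 * (the function name and hash constants are hypothetical):
 *
 *	u_int32_t
 *	example_h_hash(dbp, bytes, length)
 *		DB *dbp;
 *		const void *bytes;
 *		u_int32_t length;
 *	{
 *		const u_int8_t *p;
 *		u_int32_t h;
 *
 *		for (p = bytes, h = 5381; length-- > 0; ++p)
 *			h = h * 33 + *p;
 *		return (h);
 *	}
 */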

/* Cursor structure definitions. */
typedef struct cursor_t {
        /* struct __dbc_internal */
        __DBC_INTERNAL

        /* Hash private part */

        /* Per-thread information */
        DB_LOCK hlock;                  /* Metadata page lock. */
        HMETA *hdr;                     /* Pointer to meta-data page. */
        PAGE *split_buf;                /* Temporary buffer for splits. */

        /* Hash cursor information */
        db_pgno_t       bucket;         /* Bucket we are traversing. */
        db_pgno_t       lbucket;        /* Bucket for which we are locked. */
        db_indx_t       dup_off;        /* Offset within a duplicate set. */
        db_indx_t       dup_len;        /* Length of current duplicate. */
        db_indx_t       dup_tlen;       /* Total length of duplicate entry. */
        u_int32_t       seek_size;      /* Number of bytes we need for add. */
        db_pgno_t       seek_found_page;/* Page on which we can insert. */
        u_int32_t       order;          /* Relative order among deleted curs. */

#define H_CONTINUE      0x0001          /* Join--search strictly fwd for data */
#define H_DELETED       0x0002          /* Cursor item is deleted. */
#define H_DIRTY         0x0004          /* Meta-data page needs to be written */
#define H_DUPONLY       0x0008          /* Dups only; do not change key. */
#define H_EXPAND        0x0010          /* Table expanded. */
#define H_ISDUP         0x0020          /* Cursor is within duplicate set. */
#define H_NEXT_NODUP    0x0040          /* Get next non-dup entry. */
#define H_NOMORE        0x0080          /* No more entries in bucket. */
#define H_OK            0x0100          /* Request succeeded. */
        u_int32_t       flags;
} HASH_CURSOR;
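
/*
 * Illustrative note (not part of the original header): the H_* values are
 * single-bit flags OR'ed into the cursor's flags word, so a cursor sitting
 * on a deleted item inside a duplicate set would carry
 * (H_DELETED | H_ISDUP) == 0x0022; such bits are generally tested and set
 * with the usual F_ISSET()/F_SET()/F_CLR() macros.
 */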

/* Test string. */
#define CHARKEY                 "%$sniglet^&"

/* Overflow management */
/*
 * The spares table indicates the page number at which each doubling begins.
 * From this page number we subtract the number of buckets already allocated
 * so that we can do a simple addition to calculate the page number here.
 */
#define BS_TO_PAGE(bucket, spares)              \
        ((bucket) + (spares)[__db_log2((bucket) + 1)])
#define BUCKET_TO_PAGE(I, B)    (BS_TO_PAGE((B), (I)->hdr->spares))
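
/*
 * Illustrative example (not part of the original header), assuming
 * __db_log2(n) returns the ceiling of log2(n): bucket 5 belongs to the
 * doubling indexed by __db_log2(5 + 1) == 3, so
 * BS_TO_PAGE(5, spares) == 5 + spares[3].  Because each spares entry already
 * has the previously allocated buckets subtracted out, that single addition
 * yields the bucket's page number.
 */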

/* Constraints about how much data goes on a page. */

#define MINFILL         4
#define ISBIG(I, N)     (((N) > ((I)->hdr->dbmeta.pagesize / MINFILL)) ? 1 : 0)
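
/*
 * Illustrative example (not part of the original header): with the common
 * 4096-byte page size, ISBIG reports any item larger than
 * 4096 / MINFILL == 1024 bytes as "big", i.e. too large to be stored inline
 * on a bucket page at the desired fill factor.
 */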

/* Shorthands for accessing the structure. */
#define NDX_INVALID     0xFFFF
#define BUCKET_INVALID  0xFFFFFFFF

/* On-page duplicates are stored as a string of size-data-size triples. */
#define DUP_SIZE(len)   ((len) + 2 * sizeof(db_indx_t))
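
/*
 * Illustrative example (not part of the original header), assuming the
 * usual 2-byte db_indx_t: a 10-byte duplicate datum is framed by its length
 * on both sides, so it occupies DUP_SIZE(10) == 10 + 2 * 2 == 14 bytes in
 * the on-page duplicate string.
 */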

/* Log message types (these are subtypes within a record type). */
#define PAIR_KEYMASK            0x1
#define PAIR_DATAMASK           0x2
#define PAIR_DUPMASK            0x4
#define PAIR_MASK               0xf
#define PAIR_ISKEYBIG(N)        (N & PAIR_KEYMASK)
#define PAIR_ISDATABIG(N)       (N & PAIR_DATAMASK)
#define PAIR_ISDATADUP(N)       (N & PAIR_DUPMASK)
#define OPCODE_OF(N)    (N & ~PAIR_MASK)

#define PUTPAIR         0x20
#define DELPAIR         0x30
#define PUTOVFL         0x40
#define DELOVFL         0x50
#define HASH_UNUSED1    0x60
#define HASH_UNUSED2    0x70
#define SPLITOLD        0x80
#define SPLITNEW        0x90
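
/*
 * Illustrative example (not part of the original header): a PUTPAIR record
 * whose key is a big (off-page) item would carry the subtype
 * (PUTPAIR | PAIR_KEYMASK) == 0x21.  OPCODE_OF(0x21) masks the low bits back
 * off to recover PUTPAIR (0x20), while PAIR_ISKEYBIG(0x21) is nonzero.
 */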

typedef enum {
        DB_HAM_CHGPG = 1,
        DB_HAM_DELFIRSTPG = 2,
        DB_HAM_DELMIDPG = 3,
        DB_HAM_DELLASTPG = 4,
        DB_HAM_DUP   = 5,
        DB_HAM_SPLIT = 6
} db_ham_mode;

#include "dbinc_auto/hash_auto.h"
#include "dbinc_auto/hash_ext.h"
#include "dbinc/db_am.h"
#endif /* !_DB_HASH_H_ */
