#ifndef QUEUED_RWLOCK_HPP
#define QUEUED_RWLOCK_HPP

#include <stdint.h>
#include <sched.h>
#include <cassert>
#include <cstddef>
#define QUEUED_RW_LOCK_REQUEST_READ 0
#define QUEUED_RW_LOCK_REQUEST_WRITE 1
#define QUEUED_RW_LOCK_REQUEST_NONE 2
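
// The lock below tracks active readers with an atomic<size_t> exposing
// inc(), dec(), and a directly readable .value. The original header this
// fragment comes from presumably defines that wrapper elsewhere; since it
// is not shown here, the following is a minimal sketch of the assumed
// interface, built on the same GCC __sync builtins the lock itself uses.
template <typename T>
class atomic {
 public:
  volatile T value;  // current value; read directly by the lock's fast paths
  explicit atomic(const T& v = T()) : value(v) { }
  T inc() { return __sync_add_and_fetch(&value, 1); }  // returns new value
  T dec() { return __sync_sub_and_fetch(&value, 1); }  // returns new value
};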
/**
 * Fair queue-based reader-writer lock with local-only spinning, in the
 * style of Mellor-Crummey and Scott's scalable reader-writer lock.
 */
class queued_rw_lock {
 public:
  // Waiter state packed into one 32-bit word so it can be CAS'd atomically.
  union state_union {
    volatile uint32_t stateu;
    struct {
      volatile uint16_t successor_class;  // lock class of the queued successor
      volatile bool blocked;              // true while this request must wait
    } state;
  };

  // One node per lock request; the caller keeps it alive until unlock returns.
  struct request {
    volatile request* volatile next;
    volatile state_union s;
    volatile char lockclass;
  };

 private:
  request* volatile tail;        // most recent request (NULL when lock is free)
  atomic<size_t> reader_count;   // readers currently inside the lock
  request* volatile next_writer; // writer to wake when reader_count hits zero

 public:
  queued_rw_lock() : tail(NULL), reader_count(0), next_writer(NULL) { }
  // Acquire the lock for writing; I is this thread's queue node.
  inline void writelock(request *I) {
    I->lockclass = QUEUED_RW_LOCK_REQUEST_WRITE;
    I->next = NULL;
    I->s.stateu = 0;  // clear the whole state word, padding included
    I->s.state.blocked = true;
    I->s.state.successor_class = QUEUED_RW_LOCK_REQUEST_NONE;
    __sync_synchronize();
    // Atomically swap ourselves in as the queue tail.
    request* predecessor = __sync_lock_test_and_set(&tail, I);

    if (predecessor == NULL) {
      // Queue was empty: we are the next writer. Enter immediately if no
      // readers are active and nobody claimed next_writer before us.
      next_writer = I;
      __sync_synchronize();
      if (reader_count.value == 0) {
        if (__sync_lock_test_and_set(&next_writer, (request*)NULL) == I) {
          I->s.state.blocked = false;
        }
      }
    }
    else {
      // Ask the predecessor to hand over to a writer, then link behind it.
      predecessor->s.state.successor_class = QUEUED_RW_LOCK_REQUEST_WRITE;
      __sync_synchronize();
      predecessor->next = I;
    }
    // Spin (yielding) on our own node until a predecessor unblocks us.
    volatile state_union& is = I->s;
    while (is.state.blocked) sched_yield();
    assert(reader_count.value == 0);
  }
  // Release a write lock: if a successor exists (or is linking in), wake it.
  inline void wrunlock(request *I) {
    __sync_synchronize();
    if (I->next != NULL || !__sync_bool_compare_and_swap(&tail, I, (request*)NULL)) {
      while (I->next == NULL) sched_yield();  // wait for the successor to link in
      __sync_synchronize();
      if (I->next->lockclass == QUEUED_RW_LOCK_REQUEST_READ) {
        reader_count.inc();  // successor is a reader: count it as active
      }
      I->next->s.state.blocked = false;
    }
  }
  // Acquire the lock for reading; I is this thread's queue node.
  inline void readlock(request *I) {
    I->lockclass = QUEUED_RW_LOCK_REQUEST_READ;
    I->next = NULL;
    I->s.stateu = 0;  // clear the whole state word, padding included
    I->s.state.successor_class = QUEUED_RW_LOCK_REQUEST_NONE;
    I->s.state.blocked = true;
    __sync_synchronize();
    request* predecessor = __sync_lock_test_and_set(&tail, I);
    if (predecessor == NULL) {
      // Queue was empty: enter immediately as an active reader.
      reader_count.inc();
      I->s.state.blocked = false;
    }
    else {
      // Expected and desired views of the predecessor's state word. Zero
      // both first so the padding byte compares equal in the CAS below.
      state_union tempold, tempnew;
      tempold.stateu = 0;
      tempnew.stateu = 0;
      tempold.state.blocked = true;
      tempold.state.successor_class = QUEUED_RW_LOCK_REQUEST_NONE;
      tempnew.state.blocked = true;
      tempnew.state.successor_class = QUEUED_RW_LOCK_REQUEST_READ;
      __sync_synchronize();
      if (predecessor->lockclass == QUEUED_RW_LOCK_REQUEST_WRITE ||
          __sync_bool_compare_and_swap(&predecessor->s.stateu,
                                       tempold.stateu, tempnew.stateu)) {
        // Predecessor is a writer, or a still-blocked reader that has now
        // promised to wake us: link in and wait.
        predecessor->next = I;
        __sync_synchronize();
        volatile state_union& is = I->s;
        while (is.state.blocked) sched_yield();
      }
      else {
        // Predecessor is an unblocked reader: enter alongside it.
        reader_count.inc();
        predecessor->next = I;
        __sync_synchronize();
        I->s.state.blocked = false;
      }
    }
    __sync_synchronize();
    // If a reader queued behind us while we waited, wake it as well so
    // consecutive readers enter together.
    if (I->s.state.successor_class == QUEUED_RW_LOCK_REQUEST_READ) {
      while (I->next == NULL) sched_yield();
      reader_count.inc();
      I->next->s.state.blocked = false;
    }
  }
  // Release a read lock.
  inline void rdunlock(request *I) {
    __sync_synchronize();
    if (I->next != NULL || !__sync_bool_compare_and_swap(&tail, I, (request*)NULL)) {
      // A successor exists (or is linking in): wait for the link.
      while (I->next == NULL) sched_yield();
      if (I->s.state.successor_class == QUEUED_RW_LOCK_REQUEST_WRITE) {
        // Successor is a writer: record it for the last reader to wake.
        next_writer = (request*)(I->next);
        __sync_synchronize();
      }
    }
    if (reader_count.dec() == 0) {
      // Last active reader out: hand the lock to the pending writer, if any.
      __sync_synchronize();
      request* w = __sync_lock_test_and_set(&next_writer, (request*)NULL);
      if (w != NULL) {
        w->s.state.blocked = false;
        __sync_synchronize();
      }
    }
  }
};

#endif // QUEUED_RWLOCK_HPP
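
// A small smoke test, kept as a comment so the header stays header-only.
// This is an illustrative sketch, not part of the original source: the file
// name, thread counts, and counter variable are all hypothetical. Each
// lock/unlock pair needs its own request node, alive until unlock returns.
//
//   // test_queued_rwlock.cpp
//   #include "queued_rwlock.hpp"
//   #include <pthread.h>
//   #include <cstdio>
//
//   queued_rw_lock lock;
//   int counter = 0;
//
//   void* writer(void*) {
//     for (int i = 0; i < 1000; ++i) {
//       queued_rw_lock::request r;
//       lock.writelock(&r);
//       ++counter;                 // exclusive access
//       lock.wrunlock(&r);
//     }
//     return NULL;
//   }
//
//   void* reader(void*) {
//     for (int i = 0; i < 1000; ++i) {
//       queued_rw_lock::request r;
//       lock.readlock(&r);
//       volatile int v = counter;  // shared access
//       (void)v;
//       lock.rdunlock(&r);
//     }
//     return NULL;
//   }
//
//   int main() {
//     pthread_t t[4];
//     pthread_create(&t[0], NULL, writer, NULL);
//     pthread_create(&t[1], NULL, writer, NULL);
//     pthread_create(&t[2], NULL, reader, NULL);
//     pthread_create(&t[3], NULL, reader, NULL);
//     for (int i = 0; i < 4; ++i) pthread_join(t[i], NULL);
//     std::printf("counter = %d (expect 2000)\n", counter);
//     return 0;
//   }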