#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
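/*
 * Fragments from fs/squashfs/cache.c.  The lines below are from
 * squashfs_cache_get(): look up a block in the cache under cache->lock;
 * if it is not cached, claim an unused entry (sleeping on the cache
 * wait queue when none is free) and fill it in from disk.
 */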
spin_lock(&cache->lock);
if (cache->entry[i].block == block) {
spin_unlock(&cache->lock);
spin_lock(&cache->lock);
for (n = 0; n < cache->entries; n++) {
        if (cache->entry[i].refcount == 0)
entry = &cache->entry[i];
entry->block = block;
spin_unlock(&cache->lock);
spin_lock(&cache->lock);
spin_unlock(&cache->lock);
spin_unlock(&cache->lock);
spin_unlock(&cache->lock);
spin_unlock(&cache->lock);
TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
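/*
 * squashfs_cache_put(): drop a reference on an entry under cache->lock,
 * waking up any process waiting for a free entry.
 */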
spin_lock(&cache->lock);
spin_unlock(&cache->lock);
spin_unlock(&cache->lock);
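/*
 * squashfs_cache_delete(): free each entry's per-page buffers before
 * freeing the entry array and the cache itself.
 */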
for (i = 0; i < cache->entries; i++) {
        if (cache->entry[i].data) {
                for (j = 0; j < cache->pages; j++)
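/*
 * squashfs_cache_init(): allocate the cache, its entry array and a
 * page-sized buffer for every page of every entry, reporting which
 * allocation failed.
 */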
ERROR("Failed to allocate %s cache\n", name);
ERROR("Failed to allocate %s cache\n", name);
for (i = 0; i < entries; i++) {
ERROR("Failed to allocate %s cache entry\n", name);
for (j = 0; j < cache->pages; j++) {
ERROR("Failed to allocate %s buffer\n", name);
else if (buffer == NULL)
        return min(length, entry->length - offset);
if (bytes >= remaining) {
        memcpy(buffer, buff, remaining);
memcpy(buffer, buff, bytes);
return length - remaining;
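/*
 * squashfs_read_metadata(): read length bytes of metadata starting at
 * <*block, *offset>, advancing the position across compressed blocks.
 */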
TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
} else if (*offset >= entry->length) {
if (*offset == entry->length) {
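/*
 * Probably from squashfs_read_table(): allocate an array of page
 * pointers used to read an on-disk table.
 */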
data = kcalloc(pages, sizeof(void *), GFP_KERNEL);