/*
 * 'buffer.c' implements the buffer-cache functions. Race-conditions have
 * been avoided by NEVER letting an interrupt change a buffer (except for the
 * data, of course), but instead letting the caller do it. NOTE! As interrupts
 * can wake up a caller, some cli-sti sequences are needed to check for
 * sleep-on-calls. These should be extremely quick, though (I hope).
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/system.h>

#if (BUFFER_END & 0xfff)
#error "Bad BUFFER_END value"
#endif
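
/*
 * The buffer area must also not end inside the 640K-1M hole (video RAM
 * and BIOS ROM on the PC) - hence the second check. buffer_init() below
 * skips that hole when handing out data blocks.
 */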
#if (BUFFER_END > 0xA0000 && BUFFER_END <= 0x100000)
#error "Bad BUFFER_END value"
#endif

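/* 'end' is defined by the linker as the first address past the kernel
 * image; the buffer cache occupies the memory from there up to BUFFER_END. */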
extern int end;
struct buffer_head * start_buffer = (struct buffer_head *) &end;
struct buffer_head * hash_table[NR_HASH];
static struct buffer_head * free_list;
static struct task_struct * buffer_wait = NULL;
int NR_BUFFERS = 0;

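/* Wait until 'bh' is unlocked. The cli/sti pair protects the test of
 * b_lock against the interrupt that clears it, as explained at the top
 * of the file. */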
static inline void wait_on_buffer(struct buffer_head * bh)
{
	cli();
	while (bh->b_lock)
		sleep_on(&bh->b_wait);
	sti();
}

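/* sys_sync() writes the whole cache back: the in-core inodes are first
 * written into their buffers, then every dirty buffer is handed to
 * ll_rw_block() for writing. */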
int sys_sync(void)
{
	int i;
	struct buffer_head * bh;

	sync_inodes();		/* write out inodes into buffers */
	bh = start_buffer;
	for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
		wait_on_buffer(bh);
		if (bh->b_dirt)
			ll_rw_block(WRITE,bh);
	}
	return 0;
}

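/* sync_dev() is the per-device version of sys_sync(): only buffers
 * belonging to 'dev' are written back. */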
static int sync_dev(int dev)
{
	int i;
	struct buffer_head * bh;

	bh = start_buffer;
	for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
		if (bh->b_dev != dev)
			continue;
		wait_on_buffer(bh);
		if (bh->b_dirt)
			ll_rw_block(WRITE,bh);
	}
	return 0;
}

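/* Map a (dev,block) pair to one of the NR_HASH chains: a simple XOR of
 * the two numbers, taken modulo the table size. */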
#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]

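/* Unlink 'bh' from its hash chain and from the doubly linked free list.
 * The free list is circular, so both of its links must always be valid. */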
static inline void remove_from_queues(struct buffer_head * bh)
{
/* remove from hash-queue */
	if (bh->b_next)
		bh->b_next->b_prev = bh->b_prev;
	if (bh->b_prev)
		bh->b_prev->b_next = bh->b_next;
	if (hash(bh->b_dev,bh->b_blocknr) == bh)
		hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
/* remove from free list */
	if (!(bh->b_prev_free) || !(bh->b_next_free))
		panic("Free block list corrupted");
	bh->b_prev_free->b_next_free = bh->b_next_free;
	bh->b_next_free->b_prev_free = bh->b_prev_free;
	if (free_list == bh)
		free_list = bh->b_next_free;
}

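/* Link 'bh' back in: at the end of the free list (so recently used
 * buffers are reused last), and on its hash chain if it actually maps
 * a device block. */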
static inline void insert_into_queues(struct buffer_head * bh)
{
/* put at end of free list */
	bh->b_next_free = free_list;
	bh->b_prev_free = free_list->b_prev_free;
	free_list->b_prev_free->b_next_free = bh;
	free_list->b_prev_free = bh;
/* put the buffer in new hash-queue if it has a device */
	bh->b_prev = NULL;
	bh->b_next = NULL;
	if (!bh->b_dev)
		return;
	bh->b_next = hash(bh->b_dev,bh->b_blocknr);
	hash(bh->b_dev,bh->b_blocknr) = bh;
	bh->b_next->b_prev = bh;
}

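/* Walk the hash chain for (dev,block) and return the matching buffer,
 * or NULL if the block is not in the cache. */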
static struct buffer_head * find_buffer(int dev, int block)
{
	struct buffer_head * tmp;

	for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
		if (tmp->b_dev==dev && tmp->b_blocknr==block)
			return tmp;
	return NULL;
}

/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 */
struct buffer_head * get_hash_table(int dev, int block)
{
	struct buffer_head * bh;

repeat:
	if (!(bh=find_buffer(dev,block)))
		return NULL;
	bh->b_count++;
	wait_on_buffer(bh);
	if (bh->b_dev != dev || bh->b_blocknr != block) {
		brelse(bh);
		goto repeat;
	}
	return bh;
}

/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used (ie the repeat
 * paths), so it should be much more efficient than it looks.
 */
struct buffer_head * getblk(int dev,int block)
{
	struct buffer_head * tmp;

repeat:
	if (tmp=get_hash_table(dev,block))
		return tmp;
	tmp = free_list;
	do {
		if (!tmp->b_count) {
			wait_on_buffer(tmp);	/* we still have to wait */
			if (!tmp->b_count)	/* on it, it might be dirty */
				break;
		}
		tmp = tmp->b_next_free;
	} while (tmp != free_list || (tmp=NULL));
	/* Kids, don't try THIS at home ^^^^^. Magic */
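	/* The magic: if the scan wraps all the way around to free_list without
	 * finding an unused buffer, the right-hand side of the || assigns NULL
	 * to tmp and itself evaluates false, terminating the loop. */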
	if (!tmp) {
		printk("Sleeping on free buffer ..");
		sleep_on(&buffer_wait);
		printk("ok\n");
		goto repeat;
	}
	tmp->b_count++;
	remove_from_queues(tmp);
/*
 * Now, when we know nobody can get to this node (as it's removed from the
 * free list), we write it out. We can sleep here without fear of race-
 * conditions.
 */
	if (tmp->b_dirt)
		sync_dev(tmp->b_dev);
/* update buffer contents */
	tmp->b_dev=dev;
	tmp->b_blocknr=block;
	tmp->b_dirt=0;
	tmp->b_uptodate=0;
/* NOTE!! While we possibly slept in sync_dev(), somebody else might have
 * added "this" block already, so check for that. Thank God for goto's.
 */
	if (find_buffer(dev,block)) {
		tmp->b_dev=0;		/* ok, someone else has beaten us */
		tmp->b_blocknr=0;	/* to it - free this block and */
		tmp->b_count=0;		/* try again */
		insert_into_queues(tmp);
		goto repeat;
	}
/* and then insert into correct position */
	insert_into_queues(tmp);
	return tmp;
}

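/* brelse() drops one reference to a buffer. The wake_up() matters because
 * getblk() may be sleeping on buffer_wait for a buffer to become free. */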
void brelse(struct buffer_head * buf)
{
	if (!buf)
		return;
	wait_on_buffer(buf);
	if (!(buf->b_count--))
		panic("Trying to free free buffer");
	wake_up(&buffer_wait);
}

/*
 * bread() reads a specified block and returns the buffer that contains
 * it. It returns NULL if the block was unreadable.
 */
struct buffer_head * bread(int dev,int block)
{
	struct buffer_head * bh;

	if (!(bh=getblk(dev,block)))
		panic("bread: getblk returned NULL\n");
	if (bh->b_uptodate)
		return bh;
	ll_rw_block(READ,bh);
	if (bh->b_uptodate)
		return bh;
	brelse(bh);
	return (NULL);
}
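
/*
 * Typical use of the interface above - a sketch, not code from this file:
 *
 *	struct buffer_head * bh;
 *
 *	if ((bh = bread(dev, block))) {
 *		... use or modify bh->b_data, setting b_dirt if modified ...
 *		brelse(bh);
 *	}
 */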
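
/* buffer_init() carves the memory between the kernel image and BUFFER_END
 * into buffer heads (growing up from start_buffer) and 1024-byte data
 * blocks (growing down from BUFFER_END), skipping the 640K-1M hole, and
 * links everything into one circular free list. */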
void buffer_init(void)
{
	struct buffer_head * h = start_buffer;
	void * b = (void *) BUFFER_END;
	int i;

	while ( (b -= BLOCK_SIZE) >= ((void *) (h+1)) ) {
		h->b_dev = 0;
		h->b_dirt = 0;
		h->b_count = 0;
		h->b_lock = 0;
		h->b_uptodate = 0;
		h->b_wait = NULL;
		h->b_next = NULL;
		h->b_prev = NULL;
		h->b_data = (char *) b;
		h->b_prev_free = h-1;
		h->b_next_free = h+1;
		h++;
		NR_BUFFERS++;
		if (b == (void *) 0x100000)
			b = (void *) 0xA0000;
	}
	h--;
	free_list = start_buffer;
	free_list->b_prev_free = h;
	h->b_next_free = free_list;
	for (i=0;i<NR_HASH;i++)
		hash_table[i]=NULL;
}