Revert "Introduce and use sleeplocks instead of BUSY flags"
My changes have a race with re-used bufs, and the code doesn't seem to get shorter.
Keep the changes that fixed the ip->off race.

This reverts commit 3a5fa7e.

Conflicts:

	defs.h
	file.c
	file.h
Frans Kaashoek committed Aug 29, 2011
1 parent 22f7db5 commit 1ddfbbb
Showing 12 changed files with 61 additions and 105 deletions.
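For readers skimming the diff: the revert drops the dedicated sleeplock type and returns to the original xv6 pattern of a BUSY bit guarded by a spinlock, with sleep() and wakeup() doing the blocking. A minimal sketch of that pattern follows; struct resource, R_BUSY, reslock, and the two functions are illustrative names, not identifiers from this commit.

#define R_BUSY 0x1

struct resource {
  int flags;                   // R_BUSY is set while some process holds the resource
};
struct spinlock reslock;       // protects flags of every resource

void
lock_resource(struct resource *r)
{
  acquire(&reslock);
  while(r->flags & R_BUSY)     // another process holds it:
    sleep(r, &reslock);        // atomically release reslock and block until wakeup(r)
  r->flags |= R_BUSY;
  release(&reslock);
}

void
unlock_resource(struct resource *r)
{
  acquire(&reslock);
  r->flags &= ~R_BUSY;
  wakeup(r);                   // wake any process sleeping in lock_resource()
  release(&reslock);
}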
45 changes: 22 additions & 23 deletions bio.c
@@ -13,7 +13,9 @@
// * Only one process at a time can use a buffer,
// so do not keep them longer than necessary.
//
// The implementation uses two state flags internally:
// The implementation uses three state flags internally:
// * B_BUSY: the block has been returned from bread
// and has not been passed back to brelse.
// * B_VALID: the buffer data has been initialized
// with the associated disk block contents.
// * B_DIRTY: the buffer data has been modified
@@ -49,52 +51,49 @@ binit(void)
b->next = bcache.head.next;
b->prev = &bcache.head;
b->dev = -1;
initlock(&b->lock, "buf");
initsleeplock(&b->sleeplock);
bcache.head.next->prev = b;
bcache.head.next = b;
}
}

// Look through buffer cache for sector on device dev.
// If not found, allocate fresh block.
// In either case, return sleep-locked buffer.
// In either case, return locked buffer.
static struct buf*
bget(uint dev, uint sector)
{
struct buf *b;

acquire(&bcache.lock);

loop:
// Try for cached block.
for(b = bcache.head.next; b != &bcache.head; b = b->next){
acquire(&b->lock);
if(b->dev == dev && b->sector == sector){
release(&bcache.lock);
acquire_sleeplock(&b->sleeplock, &b->lock);
release(&b->lock);
return b;
if(!(b->flags & B_BUSY)){
b->flags |= B_BUSY;
release(&bcache.lock);
return b;
}
sleep(b, &bcache.lock);
goto loop;
}
release(&b->lock);
}

// Allocate fresh block.
for(b = bcache.head.prev; b != &bcache.head; b = b->prev){
acquire(&b->lock);
if (!acquired_sleeplock(&b->sleeplock)) {
release(&bcache.lock);
if((b->flags & B_BUSY) == 0){
b->dev = dev;
b->sector = sector;
b->flags = 0;
acquire_sleeplock(&b->sleeplock, &b->lock);
release(&b->lock);
b->flags = B_BUSY;
release(&bcache.lock);
return b;
}
release(&b->lock);
}
panic("bget: no buffers");
}

// Return a locked buf with the contents of the indicated disk sector.
// Return a B_BUSY buf with the contents of the indicated disk sector.
struct buf*
bread(uint dev, uint sector)
{
@@ -110,7 +109,7 @@ bread(uint dev, uint sector)
void
bwrite(struct buf *b)
{
if(!acquired_sleeplock(&b->sleeplock))
if((b->flags & B_BUSY) == 0)
panic("bwrite");
b->flags |= B_DIRTY;
iderw(b);
@@ -120,20 +119,20 @@ bwrite(struct buf *b)
void
brelse(struct buf *b)
{
if(!acquired_sleeplock(&b->sleeplock))
if((b->flags & B_BUSY) == 0)
panic("brelse");

acquire(&bcache.lock);
acquire(&b->lock);

b->next->prev = b->prev;
b->prev->next = b->next;
b->next = bcache.head.next;
b->prev = &bcache.head;
bcache.head.next->prev = b;
bcache.head.next = b;

release_sleeplock(&b->sleeplock);
release(&b->lock);
b->flags &= ~B_BUSY;
wakeup(b);

release(&bcache.lock);
}
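The external interface of the buffer cache is unchanged by the revert: bread() still returns a locked buffer and brelse() still releases it. A sketch of the usual calling sequence, with dev, sector, and src as placeholder variables rather than code from this commit:

  struct buf *bp;

  bp = bread(dev, sector);        // locked (B_BUSY) buffer with valid contents
  memmove(bp->data, src, 512);    // modify the cached block
  bwrite(bp);                     // mark B_DIRTY and write it to disk
  brelse(bp);                     // clear B_BUSY and wake any waiting process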
7 changes: 3 additions & 4 deletions buf.h
@@ -2,13 +2,12 @@ struct buf {
int flags;
uint dev;
uint sector;
struct spinlock lock;
struct sleeplock sleeplock;
struct buf *prev; // LRU cache list
struct buf *next;
struct buf *qnext; // disk queue
uchar data[512];
};
#define B_VALID 0x1 // buffer has been read from disk
#define B_DIRTY 0x2 // buffer needs to be written to disk
#define B_BUSY 0x1 // buffer is locked by some process
#define B_VALID 0x2 // buffer has been read from disk
#define B_DIRTY 0x4 // buffer needs to be written to disk

5 changes: 0 additions & 5 deletions defs.h
@@ -5,7 +5,6 @@ struct inode;
struct pipe;
struct proc;
struct spinlock;
struct sleeplock;
struct stat;
struct superblock;

@@ -130,10 +129,6 @@ void initlock(struct spinlock*, char*);
void release(struct spinlock*);
void pushcli(void);
void popcli(void);
void initsleeplock(struct sleeplock*);
void acquire_sleeplock(struct sleeplock*, struct spinlock*);
void release_sleeplock(struct sleeplock*);
int acquired_sleeplock(struct sleeplock*);

// string.c
int memcmp(const void*, const void*, uint);
2 changes: 1 addition & 1 deletion file.c
@@ -2,8 +2,8 @@
#include "defs.h"
#include "param.h"
#include "fs.h"
#include "spinlock.h"
#include "file.h"
#include "spinlock.h"

struct devsw devsw[NDEV];
struct {
7 changes: 3 additions & 4 deletions file.h
@@ -15,9 +15,7 @@ struct inode {
uint dev; // Device number
uint inum; // Inode number
int ref; // Reference count
int flags; // I_VALID
struct spinlock lock;
struct sleeplock sleeplock;
int flags; // I_BUSY, I_VALID

short type; // copy of disk inode
short major;
@@ -27,7 +25,8 @@
uint addrs[NDIRECT+1];
};

#define I_VALID 0x1
#define I_BUSY 0x1
#define I_VALID 0x2

// device implementations

32 changes: 16 additions & 16 deletions fs.c
@@ -113,8 +113,11 @@ bfree(int dev, uint b)
// It is an error to use an inode without holding a reference to it.
//
// Processes are only allowed to read and write inode
// metadata and contents when holding the inode's sleeplock.
// Callers are responsible for locking
// metadata and contents when holding the inode's lock,
// represented by the I_BUSY flag in the in-memory copy.
// Because inode locks are held during disk accesses,
// they are implemented using a flag rather than with
// spin locks. Callers are responsible for locking
// inodes before passing them to routines in this file; leaving
// this responsibility with the caller makes it possible for them
// to create arbitrarily-sized atomic operations.
@@ -213,7 +216,6 @@ iget(uint dev, uint inum)
ip->inum = inum;
ip->ref = 1;
ip->flags = 0;
initsleeplock(&ip->sleeplock);
release(&icache.lock);

return ip;
@@ -230,7 +232,7 @@
return ip;
}

// Acquire the sleeplock for a given inode.
// Lock the given inode.
void
ilock(struct inode *ip)
{
@@ -241,7 +243,9 @@
panic("ilock");

acquire(&icache.lock);
acquire_sleeplock(&ip->sleeplock, &icache.lock);
while(ip->flags & I_BUSY)
sleep(ip, &icache.lock);
ip->flags |= I_BUSY;
release(&icache.lock);

if(!(ip->flags & I_VALID)){
@@ -264,11 +268,12 @@
void
iunlock(struct inode *ip)
{
if(ip == 0 || !acquired_sleeplock(&ip->sleeplock) || ip->ref < 1)
if(ip == 0 || !(ip->flags & I_BUSY) || ip->ref < 1)
panic("iunlock");

acquire(&icache.lock);
release_sleeplock(&ip->sleeplock);
ip->flags &= ~I_BUSY;
wakeup(ip);
release(&icache.lock);
}

@@ -279,15 +284,14 @@ iput(struct inode *ip)
acquire(&icache.lock);
if(ip->ref == 1 && (ip->flags & I_VALID) && ip->nlink == 0){
// inode is no longer used: truncate and free inode.
if(acquired_sleeplock(&ip->sleeplock))
if(ip->flags & I_BUSY)
panic("iput busy");
acquire_sleeplock(&ip->sleeplock, &icache.lock);
ip->flags |= I_BUSY;
release(&icache.lock);
itrunc(ip);
ip->type = 0;
iupdate(ip);
acquire(&icache.lock);
release_sleeplock(&ip->sleeplock);
ip->flags = 0;
wakeup(ip);
}
@@ -429,14 +433,10 @@ writei(struct inode *ip, char *src, uint off, uint n)
return devsw[ip->major].write(ip, src, n);
}

if(off > ip->size || off + n < off) {
panic("writei1");
if(off > ip->size || off + n < off)
return -1;
}
if(off + n > MAXFILE*BSIZE) {
panic("writei2");
if(off + n > MAXFILE*BSIZE)
return -1;
}

for(tot=0; tot<n; tot+=m, off+=m, src+=m){
bp = bread(ip->dev, bmap(ip, off/BSIZE));
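Callers of the inode layer are likewise unaffected: an inode still has to be locked around any access to its metadata or contents, whatever the lock's implementation. A sketch of the usual pattern, with ip, dst, off, and n as placeholder variables:

  ilock(ip);                      // sleeps until I_BUSY can be set; loads the inode from disk if needed
  if(readi(ip, dst, off, n) != n){
    // short read or error; handle it
  }
  iunlock(ip);                    // clears I_BUSY and wakes waiting processes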
2 changes: 1 addition & 1 deletion ide.c
@@ -127,7 +127,7 @@ iderw(struct buf *b)
{
struct buf **pp;

if(!acquired_sleeplock(&b->sleeplock))
if(!(b->flags & B_BUSY))
panic("iderw: buf not busy");
if((b->flags & (B_VALID|B_DIRTY)) == B_VALID)
panic("iderw: nothing to do");
14 changes: 9 additions & 5 deletions log.c
@@ -43,9 +43,9 @@ struct logheader {

struct {
struct spinlock lock;
struct sleeplock sleeplock;
int start;
int size;
int intrans;
int dev;
struct logheader lh;
} log;
@@ -60,7 +60,6 @@ initlog(void)

struct superblock sb;
initlock(&log.lock, "log");
initsleeplock(&log.sleeplock);
readsb(ROOTDEV, &sb);
log.start = sb.size - sb.nlog;
log.size = sb.nlog;
@@ -134,7 +133,10 @@ void
begin_trans(void)
{
acquire(&log.lock);
acquire_sleeplock(&log.sleeplock, &log.lock);
while (log.intrans) {
sleep(&log, &log.lock);
}
log.intrans = 1;
release(&log.lock);
}

@@ -147,8 +149,10 @@ commit_trans(void)
log.lh.n = 0;
write_head(); // Reclaim log
}

acquire(&log.lock);
release_sleeplock(&log.sleeplock);
log.intrans = 0;
wakeup(&log);
release(&log.lock);
}

@@ -167,7 +171,7 @@

if (log.lh.n >= LOGSIZE || log.lh.n >= log.size - 1)
panic("too big a transaction");
if (!acquired_sleeplock(&log.sleeplock))
if (!log.intrans)
panic("write outside of trans");

// cprintf("log_write: %d %d\n", b->sector, log.lh.n);
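The logging interface also keeps its shape: begin_trans() and commit_trans() bracket a transaction, and log_write() takes the place of bwrite() inside one. A sketch of a typical caller, with dev, blockno, and data as placeholder variables rather than code from this diff:

  struct buf *bp;

  begin_trans();                  // blocks while another transaction is in progress
  bp = bread(dev, blockno);
  memmove(bp->data, data, 512);   // modify the block in the cache
  log_write(bp);                  // record it in the in-memory log instead of calling bwrite()
  brelse(bp);
  commit_trans();                 // write the log to disk, then clear intrans and wake waiters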
2 changes: 1 addition & 1 deletion pipe.c
@@ -4,8 +4,8 @@
#include "mmu.h"
#include "proc.h"
#include "fs.h"
#include "spinlock.h"
#include "file.h"
#include "spinlock.h"

#define PIPESIZE 512

42 changes: 4 additions & 38 deletions spinlock.c
@@ -17,9 +17,10 @@ initlock(struct spinlock *lk, char *name)
lk->cpu = 0;
}

// Acquire a spin lock. Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause other CPUs to waste time spinning to acquire it.
// Spinlocks shouldn't be held across sleep(); for those cases, use sleeplocks.
// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lk)
{
@@ -114,38 +115,3 @@ popcli(void)
sti();
}

void
initsleeplock(struct sleeplock *l)
{
l->locked = 0;
}

// Grab the sleeplock that is protected by spinl. Sleeplocks allow a process to lock
// a data structure for long periods, including across sleeps. A process that tries
// to acquire a sleeplock is put to sleep while another process holds the sleeplock.
// To update the status of the sleeplock atomically, the caller must hold spinl.
void
acquire_sleeplock(struct sleeplock *sleepl, struct spinlock *spinl)
{
while (sleepl->locked) {
sleep(sleepl, spinl);
}
sleepl->locked = 1;
}

// Release the sleeplock that is protected by a spin lock
// Caller must hold the spinlock that protects the sleeplock
void
release_sleeplock(struct sleeplock *sleepl)
{
sleepl->locked = 0;
wakeup(sleepl);
}

// Is the sleeplock acquired?
// Caller must hold the spinlock that protects the sleeplock
int
acquired_sleeplock(struct sleeplock *sleepl)
{
return sleepl->locked;
}
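For reference, the deleted helpers were used under the spinlock that protects the flag, as the pre-revert ilock()/iunlock() in the fs.c hunk above show. A sketch of that calling pattern, reconstructed from the removed code rather than taken from any surviving file:

  acquire(&icache.lock);                            // spinlock protecting the sleeplock state
  acquire_sleeplock(&ip->sleeplock, &icache.lock);  // may sleep; sleep() releases and retakes icache.lock
  release(&icache.lock);

  // ... use the inode ...

  acquire(&icache.lock);
  release_sleeplock(&ip->sleeplock);                // clears the flag and wakes waiters
  release(&icache.lock);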
