Revert "Introduce and use sleeplocks instead of BUSY flags"

My changes have a race with re-used bufs, and the code doesn't seem to get shorter.
Keep the changes that fixed the ip->off race.

This reverts commit 3a5fa7e.

Conflicts:

	defs.h
	file.c
	file.h
commit 1ddfbbb194e3aa668b33469eb547132a7a7f940a 1 parent 22f7db5
Frans Kaashoek authored
45 bio.c
@@ -13,7 +13,9 @@
// * Only one process at a time can use a buffer,
// so do not keep them longer than necessary.
//
-// The implementation uses two state flags internally:
+// The implementation uses three state flags internally:
+// * B_BUSY: the block has been returned from bread
+// and has not been passed back to brelse.
// * B_VALID: the buffer data has been initialized
// with the associated disk block contents.
// * B_DIRTY: the buffer data has been modified
@@ -49,8 +51,6 @@ binit(void)
b->next = bcache.head.next;
b->prev = &bcache.head;
b->dev = -1;
- initlock(&b->lock, "buf");
- initsleeplock(&b->sleeplock);
bcache.head.next->prev = b;
bcache.head.next = b;
}
@@ -58,43 +58,42 @@ binit(void)
// Look through buffer cache for sector on device dev.
// If not found, allocate fresh block.
-// In either case, return sleep-locked buffer.
+// In either case, return locked buffer.
static struct buf*
bget(uint dev, uint sector)
{
struct buf *b;
acquire(&bcache.lock);
+
+ loop:
// Try for cached block.
for(b = bcache.head.next; b != &bcache.head; b = b->next){
- acquire(&b->lock);
if(b->dev == dev && b->sector == sector){
- release(&bcache.lock);
- acquire_sleeplock(&b->sleeplock, &b->lock);
- release(&b->lock);
- return b;
+ if(!(b->flags & B_BUSY)){
+ b->flags |= B_BUSY;
+ release(&bcache.lock);
+ return b;
+ }
+ sleep(b, &bcache.lock);
+ goto loop;
}
- release(&b->lock);
}
// Allocate fresh block.
for(b = bcache.head.prev; b != &bcache.head; b = b->prev){
- acquire(&b->lock);
- if (!acquired_sleeplock(&b->sleeplock)) {
- release(&bcache.lock);
+ if((b->flags & B_BUSY) == 0){
b->dev = dev;
b->sector = sector;
- b->flags = 0;
- acquire_sleeplock(&b->sleeplock, &b->lock);
- release(&b->lock);
+ b->flags = B_BUSY;
+ release(&bcache.lock);
return b;
}
- release(&b->lock);
}
panic("bget: no buffers");
}
-// Return a locked buf with the contents of the indicated disk sector.
+// Return a B_BUSY buf with the contents of the indicated disk sector.
struct buf*
bread(uint dev, uint sector)
{
@@ -110,7 +109,7 @@ bread(uint dev, uint sector)
void
bwrite(struct buf *b)
{
- if(!acquired_sleeplock(&b->sleeplock))
+ if((b->flags & B_BUSY) == 0)
panic("bwrite");
b->flags |= B_DIRTY;
iderw(b);
@@ -120,11 +119,11 @@ bwrite(struct buf *b)
void
brelse(struct buf *b)
{
- if(!acquired_sleeplock(&b->sleeplock))
+ if((b->flags & B_BUSY) == 0)
panic("brelse");
acquire(&bcache.lock);
- acquire(&b->lock);
+
b->next->prev = b->prev;
b->prev->next = b->next;
b->next = bcache.head.next;
@@ -132,8 +131,8 @@ brelse(struct buf *b)
bcache.head.next->prev = b;
bcache.head.next = b;
- release_sleeplock(&b->sleeplock);
- release(&b->lock);
+ b->flags &= ~B_BUSY;
+ wakeup(b);
release(&bcache.lock);
}
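
For context, a typical caller of the B_BUSY interface restored above looks roughly like the sketch below. This is a minimal sketch assuming the usual xv6 kernel headers (types.h, defs.h, buf.h); the helper name update_block is hypothetical, while bread, bwrite, and brelse are the routines changed in this file.

void
update_block(uint dev, uint sector, uchar byte)
{
  struct buf *b;

  b = bread(dev, sector);   // returns with B_BUSY set; may sleep in bget()
  b->data[0] = byte;        // no other process can use this buf until brelse()
  bwrite(b);                // sets B_DIRTY and writes the buf to disk
  brelse(b);                // clears B_BUSY and wakes up any waiters
}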
7 buf.h
@@ -2,13 +2,12 @@ struct buf {
int flags;
uint dev;
uint sector;
- struct spinlock lock;
- struct sleeplock sleeplock;
struct buf *prev; // LRU cache list
struct buf *next;
struct buf *qnext; // disk queue
uchar data[512];
};
-#define B_VALID 0x1 // buffer has been read from disk
-#define B_DIRTY 0x2 // buffer needs to be written to disk
+#define B_BUSY 0x1 // buffer is locked by some process
+#define B_VALID 0x2 // buffer has been read from disk
+#define B_DIRTY 0x4 // buffer needs to be written to disk
5 defs.h
@@ -5,7 +5,6 @@ struct inode;
struct pipe;
struct proc;
struct spinlock;
-struct sleeplock;
struct stat;
struct superblock;
@@ -130,10 +129,6 @@ void initlock(struct spinlock*, char*);
void release(struct spinlock*);
void pushcli(void);
void popcli(void);
-void initsleeplock(struct sleeplock*);
-void acquire_sleeplock(struct sleeplock*, struct spinlock*);
-void release_sleeplock(struct sleeplock*);
-int acquired_sleeplock(struct sleeplock*);
// string.c
int memcmp(const void*, const void*, uint);
2  file.c
@@ -2,8 +2,8 @@
#include "defs.h"
#include "param.h"
#include "fs.h"
-#include "spinlock.h"
#include "file.h"
+#include "spinlock.h"
struct devsw devsw[NDEV];
struct {
7 file.h
@@ -15,9 +15,7 @@ struct inode {
uint dev; // Device number
uint inum; // Inode number
int ref; // Reference count
- int flags; // I_VALID
- struct spinlock lock;
- struct sleeplock sleeplock;
+ int flags; // I_BUSY, I_VALID
short type; // copy of disk inode
short major;
@@ -27,7 +25,8 @@ struct inode {
uint addrs[NDIRECT+1];
};
-#define I_VALID 0x1
+#define I_BUSY 0x1
+#define I_VALID 0x2
// device implementations
32 fs.c
@@ -113,8 +113,11 @@ bfree(int dev, uint b)
// It is an error to use an inode without holding a reference to it.
//
// Processes are only allowed to read and write inode
-// metadata and contents when holding the inode's sleeplock.
-// Callers are responsible for locking
+// metadata and contents when holding the inode's lock,
+// represented by the I_BUSY flag in the in-memory copy.
+// Because inode locks are held during disk accesses,
+// they are implemented using a flag rather than with
+// spin locks. Callers are responsible for locking
// inodes before passing them to routines in this file; leaving
// this responsibility with the caller makes it possible for them
// to create arbitrarily-sized atomic operations.
@@ -213,7 +216,6 @@ iget(uint dev, uint inum)
ip->inum = inum;
ip->ref = 1;
ip->flags = 0;
- initsleeplock(&ip->sleeplock);
release(&icache.lock);
return ip;
@@ -230,7 +232,7 @@ idup(struct inode *ip)
return ip;
}
-// Acquire the sleeplock for a given inode.
+// Lock the given inode.
void
ilock(struct inode *ip)
{
@@ -241,7 +243,9 @@ ilock(struct inode *ip)
panic("ilock");
acquire(&icache.lock);
- acquire_sleeplock(&ip->sleeplock, &icache.lock);
+ while(ip->flags & I_BUSY)
+ sleep(ip, &icache.lock);
+ ip->flags |= I_BUSY;
release(&icache.lock);
if(!(ip->flags & I_VALID)){
@@ -264,11 +268,12 @@ ilock(struct inode *ip)
void
iunlock(struct inode *ip)
{
- if(ip == 0 || !acquired_sleeplock(&ip->sleeplock) || ip->ref < 1)
+ if(ip == 0 || !(ip->flags & I_BUSY) || ip->ref < 1)
panic("iunlock");
acquire(&icache.lock);
- release_sleeplock(&ip->sleeplock);
+ ip->flags &= ~I_BUSY;
+ wakeup(ip);
release(&icache.lock);
}
@@ -279,15 +284,14 @@ iput(struct inode *ip)
acquire(&icache.lock);
if(ip->ref == 1 && (ip->flags & I_VALID) && ip->nlink == 0){
// inode is no longer used: truncate and free inode.
- if(acquired_sleeplock(&ip->sleeplock))
+ if(ip->flags & I_BUSY)
panic("iput busy");
- acquire_sleeplock(&ip->sleeplock, &icache.lock);
+ ip->flags |= I_BUSY;
release(&icache.lock);
itrunc(ip);
ip->type = 0;
iupdate(ip);
acquire(&icache.lock);
- release_sleeplock(&ip->sleeplock);
ip->flags = 0;
wakeup(ip);
}
@@ -429,14 +433,10 @@ writei(struct inode *ip, char *src, uint off, uint n)
return devsw[ip->major].write(ip, src, n);
}
- if(off > ip->size || off + n < off) {
- panic("writei1");
+ if(off > ip->size || off + n < off)
return -1;
- }
- if(off + n > MAXFILE*BSIZE) {
- panic("writei2");
+ if(off + n > MAXFILE*BSIZE)
return -1;
- }
for(tot=0; tot<n; tot+=m, off+=m, src+=m){
bp = bread(ip->dev, bmap(ip, off/BSIZE));
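
For context, callers bracket inode access with ilock()/iunlock(), which under this revert sleep on and set/clear I_BUSY rather than using a sleeplock. A minimal sketch, assuming the usual xv6 kernel headers and that ip already carries a reference from iget()/namei(); the helper name read_first_block is hypothetical.

int
read_first_block(struct inode *ip, char *dst)
{
  int n;

  ilock(ip);                   // sleeps until I_BUSY clears, then sets it
  n = readi(ip, dst, 0, 512);  // safe to read ip->size, ip->addrs, ...
  iunlock(ip);                 // clears I_BUSY and wakes up waiters
  return n;
}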
2  ide.c
@@ -127,7 +127,7 @@ iderw(struct buf *b)
{
struct buf **pp;
- if(!acquired_sleeplock(&b->sleeplock))
+ if(!(b->flags & B_BUSY))
panic("iderw: buf not busy");
if((b->flags & (B_VALID|B_DIRTY)) == B_VALID)
panic("iderw: nothing to do");
14 log.c
@@ -43,9 +43,9 @@ struct logheader {
struct {
struct spinlock lock;
- struct sleeplock sleeplock;
int start;
int size;
+ int intrans;
int dev;
struct logheader lh;
} log;
@@ -60,7 +60,6 @@ initlog(void)
struct superblock sb;
initlock(&log.lock, "log");
- initsleeplock(&log.sleeplock);
readsb(ROOTDEV, &sb);
log.start = sb.size - sb.nlog;
log.size = sb.nlog;
@@ -134,7 +133,10 @@ void
begin_trans(void)
{
acquire(&log.lock);
- acquire_sleeplock(&log.sleeplock, &log.lock);
+ while (log.intrans) {
+ sleep(&log, &log.lock);
+ }
+ log.intrans = 1;
release(&log.lock);
}
@@ -147,8 +149,10 @@ commit_trans(void)
log.lh.n = 0;
write_head(); // Reclaim log
}
+
acquire(&log.lock);
- release_sleeplock(&log.sleeplock);
+ log.intrans = 0;
+ wakeup(&log);
release(&log.lock);
}
@@ -167,7 +171,7 @@ log_write(struct buf *b)
if (log.lh.n >= LOGSIZE || log.lh.n >= log.size - 1)
panic("too big a transaction");
- if (!acquired_sleeplock(&log.sleeplock))
+ if (!log.intrans)
panic("write outside of trans");
// cprintf("log_write: %d %d\n", b->sector, log.lh.n);
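
For context, a file-system operation under the restored intrans flag is bracketed by begin_trans()/commit_trans() and calls log_write() in place of bwrite() for each block it modifies. A minimal sketch, assuming the usual xv6 kernel headers; the helper name zero_block is hypothetical.

void
zero_block(uint dev, uint bno)
{
  struct buf *bp;

  begin_trans();                         // sleeps while another transaction holds intrans
  bp = bread(dev, bno);
  memset(bp->data, 0, sizeof(bp->data));
  log_write(bp);                         // record the block in the log instead of bwrite()
  brelse(bp);
  commit_trans();                        // write the log, clear intrans, wakeup(&log)
}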
2  pipe.c
@@ -4,8 +4,8 @@
#include "mmu.h"
#include "proc.h"
#include "fs.h"
-#include "spinlock.h"
#include "file.h"
+#include "spinlock.h"
#define PIPESIZE 512
42 spinlock.c
@@ -17,9 +17,10 @@ initlock(struct spinlock *lk, char *name)
lk->cpu = 0;
}
-// Acquire a spin lock. Loops (spins) until the lock is acquired.
-// Holding a lock for a long time may cause other CPUs to waste time spinning to acquire it.
-// Spinlocks shouldn't be held across sleep(); for those cases, use sleeplocks.
+// Acquire the lock.
+// Loops (spins) until the lock is acquired.
+// Holding a lock for a long time may cause
+// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lk)
{
@@ -114,38 +115,3 @@ popcli(void)
sti();
}
-void
-initsleeplock(struct sleeplock *l)
-{
- l->locked = 0;
-}
-
-// Grab the sleeplock that is protected by spinl. Sleeplocks allow a process to lock
-// a data structure for long times, including across sleeps. Other processes that try
-// to acquire a sleeplock will be put to sleep when another process hold the sleeplock.
-// To update status of the sleeplock atomically, the caller must hold spinl
-void
-acquire_sleeplock(struct sleeplock *sleepl, struct spinlock *spinl)
-{
- while (sleepl->locked) {
- sleep(sleepl, spinl);
- }
- sleepl->locked = 1;
-}
-
-// Release the sleeplock that is protected by a spin lock
-// Caller must hold the spinlock that protects the sleeplock
-void
-release_sleeplock(struct sleeplock *sleepl)
-{
- sleepl->locked = 0;
- wakeup(sleepl);
-}
-
-// Is the sleeplock acquired?
-// Caller must hold the spinlock that protects the sleeplock
-int
-acquired_sleeplock(struct sleeplock *sleepl)
-{
- return sleepl->locked;
-}
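
The removed sleeplock API is replaced throughout this revert by the same flag-plus-sleep/wakeup pattern seen in bget(), ilock(), and begin_trans(). A minimal generic sketch of that pattern; the struct and function names below are placeholders, not xv6 identifiers.

struct resource {
  struct spinlock lock;   // protects busy
  int busy;               // nonzero while some process holds the resource
};

void
resource_lock(struct resource *r)
{
  acquire(&r->lock);
  while(r->busy)
    sleep(r, &r->lock);   // atomically releases r->lock while asleep
  r->busy = 1;
  release(&r->lock);
}

void
resource_unlock(struct resource *r)
{
  acquire(&r->lock);
  r->busy = 0;
  wakeup(r);              // wake processes sleeping in resource_lock()
  release(&r->lock);
}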
7 spinlock.h
@@ -1,4 +1,4 @@
-// Mutual exclusion lock for short code fragments
+// Mutual exclusion lock.
struct spinlock {
uint locked; // Is the lock held?
@@ -9,8 +9,3 @@ struct spinlock {
// that locked the lock.
};
-// Lock that maybe held across sleeps
-struct sleeplock {
- uint locked; // Is the lock held?
-};
-
1  sysfile.c
@@ -5,7 +5,6 @@
#include "mmu.h"
#include "proc.h"
#include "fs.h"
-#include "spinlock.h"
#include "file.h"
#include "fcntl.h"