[OpenAFS-devel] afs_osi_Sleep and afs_osi_Wakeup on Linux
chas williams
chas@cmf.nrl.navy.mil
Tue, 04 Jun 2002 15:51:28 -0400
In message <OFBB4B4BCC.A5B3FF00-ON85256BCE.006A2AC0@in.ibm.com>,"Srikanth Vishw
anathan" writes:
>I guess it finally boils down to whether or not it is possible to
>have a version of linux_alloc() that does not drop GLOCK.
osi_AllocSmallSpace is generally used to allocate structures and what not.
it allocates a small fixed size (why?) and it's pretty easy to detect in
linux_alloc(). afs does seem to have the notion of an alloc_nosleep but only
solaris and irix seem to implement it.
Index: osi_sleep.c
===================================================================
RCS file: /cvs/openafs/src/afs/LINUX/osi_sleep.c,v
retrieving revision 1.12
diff -u -d -b -w -r1.12 osi_sleep.c
--- osi_sleep.c 2002/02/01 20:30:08 1.12
+++ osi_sleep.c 2002/06/04 19:45:16
@@ -17,52 +17,12 @@
#include "../afs/afs_stats.h" /* afs statistics */
-
-#if defined(AFS_GLOBAL_SUNLOCK)
static int osi_TimedSleep(char *event, afs_int32 ams, int aintok);
-#endif
-
void afs_osi_Wakeup(char *event);
void afs_osi_Sleep(char *event);
static char waitV, dummyV;
-#if ! defined(AFS_GLOBAL_SUNLOCK)
-
-/* call procedure aproc with arock as an argument, in ams milliseconds */
-static struct timer_list *afs_osi_CallProc(void *aproc, void *arock, int ams)
-{
- struct timer_list *timer = NULL;
-
- timer = (struct timer_list*)osi_Alloc(sizeof(struct timer_list));
- if (timer) {
- init_timer(timer);
- timer->expires = (ams*afs_hz)/1000 + 1;
- timer->data = (unsigned long)arock;
- timer->function = aproc;
- add_timer(timer);
- }
- return timer;
-}
-
-/* cancel a timeout, whether or not it has already occurred */
-static int afs_osi_CancelProc(struct timer_list *timer)
-{
- if (timer) {
- del_timer(timer);
- osi_Free(timer, sizeof(struct timer_list));
- }
- return 0;
-}
-
-static AfsWaitHack()
-{
- AFS_STATCNT(WaitHack);
- wakeup(&waitV);
-}
-
-#endif
-
void afs_osi_InitWaitHandle(struct afs_osi_WaitHandle *achandle)
{
AFS_STATCNT(osi_InitWaitHandle);
@@ -89,29 +49,21 @@
{
int code;
afs_int32 endTime, tid;
- struct timer_list *timer = NULL;
AFS_STATCNT(osi_Wait);
endTime = osi_Time() + (ams/1000);
if (ahandle)
ahandle->proc = (caddr_t) current;
- AFS_ASSERT_GLOCK();
do {
-#if defined(AFS_GLOBAL_SUNLOCK)
- code = osi_TimedSleep(&waitV, ams, 1);
- if (code == EINTR) {
- if (aintok)
- return EINTR;
- }
-#else
- timer = afs_osi_CallProc(AfsWaitHack, (char *) current, ams);
- afs_osi_Sleep(&waitV);
- afs_osi_CancelProc(timer);
-#endif /* AFS_GLOBAL_SUNLOCK */
+ AFS_ASSERT_GLOCK();
+ code = osi_TimedSleep(&waitV, ams, aintok);
+
+ if (code) break;
+ /* if we we're cancelled, quit now */
if (ahandle && (ahandle->proc == (caddr_t) 0)) {
/* we've been signalled */
- return EINTR;
+ break;
}
} while (osi_Time() < endTime);
return 0;
@@ -127,10 +79,10 @@
int seq; /* Sequence number: this is incremented
by wakeup calls; wait will not return until
it changes */
-#if defined(AFS_LINUX24_ENV)
- wait_queue_head_t cond;
-#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
struct wait_queue *cond;
+#else
+ wait_queue_head_t cond;
#endif
} afs_event_t;
@@ -157,48 +109,25 @@
if (evp->refcount == 0)
newp = evp;
evp = evp->next;
- }
- if (!newp)
- return NULL;
-
- newp->event = event;
- newp->refcount = 1;
- return newp;
}
-
-/* afs_addevent -- allocates a new event for the address. It isn't returned;
- * instead, afs_getevent should be called again. Thus, the real effect of
- * this routine is to add another event to the hash bucket for this
- * address.
- *
- * Locks:
- * Called with GLOCK held. However the function might drop
- * GLOCK when it calls osi_AllocSmallSpace for allocating
- * a new event (In Linux, the allocator drops GLOCK to avoid
- * a deadlock).
- */
-
-static void afs_addevent(char *event)
-{
- int hashcode;
- afs_event_t *newp;
-
- AFS_ASSERT_GLOCK();
- hashcode = afs_evhash(event);
- newp = osi_AllocSmallSpace(sizeof(afs_event_t));
+ if (!newp) {
+ newp = (afs_event_t *) osi_AllocSmallSpace(sizeof(afs_event_t));
afs_evhashcnt++;
newp->next = afs_evhasht[hashcode];
afs_evhasht[hashcode] = newp;
-#if defined(AFS_LINUX24_ENV)
- init_waitqueue_head(&newp->cond);
-#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
init_waitqueue(&newp->cond);
+#else
+ init_waitqueue_head(&newp->cond);
#endif
newp->seq = 0;
- newp->event = &dummyV; /* Dummy address for new events */
- newp->refcount = 0;
+
}
+ newp->event = event;
+ newp->refcount = 1;
+ return newp;
+}
/* Release the specified event */
#define relevent(evp) ((evp)->refcount--)
@@ -211,53 +140,64 @@
int afs_osi_SleepSig(char *event)
{
struct afs_event *evp;
- int seq, retval;
+ int seq, code;
+#ifndef DECLARE_WAITQUEUE
+#define DECLARE_WAITQUEUE(name, tsk) struct wait_queue name = { tsk, NULL }
+#endif
+ DECLARE_WAITQUEUE(wait, current);
evp = afs_getevent(event);
- if (!evp) {
- /* Can't block because allocating a new event would require dropping
- * the GLOCK, which may cause us to miss the wakeup. So call the
- * allocator then return immediately. We'll find the new event next
- * time around without dropping the GLOCK. */
- afs_addevent(event);
- return 0;
- }
-
seq = evp->seq;
- retval = 0;
+ code = 0;
+ add_wait_queue(&evp->cond, &wait);
while (seq == evp->seq) {
AFS_ASSERT_GLOCK();
+#ifndef set_current_state
+#define set_current_state(x) current->state = (x)
+#endif
+ set_current_state(TASK_INTERRUPTIBLE);
AFS_GUNLOCK();
- interruptible_sleep_on(&evp->cond);
+ schedule();
AFS_GLOCK();
+ set_current_state(TASK_RUNNING);
if (signal_pending(current)) {
- retval = EINTR;
+ code = EINTR;
break;
}
}
+ remove_wait_queue(&evp->cond, &wait);
relevent(evp);
- return retval;
+ return code;
}
/* afs_osi_Sleep -- waits for an event to be notified, ignoring signals. */
void afs_osi_Sleep(char *event)
{
- sigset_t saved_set;
-
- spin_lock_irq(¤t->sigmask_lock);
- saved_set = current->blocked;
- sigfillset(¤t->blocked);
- recalc_sigpending(current);
- spin_unlock_irq(¤t->sigmask_lock);
+ struct afs_event *evp;
+ int seq, code;
+ DECLARE_WAITQUEUE(wait, current);
- afs_osi_SleepSig(event);
+ evp = afs_getevent(event);
+ seq = evp->seq;
+ code = 0;
- spin_lock_irq(¤t->sigmask_lock);
- current->blocked = saved_set;
- recalc_sigpending(current);
- spin_unlock_irq(¤t->sigmask_lock);
+ add_wait_queue(&evp->cond, &wait);
+ while (seq == evp->seq) {
+ AFS_ASSERT_GLOCK();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ AFS_GUNLOCK();
+ schedule();
+ AFS_GLOCK();
+ set_current_state(TASK_RUNNING);
+ if (signal_pending(current)) {
+ code = EINTR;
+ break;
}
+ }
+ remove_wait_queue(&evp->cond, &wait);
+ relevent(evp);
+}
/* osi_TimedSleep
*
@@ -271,29 +211,26 @@
*/
static int osi_TimedSleep(char *event, afs_int32 ams, int aintok)
{
- long t = ams * HZ / 1000;
+ long ticks = ams * HZ / 1000;
struct afs_event *evp;
+ DECLARE_WAITQUEUE(wait, current);
evp = afs_getevent(event);
- if (!evp) {
- /* Can't block because allocating a new event would require dropping
- * the GLOCK, which may cause us to miss the wakeup. So call the
- * allocator then return immediately. We'll find the new event next
- * time around without dropping the GLOCK. */
- afs_addevent(event);
- return EAGAIN;
- }
- AFS_GUNLOCK();
+ add_wait_queue(&evp->cond, &wait);
if (aintok)
- t = interruptible_sleep_on_timeout(&evp->cond, t);
+ set_current_state(TASK_INTERRUPTIBLE);
else
- t = sleep_on_timeout(&evp->cond, t);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ AFS_GUNLOCK();
+ ticks = schedule_timeout(ticks);
AFS_GLOCK();
+ remove_wait_queue(&evp->cond, &wait);
+ set_current_state(TASK_RUNNING);
relevent(evp);
- return t ? EINTR : 0;
+ return ticks ? EINTR : 0;
}
@@ -302,9 +239,6 @@
struct afs_event *evp;
evp = afs_getevent(event);
- if (!evp) /* No sleepers */
- return;
-
if (evp->refcount > 1) {
evp->seq++;
wake_up(&evp->cond);
Index: osi_alloc.c
===================================================================
RCS file: /cvs/openafs/src/afs/LINUX/osi_alloc.c,v
retrieving revision 1.13
diff -u -d -b -w -r1.13 osi_alloc.c
--- osi_alloc.c 2002/04/23 03:26:38 1.13
+++ osi_alloc.c 2002/06/04 19:45:16
@@ -80,9 +80,12 @@
{
void *new = NULL;
int has_afs_glock = ISAFS_GLOCK();
+ int drop_afs_glock = 1;
+
+ if (asize == AFS_SMALLOCSIZ) drop_afs_glock = 0;
/* if global lock has been held save this info and unlock it. */
- if (has_afs_glock)
+ if (has_afs_glock && drop_afs_glock)
AFS_GUNLOCK();
/* if we can use kmalloc use it to allocate the required memory. */
@@ -117,7 +120,7 @@
memset(MEMADDR(new), 0, asize);
/* if the global lock had been held, lock it again. */
- if (has_afs_glock)
+ if (has_afs_glock && drop_afs_glock)
AFS_GLOCK();
return new;