DragonFly kernel List (threaded) for 2008-02
Re: Patch for inode SLIST conversion
Matthew Dillon wrote:
> :That's the perfect solution!
> :
> :Patch appended. _lwkt_trytokref needs the same change, as it might
> :acquire the token as well. I think setting lastref = NULL in
> :lwkt_reltoken is also not absolutely necessary, but it's not wrong
> :either, so I did it.
> :
> :Regards,
> :
> :   Michael
>
> [...]
>
> This will require some surgery; would you like to have a go at it?
> Despite the work needed it won't actually complicate the code any more,
> it's more on the order of a rearrangement.
A patch is appended.
lwkt_token_is_stale(lwkt_tokref_t) detects whether the token was acquired
by another thread while the calling thread was sleeping. I don't track
this per tokref; the granularity is the thread, so recursive acquisitions
of a token within the same thread DO NOT stale out the token.
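To illustrate the intended use, here is a minimal sketch (not part of the
patch; my_token, my_state and my_reload() are made-up names):

/*
 * Hypothetical caller: revalidate cached state after a blocking
 * token acquisition.
 */
static lwkt_token my_token;             /* set up via lwkt_token_init() */
static struct my_state *my_state;       /* data protected by my_token */
static struct my_state *my_reload(void);

static void
my_lookup(void)
{
        lwkt_tokref ilock;

        /*
         * lwkt_gettoken() can block; while we sleep the token may be
         * handed to another thread and then back to us.
         */
        lwkt_gettoken(&ilock, &my_token);
        if (lwkt_token_is_stale(&ilock)) {
                /*
                 * Another thread owned the token in the meantime, so
                 * any state it protects may have changed.
                 */
                my_state = my_reload();
        }
        lwkt_reltoken(&ilock);
}

Note that a recursive lwkt_gettoken() on the same token within this
thread would not mark the token stale.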
Furthermore, the code for UP and SMP is now basically the same, apart
from spin-lock and preemption handling. That is, on UP I now keep a
per-thread count instead of the globalcount and maintain thread
ownership as well.
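For reference, the unified token structure (restating the thread.h hunk
from the patch below):

typedef struct lwkt_token {
#ifdef SMP
        struct spinlock t_spinlock;     /* controls access */
#else
        struct spinlock t_unused01;
#endif
        struct thread *t_owner;         /* current owner of the token */
        int t_count;                    /* per-thread count */
        struct thread *t_lastowner;     /* last owner that acquired token */
} lwkt_token;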
I noticed that _lwkt_trytokref and _lwkt_gettokref were basically the
same code (it had been copied and pasted), so I unified them as well.
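Condensed, the unified code looks like this (restated from the patch
below):

static __inline int
_lwkt_trytokref(lwkt_tokref_t ref)
{
        thread_t td = curthread;

        if (_lwkt_trytokref2(ref, td) == FALSE) {
                /* failure: _lwkt_trytokref2 leaves the tokref linked */
                td->td_toks = ref->tr_next;
                return (FALSE);
        }
        return (TRUE);
}

static __inline void
_lwkt_gettokref(lwkt_tokref_t ref)
{
        if (_lwkt_trytokref2(ref, curthread) == FALSE) {
                /*
                 * Leave the tokref linked; the scheduler acquires the
                 * token via lwkt_getalltokens() before this thread
                 * runs again, so the token is held on return from
                 * lwkt_yield().
                 */
                lwkt_yield();
        }
}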
Regards,
Michael
Index: kern/lwkt_thread.c
===================================================================
RCS file: /home/dcvs/src/sys/kern/lwkt_thread.c,v
retrieving revision 1.110
diff -U 10 -r1.110 lwkt_thread.c
--- kern/lwkt_thread.c 27 Sep 2007 18:27:54 -0000 1.110
+++ kern/lwkt_thread.c 5 Feb 2008 17:04:55 -0000
@@ -517,24 +517,22 @@
* This function is NOT called if we are switching into a preemption
* or returning from a preemption. Typically this causes us to lose
* our current process designation (if we have one) and become a true
* LWKT thread, and may also hand the current process designation to
* another process and schedule thread.
*/
if (td->td_release)
td->td_release(td);
crit_enter_gd(gd);
-#ifdef SMP
if (td->td_toks)
lwkt_relalltokens(td);
-#endif
/*
* We had better not be holding any spin locks, but don't get into an
* endless panic loop.
*/
KASSERT(gd->gd_spinlock_rd == NULL || panicstr != NULL,
("lwkt_switch: still holding a shared spinlock %p!",
gd->gd_spinlock_rd));
KASSERT(gd->gd_spinlocks_wr == 0 || panicstr != NULL,
("lwkt_switch: still holding %d exclusive spinlocks!",
@@ -690,21 +688,25 @@
}
} else {
++gd->gd_cnt.v_swtch;
TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
}
#else
/*
* THREAD SELECTION FOR A UP MACHINE BUILD. We don't have to
* worry about tokens or the BGL.
+ *
+ * We call lwkt_getalltokens because it is required to detect
+ * stale tokens. This call can never fail for a UP build!
*/
+ lwkt_getalltokens(ntd);
++gd->gd_cnt.v_swtch;
TAILQ_REMOVE(&gd->gd_tdrunq[nq], ntd, td_threadq);
TAILQ_INSERT_TAIL(&gd->gd_tdrunq[nq], ntd, td_threadq);
#endif
} else {
/*
* We have nothing to run but only let the idle loop halt
* the cpu if there are no pending interrupts.
*/
ntd = &gd->gd_idlethread;
Index: kern/lwkt_token.c
===================================================================
RCS file: /home/dcvs/src/sys/kern/lwkt_token.c,v
retrieving revision 1.29
diff -U 10 -r1.29 lwkt_token.c
--- kern/lwkt_token.c 27 Dec 2006 06:51:47 -0000 1.29
+++ kern/lwkt_token.c 5 Feb 2008 19:40:55 -0000
@@ -120,210 +120,213 @@
KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)
#ifdef _KERNEL
#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
#endif
#endif
-#ifdef SMP
-
/*
* Obtain all the tokens required by the specified thread on the current
* cpu, return 0 on failure and non-zero on success.
*
- * NOTE: This code does not work with UP 'degenerate' spinlocks. SMP only.
- *
* The preemption code will not allow a target thread holding spinlocks to
* preempt the current thread so we do not have to implement this for UP.
+ * The only reason we implement this for UP is to detect stale
+ * tokens (lwkt_token_is_stale).
+ *
+ * lwkt_getalltokens is called by the LWKT scheduler to acquire all
+ * tokens that the thread had acquired prior to going to sleep.
+ *
+ * Called from a critical section.
*/
int
lwkt_getalltokens(thread_t td)
{
lwkt_tokref_t refs;
+#ifdef SMP
lwkt_tokref_t undo;
+#endif
lwkt_token_t tok;
for (refs = td->td_toks; refs; refs = refs->tr_next) {
KKASSERT(refs->tr_state == 0);
tok = refs->tr_tok;
if (tok->t_owner != td) {
+#ifdef SMP
if (spin_trylock_wr(&tok->t_spinlock) == 0) {
/*
* Release the partial list of tokens obtained and return
* failure.
*/
for (undo = td->td_toks; undo != refs; undo = undo->tr_next) {
tok = undo->tr_tok;
undo->tr_state = 0;
if (--tok->t_count == 0) {
tok->t_owner = NULL;
spin_unlock_wr(&tok->t_spinlock);
}
}
return (FALSE);
}
+#endif
+ KKASSERT(tok->t_owner == NULL && tok->t_count == 0);
tok->t_owner = td;
- KKASSERT(tok->t_count == 0);
+ /*
+ * Detect the situation where the token was acquired by
+ * another thread while the token was released from the
+ * current thread due to a blocking condition.
+ * In this case we set t_lastowner to NULL to mark the
+ * token as stale (see lwkt_token_is_stale).
+ */
+ if (tok->t_lastowner != tok->t_owner)
+ tok->t_lastowner = NULL;
}
++tok->t_count;
refs->tr_state = 1;
}
return (TRUE);
}
/*
* Release all tokens owned by the specified thread on the current cpu.
+ *
+ * Called from a critical section.
*/
void
lwkt_relalltokens(thread_t td)
{
- lwkt_tokref_t scan;
+ lwkt_tokref_t refs;
lwkt_token_t tok;
- for (scan = td->td_toks; scan; scan = scan->tr_next) {
- if (scan->tr_state) {
- scan->tr_state = 0;
- tok = scan->tr_tok;
+ for (refs = td->td_toks; refs; refs = refs->tr_next) {
+ if (refs->tr_state) {
+ refs->tr_state = 0;
+ tok = refs->tr_tok;
KKASSERT(tok->t_owner == td && tok->t_count > 0);
if (--tok->t_count == 0) {
tok->t_owner = NULL;
+#ifdef SMP
spin_unlock_wr(&tok->t_spinlock);
+#endif
}
}
}
}
-#endif
-
/*
- * Acquire a serializing token. This routine can block.
+ * NOTE: On failure, this function doesn't remove the tokref from the
+ * thread's token list, so the caller has to do that itself:
*
- * On SMP systems we track ownership and a per-owner counter. Tokens are
- * released when a thread switches out and reacquired when a thread
- * switches back in. On UP systems we track a global counter for debugging
- * but otherwise the only issue we have is if a preempting thread wants a
- * token that is being held by the preempted thread.
+ * td->td_toks = ref->tr_next;
*/
static __inline
-void
-_lwkt_gettokref(lwkt_tokref_t ref)
+int
+_lwkt_trytokref2(lwkt_tokref_t ref, thread_t td)
{
#ifndef SMP
lwkt_tokref_t scan;
+ thread_t itd;
#endif
lwkt_token_t tok;
- thread_t td;
KKASSERT(mycpu->gd_intr_nesting_level == 0);
- td = curthread;
+ KKASSERT(ref->tr_state == 0);
tok = ref->tr_tok;
/*
* Link the tokref to the thread's list
*/
ref->tr_next = td->td_toks;
cpu_ccfence();
+ /*
+ * Once td_toks is set to a non-NULL value, we can no longer
+ * preempt another thread (the scheduler makes sure of that).
+ * Additionally, we can't be preempted by another thread that
+ * wants to access the same token (tok).
+ */
td->td_toks = ref;
-#ifdef SMP
- /*
- * Gain ownership of the token's spinlock, SMP version.
- */
if (tok->t_owner != td) {
+#ifdef SMP
+ /*
+ * Gain ownership of the token's spinlock, SMP version.
+ */
if (spin_trylock_wr(&tok->t_spinlock) == 0) {
- lwkt_yield();
- return;
+ return (FALSE);
}
- KKASSERT(tok->t_owner == NULL && tok->t_count == 0);
- tok->t_owner = td;
- }
- ++tok->t_count;
#else
- /*
- * Gain ownership of the token, UP version. All we have to do
- * is check the token if we are preempting someone owning the
- * same token. If we are, we yield the cpu back to the originator
- * and we will get rescheduled as non-preemptive.
- */
- while ((td = td->td_preempted) != NULL) {
- for (scan = td->td_toks; scan; scan = scan->tr_next) {
- if (scan->tr_tok == tok) {
- lwkt_yield();
- return;
+ /*
+ * Gain ownership of the token, UP version. All we have to do is
+ * check whether we are preempting someone who owns the same token,
+ * in which case we fail to acquire it.
+ */
+ itd = td;
+ while ((itd = itd->td_preempted) != NULL) {
+ for (scan = itd->td_toks; scan; scan = scan->tr_next) {
+ if (scan->tr_tok == tok) {
+ return (FALSE);
+ }
}
}
- }
- /* NOTE: 'td' invalid after loop */
- ++tok->t_globalcount;
#endif
+ KKASSERT(tok->t_owner == NULL && tok->t_count == 0);
+ tok->t_owner = td;
+ tok->t_lastowner = td;
+ }
+ ++tok->t_count;
ref->tr_state = 1;
+
+ return (TRUE);
}
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref)
{
-#ifndef SMP
- lwkt_tokref_t scan;
-#endif
- lwkt_token_t tok;
- thread_t td;
-
- KKASSERT(mycpu->gd_intr_nesting_level == 0);
- td = curthread;
- tok = ref->tr_tok;
-
- /*
- * Link the tokref to the thread's list
- */
- ref->tr_next = td->td_toks;
- cpu_ccfence();
- td->td_toks = ref;
-
-#ifdef SMP
- /*
- * Gain ownership of the token's spinlock, SMP version.
- */
- if (tok->t_owner != td) {
- if (spin_trylock_wr(&tok->t_spinlock) == 0) {
- td->td_toks = ref->tr_next;
- return (FALSE);
- }
- KKASSERT(tok->t_owner == NULL && tok->t_count == 0);
- tok->t_owner = td;
+ thread_t td = curthread;
+ if (_lwkt_trytokref2(ref, td) == FALSE) {
+ /*
+ * Cleanup: remove the tokref from the thread's list.
+ */
+ td->td_toks = ref->tr_next;
+ return (FALSE);
}
- ++tok->t_count;
-#else
- /*
- * Gain ownership of the token, UP version. All we have to do
- * is check the token if we are preempting someone owning the
- * same token. If we are, we yield the cpu back to the originator
- * and we will get rescheduled as non-preemptive.
- */
- while ((td = td->td_preempted) != NULL) {
- for (scan = td->td_toks; scan; scan = scan->tr_next) {
- if (scan->tr_tok == tok) {
- td->td_toks = ref->tr_next;
- return (FALSE);
- }
- }
- }
- /* NOTE: 'td' invalid after loop */
- ++tok->t_globalcount;
-#endif
- ref->tr_state = 1;
+
return (TRUE);
}
+/*
+ * Acquire a serializing token. This routine can block.
+ *
+ * We track ownership and a per-owner counter. Tokens are
+ * released when a thread switches out and reacquired when a thread
+ * switches back in.
+ */
+static __inline
+void
+_lwkt_gettokref(lwkt_tokref_t ref)
+{
+ if (_lwkt_trytokref2(ref, curthread) == FALSE) {
+ /*
+ * Give up the cpu if we can't acquire the token right now. Since the
+ * tokref is already linked into the thread's list (_lwkt_trytokref2),
+ * the scheduler will acquire the token (by calling lwkt_getalltokens)
+ * before resuming execution. Thus, when we return from lwkt_yield(),
+ * the token is acquired.
+ */
+ lwkt_yield();
+ }
+}
+
void
lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
lwkt_tokref_init(ref, tok);
logtoken(get, ref);
_lwkt_gettokref(ref);
}
void
lwkt_gettokref(lwkt_tokref_t ref)
@@ -344,48 +347,56 @@
lwkt_trytokref(lwkt_tokref_t ref)
{
logtoken(try, ref);
return(_lwkt_trytokref(ref));
}
/*
* Release a serializing token
*/
void
-lwkt_reltoken(lwkt_tokref *ref)
+lwkt_reltoken(lwkt_tokref_t ref)
{
struct lwkt_tokref **scanp;
lwkt_token_t tok;
thread_t td;
td = curthread;
tok = ref->tr_tok;
-#ifdef SMP
- KKASSERT(ref->tr_state == 1 && tok->t_owner == td && tok->t_count > 0);
-#else
- KKASSERT(ref->tr_state == 1 && tok->t_globalcount > 0);
-#endif
+ KKASSERT(tok->t_owner == td && ref->tr_state == 1 && tok->t_count > 0);
- for (scanp = &td->td_toks; *scanp != ref; scanp = &((*scanp)->tr_next))
- ;
- *scanp = ref->tr_next;
ref->tr_state = 0;
-#ifdef SMP
if (--tok->t_count == 0) {
tok->t_owner = NULL;
+ tok->t_lastowner = NULL;
+#ifdef SMP
spin_unlock_wr(&tok->t_spinlock);
- }
-#else
- --tok->t_globalcount;
#endif
+ }
+
+ /*
+ * Remove ref from thread's token list.
+ *
+ * After removing the tokref from the thread's list, it's unsafe
+ * on a UP machine to modify the token, because we might get
+ * preempted by another thread that wants to acquire the same token.
+ * That thread would then think it can acquire the token, because the
+ * ref is no longer in our thread's list. Bang!
+ *
+ * SMP: Do not modify token after spin_unlock_wr.
+ */
+ for (scanp = &td->td_toks; *scanp != ref; scanp = &((*scanp)->tr_next))
+ ;
+ *scanp = ref->tr_next;
+
logtoken(release, ref);
}
/*
* Pool tokens are used to provide a type-stable serializing token
* pointer that does not race against disappearing data structures.
*
* This routine is called in early boot just after we setup the BSP's
* globaldata structure.
*/
@@ -409,22 +420,42 @@
/*
* Initialize the owner and release-to cpu to the current cpu
* and reset the generation count.
*/
void
lwkt_token_init(lwkt_token_t tok)
{
#ifdef SMP
spin_init(&tok->t_spinlock);
+#endif
tok->t_owner = NULL;
+ tok->t_lastowner = NULL;
tok->t_count = 0;
-#else
- tok->t_globalcount = 0;
-#endif
}
void
lwkt_token_uninit(lwkt_token_t tok)
{
/* empty */
}
+
+int
+lwkt_token_is_stale(lwkt_tokref_t ref)
+{
+ lwkt_token_t tok = ref->tr_tok;
+
+ KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
+ tok->t_count > 0);
+
+ /* Token is not stale */
+ if (tok->t_lastowner == tok->t_owner)
+ return (FALSE);
+
+ /*
+ * The token is stale. Reset it to not-stale so that the next call
+ * to lwkt_token_is_stale returns "not stale", unless the token is
+ * acquired by another thread in the meantime.
+ */
+ tok->t_lastowner = tok->t_owner;
+ return (TRUE);
+}
Index: sys/thread.h
===================================================================
RCS file: /home/dcvs/src/sys/sys/thread.h,v
retrieving revision 1.90
diff -U 10 -r1.90 thread.h
--- sys/thread.h 12 Dec 2007 23:49:24 -0000 1.90
+++ sys/thread.h 5 Feb 2008 15:44:36 -0000
@@ -87,43 +87,32 @@
*
* A thread can depend on its serialization remaining intact through a
* preemption. An interrupt which attempts to use the same token as the
* thread being preempted will reschedule itself for non-preemptive
* operation, so the new token code is capable of interlocking against
* interrupts as well as other cpus.
*
* Tokens are managed through a helper reference structure, lwkt_tokref,
* which is typically declared on the caller's stack. Multiple tokref's
* may reference the same token.
- *
- * We do not actually have to track any information in the token itself
- * on UP systems. Simply linking the reference into the thread's td_toks
- * list is sufficient. We still track a global t_globalcount on UP for
- * debugging purposes.
*/
-#ifdef SMP
typedef struct lwkt_token {
+#ifdef SMP
struct spinlock t_spinlock; /* Controls access */
- struct thread *t_owner; /* The current owner of the token */
- int t_count; /* Per-thread count */
-} lwkt_token;
-
#else
-
-typedef struct lwkt_token {
struct spinlock t_unused01;
- struct thread *t_unused02;
- int t_globalcount; /* Global reference count */
-} lwkt_token;
-
#endif
+ struct thread *t_owner; /* The current owner of the token */
+ int t_count; /* Per-thread count */
+ struct thread *t_lastowner; /* Last owner that acquired token */
+} lwkt_token;
typedef struct lwkt_tokref {
lwkt_token_t tr_tok; /* token in question */
lwkt_tokref_t tr_next; /* linked list */
int tr_state; /* 0 = don't have, 1 = have */
} lwkt_tokref;
#define LWKT_TOKREF_INIT(tok) \
{ tok, NULL, 0 }
#define LWKT_TOKREF_DECLARE(name, tok) \
@@ -354,20 +343,21 @@
extern void lwkt_gettoken(lwkt_tokref_t, lwkt_token_t);
extern int lwkt_trytoken(lwkt_tokref_t, lwkt_token_t);
extern void lwkt_gettokref(lwkt_tokref_t);
extern int lwkt_trytokref(lwkt_tokref_t);
extern void lwkt_reltoken(lwkt_tokref_t);
extern int lwkt_getalltokens(thread_t);
extern void lwkt_relalltokens(thread_t);
extern void lwkt_drain_token_requests(void);
extern void lwkt_token_init(lwkt_token_t);
extern void lwkt_token_uninit(lwkt_token_t);
+extern int lwkt_token_is_stale(lwkt_tokref_t);
extern void lwkt_token_pool_init(void);
extern lwkt_token_t lwkt_token_pool_get(void *);
extern void lwkt_setpri(thread_t, int);
extern void lwkt_setpri_self(int);
extern int lwkt_checkpri_self(void);
extern void lwkt_setcpu_self(struct globaldata *);
extern void lwkt_migratecpu(int);