commit-gnuradio
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Commit-gnuradio] r7664 - gnuradio/branches/developers/eb/gcell/src/lib


From: eb
Subject: [Commit-gnuradio] r7664 - gnuradio/branches/developers/eb/gcell/src/lib
Date: Wed, 13 Feb 2008 14:27:16 -0700 (MST)

Author: eb
Date: 2008-02-13 14:27:15 -0700 (Wed, 13 Feb 2008)
New Revision: 7664

Modified:
   gnuradio/branches/developers/eb/gcell/src/lib/gc_job_manager_impl.cc
Log:
more complicated notification, might be a little faster

Modified: gnuradio/branches/developers/eb/gcell/src/lib/gc_job_manager_impl.cc
===================================================================
--- gnuradio/branches/developers/eb/gcell/src/lib/gc_job_manager_impl.cc        2008-02-13 19:48:36 UTC (rev 7663)
+++ gnuradio/branches/developers/eb/gcell/src/lib/gc_job_manager_impl.cc        2008-02-13 21:27:15 UTC (rev 7664)
@@ -739,6 +739,20 @@
   printf("\n");
 }
 
+struct job_client_info {
+  uint16_t     job_id;
+  uint16_t     client_id;
+};
+
+static int
+compare_jci_clients(const void *va, const void *vb)
+{
+  const job_client_info *a = (job_client_info *) va;
+  const job_client_info *b = (job_client_info *) vb;
+
+  return a->client_id - b->client_id;
+}
+
 void
 gc_job_manager_impl::notify_clients_jobs_are_done(unsigned int spe_num,
                                                  unsigned int completion_info_idx)
@@ -749,6 +763,11 @@
 
   gc_comp_info_t *ci = &d_comp_info[2 * spe_num + (completion_info_idx & 0x1)];
 
+  if (ci->ncomplete == 0){     // never happens, but ensures code below is correct
+    ci->in_use = 0;
+    return;
+  }
+
   if (0){
     static int total_jobs;
     static int total_msgs;
@@ -757,10 +776,12 @@
     printf("ppe:     tj = %6d  tm = %6d\n", total_jobs, total_msgs);
   }
 
-  // FIXME sort by client_id so we only have to lock & signal once / client
+  job_client_info gci[GC_CI_NJOBS];
 
+  /*
+   * Make one pass through and sanity check everything while filling in gci
+   */
   for (unsigned int i = 0; i < ci->ncomplete; i++){
-
     unsigned int job_id = ci->job_id[i];
 
     if (job_id >= d_options.max_jobs){
@@ -777,28 +798,50 @@
       ci->in_use = 0;          // clear flag so SPE knows we're done with it
       return;
     }
-    gc_client_thread_info *cti = &d_client_thread[jd->sys.client_id];
-    {
-      omni_mutex_lock  l(cti->d_mutex);
 
-      // mark job done
-      bv_set(cti->d_jobs_done, job_id);
+    gci[i].job_id = job_id;
+    gci[i].client_id = jd->sys.client_id;
+  }
 
-      // FIXME we could/should distinguish between CT_WAIT_ALL & CT_WAIT_ANY
+  // sort by client_id so we only have to lock & signal once / client
 
-      switch (cti->d_state){
-      case CT_WAIT_ANY:
-      case CT_WAIT_ALL:
-       cti->d_cond.signal();   // wake client thread up
-       break;
+  if (ci->ncomplete > 1)
+    qsort(gci, ci->ncomplete, sizeof(gci[0]), compare_jci_clients);
 
-      case CT_NOT_WAITING:
-      default:
-       break;      // nop
-      }
+  // "wind-in" 
+
+  gc_client_thread_info *last_cti = &d_client_thread[gci[0].client_id];
+  last_cti->d_mutex.lock();
+  bv_set(last_cti->d_jobs_done, gci[0].job_id);  // mark job done
+
+  for (unsigned int i = 1; i < ci->ncomplete; i++){
+
+    gc_client_thread_info *cti = &d_client_thread[gci[i].client_id];
+
+    if (cti != last_cti){      // new client?
+
+      // yes.  signal old client, unlock old, lock new
+
+      // FIXME we could distinguish between CT_WAIT_ALL & CT_WAIT_ANY
+
+      if (last_cti->d_state == CT_WAIT_ANY || last_cti->d_state == CT_WAIT_ALL)
+       last_cti->d_cond.signal();      // wake client thread up
+
+      last_cti->d_mutex.unlock();
+      cti->d_mutex.lock();
+      last_cti = cti;
     }
+
+    // mark job done
+    bv_set(cti->d_jobs_done, gci[i].job_id);
   }
 
+  // "wind-out"
+
+  if (last_cti->d_state == CT_WAIT_ANY || last_cti->d_state == CT_WAIT_ALL)
+    last_cti->d_cond.signal(); // wake client thread up
+  last_cti->d_mutex.unlock();
+
   ci->in_use = 0;              // clear flag so SPE knows we're done with it
 }
 





reply via email to

[Prev in Thread] Current Thread [Next in Thread]