From: eb
Subject: [Commit-gnuradio] r7591 - gnuradio/branches/developers/eb/gcell/src/lib/spu
Date: Wed, 6 Feb 2008 13:36:39 -0700 (MST)

Author: eb
Date: 2008-02-06 13:36:38 -0700 (Wed, 06 Feb 2008)
New Revision: 7591

Modified:
   gnuradio/branches/developers/eb/gcell/src/lib/spu/gc_spu_jd_queue.c
   gnuradio/branches/developers/eb/gcell/src/lib/spu/test_spu.c
Log:
improved performance: check the queue head before acquiring the lock, and derive the exponential-backoff start and cap from the number of SPEs

Modified: gnuradio/branches/developers/eb/gcell/src/lib/spu/gc_spu_jd_queue.c
===================================================================
--- gnuradio/branches/developers/eb/gcell/src/lib/spu/gc_spu_jd_queue.c        2008-02-06 18:55:17 UTC (rev 7590)
+++ gnuradio/branches/developers/eb/gcell/src/lib/spu/gc_spu_jd_queue.c        2008-02-06 20:36:38 UTC (rev 7591)
@@ -29,6 +29,23 @@
 {
   gc_jd_queue_t        local_q;
 
+  // Before acquiring the lock, see if it's possible that there's
+  // something in the queue.  Checking in this way makes it easier
+  // for the PPE to insert things, since we're not contending for
+  // the lock unless there is something in the queue.
+
+  // copy in the queue structure
+  mfc_get(&local_q, q, sizeof(gc_jd_queue_t), sys_tag, 0, 0);
+  mfc_write_tag_mask(1 << sys_tag);    // the tag we're interested in
+  mfc_read_tag_status_all();           // wait for DMA to complete
+
+  if (local_q.head == 0){              // empty
+    return false;
+  }
+
+  // When we peeked, head was non-zero.  Now grab the
+  // lock and do it for real.
+
   _mutex_lock(q + offsetof(gc_jd_queue_t, mutex));
 
   // copy in the queue structure
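
The added check is the classic "test before test-and-set" idea: take a cheap,
unlocked look at the queue head and only contend for the mutex when the queue
appears non-empty, so an idle SPE never slows down the PPE's inserts.  Below is
a minimal host-side sketch of the same pattern using pthreads in place of the
SPU's DMA-based peek and the gcell mutex; shared_queue_t, node_t and queue_pop
are illustrative names, not part of the gcell API.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct node { struct node *next; } node_t;

typedef struct {
  node_t          *head;    /* read without the lock for the cheap peek */
  pthread_mutex_t  mutex;
} shared_queue_t;           /* illustrative stand-in for gc_jd_queue_t */

/* Pop one element; return false right away if the queue looks empty.
 * The unlocked peek can race with producers (in strict C11 terms you would
 * use an atomic load), which is harmless here because head is re-checked
 * under the lock before anything is actually dequeued. */
bool
queue_pop(shared_queue_t *q, node_t **item)
{
  if (q->head == NULL)               /* peek: don't touch the lock */
    return false;

  pthread_mutex_lock(&q->mutex);     /* peek saw something: do it for real */
  if (q->head == NULL){              /* another consumer beat us to it */
    pthread_mutex_unlock(&q->mutex);
    return false;
  }
  *item = q->head;
  q->head = q->head->next;
  pthread_mutex_unlock(&q->mutex);
  return true;
}

static shared_queue_t q = { NULL, PTHREAD_MUTEX_INITIALIZER };

int
main(void)
{
  node_t *item;
  return queue_pop(&q, &item) ? 0 : 1;   /* empty queue: the peek bails out */
}

On the SPU the peek itself still costs a small DMA transfer, but it never
touches the mutex, which is what keeps the PPE's enqueue path uncontended.
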

Modified: gnuradio/branches/developers/eb/gcell/src/lib/spu/test_spu.c
===================================================================
--- gnuradio/branches/developers/eb/gcell/src/lib/spu/test_spu.c       2008-02-06 18:55:17 UTC (rev 7590)
+++ gnuradio/branches/developers/eb/gcell/src/lib/spu/test_spu.c       2008-02-06 20:36:38 UTC (rev 7591)
@@ -99,32 +99,38 @@
 
 // ------------------------------------------------------------------------
 
-//#define BACKOFF_CAP ((1 << 12) - 1)  // 4095 cycles, about 1.3 us
-//#define BACKOFF_CAP ((1 << 16) - 1)  // 65535 cycles, about 20.4 us
-#define BACKOFF_CAP ((1 << 18) - 1)    // 262143 cycles, about 81.9 us
-
 static unsigned int backoff;           // current backoff value in clock cycles
 static unsigned int _backoff_start;
-static unsigned int _backoff_shift;
-static unsigned int _backoff_addend;
+static unsigned int _backoff_cap;
 
+/*
+ * For 3.2 GHz SPE
+ *
+ * 12    4095 cycles    1.3 us
+ * 13    8191 cycles    2.6 us
+ * 14   16383 cycles    5.1 us
+ * 15   32767 cycles   10.2 us
+ * 16   65535 cycles   20.4 us
+ * 17  131071 cycles   40.8 us
+ * 18  262143 cycles   81.9 us
+ * 19  524287 cycles  163.8 us
+ * 20 1048575 cycles  327.7 us
+ * 21 2097151 cycles  655.4 us
+ */
+static unsigned char log2_backoff_start[16] = {
+  12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 16, 16
+};
+  
+static unsigned char log2_backoff_cap[16] = {
+  17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 21, 21
+};
+  
 void
 backoff_init(void)
 {
-  // if lots of spus backoff quicker
-  if (spu_args.nspus > 8){
-    _backoff_shift = 2;
-    _backoff_addend = 0x3;
-  }
-  else {
-    _backoff_shift = 1;
-    _backoff_addend = 0x1;
-  }
-
-  // give them 4 different starting points
-  int s = 1 << (spu_args.spu_idx & 0x3);
-  _backoff_start = (s + s - 1) & BACKOFF_CAP;
-
+  _backoff_cap   = (1 << (log2_backoff_cap[(spu_args.nspus - 1) & 0xf])) - 1;
+  _backoff_start = (1 << (log2_backoff_start[(spu_args.nspus - 1) & 0xf])) - 1;
+  
   backoff = _backoff_start;
 }
 
@@ -140,7 +146,7 @@
   gc_cdelay(backoff);
 
   // capped exponential backoff
-  backoff = ((backoff << _backoff_shift) + _backoff_addend) & BACKOFF_CAP;
+  backoff = ((backoff << 1) + 1) & _backoff_cap;
 }
 
 // ------------------------------------------------------------------------
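
The backoff change replaces the single compile-time BACKOFF_CAP plus per-SPU
shift/addend with a starting value and a cap looked up from small tables
indexed by (nspus - 1) & 0xf, so the delay window grows with the number of
competing SPEs while the update itself simplifies to
backoff = ((backoff << 1) + 1) & _backoff_cap.  Here is a self-contained
sketch of that capped exponential backoff; gc_cdelay and the SPU argument
block are replaced by a printf and an explicit nspus parameter, purely for
illustration.

#include <stdio.h>

/* log2 of the starting and maximum backoff, indexed by (nspus - 1) & 0xf;
 * the table contents mirror the ones added in this revision. */
static const unsigned char log2_backoff_start[16] = {
  12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 16, 16
};
static const unsigned char log2_backoff_cap[16] = {
  17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 21, 21
};

static unsigned int backoff;        /* current delay in clock cycles */
static unsigned int backoff_start;
static unsigned int backoff_cap;

static void
backoff_init(unsigned int nspus)
{
  unsigned int i = (nspus - 1) & 0xf;
  backoff_start = (1 << log2_backoff_start[i]) - 1;
  backoff_cap   = (1 << log2_backoff_cap[i]) - 1;
  backoff = backoff_start;
}

/* Delay, then double the (all-ones) delay until it reaches the cap:
 * backoff -> (2*backoff + 1) & cap. */
static void
backoff_delay(void)
{
  printf("would delay %u cycles\n", backoff);   /* gc_cdelay(backoff) on the SPU */
  backoff = ((backoff << 1) + 1) & backoff_cap;
}

int
main(void)
{
  backoff_init(8);          /* 8 SPEs: start at 2^14-1, cap at 2^19-1 cycles */
  for (int i = 0; i < 8; i++)
    backoff_delay();
  return 0;
}

With 8 SPEs the delay sequence runs 16383, 32767, 65535, 131071, 262143,
524287 cycles and then stays pinned at the cap.
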




